Make doc buildable with new names
commit 775f6e8d7f · parent 021ece34ca
@@ -47,7 +47,7 @@ Once you have setup the `doc-builder` and additional packages with the pip install,
 you can generate the documentation by typing the following command:
 
 ```bash
-doc-builder build agents docs/source/ --build_dir ~/tmp/test-build
+doc-builder build smolagents docs/source/ --build_dir ~/tmp/test-build
 ```
 
 You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate
@@ -59,7 +59,7 @@ Markdown editor.
 To preview the docs, run the following command:
 
 ```bash
-doc-builder preview agents docs/source/
+doc-builder preview smolagents docs/source/
 ```
 
 The docs will be viewable at [http://localhost:5173](http://localhost:5173). You can also preview the docs once you
@@ -30,21 +30,18 @@ contains the API docs for the underlying classes.
 Our agents inherit from [`MultiStepAgent`], which means they can act in multiple steps, each step consisting of one thought, then one tool call and execution. Read more in [this conceptual guide](../conceptual_guides/react).
 
 We provide two types of agents, based on the main [`Agent`] class.
+  - [`CodeAgent`] is the default agent, it writes its tool calls in Python code.
   - [`JsonAgent`] writes its tool calls in JSON.
-  - [`CodeAgent`] writes its tool calls in Python code.
-
-### BaseAgent
-
-[[autodoc]] BaseAgent
 
 
-### React agents
+### Classes of agents
 
 [[autodoc]] MultiStepAgent
 
+[[autodoc]] CodeAgent
+
 [[autodoc]] JsonAgent
 
-[[autodoc]] CodeAgent
-
 ### ManagedAgent
 
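For orientation, instantiating the two agent classes under the new `smolagents` name might look like the sketch below. The exact constructor arguments (in particular how the LLM backend is passed) and the package-root re-exports are assumptions, not taken from this diff; only `run` and the `tools` parameter are suggested by the code changed in this commit.

```python
# Minimal sketch only: the `model=` argument and the root-level imports are assumptions.
from smolagents import CodeAgent, JsonAgent, DuckDuckGoSearchTool

llm = ...  # whichever LLM engine/model wrapper your smolagents version provides

code_agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=llm)  # writes its tool calls as Python code
json_agent = JsonAgent(tools=[DuckDuckGoSearchTool()], model=llm)  # writes its tool calls as JSON

code_agent.run("What is the current weather in Paris?")
```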
@@ -71,12 +71,12 @@ These types have three specific purposes:
 
 ### AgentText
 
-[[autodoc]] agents.types.AgentText
+[[autodoc]] smolagents.types.AgentText
 
 ### AgentImage
 
-[[autodoc]] agents.types.AgentImage
+[[autodoc]] smolagents.types.AgentImage
 
 ### AgentAudio
 
-[[autodoc]] agents.types.AgentAudio
+[[autodoc]] smolagents.types.AgentAudio
 
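As a quick illustration of why these wrapper types exist, the sketch below treats them as plain Python objects while they still carry media semantics. The specific behaviours (`AgentText` subclassing `str`, `AgentImage` accepting a path and exposing `to_raw()`) are assumed from the earlier `transformers.agents` types and are not confirmed by this diff.

```python
# Sketch under assumptions: AgentText behaves like str, AgentImage wraps an image file.
from smolagents.types import AgentText, AgentImage

text = AgentText("final answer")
assert isinstance(text, str)      # assumed: AgentText subclasses str

image = AgentImage("photo.png")   # assumed: accepts a file path and loads it lazily
raw = image.to_raw()              # assumed accessor returning the underlying PIL.Image
```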
@@ -22,6 +22,7 @@ dependencies = [
     "duckduckgo-search>=6.3.7",
     "python-dotenv>=1.0.1",
     "e2b-code-interpreter>=1.0.3",
+    "torch>=2.5.1",
 ]
 
 [project.optional-dependencies]
@@ -177,6 +177,7 @@ class MultiStepAgent:
     Agent class that solves the given task step by step, using the ReAct framework:
     While the objective is not reached, the agent will perform a cycle of action (given by the LLM) and observation (obtained from the environment).
     """
+
     def __init__(
         self,
         tools: Union[List[Tool], Toolbox],
@@ -378,7 +379,6 @@ class MultiStepAgent:
             )
         return rationale.strip(), action.strip()
 
-
     def provide_final_answer(self, task) -> str:
         """
         This method provides a final answer to the task, based on the logs of the agent's interactions.
@@ -1148,7 +1148,6 @@ class ManagedAgent:
 
 __all__ = [
     "AgentError",
-    "BaseAgent",
     "ManagedAgent",
     "MultiStepAgent",
    "CodeAgent",
 
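The `MultiStepAgent` docstring above summarizes the ReAct loop in one sentence; the sketch below spells that cycle out. Only `provide_final_answer` appears in this diff; the other method names are hypothetical and serve purely to illustrate the thought, action, observation cycle.

```python
# Illustrative only: plan_next_action, execute and is_final_answer are hypothetical names.
def react_loop(agent, task: str, max_steps: int = 6):
    memory = []
    for _ in range(max_steps):
        thought, action = agent.plan_next_action(task, memory)  # LLM proposes the next tool call
        observation = agent.execute(action)                     # run the call, capture the result
        memory.append((thought, action, observation))
        if agent.is_final_answer(action):                       # stop once a final answer is produced
            return observation
    return agent.provide_final_answer(task)                     # shown in this diff: answer built from the logs
```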
@@ -18,11 +18,13 @@ import json
 import re
 from dataclasses import dataclass
 from typing import Dict
-import torch
 from huggingface_hub import hf_hub_download, list_spaces
 
 from transformers.utils import is_offline_mode
-from transformers.models.whisper import WhisperProcessor, WhisperForConditionalGeneration
+from transformers.models.whisper import (
+    WhisperProcessor,
+    WhisperForConditionalGeneration,
+)
 
 from .local_python_executor import (
     BASE_BUILTIN_MODULES,
@@ -136,10 +138,6 @@ class UserInputTool(Tool):
         user_input = input(f"{question} => ")
         return user_input
 
-import re
-
-from .tools import Tool
-
 
 class DuckDuckGoSearchTool(Tool):
     name = "web_search"
@@ -221,4 +219,11 @@ class SpeechToTextTool(PipelineTool):
         return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
 
 
-__all__ = ["PythonInterpreterTool", "FinalAnswerTool", "UserInputTool", "DuckDuckGoSearchTool", "VisitWebpageTool", "SpeechToTextTool"]
+__all__ = [
+    "PythonInterpreterTool",
+    "FinalAnswerTool",
+    "UserInputTool",
+    "DuckDuckGoSearchTool",
+    "VisitWebpageTool",
+    "SpeechToTextTool",
+]
 
@@ -15,7 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from .types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
-from .agents import BaseAgent, AgentStep, ActionStep
+from .agents import MultiStepAgent, AgentStep, ActionStep
 import gradio as gr
 
 
@@ -83,7 +83,7 @@ def stream_to_gradio(
 class GradioUI:
     """A one-line interface to launch your agent in Gradio"""
 
-    def __init__(self, agent: BaseAgent):
+    def __init__(self, agent: MultiStepAgent):
         self.agent = agent
 
     def interact_with_agent(self, prompt, messages):
 
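Since `GradioUI` now takes a `MultiStepAgent`, the "one-line interface" can be used roughly as below. Only the constructor signature comes from this diff; the `launch()` method and the root-level imports are assumptions.

```python
# Sketch under assumptions: launch() and the package-root imports are not shown in this diff.
from smolagents import CodeAgent, GradioUI

agent = CodeAgent(tools=[], model=...)  # model placeholder; see the agent sketch earlier
GradioUI(agent).launch()                # constructor signature shown in the diff; launch() assumed
```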
@@ -20,6 +20,7 @@ import inspect
 import json
 import os
 import tempfile
+import torch
 import textwrap
 from functools import lru_cache, wraps
 from pathlib import Path
@@ -42,6 +43,7 @@ from transformers.utils import (
     is_torch_available,
 )
 from transformers.dynamic_module_utils import get_imports
+from transformers import AutoProcessor
 
 from .types import ImageType, handle_agent_input_types, handle_agent_output_types
 from .utils import instance_to_source
@@ -753,7 +755,7 @@ def launch_gradio_demo(tool: Tool):
 TOOL_MAPPING = {
     "python_interpreter": "PythonInterpreterTool",
     "web_search": "DuckDuckGoSearchTool",
-    "transcriber": "SpeechToTextTool"
+    "transcriber": "SpeechToTextTool",
 }
 
 
@@ -1004,8 +1006,6 @@ class Toolbox:
             toolbox_description += f"\t{tool.name}: {tool.description}\n"
         return toolbox_description
 
-from transformers import AutoProcessor
-from .types import handle_agent_input_types, handle_agent_output_types
 
 class PipelineTool(Tool):
     """
@@ -1073,7 +1073,9 @@ class PipelineTool(Tool):
 
         if model is None:
             if self.default_checkpoint is None:
-                raise ValueError("This tool does not implement a default checkpoint, you need to pass one.")
+                raise ValueError(
+                    "This tool does not implement a default checkpoint, you need to pass one."
+                )
             model = self.default_checkpoint
         if pre_processor is None:
             pre_processor = model
@@ -1098,15 +1100,21 @@ class PipelineTool(Tool):
         from accelerate import PartialState
 
         if isinstance(self.pre_processor, str):
-            self.pre_processor = self.pre_processor_class.from_pretrained(self.pre_processor, **self.hub_kwargs)
+            self.pre_processor = self.pre_processor_class.from_pretrained(
+                self.pre_processor, **self.hub_kwargs
+            )
 
         if isinstance(self.model, str):
-            self.model = self.model_class.from_pretrained(self.model, **self.model_kwargs, **self.hub_kwargs)
+            self.model = self.model_class.from_pretrained(
+                self.model, **self.model_kwargs, **self.hub_kwargs
+            )
 
         if self.post_processor is None:
             self.post_processor = self.pre_processor
         elif isinstance(self.post_processor, str):
-            self.post_processor = self.post_processor_class.from_pretrained(self.post_processor, **self.hub_kwargs)
+            self.post_processor = self.post_processor_class.from_pretrained(
+                self.post_processor, **self.hub_kwargs
+            )
 
         if self.device is None:
             if self.device_map is not None:
@@ -1149,8 +1157,12 @@ class PipelineTool(Tool):
         import torch
         from accelerate.utils import send_to_device
 
-        tensor_inputs = {k: v for k, v in encoded_inputs.items() if isinstance(v, torch.Tensor)}
-        non_tensor_inputs = {k: v for k, v in encoded_inputs.items() if not isinstance(v, torch.Tensor)}
+        tensor_inputs = {
+            k: v for k, v in encoded_inputs.items() if isinstance(v, torch.Tensor)
+        }
+        non_tensor_inputs = {
+            k: v for k, v in encoded_inputs.items() if not isinstance(v, torch.Tensor)
+        }
 
         encoded_inputs = send_to_device(tensor_inputs, self.device)
         outputs = self.forward({**encoded_inputs, **non_tensor_inputs})
@@ -1159,6 +1171,7 @@ class PipelineTool(Tool):
 
         return handle_agent_output_types(decoded_outputs, self.output_type)
 
+
 __all__ = [
     "AUTHORIZED_TYPES",
     "Tool",
 
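The `PipelineTool` changes above show the general pattern: `setup()` resolves the checkpoint, pre-processor and post-processor via `from_pretrained`, and the call path encodes inputs, moves tensors to the right device, forwards, then decodes. A hypothetical subclass following that pattern could look like the sketch below; the checkpoint, class attributes and import path are illustrative assumptions, not part of this commit.

```python
# Sketch only: assumes PipelineTool is importable from smolagents.tools and that
# tools declare name/description/inputs/output_type as elsewhere in the library.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from smolagents.tools import PipelineTool

class SummarizerTool(PipelineTool):
    default_checkpoint = "sshleifer/distilbart-cnn-12-6"  # example Hub checkpoint
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    name = "summarizer"
    description = "Summarizes a long English text."
    inputs = {"text": {"type": "string", "description": "The text to summarize."}}
    output_type = "string"

    def encode(self, text):
        # Tokenize the input text into model-ready tensors.
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        # Generate summary token ids; setup() has already placed the model on self.device.
        return self.model.generate(**inputs)

    def decode(self, outputs):
        # Decode back to a plain string, mirroring SpeechToTextTool above.
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
```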