From b3dfaddd434a4610d0ffa6d18e9ca0f96fb2e170 Mon Sep 17 00:00:00 2001 From: Robert Haase Date: Mon, 30 Dec 2024 12:41:01 +0100 Subject: [PATCH 1/3] make compatible with remote openai-compatible servers --- src/smolagents/models.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/smolagents/models.py b/src/smolagents/models.py index 13e0f52..deeb7fc 100644 --- a/src/smolagents/models.py +++ b/src/smolagents/models.py @@ -410,11 +410,13 @@ class TransformersModel(Model): class LiteLLMModel(Model): - def __init__(self, model_id="anthropic/claude-3-5-sonnet-20240620"): + def __init__(self, model_id="anthropic/claude-3-5-sonnet-20240620", api_base=None, api_key=None): super().__init__() self.model_id = model_id # IMPORTANT - Set this to TRUE to add the function to the prompt for Non OpenAI LLMs litellm.add_function_to_prompt = True + self.api_base = api_base + self.api_key = api_key def __call__( self, @@ -432,6 +434,8 @@ class LiteLLMModel(Model): messages=messages, stop=stop_sequences, max_tokens=max_tokens, + api_base=self.api_base, + api_key=self.api_key, ) self.last_input_token_count = response.usage.prompt_tokens self.last_output_token_count = response.usage.completion_tokens @@ -454,6 +458,8 @@ class LiteLLMModel(Model): tool_choice="required", stop=stop_sequences, max_tokens=max_tokens, + api_base=self.api_base, + api_key=self.api_key, ) tool_calls = response.choices[0].message.tool_calls[0] self.last_input_token_count = response.usage.prompt_tokens From 56a4592521e3692e259f5854e19afc73b54fa608 Mon Sep 17 00:00:00 2001 From: Robert Haase Date: Mon, 30 Dec 2024 12:41:20 +0100 Subject: [PATCH 2/3] add example using ollama --- examples/tool_calling_agent_ollama.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 examples/tool_calling_agent_ollama.py diff --git a/examples/tool_calling_agent_ollama.py b/examples/tool_calling_agent_ollama.py new file mode 100644 index 0000000..58b65e0 --- /dev/null +++ 
b/examples/tool_calling_agent_ollama.py @@ -0,0 +1,25 @@ +from smolagents.agents import ToolCallingAgent +from smolagents import tool, HfApiModel, TransformersModel, LiteLLMModel, Model +from smolagents.tools import Tool +from smolagents.models import get_clean_message_list, tool_role_conversions +from typing import Optional + +model = LiteLLMModel(model_id="openai/llama3.2", + api_base="http://localhost:11434/v1", # replace with remote OpenAI-compatible server if necessary + api_key="your-api-key") # replace with API key if necessary + +@tool +def get_weather(location: str, celsius: Optional[bool] = False) -> str: + """ + Get weather in the next days at given location. + Secretly this tool does not care about the location, it hates the weather everywhere. + + Args: + location: the location + celsius: whether to return the temperature in Celsius + """ + return "The weather is UNGODLY with torrential rains and temperatures below -10°C" + +agent = ToolCallingAgent(tools=[get_weather], model=model) + +print(agent.run("What's the weather like in Paris?")) \ No newline at end of file From 8d388ed217adfc49816fba07609a1393b6ef8e68 Mon Sep 17 00:00:00 2001 From: Robert Haase Date: Mon, 30 Dec 2024 12:48:56 +0100 Subject: [PATCH 3/3] simplify example code --- examples/tool_calling_agent_ollama.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/examples/tool_calling_agent_ollama.py b/examples/tool_calling_agent_ollama.py index 58b65e0..0393549 100644 --- a/examples/tool_calling_agent_ollama.py +++ b/examples/tool_calling_agent_ollama.py @@ -1,7 +1,5 @@ from smolagents.agents import ToolCallingAgent -from smolagents import tool, HfApiModel, TransformersModel, LiteLLMModel, Model -from smolagents.tools import Tool -from smolagents.models import get_clean_message_list, tool_role_conversions +from smolagents import tool, LiteLLMModel from typing import Optional model = LiteLLMModel(model_id="openai/llama3.2",