From 3b8e519f77dfd557c96aa29cdfaf480eaa7d8a8c Mon Sep 17 00:00:00 2001
From: RolandJAAI <38503289+RolandJAAI@users.noreply.github.com>
Date: Thu, 30 Jan 2025 01:04:30 +0100
Subject: [PATCH] minor example fix (#423)

---
 examples/agent_from_any_llm.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/examples/agent_from_any_llm.py b/examples/agent_from_any_llm.py
index eb07991..48675e4 100644
--- a/examples/agent_from_any_llm.py
+++ b/examples/agent_from_any_llm.py
@@ -22,6 +22,7 @@ elif chosen_inference == "ollama":
         model_id="ollama_chat/llama3.2",
         api_base="http://localhost:11434",  # replace with remote open-ai compatible server if necessary
         api_key="your-api-key",  # replace with API key if necessary
+        num_ctx=8192,  # Ollama's default is 2048, which will often fail horribly; 8192 works for easy tasks, more is better. Check https://huggingface.co/spaces/NyxKrage/LLM-Model-VRAM-Calculator to calculate how much VRAM this will need for the selected model.
     )
 
 elif chosen_inference == "litellm":
@@ -48,4 +49,4 @@ print("ToolCallingAgent:", agent.run("What's the weather like in Paris?"))
 
 agent = CodeAgent(tools=[get_weather], model=model)
 
-print("ToolCallingAgent:", agent.run("What's the weather like in Paris?"))
+print("CodeAgent:", agent.run("What's the weather like in Paris?"))
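
For reference, after applying this patch the Ollama branch of examples/agent_from_any_llm.py reads roughly like the sketch below. The imports and the get_weather tool are outside the hunks shown in the diff and are assumed here for illustration; only the num_ctx addition and the corrected "CodeAgent:" label are confirmed by the patch itself.

from smolagents import CodeAgent, LiteLLMModel, ToolCallingAgent, tool


@tool
def get_weather(location: str) -> str:
    """Get the current weather at the given location (dummy data, assumed for illustration).

    Args:
        location: The location to get the weather for.
    """
    return f"The weather in {location} is sunny with a light breeze."


model = LiteLLMModel(
    model_id="ollama_chat/llama3.2",
    api_base="http://localhost:11434",  # replace with a remote OpenAI-compatible server if necessary
    api_key="your-api-key",  # replace with an API key if necessary
    num_ctx=8192,  # Ollama's 2048-token default is too small for agent prompts; raise it further for harder tasks
)

agent = ToolCallingAgent(tools=[get_weather], model=model)
print("ToolCallingAgent:", agent.run("What's the weather like in Paris?"))

agent = CodeAgent(tools=[get_weather], model=model)
print("CodeAgent:", agent.run("What's the weather like in Paris?"))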