Add Azure OpenAI support (#282)
* Added support for connecting to Azure OpenAI via AzureOpenAIServerModel
parent ec45d6766a
commit a721837c57
@@ -27,10 +27,11 @@ To initialize a minimal agent, you need at least these two arguments:
    - [`TransformersModel`] takes a pre-initialized `transformers` pipeline to run inference on your local machine using `transformers`.
    - [`HfApiModel`] leverages a `huggingface_hub.InferenceClient` under the hood.
    - [`LiteLLMModel`] lets you call 100+ different models through [LiteLLM](https://docs.litellm.ai/)!
    - [`AzureOpenAIServerModel`] allows you to use OpenAI models deployed in [Azure](https://azure.microsoft.com/en-us/products/ai-services/openai-service).

- `tools`, a list of `Tools` that the agent can use to solve the task. It can be an empty list. You can also add the default toolbox on top of your `tools` list by defining the optional argument `add_base_tools=True`.

Once you have these two arguments, `tools` and `model`, you can create an agent and run it. You can use any LLM you'd like, either through [Hugging Face API](https://huggingface.co/docs/api-inference/en/index), [transformers](https://github.com/huggingface/transformers/), [ollama](https://ollama.com/), or [LiteLLM](https://www.litellm.ai/).
Once you have these two arguments, `tools` and `model`, you can create an agent and run it. You can use any LLM you'd like, either through [Hugging Face API](https://huggingface.co/docs/api-inference/en/index), [transformers](https://github.com/huggingface/transformers/), [ollama](https://ollama.com/), [LiteLLM](https://www.litellm.ai/), or [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service).

<hfoptions id="Pick a LLM">
<hfoption id="Hugging Face API">
@@ -103,6 +104,49 @@ agent.run(
|     "Could you give me the 118th number in the Fibonacci sequence?", | ||||
| ) | ||||
| ``` | ||||
| </hfoption> | ||||
| <hfoption id="Azure OpenAI"> | ||||
| 
 | ||||
To connect to Azure OpenAI, you can either use `AzureOpenAIServerModel` directly, or use `LiteLLMModel` and configure it accordingly.

To initialize an instance of `AzureOpenAIServerModel`, you need to pass your model deployment name and then either pass the `azure_endpoint`, `api_key`, and `api_version` arguments, or set the environment variables `AZURE_OPENAI_ENDPOINT`, `AZURE_OPENAI_API_KEY`, and `OPENAI_API_VERSION`.

```python
# !pip install smolagents[openai]
from smolagents import CodeAgent, AzureOpenAIServerModel

model = AzureOpenAIServerModel(model_id="gpt-4o-mini")
agent = CodeAgent(tools=[], model=model, add_base_tools=True)

agent.run(
    "Could you give me the 118th number in the Fibonacci sequence?",
)
```

Similarly, you can configure `LiteLLMModel` to connect to Azure OpenAI as follows:

- pass your model deployment name as `model_id`, and make sure to prefix it with `azure/`
- set the environment variable `AZURE_API_VERSION`
- either pass the `api_base` and `api_key` arguments, or set the environment variables `AZURE_API_KEY` and `AZURE_API_BASE`

```python
import os
from smolagents import CodeAgent, LiteLLMModel

AZURE_OPENAI_CHAT_DEPLOYMENT_NAME = "gpt-35-turbo-16k-deployment"  # example deployment name

os.environ["AZURE_API_KEY"] = ""  # api_key
os.environ["AZURE_API_BASE"] = ""  # e.g. "https://example-endpoint.openai.azure.com"
os.environ["AZURE_API_VERSION"] = ""  # e.g. "2024-10-01-preview"

model = LiteLLMModel(model_id="azure/" + AZURE_OPENAI_CHAT_DEPLOYMENT_NAME)
agent = CodeAgent(tools=[], model=model, add_base_tools=True)

agent.run(
    "Could you give me the 118th number in the Fibonacci sequence?",
)
```
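
The `azure/` prefix is what tells LiteLLM to route the request through its Azure OpenAI provider rather than the public OpenAI API.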

</hfoption>
</hfoptions>

@@ -148,7 +148,7 @@ print(model(messages))

[[autodoc]] LiteLLMModel

### OpenAiServerModel
### OpenAIServerModel

This class lets you call any model served behind an OpenAI-compatible API.
Here's how you can set it up (you can customize the `api_base` URL to point to another server):
@@ -161,3 +161,28 @@ model = OpenAIServerModel(
    api_key=os.environ["OPENAI_API_KEY"],
)
```
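
The diff above only shows the tail of this snippet; a complete version might look like the following sketch, where the `model_id` and `api_base` values are placeholders:

```py
import os

from smolagents import OpenAIServerModel

model = OpenAIServerModel(
    model_id="gpt-4o",  # placeholder model name
    api_base="https://api.openai.com/v1",  # customize this URL to point to another server
    api_key=os.environ["OPENAI_API_KEY"],
)
```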

[[autodoc]] OpenAIServerModel

### AzureOpenAIServerModel

`AzureOpenAIServerModel` allows you to connect to any Azure OpenAI deployment.

Below is an example of how to set it up. Note that you can omit the `azure_endpoint`, `api_key`, and `api_version` arguments, provided you've set the corresponding environment variables: `AZURE_OPENAI_ENDPOINT`, `AZURE_OPENAI_API_KEY`, and `OPENAI_API_VERSION`.

Pay attention to the lack of an `AZURE_` prefix for `OPENAI_API_VERSION`; this is due to the way the underlying [openai](https://github.com/openai/openai-python) package is designed.

```py
import os

from smolagents import AzureOpenAIServerModel

model = AzureOpenAIServerModel(
    model_id=os.environ.get("AZURE_OPENAI_MODEL"),
    azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
    api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
    api_version=os.environ.get("OPENAI_API_VERSION"),
)
```
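
Once created, the model can be passed to an agent like any other model class, for example:

```py
from smolagents import CodeAgent

agent = CodeAgent(tools=[], model=model, add_base_tools=True)
agent.run("Could you give me the 118th number in the Fibonacci sequence?")
```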

[[autodoc]] AzureOpenAIServerModel
@@ -620,6 +620,45 @@ class OpenAIServerModel(Model):
        return message


class AzureOpenAIServerModel(OpenAIServerModel):
    """This model connects to an Azure OpenAI deployment.

    Parameters:
        model_id (`str`):
            The model deployment name to use when connecting (e.g. "gpt-4o-mini").
        azure_endpoint (`str`, *optional*):
            The Azure endpoint, including the resource, e.g. `https://example-resource.openai.azure.com/`. If not provided, it will be inferred from the `AZURE_OPENAI_ENDPOINT` environment variable.
        api_key (`str`, *optional*):
            The API key to use for authentication. If not provided, it will be inferred from the `AZURE_OPENAI_API_KEY` environment variable.
        api_version (`str`, *optional*):
            The API version to use. If not provided, it will be inferred from the `OPENAI_API_VERSION` environment variable.
        custom_role_conversions (`Dict[str, str]`, *optional*):
            Custom role conversion mapping to convert message roles into others.
            Useful for specific models that do not support specific message roles like "system".
        **kwargs:
            Additional keyword arguments to pass to the Azure OpenAI API.
    """

    def __init__(
        self,
        model_id: str,
        azure_endpoint: Optional[str] = None,
        api_key: Optional[str] = None,
        api_version: Optional[str] = None,
        custom_role_conversions: Optional[Dict[str, str]] = None,
        **kwargs,
    ):
        # Read the API key manually, to avoid super().__init__() falling back to the wrong env var (OPENAI_API_KEY)
        if api_key is None:
            api_key = os.environ.get("AZURE_OPENAI_API_KEY")

        super().__init__(model_id=model_id, api_key=api_key, custom_role_conversions=custom_role_conversions, **kwargs)
        # If we've reached this point, the openai package is available (checked in the base class), so go ahead and import it
        import openai

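        # Override the client created by the parent class with an Azure-specific one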
        self.client = openai.AzureOpenAI(api_key=api_key, api_version=api_version, azure_endpoint=azure_endpoint)


__all__ = [
    "MessageRole",
    "tool_role_conversions",
@@ -629,5 +668,6 @@ __all__ = [
    "HfApiModel",
    "LiteLLMModel",
    "OpenAIServerModel",
    "AzureOpenAIServerModel",
    "ChatMessage",
]