Add required Ollama setting

parent 94ef38cbba
commit f469b4619d
@@ -210,8 +210,8 @@ class OllamaSettings(BaseModel):
         description="Base URL of Ollama API. Example: 'https://localhost:11434'.",
     )
     embedding_api_base: str = Field(
-        api_base,  # default is same as api_base, unless specified differently
-        description="Base URL of Ollama embedding API. Defaults to the same value as api_base",
+        "http://localhost:11434",
+        description="Base URL of Ollama embedding API. Example: 'https://localhost:11434'.",
     )
     llm_model: str = Field(
         None,
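For context, a minimal sketch of the two base-URL fields as they read after this change, assuming a plain pydantic BaseModel. The default for api_base is not shown in the hunk above and is taken from the YAML below, and the real settings class contains further fields (llm_model, keep_alive, and so on) that are omitted here:

# Sketch only: trimmed to the two base-URL fields touched by this commit.
from pydantic import BaseModel, Field


class OllamaSettings(BaseModel):
    # Base URL used for LLM requests.
    api_base: str = Field(
        "http://localhost:11434",
        description="Base URL of Ollama API. Example: 'https://localhost:11434'.",
    )
    # Now its own required setting with a local default, instead of
    # implicitly falling back to api_base.
    embedding_api_base: str = Field(
        "http://localhost:11434",
        description="Base URL of Ollama embedding API. Example: 'https://localhost:11434'.",
    )


settings = OllamaSettings()
print(settings.api_base, settings.embedding_api_base)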
@@ -14,8 +14,8 @@ ollama:
   llm_model: mistral
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
+  embedding_api_base: http://localhost:11434 # change if your embedding model runs on another ollama
   keep_alive: 5m
-  # embedding_api_base: http://ollama_embedding:11434 # uncomment if your embedding model runs on another ollama
   tfs_z: 1.0 # Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.
   top_k: 40 # Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)
   top_p: 0.9 # Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)
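Because embedding_api_base is now an explicit setting rather than silently reusing api_base, it is easy to point it at the wrong host. A minimal pre-flight check (not part of this change), assuming the requests library and standard Ollama servers behind both URLs; Ollama's GET /api/tags lists the models pulled on an instance:

# Sketch: verify both configured Ollama endpoints are reachable.
import requests

API_BASE = "http://localhost:11434"            # ollama.api_base
EMBEDDING_API_BASE = "http://localhost:11434"  # ollama.embedding_api_base

for name, base in [("api_base", API_BASE), ("embedding_api_base", EMBEDDING_API_BASE)]:
    resp = requests.get(f"{base}/api/tags", timeout=5)
    resp.raise_for_status()
    models = [m["name"] for m in resp.json().get("models", [])]
    print(f"{name}: {base} is up, models available: {models}")

If the embedding model runs on another Ollama instance, only EMBEDDING_API_BASE needs to change, mirroring the comment on the new YAML line above.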
@@ -99,8 +99,8 @@ ollama:
   llm_model: llama2
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
+  embedding_api_base: http://localhost:11434 # change if your embedding model runs on another ollama
   keep_alive: 5m
-  # embedding_api_base: http://ollama_embedding:11434 # uncomment if your embedding model runs on another ollama
   request_timeout: 120.0

 azopenai:
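This hunk applies the same required setting to another ollama configuration block. To confirm that the instance behind embedding_api_base can actually serve the configured embedding model, one can call Ollama's embeddings endpoint directly; a minimal sketch, assuming requests is installed and nomic-embed-text has been pulled on that instance:

# Sketch: request one embedding from the instance behind embedding_api_base.
import requests

EMBEDDING_API_BASE = "http://localhost:11434"  # ollama.embedding_api_base

resp = requests.post(
    f"{EMBEDDING_API_BASE}/api/embeddings",
    json={"model": "nomic-embed-text", "prompt": "hello world"},
    timeout=30,
)
resp.raise_for_status()
embedding = resp.json()["embedding"]
print(f"Got an embedding of dimension {len(embedding)}")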