feat: make llama3.1 the default (#2022)

* feat: change ollama default model to llama3.1

* chore: bump versions

* feat: Change default model in local mode to llama3.1

* chore: make sure the latest poetry version is used

* fix: mypy

* fix: do not add BOS (with the latest llama-cpp-python version)
Javier Martinez authored on 2024-07-31 14:35:36 +02:00; committed by GitHub
parent e54a8fe043
commit 9027d695c1
15 changed files with 2227 additions and 2419 deletions


@@ -8,7 +8,7 @@ inputs:
   poetry_version:
     required: true
     type: string
-    default: "1.5.1"
+    default: "1.8.3"
 runs:
   using: composite


@@ -2,7 +2,8 @@ FROM python:3.11.6-slim-bookworm as base
 # Install poetry
 RUN pip install pipx
-RUN pipx install poetry
+RUN python3 -m pipx ensurepath
+RUN pipx install poetry==1.8.3
 ENV PATH="/root/.local/bin:$PATH"
 ENV PATH=".venv/bin/:$PATH"


@@ -4,7 +4,8 @@ FROM python:3.11.6-slim-bookworm as base
 # Install poetry
 RUN pip install pipx
-RUN pipx install poetry
+RUN python3 -m pipx ensurepath
+RUN pipx install poetry==1.8.3
 ENV PATH="/root/.local/bin:$PATH"
 ENV PATH=".venv/bin/:$PATH"


@@ -28,6 +28,11 @@ pyenv local 3.11
 Install [Poetry](https://python-poetry.org/docs/#installing-with-the-official-installer) for dependency management:
 Follow the instructions on the official Poetry website to install it.
+<Callout intent="warning">
+A bug exists in Poetry versions 1.7.0 and earlier. We strongly recommend upgrading to a tested version.
+To upgrade Poetry to the latest tested version, run `poetry self update 1.8.3` after installing it.
+</Callout>
 ### 4. Optional: Install `make`
 To run various scripts, you need to install `make`. Follow the instructions for your operating system:
 #### macOS
@@ -135,14 +140,14 @@ Now, start Ollama service (it will start a local inference server, serving both
 ollama serve
 ```
-Install the models to be used, the default settings-ollama.yaml is configured to use mistral 7b LLM (~4GB) and nomic-embed-text Embeddings (~275MB)
+Install the models to be used, the default settings-ollama.yaml is configured to use llama3.1 8b LLM (~4GB) and nomic-embed-text Embeddings (~275MB)
 By default, PGPT will automatically pull models as needed. This behavior can be changed by modifying the `ollama.autopull_models` property.
 In any case, if you want to manually pull models, run the following commands:
 ```bash
-ollama pull mistral
+ollama pull llama3.1
 ollama pull nomic-embed-text
 ```
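As an aside (not part of this commit), the same pull can be scripted instead of typed at the CLI. A minimal sketch, assuming the official `ollama` Python client is installed and the server started by `ollama serve` is reachable on its default port:

```python
# Sketch only: pull the new default models through the local Ollama server,
# mirroring the `ollama pull ...` commands shown in the docs above.
import ollama

for model in ("llama3.1", "nomic-embed-text"):
    ollama.pull(model)  # downloads the model if it is not already present
    print(f"pulled {model}")
```

This mirrors what PGPT's automatic pulling does when `ollama.autopull_models` is left enabled.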


@@ -24,7 +24,7 @@ PrivateGPT uses the `AutoTokenizer` library to tokenize input text accurately. I
 In your `settings.yaml` file, specify the model you want to use:
 ```yaml
 llm:
-  tokenizer: mistralai/Mistral-7B-Instruct-v0.2
+  tokenizer: meta-llama/Meta-Llama-3.1-8B-Instruct
 ```
 2. **Set Access Token for Gated Models:**
 If you are using a gated model, ensure the `access_token` is set as mentioned in the previous section.
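Since `meta-llama/Meta-Llama-3.1-8B-Instruct` is a gated repository, the tokenizer can only be downloaded once an access token is configured. A minimal sketch of the underlying call, assuming the `transformers` library and a valid Hugging Face token (the `hf_...` value below is a placeholder):

```python
# Sketch only: load the gated Llama 3.1 tokenizer directly, which is roughly
# what the AutoTokenizer-based setup described above does behind the scenes.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "meta-llama/Meta-Llama-3.1-8B-Instruct",
    token="hf_your_access_token",  # placeholder; use your own gated-model token
)
print(tokenizer.encode("Hello, how are you doing?")[:5])
```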

poetry.lock (generated, 4581 changed lines)

File diff suppressed because it is too large.


@@ -169,7 +169,7 @@ class Llama3PromptStyle(AbstractPromptStyle):
     """
     def _messages_to_prompt(self, messages: Sequence[ChatMessage]) -> str:
-        prompt = self.BOS
+        prompt = ""
         has_system_message = False
         for i, message in enumerate(messages):
@@ -189,8 +189,7 @@ class Llama3PromptStyle(AbstractPromptStyle):
         # Add default system prompt if no system message was provided
         if not has_system_message:
             prompt = (
-                f"{self.BOS}{self.B_SYS}\n\n{self.DEFAULT_SYSTEM_PROMPT}{self.E_SYS}"
-                + prompt[len(self.BOS) :]
+                f"{self.B_SYS}\n\n{self.DEFAULT_SYSTEM_PROMPT}{self.E_SYS}" + prompt
             )
         # TODO: Implement tool handling logic
@@ -199,7 +198,7 @@ class Llama3PromptStyle(AbstractPromptStyle):
     def _completion_to_prompt(self, completion: str) -> str:
         return (
-            f"{self.BOS}{self.B_SYS}\n\n{self.DEFAULT_SYSTEM_PROMPT}{self.E_SYS}"
+            f"{self.B_SYS}\n\n{self.DEFAULT_SYSTEM_PROMPT}{self.E_SYS}"
             f"{self.B_INST}user{self.E_INST}\n\n{completion.strip()}{self.EOT}"
             f"{self.ASSISTANT_INST}\n\n"
         )
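To make the effect of the BOS removal concrete, here is a minimal sketch (not part of the diff) of the prompt the patched `Llama3PromptStyle` now emits for a plain completion. The tag values are inferred from the test expectations further down; `DEFAULT_SYSTEM_PROMPT` is a stand-in, and in the real class all of these are attributes (`B_SYS`, `E_SYS`, `B_INST`, `E_INST`, `EOT`, `ASSISTANT_INST`):

```python
# Sketch only: tag values inferred from the Llama 3 prompt tests below.
B_SYS = "<|start_header_id|>system<|end_header_id|>"
E_SYS = "<|eot_id|>"
B_INST = "<|start_header_id|>"
E_INST = "<|end_header_id|>"
EOT = "<|eot_id|>"
ASSISTANT_INST = "<|start_header_id|>assistant<|end_header_id|>"
DEFAULT_SYSTEM_PROMPT = "You are a helpful assistant"  # stand-in value


def completion_to_prompt(completion: str) -> str:
    # No "<|begin_of_text|>" prefix any more: the current llama-cpp-python
    # adds the BOS token itself when the prompt is tokenized.
    return (
        f"{B_SYS}\n\n{DEFAULT_SYSTEM_PROMPT}{E_SYS}"
        f"{B_INST}user{E_INST}\n\n{completion.strip()}{EOT}"
        f"{ASSISTANT_INST}\n\n"
    )


print(completion_to_prompt("What is the capital of France?"))
```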


@@ -37,6 +37,7 @@ def create_app(root_injector: Injector) -> FastAPI:
     # Add LlamaIndex simple observability
     global_handler = create_global_handler("simple")
-    LlamaIndexSettings.callback_manager = CallbackManager([global_handler])
+    if global_handler is not None:
+        LlamaIndexSettings.callback_manager = CallbackManager([global_handler])
     settings = root_injector.get(Settings)


@@ -9,8 +9,8 @@ embedding:
   mode: ${PGPT_EMBED_MODE:mock}
 llamacpp:
-  llm_hf_repo_id: ${PGPT_HF_REPO_ID:TheBloke/Mistral-7B-Instruct-v0.1-GGUF}
-  llm_hf_model_file: ${PGPT_HF_MODEL_FILE:mistral-7b-instruct-v0.1.Q4_K_M.gguf}
+  llm_hf_repo_id: ${PGPT_HF_REPO_ID:lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF}
+  llm_hf_model_file: ${PGPT_HF_MODEL_FILE:Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf}
 huggingface:
   embedding_hf_model_name: ${PGPT_EMBEDDING_HF_MODEL_NAME:BAAI/bge-small-en-v1.5}
@@ -20,7 +20,7 @@ sagemaker:
   embedding_endpoint_name: ${PGPT_SAGEMAKER_EMBEDDING_ENDPOINT_NAME:}
 ollama:
-  llm_model: ${PGPT_OLLAMA_LLM_MODEL:mistral}
+  llm_model: ${PGPT_OLLAMA_LLM_MODEL:llama3.1}
   embedding_model: ${PGPT_OLLAMA_EMBEDDING_MODEL:nomic-embed-text}
   api_base: ${PGPT_OLLAMA_API_BASE:http://ollama:11434}
   embedding_api_base: ${PGPT_OLLAMA_EMBEDDING_API_BASE:http://ollama:11434}


@@ -7,12 +7,12 @@ llm:
   # Should be matching the selected model
   max_new_tokens: 512
   context_window: 3900
-  tokenizer: mistralai/Mistral-7B-Instruct-v0.2
-  prompt_style: "mistral"
+  tokenizer: meta-llama/Meta-Llama-3.1-8B-Instruct
+  prompt_style: "llama3"
 llamacpp:
-  llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF
-  llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf
+  llm_hf_repo_id: lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF
+  llm_hf_model_file: Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf
 embedding:
   mode: huggingface


@@ -14,7 +14,7 @@ embedding:
   embed_dim: 768
 ollama:
-  llm_model: mistral
+  llm_model: llama3.1
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434


@@ -11,7 +11,7 @@ embedding:
   mode: ollama
 ollama:
-  llm_model: mistral
+  llm_model: llama3.1
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
   embedding_api_base: http://localhost:11434 # change if your embedding model runs on another ollama


@@ -4,7 +4,7 @@ server:
 llm:
   mode: openailike
   max_new_tokens: 512
-  tokenizer: mistralai/Mistral-7B-Instruct-v0.2
+  tokenizer: meta-llama/Meta-Llama-3.1-8B-Instruct
   temperature: 0.1
 embedding:


@@ -39,12 +39,12 @@ ui:
 llm:
   mode: llamacpp
-  prompt_style: "mistral"
+  prompt_style: "llama3"
   # Should be matching the selected model
   max_new_tokens: 512
   context_window: 3900
   # Select your tokenizer. Llama-index tokenizer is the default.
-  # tokenizer: mistralai/Mistral-7B-Instruct-v0.2
+  # tokenizer: meta-llama/Meta-Llama-3.1-8B-Instruct
   temperature: 0.1 # The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual. (Default: 0.1)
 rag:
@@ -65,8 +65,8 @@ clickhouse:
   database: embeddings
 llamacpp:
-  llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF
-  llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf
+  llm_hf_repo_id: lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF
+  llm_hf_model_file: Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf
   tfs_z: 1.0 # Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting
   top_k: 40 # Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)
   top_p: 1.0 # Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)
@@ -114,7 +114,7 @@ openai:
   embedding_api_key: ${OPENAI_API_KEY:}
 ollama:
-  llm_model: llama2
+  llm_model: llama3.1
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
   embedding_api_base: http://localhost:11434 # change if your embedding model runs on another ollama


@@ -150,7 +150,7 @@ def test_llama3_prompt_style_format():
     ]
     expected_prompt = (
-        "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
+        "<|start_header_id|>system<|end_header_id|>\n\n"
         "You are a helpful assistant<|eot_id|>"
         "<|start_header_id|>user<|end_header_id|>\n\n"
         "Hello, how are you doing?<|eot_id|>"
@@ -166,7 +166,7 @@ def test_llama3_prompt_style_with_default_system():
         ChatMessage(content="Hello!", role=MessageRole.USER),
     ]
     expected = (
-        "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
+        "<|start_header_id|>system<|end_header_id|>\n\n"
         f"{prompt_style.DEFAULT_SYSTEM_PROMPT}<|eot_id|>"
         "<|start_header_id|>user<|end_header_id|>\n\nHello!<|eot_id|>"
         "<|start_header_id|>assistant<|end_header_id|>\n\n"
@@ -185,7 +185,7 @@ def test_llama3_prompt_style_with_assistant_response():
     ]
     expected_prompt = (
-        "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
+        "<|start_header_id|>system<|end_header_id|>\n\n"
         "You are a helpful assistant<|eot_id|>"
         "<|start_header_id|>user<|end_header_id|>\n\n"
         "What is the capital of France?<|eot_id|>"