From d17c34e81a84518086b93605b15032e2482377f7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Iv=C3=A1n=20Mart=C3=ADnez?=
Date: Wed, 13 Mar 2024 09:53:40 +0100
Subject: [PATCH] fix(settings): set default tokenizer to avoid running make
 setup fail (#1709)

---
 settings.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/settings.yaml b/settings.yaml
index a9a676b..0a3121f 100644
--- a/settings.yaml
+++ b/settings.yaml
@@ -39,6 +39,7 @@ llm:
   # Should be matching the selected model
   max_new_tokens: 512
   context_window: 3900
+  tokenizer: mistralai/Mistral-7B-Instruct-v0.2
   temperature: 0.1        # The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual. (Default: 0.1)
 
 llamacpp: