From e98e86ee994562008425fa9a69fcaaeb98d1c794 Mon Sep 17 00:00:00 2001 From: Saurabh <61317144+html-css-js-art@users.noreply.github.com> Date: Fri, 21 Jul 2023 20:37:37 +0530 Subject: [PATCH] Replace deprecated n_ctx kwarg with max_tokens in LLM constructors --- privateGPT.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/privateGPT.py b/privateGPT.py index 0e3b9d0..a11fe24 100755 --- a/privateGPT.py +++ b/privateGPT.py @@ -33,9 +33,9 @@ def main(): # Prepare the LLM match model_type: case "LlamaCpp": - llm = LlamaCpp(model_path=model_path, n_ctx=model_n_ctx, n_batch=model_n_batch, callbacks=callbacks, verbose=False) + llm = LlamaCpp(model_path=model_path, max_tokens=model_n_ctx, n_batch=model_n_batch, callbacks=callbacks, verbose=False) case "GPT4All": - llm = GPT4All(model=model_path, n_ctx=model_n_ctx, backend='gptj', n_batch=model_n_batch, callbacks=callbacks, verbose=False) + llm = GPT4All(model=model_path, max_tokens=model_n_ctx, backend='gptj', n_batch=model_n_batch, callbacks=callbacks, verbose=False) case _default: # raise exception if model_type is not supported raise Exception(f"Model type {model_type} is not supported. Please choose one of the following: LlamaCpp, GPT4All")