diff --git a/privateGPT.py b/privateGPT.py
index 0e3b9d0..a11fe24 100755
--- a/privateGPT.py
+++ b/privateGPT.py
@@ -33,9 +33,9 @@ def main():
     # Prepare the LLM
     match model_type:
         case "LlamaCpp":
-            llm = LlamaCpp(model_path=model_path, n_ctx=model_n_ctx, n_batch=model_n_batch, callbacks=callbacks, verbose=False)
+            llm = LlamaCpp(model_path=model_path, max_tokens=model_n_ctx, n_batch=model_n_batch, callbacks=callbacks, verbose=False)
         case "GPT4All":
-            llm = GPT4All(model=model_path, n_ctx=model_n_ctx, backend='gptj', n_batch=model_n_batch, callbacks=callbacks, verbose=False)
+            llm = GPT4All(model=model_path, max_tokens=model_n_ctx, backend='gptj', n_batch=model_n_batch, callbacks=callbacks, verbose=False)
         case _default:
             # raise exception if model_type is not supported
             raise Exception(f"Model type {model_type} is not supported. Please choose one of the following: LlamaCpp, GPT4All")