commit 39df61ca07

@@ -1,4 +1,5 @@
 import os
+from dotenv import load_dotenv
 from langchain.document_loaders import TextLoader, PDFMinerLoader, CSVLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import Chroma
@@ -6,6 +7,8 @@ from langchain.embeddings import LlamaCppEmbeddings
 from constants import PERSIST_DIRECTORY
 from constants import CHROMA_SETTINGS
 
+load_dotenv()
+
 def main():
     llama_embeddings_model = os.environ.get('LLAMA_EMBEDDINGS_MODEL')
     persist_directory = os.environ.get('PERSIST_DIRECTORY')
@@ -30,4 +33,4 @@ def main():
     db = None
 
 if __name__ == "__main__":
-    main()
+    main()
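For context on what this hunk changes: load_dotenv() makes python-dotenv read key=value pairs from a .env file (located starting from the current working directory) into os.environ without overriding variables that are already set, which is what lets the os.environ.get(...) lookups above resolve. A minimal sketch of such a file, reusing the variable names from the diff; the values and paths are illustrative assumptions, not taken from the repository:

    # .env -- illustrative values only; adjust the paths to your setup
    LLAMA_EMBEDDINGS_MODEL=./models/ggml-model-q4_0.bin
    PERSIST_DIRECTORY=db
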
@@ -1,3 +1,4 @@
+from dotenv import load_dotenv
 from langchain.chains import RetrievalQA
 from langchain.embeddings import LlamaCppEmbeddings
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
@@ -5,6 +6,8 @@ from langchain.vectorstores import Chroma
 from langchain.llms import GPT4All, LlamaCpp
 import os
 
+load_dotenv()
+
 llama_embeddings_model = os.environ.get("LLAMA_EMBEDDINGS_MODEL")
 persist_directory = os.environ.get('PERSIST_DIRECTORY')
 
@@ -51,4 +54,4 @@ def main():
             print(document.page_content)
 
 if __name__ == "__main__":
-    main()
+    main()
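The same load-then-read pattern can be exercised outside the project. A self-contained sketch, assuming python-dotenv is installed; the "db" fallback and the error handling are additions for illustration, not part of the commit:

    import os
    from dotenv import load_dotenv

    # Read key=value pairs from ./.env into os.environ; variables already
    # present in the environment take precedence by default.
    load_dotenv()

    # Variable names taken from the diff; the fallback value is an assumption.
    llama_embeddings_model = os.environ.get("LLAMA_EMBEDDINGS_MODEL")
    persist_directory = os.environ.get("PERSIST_DIRECTORY", "db")

    if llama_embeddings_model is None:
        raise SystemExit("LLAMA_EMBEDDINGS_MODEL is not set; add it to .env or export it")

    print(f"embeddings model: {llama_embeddings_model}")
    print(f"persist directory: {persist_directory}")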