import os

from dotenv import load_dotenv
from langchain.document_loaders import TextLoader, PDFMinerLoader, CSVLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import LlamaCppEmbeddings

from constants import PERSIST_DIRECTORY, CHROMA_SETTINGS

load_dotenv()


def main():
    llama_embeddings_model = os.environ.get('LLAMA_EMBEDDINGS_MODEL')
    # n_ctx must be an int; os.environ.get returns a string
    model_n_ctx = int(os.environ.get('MODEL_N_CTX'))

    # Load every supported document under source_documents and split into chunks.
    # Accumulate into a list: the original assigned `loader` per file but only
    # loaded the last one after the loop ended.
    documents = []
    for root, dirs, files in os.walk("source_documents"):
        for file in files:
            path = os.path.join(root, file)
            if file.endswith(".txt"):
                loader = TextLoader(path, encoding="utf8")
            elif file.endswith(".pdf"):
                loader = PDFMinerLoader(path)
            elif file.endswith(".csv"):
                loader = CSVLoader(path)
            else:
                continue  # skip unsupported file types
            documents.extend(loader.load())

    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    texts = text_splitter.split_documents(documents)

    # Create embeddings
    llama = LlamaCppEmbeddings(model_path=llama_embeddings_model, n_ctx=model_n_ctx)

    # Create the vectorstore and persist it locally
    db = Chroma.from_documents(texts, llama, persist_directory=PERSIST_DIRECTORY, client_settings=CHROMA_SETTINGS)
    db.persist()
    db = None


if __name__ == "__main__":
    main()
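
# Example setup (a minimal sketch): the script expects the environment variables
# read above to be defined in a .env file picked up by load_dotenv(). The file
# name "ingest.py" and the values below are assumptions for illustration, not
# part of the original source:
#
#   $ cat .env
#   LLAMA_EMBEDDINGS_MODEL=models/ggml-model-q4_0.bin   # hypothetical path
#   MODEL_N_CTX=1024                                    # hypothetical value
#
#   $ python ingest.py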