; Application configuration (embeddings / vector store / LLM / retriever).
; NOTE(review): keys use camelCase; the consuming parser is not visible from
; this file — if it is Python configparser, key names are lower-cased on
; load. Confirm against the reader code before renaming anything.

[EMBEDDINGS]
; sentence-embedding model identifier (Hugging Face hub name)
embeddingModel = BAAI/bge-large-en-v1.5
; compute device for embeddings: cuda or cpu
device = cuda
; L2-normalize embedding vectors before storage/search
normalize_embeddings = true

[VECTORSTORE]
; text-splitter chunk size — presumably characters; confirm with the splitter
chunkSize = 1250
; overlap between consecutive chunks, same unit as chunkSize
chunkOverlap = 250
; record each chunk's start offset in its metadata
addStartIndex = true

[LLM]
; chat/completion model identifier
llmModel = llama-3.1-70b-versatile
; maximum tokens to generate per completion
maxTokens = 512
; sampling temperature (0 = deterministic; higher = more random)
temperature = 0.75

[RETRIEVER]
; retrieval strategy; mmr = maximal marginal relevance
searchType = mmr
; number of documents returned to the caller
k = 5
; candidate pool size fetched before MMR re-ranking (must be >= k)
fetchK = 10

[WEBCRAWLER]
; request timeout — presumably seconds; verify against the crawler code
timeout = 30

[EASYOCR]
; enable GPU acceleration for OCR
gpu = true