# chabo_test / params.cfg
# Last change: "ingestor fix" by mtyrrell (commit 5c394c9)
[hf_endpoints]
# Hugging Face Inference Endpoint URLs (both deployed in eu-west-1).
# Endpoint serving the embedding model (key name implies vector embeddings)
embedding_endpoint_url = https://f4veaarnbmqjhve9.eu-west-1.aws.endpoints.huggingface.cloud
# Endpoint serving the reranker model used on retrieved candidates
reranker_endpoint_url = https://whikfgijnuog8fjv.eu-west-1.aws.endpoints.huggingface.cloud
[qdrant]
# Qdrant vector-store connection settings.
# Modes: 'native' for direct QdrantClient, 'gradio' for Gradio Gateway access
# for native just give url
mode = native
url = https://de438521-e2dd-43d9-b41b-b2e18299a2c0.europe-west3-0.gcp.cloud.qdrant.io:6333
# NOTE: The API key should be set via QDRANT_API_KEY environment variable.
# NOTE(review): url above already carries port 6333 while this key says 443 —
# confirm which value the client actually uses; one of the two is likely ignored.
port = 443
# Name of the Qdrant collection queried at retrieval time
collection = allreports
[retrieval]
# Retrieval fan-out settings.
# Number of candidate chunks fetched from the vector store in the first pass
initial_k = 20
# Number of chunks kept for generation — presumably after reranking trims
# initial_k down; confirm against the retriever code.
final_k = 5
[generator]
# LLM answer-generation settings.
# NOTE(review): keys in this section are UPPERCASE while every other section
# uses snake_case; Python configparser lower-cases keys on read by default,
# so verify the consumer's expectations before renaming anything.
# Backend used to reach the model
PROVIDER = huggingface
MODEL = meta-llama/Meta-Llama-3-8B-Instruct
# Maximum number of tokens to generate per response
MAX_TOKENS = 2048
# Sampling temperature; low value favours deterministic output
TEMPERATURE = 0.1
# Inference provider routed through the huggingface backend
INFERENCE_PROVIDER = novita
ORGANIZATION = GIZ
# Comma-separated metadata fields injected into the generation context
CONTEXT_META_FIELDS = filename,project_id,document_source
# Comma-separated metadata fields used to label/cite each source
TITLE_META_FIELDS = filename,page
[ingestor]
# Document chunking settings used at ingestion time.
# Size of each text chunk in characters
chunk_size = 700
# Overlap between consecutive chunks in characters
chunk_overlap = 50
# Maximum number of chunks to send to LLM (prevents context overflow)
max_chunks = 20
# Text separators for splitting, comma-separated (order of preference)
# NOTE(review): '\n' escape sequences are parser-dependent — most INI parsers
# (configparser, Win32) keep the backslash literal, so the ingestor must decode
# them itself; also the value ends with a single-space entry and a trailing
# comma, which whitespace-stripping parsers may drop. Confirm both.
separators = \n\n,\n,. ,! ,? , ,