---
# Pipeline configuration: extraction, embedding, and contextual-retrieval settings.
parameters:
  # Source collection to read conversation documents from.
  extract_collection_name: test_conversation_documents
  fetch_limit: 0  # No limit - get all conversations
  # Target collection the processed conversations are loaded into.
  load_collection_name: rag_conversations
  # Minimum quality score a document must reach to be kept; 0.0 keeps everything.
  content_quality_score_threshold: 0.0
  retriever_type: contextual
  # Embedding backend configuration (OpenAI text-embedding-3-small, 1536 dims).
  embedding_model_id: text-embedding-3-small
  embedding_model_type: openai
  embedding_model_dim: 1536
  chunk_size: 640
  # Contextual summarization agent settings used by the contextual retriever.
  contextual_summarization_type: contextual
  contextual_agent_model_id: gpt-4o-mini
  contextual_agent_max_characters: 200
  # When true, run with mocked models instead of real API calls.
  mock: false
  # Parallelism knobs for the processing stage.
  processing_batch_size: 5
  processing_max_workers: 2
  device: mps  # or cuda (for Nvidia GPUs) or mps (for Apple M1/M2/M3 chips)