# ═══════════════════════════════════════════════════════════════════════════
# RAG API Configuration
# ═══════════════════════════════════════════════════════════════════════════

# ── Vector Database (Qdrant) ──────────────────────────────────────────────
QDRANT_HOST=localhost
QDRANT_PORT=6333
# Cloud URL (overrides host/port)
QDRANT_URL=
# Cloud API Key
QDRANT_API_KEY=
QDRANT_COLLECTION=news_articles_hybrid

# ── Analytics Database (ClickHouse) ────────────────────────────────────────
CLICKHOUSE_HOST=localhost
CLICKHOUSE_PORT=8123
CLICKHOUSE_USER=default
CLICKHOUSE_PASSWORD=
CLICKHOUSE_DB=default
CLICKHOUSE_SECURE=false

# ── User Database (PostgreSQL/Neon) ────────────────────────────────────────
# Full Neon URL (overrides individual fields)
DATABASE_URL=
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgres
POSTGRES_SERVER=localhost
POSTGRES_PORT=5432
POSTGRES_DB=rag_interactions

# ── Embedding & Reranking Models ───────────────────────────────────────────
EMBEDDING_MODEL=BAAI/bge-m3
VECTOR_SIZE=1024
RERANKER_MODEL=BAAI/bge-reranker-v2-m3

# ── LLM Provider ───────────────────────────────────────────────────────────
# Supported: "groq", "gemini", "together", "openai", "ollama"
LLM_PROVIDER=gemini

# Groq (free, 200+ tok/s)
GROQ_API_KEY=
GROQ_MODEL=llama-3.3-70b-versatile

# Google Gemini (free tier: 15 RPM / 1M TPM)
GEMINI_API_KEY=
GEMINI_MODEL=gemini-1.5-flash

# Together AI (free $25 credit)
TOGETHER_API_KEY=
TOGETHER_MODEL=meta-llama/Llama-3.3-70B-Instruct-Turbo

# HuggingFace Inference API
HF_TOKEN=
HF_MODEL=meta-llama/Llama-3.1-8B-Instruct

# Ollama (local)
OLLAMA_HOST=http://localhost:11434
OLLAMA_MODEL=llama3.2

# OpenAI
OPENAI_API_KEY=

# ── Redis Cache ────────────────────────────────────────────────────────────
# Full URL (Upstash) - overrides host/port
REDIS_URL=
REDIS_HOST=localhost
REDIS_PORT=6380
REDIS_DB=0
REDIS_PASSWORD=

# ── Hybrid Search Settings ─────────────────────────────────────────────────
ENABLE_HYBRID_SEARCH=true
LIVE_SEARCH_TIMEOUT=2.0
LIVE_SEARCH_MAX_RESULTS=5
LIVE_SEARCH_WEIGHT=0.5
DB_SEARCH_WEIGHT=0.5

# ── Cache Settings (TTL in seconds) ────────────────────────────────────────
# 5 minutes - full response cache
CACHE_RESPONSE_TTL=300
# 10 minutes - live search results
CACHE_LIVE_TTL=600
# 1 hour - translated queries
CACHE_TRANSLATION_TTL=3600
# 1 hour - intent classification
CACHE_INTENT_TTL=3600

# ── Security ───────────────────────────────────────────────────────────────
SECRET_KEY=change_me_in_production_to_a_very_long_random_string
ACCESS_TOKEN_EXPIRE_MINUTES=60