# src/core/config.py
# feat: hybrid RAG pipeline upgrade (author: Peterase, commit daf250b)
import os
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
    """Central application configuration.

    Values are resolved by pydantic-settings with its standard precedence:
    constructor kwargs > environment variables > the ``.env`` file > the
    defaults declared below.

    NOTE(review): the previous version wrapped every default in
    ``os.getenv(...)`` with manual ``int()``/``float()``/bool casts.  That
    read the environment twice (pydantic reads it again) and turned a
    malformed value (e.g. ``QDRANT_PORT=abc``) into a raw ``ValueError`` at
    import time.  Plain typed defaults below let pydantic do the parsing and
    raise a proper validation error.  Boolean fields now use pydantic's
    standard parsing ("1"/"true"/"yes"/...), where the old
    ``.lower() == "true"`` silently treated anything else as False.
    """

    model_config = SettingsConfigDict(env_file=".env", env_file_encoding='utf-8', extra='ignore')

    PROJECT_NAME: str = "RAG API Service"
    API_V1_STR: str = "/api/v1"

    # --- Qdrant (vector store) ---
    QDRANT_HOST: str = "localhost"
    QDRANT_PORT: int = 6333
    QDRANT_URL: str = ""  # Cloud URL (overrides host/port)
    QDRANT_API_KEY: str = ""  # Cloud API Key
    QDRANT_COLLECTION: str = "news_articles"

    # --- ClickHouse ---
    CLICKHOUSE_HOST: str = "localhost"
    CLICKHOUSE_PORT: int = 8123
    CLICKHOUSE_USER: str = "default"
    CLICKHOUSE_PASSWORD: str = ""
    CLICKHOUSE_DB: str = "default"
    CLICKHOUSE_SECURE: bool = False

    # --- Embedding Model Config ---
    EMBEDDING_MODEL: str = "BAAI/bge-m3"
    VECTOR_SIZE: int = 1024
    RERANKER_MODEL: str = "BAAI/bge-reranker-v2-m3"

    # --- PostgreSQL / Neon Config ---
    DATABASE_URL: str = ""  # Full Neon URL (overrides individual fields)
    POSTGRES_USER: str = "postgres"
    POSTGRES_PASSWORD: str = "postgres"
    POSTGRES_SERVER: str = "localhost"
    # Kept as str (not int) for backward compatibility: it is only ever
    # interpolated into the connection URL below.
    POSTGRES_PORT: str = "5432"
    POSTGRES_DB: str = "rag_interactions"

    @property
    def SQLALCHEMY_DATABASE_URI(self) -> str:
        """Return the SQLAlchemy connection string.

        A full ``DATABASE_URL`` (e.g. from Neon) takes precedence; otherwise
        the URI is assembled from the individual POSTGRES_* fields.  User and
        password are percent-encoded so credentials containing ``@``, ``/``,
        ``:`` etc. no longer yield an unparseable URL.
        """
        if self.DATABASE_URL:
            return self.DATABASE_URL
        from urllib.parse import quote_plus  # stdlib; local import keeps this edit self-contained
        user = quote_plus(self.POSTGRES_USER)
        password = quote_plus(self.POSTGRES_PASSWORD)
        return (
            f"postgresql://{user}:{password}"
            f"@{self.POSTGRES_SERVER}:{self.POSTGRES_PORT}/{self.POSTGRES_DB}"
        )

    # --- LLM Settings ---
    # Supported providers: "groq", "gemini", "together", "openai", "ollama"
    LLM_PROVIDER: str = "groq"
    # Groq — free, 200+ tok/s, llama-3.3-70b-versatile | https://console.groq.com
    OPENAI_API_KEY: str = ""
    GROQ_API_KEY: str = ""
    GROQ_MODEL: str = "llama-3.3-70b-versatile"
    # Google Gemini — free tier (15 RPM / 1M TPM) | https://aistudio.google.com/apikey
    GEMINI_API_KEY: str = ""
    GEMINI_MODEL: str = "gemini-1.5-flash"
    # Together AI — free $25 credit | https://api.together.ai
    TOGETHER_API_KEY: str = ""
    TOGETHER_MODEL: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo"
    # HuggingFace Inference API — free with HF token | https://huggingface.co/settings/tokens
    HF_TOKEN: str = ""
    HF_MODEL: str = "meta-llama/Llama-3.1-8B-Instruct"
    # OpenRouter — free model pool | https://openrouter.ai/keys
    OPENROUTER_API_KEY: str = ""
    # Ollama — local inference
    OLLAMA_HOST: str = "http://localhost:11434"
    OLLAMA_MODEL: str = "llama3.2"

    # --- Redis Settings ---
    REDIS_URL: str = ""  # Full URL (Upstash) - overrides host/port
    REDIS_HOST: str = "localhost"
    REDIS_PORT: int = 6380
    REDIS_DB: int = 0
    REDIS_PASSWORD: str = ""

    # --- Hybrid Search Settings ---
    ENABLE_HYBRID_SEARCH: bool = True
    LIVE_SEARCH_TIMEOUT: float = 1.5
    LIVE_SEARCH_MAX_RESULTS: int = 15
    LIVE_SEARCH_WEIGHT: float = 0.5
    DB_SEARCH_WEIGHT: float = 0.5

    # --- Jina Reader Settings (Full Article Extraction) ---
    ENABLE_JINA_READER: bool = True
    JINA_READER_TIMEOUT: float = 8.0
    JINA_READER_MAX_CONCURRENT: int = 10
    JINA_API_KEY: str = ""  # Get free key at https://jina.ai

    # --- Jina Reranker API ---
    JINA_RERANKER_ENABLED: bool = True
    JINA_RERANKER_MODEL: str = "jina-reranker-v3"
    JINA_RERANKER_TIMEOUT: float = 5.0

    # --- NewsAPI Settings (Real-Time News Search) ---
    NEWSAPI_KEY: str = ""  # Get free key at https://newsapi.org/register
    NEWSAPI_ENABLED: bool = True
    NEWSAPI_TIMEOUT: float = 2.0
    NEWSAPI_MAX_RESULTS: int = 20

    # --- Cache Settings (TTL in seconds) ---
    CACHE_RESPONSE_TTL: int = 300  # 5 minutes
    CACHE_LIVE_TTL: int = 600  # 10 minutes
    CACHE_TRANSLATION_TTL: int = 3600  # 1 hour
    CACHE_INTENT_TTL: int = 3600  # 1 hour

    # --- Security Settings ---
    # WARNING: insecure placeholder — must be overridden in production.
    SECRET_KEY: str = "a_very_secret_key_change_me_in_production"
    ACCESS_TOKEN_EXPIRE_MINUTES: int = 60

    # --- Kafka Settings (for Top Stories — read-only consumer) ---
    KAFKA_BOOTSTRAP_SERVERS: str = ""
    KAFKA_SSL_CA: str = ""
    KAFKA_SSL_CERT: str = ""
    KAFKA_SSL_KEY: str = ""
    KAFKA_TOPIC_PROCESSED: str = "news.processed"
# Module-level singleton; constructing it here resolves env vars / .env once at import.
settings = Settings()