|
|
|
|
|
import os |
|
|
from typing import Optional, List |
|
|
from dotenv import load_dotenv |
|
|
|
|
|
|
|
|
# Load key=value pairs from a local .env file into os.environ (no-op if absent),
# so the os.getenv() calls below pick them up.
load_dotenv()
|
|
|
|
|
class Settings:
    """Application settings read directly from environment variables.

    All values are read once, at construction time. Every setting has a
    default, so the application can start without a .env file. String
    booleans ("true"/"false", case-insensitive) are coerced to bool, and
    list-valued settings accept either a JSON-style list string
    ('["a","b"]') or a plain comma-separated string ("a,b").
    """

    def __init__(self):
        # --- Server ---
        self.PORT = int(os.getenv("PORT", "8000"))  # string default for consistency with the rest
        self.HOST = os.getenv("HOST", "0.0.0.0")
        self.ENVIRONMENT = os.getenv("ENVIRONMENT", "development")
        self.DEBUG = os.getenv("DEBUG", "true").lower() == "true"

        # --- CORS ---
        cors_origins = os.getenv("CORS_ORIGINS", '["http://localhost:3000","http://localhost:5173","http://localhost:8080"]')
        self.CORS_ORIGINS = self._parse_list(cors_origins)
        self.CORS_ALLOW_CREDENTIALS = os.getenv("CORS_ALLOW_CREDENTIALS", "true").lower() == "true"

        cors_methods = os.getenv("CORS_ALLOW_METHODS", '["GET","POST","PUT","DELETE","OPTIONS"]')
        self.CORS_ALLOW_METHODS = self._parse_list(cors_methods)

        cors_headers = os.getenv("CORS_ALLOW_HEADERS", '["*"]')
        self.CORS_ALLOW_HEADERS = self._parse_list(cors_headers)

        # --- Provider selection ---
        self.LLM_PROVIDER = os.getenv("LLM_PROVIDER", "google")
        # Embeddings follow the LLM provider unless explicitly overridden.
        self.EMBEDDING_PROVIDER = os.getenv("EMBEDDING_PROVIDER", self.LLM_PROVIDER)

        # --- OpenAI ---
        self.OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # None when unset
        self.OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-5-nano")
        self.OPENAI_TEMPERATURE = float(os.getenv("OPENAI_TEMPERATURE", "0.7"))
        self.OPENAI_MAX_TOKENS = int(os.getenv("OPENAI_MAX_TOKENS", "1000"))

        # --- Google (Gemini) ---
        self.GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")  # None when unset
        self.GOOGLE_MODEL = os.getenv("GOOGLE_MODEL", "gemini-2.5-flash")
        self.GOOGLE_TEMPERATURE = float(os.getenv("GOOGLE_TEMPERATURE", "0.7"))
        self.GOOGLE_MAX_TOKENS = int(os.getenv("GOOGLE_MAX_TOKENS", "1000"))

        # --- Hugging Face ---
        self.HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")  # None when unset
        self.HUGGINGFACE_MODEL = os.getenv("HUGGINGFACE_MODEL", "microsoft/DialoGPT-medium")
        self.HUGGINGFACE_API_URL = os.getenv("HUGGINGFACE_API_URL", "https://api-inference.huggingface.co/models/")
        self.HUGGINGFACE_USE_GPU = os.getenv("HUGGINGFACE_USE_GPU", "false").lower() == "true"
        self.HUGGINGFACE_USE_API = os.getenv("HUGGINGFACE_USE_API", "false").lower() == "true"

        # --- Ollama (local) ---
        self.OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
        self.OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "llama3.1:8b")
        self.OLLAMA_TEMPERATURE = float(os.getenv("OLLAMA_TEMPERATURE", "0.7"))

        # --- Embedding models (per provider) ---
        self.OPENAI_EMBEDDING_MODEL = os.getenv("OPENAI_EMBEDDING_MODEL", "text-embedding-ada-002")
        self.GOOGLE_EMBEDDING_MODEL = os.getenv("GOOGLE_EMBEDDING_MODEL", "models/embedding-001")
        self.HUGGINGFACE_EMBEDDING_MODEL = os.getenv("HUGGINGFACE_EMBEDDING_MODEL", "sentence-transformers/all-MiniLM-L6-v2")
        self.OLLAMA_EMBEDDING_MODEL = os.getenv("OLLAMA_EMBEDDING_MODEL", "nomic-embed-text")

        # --- Logging ---
        self.LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
        self.LOG_FORMAT = os.getenv("LOG_FORMAT", "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        self.LOG_FILE = os.getenv("LOG_FILE", "./logs/app.log")

        # --- LangChain ---
        self.LANGCHAIN_DEBUG = os.getenv("LANGCHAIN_DEBUG", "false").lower() == "true"

    def _parse_list(self, value: str) -> List[str]:
        """Parse a list-like string into a list of strings.

        Accepts a JSON-style list ('["a","b"]') or a bare comma-separated
        string ("a, b"). Surrounding quotes and whitespace are stripped from
        each item; empty items are dropped.

        Returns ["*"] as a permissive fallback when *value* is not a string.
        """
        try:
            # Drop surrounding brackets so both '["a"]' and 'a' parse the same way.
            if value.startswith('[') and value.endswith(']'):
                value = value[1:-1]
            items = [item.strip().strip('"').strip("'") for item in value.split(',')]
            return [item for item in items if item]
        except (AttributeError, TypeError):
            # Narrowed from a bare `except:`: only a non-string value can fail
            # here (missing .startswith/.split). Fall back to allow-all.
            return ["*"]

    def get_llm_config(self):
        """Return a provider-specific LLM config dict for LLM_PROVIDER.

        Raises:
            ValueError: if LLM_PROVIDER is not one of
                openai / google / huggingface / ollama.
        """
        if self.LLM_PROVIDER == "openai":
            return {
                "provider": "openai",
                "api_key": self.OPENAI_API_KEY,
                "model": self.OPENAI_MODEL,
                "temperature": self.OPENAI_TEMPERATURE,
                "max_tokens": self.OPENAI_MAX_TOKENS
            }
        elif self.LLM_PROVIDER == "google":
            return {
                "provider": "google",
                "api_key": self.GOOGLE_API_KEY,
                "model": self.GOOGLE_MODEL,
                "temperature": self.GOOGLE_TEMPERATURE,
                "max_tokens": self.GOOGLE_MAX_TOKENS
            }
        elif self.LLM_PROVIDER == "huggingface":
            return {
                "provider": "huggingface",
                "api_token": self.HUGGINGFACE_API_TOKEN,
                "model": self.HUGGINGFACE_MODEL,
                "api_url": self.HUGGINGFACE_API_URL,
                "use_gpu": self.HUGGINGFACE_USE_GPU,
                "use_api": self.HUGGINGFACE_USE_API
            }
        elif self.LLM_PROVIDER == "ollama":
            return {
                "provider": "ollama",
                "base_url": self.OLLAMA_BASE_URL,
                "model": self.OLLAMA_MODEL,
                "temperature": self.OLLAMA_TEMPERATURE
            }
        else:
            raise ValueError(f"Unsupported LLM provider: {self.LLM_PROVIDER}")

    def get_embedding_config(self):
        """Return a provider-specific embedding config dict for EMBEDDING_PROVIDER.

        Raises:
            ValueError: if EMBEDDING_PROVIDER is not one of
                openai / google / huggingface / ollama.
        """
        provider = self.EMBEDDING_PROVIDER

        if provider == "openai":
            return {
                "provider": "openai",
                "api_key": self.OPENAI_API_KEY,
                "model": self.OPENAI_EMBEDDING_MODEL
            }
        elif provider == "google":
            return {
                "provider": "google",
                "api_key": self.GOOGLE_API_KEY,
                "model": self.GOOGLE_EMBEDDING_MODEL
            }
        elif provider == "huggingface":
            return {
                "provider": "huggingface",
                "model": self.HUGGINGFACE_EMBEDDING_MODEL
            }
        elif provider == "ollama":
            return {
                "provider": "ollama",
                "base_url": self.OLLAMA_BASE_URL,
                "model": self.OLLAMA_EMBEDDING_MODEL
            }
        else:
            raise ValueError(f"Unsupported provider: {provider}. Supported providers: openai, google, huggingface, ollama")
|
|
|
|
|
|
|
|
# Module-level singleton: environment is read once at import time; the rest
# of the application imports this instance rather than constructing Settings.
settings = Settings()
|
|
|
|
|
|
|
|
|
|
|
|