# ===========================================
# Recipe Recommendation Bot - Environment Configuration
# ===========================================
# Server Configuration
PORT=8080
HOST=0.0.0.0
ENVIRONMENT=development
DEBUG=true
LANGCHAIN_DEBUG=true
# CORS Configuration
CORS_ORIGINS=["http://localhost:3000","http://localhost:5173","http://localhost:8000"]
CORS_ALLOW_CREDENTIALS=true
CORS_ALLOW_METHODS=["GET","POST","PUT","DELETE","OPTIONS"]
CORS_ALLOW_HEADERS=["*"]
# ===========================================
# LLM & Embedding Provider Configuration
# ===========================================
# Supported providers: openai, google, huggingface, ollama
# LLM_PROVIDER and EMBEDDING_PROVIDER are set independently and may differ
LLM_PROVIDER=google
EMBEDDING_PROVIDER=google
# OpenAI Configuration
# Use only if LLM_PROVIDER or EMBEDDING_PROVIDER is set to 'openai'
OPENAI_API_KEY=YOUR_OPENAI_API_KEY_HERE
OPENAI_MODEL=gpt-5-nano
OPENAI_TEMPERATURE=0.7
OPENAI_MAX_TOKENS=1000
# Google AI Configuration (Gemini)
# Use only if LLM_PROVIDER or EMBEDDING_PROVIDER is set to 'google'
GOOGLE_API_KEY=YOUR_GOOGLE_API_KEY_HERE
GOOGLE_MODEL=gemini-2.0-flash
GOOGLE_TEMPERATURE=0.7
GOOGLE_MAX_TOKENS=1000
# Hugging Face Configuration
# Use only if LLM_PROVIDER or EMBEDDING_PROVIDER is set to 'huggingface'
HUGGINGFACE_API_TOKEN=YOUR_HUGGINGFACE_API_TOKEN_HERE
HUGGINGFACE_MODEL=deepseek-ai/DeepSeek-V3.1
HUGGINGFACE_API_URL=https://api-inference.huggingface.co/models/
HUGGINGFACE_USE_API=true
HUGGINGFACE_USE_GPU=false
# Ollama Configuration (local inference)
# Use only if LLM_PROVIDER or EMBEDDING_PROVIDER is set to 'ollama'
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_MODEL=llama3.1:8b
OLLAMA_TEMPERATURE=0.7
# ===========================================
# Vector Store Configuration
# ===========================================
# Supported stores: chromadb, mongodb
VECTOR_STORE_PROVIDER=mongodb
# ChromaDB Configuration
DB_PATH=./data/chromadb
DB_COLLECTION_NAME=recipes
DB_PERSIST_DIRECTORY=./data/chromadb_persist
# Set to true to delete and recreate DB on startup (useful for adding new recipes)
DB_REFRESH_ON_START=false
# MongoDB Atlas Configuration (for vector search)
# Provide your connection string and collection settings when using MongoDB
MONGODB_URI=mongodb+srv://<username>:<password>@<cluster>.mongodb.net/?retryWrites=true&w=majority&appName=<AppName>
MONGODB_DATABASE=food_recommendation
MONGODB_COLLECTION=AI_DB
MONGODB_INDEX_NAME=foodInstructionIndex
MONGODB_VECTOR_FIELD=ingredients_emb
MONGODB_TEXT_FIELD=title
MONGODB_SIMILARITY_METRIC=dotProduct
MONGODB_NUM_CANDIDATES=100
# ===========================================
# Model Configuration
# ===========================================
# The provider chosen via EMBEDDING_PROVIDER above selects which of these embedding models is used
# OpenAI Models
OPENAI_EMBEDDING_MODEL=text-embedding-3-small
# Google Models
GOOGLE_EMBEDDING_MODEL=models/embedding-001
# HuggingFace Models
HUGGINGFACE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# Ollama Models
OLLAMA_EMBEDDING_MODEL=nomic-embed-text:v1.5
# ===========================================
# Logging Configuration
# ===========================================
LOG_LEVEL=INFO
LOG_FORMAT=%(asctime)s - %(name)s - %(levelname)s - %(message)s
LOG_FILE=./logs/app.log