# ===========================================
# Recipe Recommendation Bot - Environment Configuration
# ===========================================
# Server Configuration
PORT=8080
HOST=0.0.0.0
ENVIRONMENT=development
DEBUG=true
LANGCHAIN_DEBUG=true
# CORS Configuration
CORS_ORIGINS=["http://localhost:3000","http://localhost:5173","http://localhost:8000"]
CORS_ALLOW_CREDENTIALS=true
CORS_ALLOW_METHODS=["GET","POST","PUT","DELETE","OPTIONS"]
CORS_ALLOW_HEADERS=["*"]
# ===========================================
# LLM & Embedding Provider Configuration
# ===========================================
# Supported providers: openai, google, huggingface, ollama
# LLM_PROVIDER selects the chat/completion model; EMBEDDING_PROVIDER selects
# the embedding model. They may be set to different providers.
LLM_PROVIDER=google
EMBEDDING_PROVIDER=google
# OpenAI Configuration
# Use only if LLM_PROVIDER or EMBEDDING_PROVIDER is set to 'openai'
OPENAI_API_KEY=YOUR_OPENAI_API_KEY_HERE
OPENAI_MODEL=gpt-5-nano
OPENAI_TEMPERATURE=0.7
OPENAI_MAX_TOKENS=1000
# Google AI Configuration (Gemini)
# Use only if LLM_PROVIDER or EMBEDDING_PROVIDER is set to 'google'
GOOGLE_API_KEY=YOUR_GOOGLE_API_KEY_HERE
GOOGLE_MODEL=gemini-2.0-flash
GOOGLE_TEMPERATURE=0.7
GOOGLE_MAX_TOKENS=1000
# Hugging Face Configuration
# Use only if LLM_PROVIDER or EMBEDDING_PROVIDER is set to 'huggingface'
HUGGINGFACE_API_TOKEN=YOUR_HUGGINGFACE_API_TOKEN_HERE
HUGGINGFACE_MODEL=deepseek-ai/DeepSeek-V3.1
HUGGINGFACE_API_URL=https://api-inference.huggingface.co/models/
HUGGINGFACE_USE_API=true
HUGGINGFACE_USE_GPU=false
# Ollama Configuration (local inference)
# Use only if LLM_PROVIDER or EMBEDDING_PROVIDER is set to 'ollama'
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_MODEL=llama3.1:8b
OLLAMA_TEMPERATURE=0.7
# ===========================================
# Vector Store Configuration
# ===========================================
# Supported stores: chromadb, mongodb
VECTOR_STORE_PROVIDER=mongodb
# ChromaDB Configuration
DB_PATH=./data/chromadb
DB_COLLECTION_NAME=recipes
DB_PERSIST_DIRECTORY=./data/chromadb_persist
# Set to true to delete and recreate DB on startup (useful for adding new recipes)
DB_REFRESH_ON_START=false
# MongoDB Atlas Configuration (for vector search)
# Provide your connection string and collection settings when using MongoDB
MONGODB_URI=mongodb+srv://<username>:<password>@<cluster>.mongodb.net/?retryWrites=true&w=majority&appName=<AppName>
MONGODB_DATABASE=food_recommendation
MONGODB_COLLECTION=AI_DB
MONGODB_INDEX_NAME=foodInstructionIndex
MONGODB_VECTOR_FIELD=ingredients_emb
MONGODB_TEXT_FIELD=title
MONGODB_SIMILARITY_METRIC=dotProduct
MONGODB_NUM_CANDIDATES=100
# ===========================================
# Model Configuration
# ===========================================
# The LLM_PROVIDER and EMBEDDING_PROVIDER settings above determine which of
# the embedding models below is actually used.
# OpenAI Models
OPENAI_EMBEDDING_MODEL=text-embedding-3-small
# Google Models
GOOGLE_EMBEDDING_MODEL=models/embedding-001
# HuggingFace Models
HUGGINGFACE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# Ollama Models
OLLAMA_EMBEDDING_MODEL=nomic-embed-text:v1.5
# ===========================================
# Logging Configuration
# ===========================================
LOG_LEVEL=INFO
LOG_FORMAT=%(asctime)s - %(name)s - %(levelname)s - %(message)s
LOG_FILE=./logs/app.log