# Universal Deep Research Backend (UDR-B) - Environment Configuration
# Copy this file to .env and customize the values for your deployment

# Server Configuration
HOST=0.0.0.0
PORT=8000
LOG_LEVEL=info

# CORS Configuration
FRONTEND_URL=http://localhost:3000
ALLOW_CREDENTIALS=true

# Model Configuration
DEFAULT_MODEL=llama-3.1-nemotron-253b
LLM_BASE_URL=https://integrate.api.nvidia.com/v1
LLM_API_KEY_FILE=nvdev_api.txt
LLM_TEMPERATURE=0.2
LLM_TOP_P=0.7
LLM_MAX_TOKENS=2048

# Search Configuration
TAVILY_API_KEY_FILE=tavily_api.txt
MAX_SEARCH_RESULTS=10

# Research Configuration
MAX_TOPICS=1
MAX_SEARCH_PHRASES=1
MOCK_DIRECTORY=mock_instances/stocks_24th_3_sections
RANDOM_SEED=42

# Logging Configuration
LOG_DIR=logs
TRACE_ENABLED=true
COPY_INTO_STDOUT=false

# FrameV4 Configuration
LONG_CONTEXT_CUTOFF=8192
FORCE_LONG_CONTEXT=false
MAX_ITERATIONS=1024
INTERACTION_LEVEL=none

# Model-specific overrides (optional)
# LLAMA_3_1_8B_BASE_URL=https://integrate.api.nvidia.com/v1
# LLAMA_3_1_8B_MODEL=nvdev/meta/llama-3.1-8b-instruct
# LLAMA_3_1_8B_TEMPERATURE=0.2
# LLAMA_3_1_8B_TOP_P=0.7
# LLAMA_3_1_8B_MAX_TOKENS=2048