# Spaces: Sleeping
# Sleeping
# NEXUS Backend — Environment Configuration
# OLLAMA (for local development only)
OLLAMA_BASE_URL=http://localhost:11434/v1
OLLAMA_API_KEY=ollama
# HUGGINGFACE INFERENCE (PRIMARY - free tier)
HF_TOKEN=
HF_INFERENCE_URL=https://router.huggingface.co/v1
# OPENAI (optional - requires paid account)
OPENAI_API_KEY=
OPENAI_BASE_URL=https://api.openai.com/v1
# AGENTS - HuggingFace models (work with HF Inference API)
# Supports agents a through j (10 agents max via env vars)
# Additional agents can be configured via AGENTS_JSON env var
AGENT_A_MODEL=meta-llama/Llama-3.1-8B-Instruct
AGENT_B_MODEL=meta-llama/Llama-3.2-1B-Instruct
AGENT_C_MODEL=
AGENT_D_MODEL=
AGENT_A_PROVIDER=hf
AGENT_B_PROVIDER=hf
AGENT_C_PROVIDER=hf
AGENT_D_PROVIDER=hf
AGENT_A_ROLE=INVESTIGATOR
AGENT_B_ROLE=VALIDATOR
AGENT_C_ROLE=FORENSIC_ANALYST
AGENT_D_ROLE=NETWORK_ENGINEER
AGENT_A_TEMPERATURE=0.7
AGENT_B_TEMPERATURE=0.6
AGENT_C_TEMPERATURE=0.5
AGENT_D_TEMPERATURE=0.5
AGENT_A_MAX_TOKENS=512
AGENT_B_MAX_TOKENS=512
AGENT_C_MAX_TOKENS=512
AGENT_D_MAX_TOKENS=512
# EXECUTION ENVIRONMENT
EXECUTION_MODE=simulated
ENVIRONMENT=production
# SERVER
HOST=0.0.0.0
PORT=7860
DEBUG=false
# EPISODE SETTINGS
MAX_STEPS=8
SUCCESS_SCORE_THRESHOLD=0.5
# INFERENCE SCRIPT (for competition submission)
API_BASE_URL=https://router.huggingface.co/v1
MODEL_NAME=meta-llama/Llama-3.1-8B-Instruct