# NEXUS Backend — Environment Configuration

# OLLAMA (for local development only)
OLLAMA_BASE_URL=http://localhost:11434/v1
OLLAMA_API_KEY=ollama

# HUGGINGFACE INFERENCE (PRIMARY - free tier)
HF_TOKEN=
HF_INFERENCE_URL=https://router.huggingface.co/v1

# OPENAI (optional - requires paid account)
OPENAI_API_KEY=
OPENAI_BASE_URL=https://api.openai.com/v1

# AGENTS - HuggingFace models (work with HF Inference API)
# Supports agents A through J (10 agents max via env vars)
# Additional agents can be configured via AGENTS_JSON env var
AGENT_A_MODEL=meta-llama/Llama-3.1-8B-Instruct
AGENT_B_MODEL=meta-llama/Llama-3.2-1B-Instruct
AGENT_C_MODEL=
AGENT_D_MODEL=
AGENT_A_PROVIDER=hf
AGENT_B_PROVIDER=hf
AGENT_C_PROVIDER=hf
AGENT_D_PROVIDER=hf
AGENT_A_ROLE=INVESTIGATOR
AGENT_B_ROLE=VALIDATOR
AGENT_C_ROLE=FORENSIC_ANALYST
AGENT_D_ROLE=NETWORK_ENGINEER
AGENT_A_TEMPERATURE=0.7
AGENT_B_TEMPERATURE=0.6
AGENT_C_TEMPERATURE=0.5
AGENT_D_TEMPERATURE=0.5
AGENT_A_MAX_TOKENS=512
AGENT_B_MAX_TOKENS=512
AGENT_C_MAX_TOKENS=512
AGENT_D_MAX_TOKENS=512

# EXECUTION ENVIRONMENT
EXECUTION_MODE=simulated
ENVIRONMENT=production

# SERVER
HOST=0.0.0.0
PORT=7860
DEBUG=false

# EPISODE SETTINGS
MAX_STEPS=8
SUCCESS_SCORE_THRESHOLD=0.5

# INFERENCE SCRIPT (for competition submission)
API_BASE_URL=https://router.huggingface.co/v1
MODEL_NAME=meta-llama/Llama-3.1-8B-Instruct