nikita200 committed on
Commit
6912aec
·
1 Parent(s): 304f799

Add scaffold files, fix compliance gaps for submission

Browse files

- Add __init__.py: exports Action, ServerState, EnvClient
- Add client.py: Python EnvClient for programmatic environment access
- Add pyproject.toml: package metadata and dependencies
- Fix inference.py [START] log: remove non-standard capacity field
- Update openenv.yaml: document configurable environment parameters
- Update README: add configurable environment docs, update project structure

Files changed (6) hide show
  1. README.md +32 -9
  2. __init__.py +6 -0
  3. client.py +67 -0
  4. inference.py +1 -1
  5. openenv.yaml +27 -1
  6. pyproject.toml +20 -0
README.md CHANGED
@@ -90,6 +90,27 @@ The environment simulates a backend server receiving variable traffic. The agent
90
 
91
  ---
92
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
  ## Setup
94
 
95
  ### Local (Python)
@@ -97,10 +118,10 @@ The environment simulates a backend server receiving variable traffic. The agent
97
  ```bash
98
  pip install -r requirements.txt
99
 
100
- # Start the environment server
101
- uvicorn environment:app --host 0.0.0.0 --port 7860
102
 
103
- # In another terminal, run a quick smoke test
104
  curl -s localhost:7860/health
105
  curl -s -X POST localhost:7860/reset -H "Content-Type: application/json" \
106
  -d '{"task_id": "task_easy"}' | python -m json.tool
@@ -115,8 +136,6 @@ curl -s localhost:7860/openenv.yaml
115
  ```bash
116
  docker build -t traffic-controller .
117
  docker run -p 7860:7860 traffic-controller
118
-
119
- # Same smoke tests work on localhost:7860
120
  ```
121
 
122
  ---
@@ -189,13 +208,17 @@ Measured on the deterministic simulator. Scores are in **0.0 – 1.0**.
189
 
190
  ```
191
  .
 
192
  ├── environment.py # FastAPI app + episode logic
193
- ├── tasks.py # Traffic patterns + task metadata
194
- ├── graders.py # Per-task scoring functions
195
  ├── simulator.py # Backend physics (latency, CPU, memory, crash)
196
- ├── models.py # Pydantic models (state, action, request/response)
197
- ├── inference.py # LLM agent runner
 
 
 
 
198
  ├── openenv.yaml # OpenEnv spec
 
199
  ├── Dockerfile
200
  ├── requirements.txt
201
  └── README.md
 
90
 
91
  ---
92
 
93
+ ## Configurable Environment
94
+
95
+ The environment is fully configurable via the `/reset` endpoint. Pass a `config` object to simulate different server profiles:
96
+
97
+ ```bash
98
+ curl -X POST localhost:7860/reset -H "Content-Type: application/json" \
99
+ -d '{"task_id": "task_easy", "config": {"server_capacity": 200, "base_latency": 30}}'
100
+ ```
101
+
102
+ | Parameter | Default | Description |
103
+ |---|---|---|
104
+ | `server_capacity` | 100.0 | Max requests/sec the server can handle |
105
+ | `base_latency` | 50.0 | Response time at zero load (ms) |
106
+ | `crash_load_ratio` | 1.3 | Server crashes at this multiple of capacity |
107
+ | `max_queue` | 500 | Maximum pending request queue size |
108
+ | `traffic_scale` | 1.0 | Multiplier for traffic patterns (2.0 = double traffic) |
109
+
110
+ The LLM agent adapts automatically — the system prompt includes the configured capacity so the model knows the server's limits.
111
+
112
+ ---
113
+
114
  ## Setup
115
 
116
  ### Local (Python)
 
118
  ```bash
119
  pip install -r requirements.txt
120
 
121
+ # Start the environment + Gradio UI
122
+ python app.py
123
 
124
+ # Smoke tests
125
  curl -s localhost:7860/health
126
  curl -s -X POST localhost:7860/reset -H "Content-Type: application/json" \
127
  -d '{"task_id": "task_easy"}' | python -m json.tool
 
136
  ```bash
137
  docker build -t traffic-controller .
138
  docker run -p 7860:7860 traffic-controller
 
 
139
  ```
140
 
141
  ---
 
208
 
209
  ```
210
  .
211
+ ├── app.py # Gradio UI + mounts FastAPI endpoints
212
  ├── environment.py # FastAPI app + episode logic
 
 
213
  ├── simulator.py # Backend physics (latency, CPU, memory, crash)
214
+ ├── models.py # Pydantic models (state, action, config, request/response)
215
+ ├── tasks.py # Traffic patterns + task metadata
216
+ ├── graders.py # Per-task scoring functions (0.0–1.0)
217
+ ├── inference.py # LLM agent runner (OpenAI client)
218
+ ├── client.py # Python EnvClient for programmatic access
219
+ ├── __init__.py # Exports Action, ServerState, EnvClient
220
  ├── openenv.yaml # OpenEnv spec
221
+ ├── pyproject.toml # Package metadata
222
  ├── Dockerfile
223
  ├── requirements.txt
224
  └── README.md
__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
"""Adaptive Backend Traffic Controller — OpenEnv Environment."""

# NOTE(review): this file is a package ``__init__``; sibling modules must be
# imported relative to the package.  The previous absolute imports
# (``from models import …``) only worked when the repository root happened to
# be on ``sys.path`` and failed when the directory was imported as a package.
from .client import EnvClient
from .models import Action, ServerState

# Public API re-exported at the package level.
__all__ = ["Action", "ServerState", "EnvClient"]
client.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
OpenEnv client for the Adaptive Traffic Controller.

Provides a Python API to interact with the environment server
without needing to make raw HTTP calls.
"""

from __future__ import annotations

from typing import Any

import httpx

from models import Action, EnvConfig, ServerState


class EnvClient:
    """Thin HTTP wrapper around the Adaptive Traffic Controller endpoints."""

    def __init__(self, base_url: str = "http://localhost:7860", timeout: float = 30.0):
        # One persistent client so every call reuses the same connection pool.
        self.http = httpx.Client(base_url=base_url, timeout=timeout)

    # -- internal helpers ---------------------------------------------------

    def _get_json(self, path: str) -> dict[str, Any]:
        # GET `path`, raise for HTTP errors, and decode the JSON body.
        response = self.http.get(path)
        response.raise_for_status()
        return response.json()

    def _post_json(self, path: str, payload: dict[str, Any]) -> dict[str, Any]:
        # POST `payload` to `path`, raise for HTTP errors, decode JSON body.
        response = self.http.post(path, json=payload)
        response.raise_for_status()
        return response.json()

    # -- public API ---------------------------------------------------------

    def health(self) -> bool:
        """Return True when the server answers /health with HTTP 200."""
        # Deliberately does not raise on error status: any non-200 means "down".
        return self.http.get("/health").status_code == 200

    def reset(
        self,
        task_id: str = "task_easy",
        config: EnvConfig | None = None,
    ) -> dict[str, Any]:
        """Reset environment. Returns {state, task_id, max_steps, config}."""
        payload: dict[str, Any] = {"task_id": task_id}
        if config is not None:
            # The server expects a plain JSON object, not a pydantic model.
            payload["config"] = config.model_dump()
        return self._post_json("/reset", payload)

    def step(self, action: str | Action) -> dict[str, Any]:
        """Take one step. Returns {state, reward, done, info}."""
        # Accept either the enum member or its raw string value.
        name = action.value if isinstance(action, Action) else action
        return self._post_json("/step", {"action": name})

    def state(self) -> dict[str, Any]:
        """Get current server state."""
        return self._get_json("/state")

    def tasks(self) -> list[dict[str, Any]]:
        """List available tasks."""
        return self._get_json("/tasks")["tasks"]

    def close(self) -> None:
        """Close the underlying HTTP connection pool."""
        self.http.close()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
inference.py CHANGED
@@ -128,7 +128,7 @@ def run_task(task_id: str, env_url: str) -> float:
128
  capacity = data.get("config", {}).get("server_capacity", 100.0)
129
  system_prompt = SYSTEM_PROMPT_TEMPLATE.format(capacity=capacity)
130
 
131
- print(f"[START] task={task_id} max_steps={max_steps} model={MODEL_NAME} capacity={capacity}")
132
 
133
  total_reward = 0.0
134
  final_score = 0.0
 
128
  capacity = data.get("config", {}).get("server_capacity", 100.0)
129
  system_prompt = SYSTEM_PROMPT_TEMPLATE.format(capacity=capacity)
130
 
131
+ print(f"[START] task={task_id} max_steps={max_steps} model={MODEL_NAME}")
132
 
133
  total_reward = 0.0
134
  final_score = 0.0
openenv.yaml CHANGED
@@ -84,11 +84,37 @@ tasks:
84
  stability_bonus: "crash zeroes out primary score (partial credit * 0.3)"
85
  queue_factor: "fraction of steps with queue_length < 100"
86
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
  endpoints:
88
  reset:
89
  method: POST
90
  path: /reset
91
- description: Reset environment, returns initial state
92
  step:
93
  method: POST
94
  path: /step
 
84
  stability_bonus: "crash zeroes out primary score (partial credit * 0.3)"
85
  queue_factor: "fraction of steps with queue_length < 100"
86
 
87
+ configuration:
88
+ description: >
89
+ The environment is fully configurable via the /reset endpoint.
90
+ Pass a config object to simulate different server profiles.
91
+ parameters:
92
+ server_capacity:
93
+ type: float
94
+ default: 100.0
95
+ description: Maximum requests/sec the server can handle
96
+ base_latency:
97
+ type: float
98
+ default: 50.0
99
+ description: Baseline latency in ms at zero load
100
+ crash_load_ratio:
101
+ type: float
102
+ default: 1.3
103
+ description: Load ratio that causes a crash (1.3 = 130% of capacity)
104
+ max_queue:
105
+ type: int
106
+ default: 500
107
+ description: Maximum queue size
108
+ traffic_scale:
109
+ type: float
110
+ default: 1.0
111
+ description: Multiplier for traffic patterns (2.0 = double traffic)
112
+
113
  endpoints:
114
  reset:
115
  method: POST
116
  path: /reset
117
+ description: Reset environment, returns initial state. Accepts optional config object.
118
  step:
119
  method: POST
120
  path: /step
pyproject.toml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "adaptive-traffic-controller"
3
+ version = "1.0.0"
4
+ description = "OpenEnv environment — LLM agent controls backend traffic throttling to prevent server crashes"
5
+ requires-python = ">=3.10"
6
+ dependencies = [
7
+ "fastapi>=0.111.0",
8
+ "uvicorn[standard]>=0.29.0",
9
+ "pydantic>=2.7.0",
10
+ "openai>=1.30.0",
11
+ "httpx>=0.27.0",
12
+ "numpy>=1.26.0",
13
+ "pyyaml>=6.0.1",
14
+ "gradio>=4.30.0",
15
+ "plotly>=5.22.0",
16
+ ]
17
+
18
+ [build-system]
19
+ requires = ["setuptools>=68.0"]
20
+ build-backend = "setuptools.build_meta"