Aksh Parekh Claude Sonnet 4.6 committed on
Commit 71efc66 · 1 Parent(s): dd5f8b8

feat: continuous PPO training + live web dashboard


- Replace server/app.py with FastAPI continuous training loop
- PPO training runs in background thread (no manual stepping)
- Live dashboard: 2D road canvas on left, reward chart + episode log on right
- SSE + polling for real-time updates (0.5s heartbeat)
- Capped reward mode: min(reward, 2.0) per step
- Uncapped reward mode: base + token_count * 0.001 (frontier LLM scaling)
- Toggle capped/uncapped via UI button or POST /api/mode
- Curriculum stage display with pip indicators
- PPO losses (pg/vf/entropy) shown live
- Dockerfile cleaned: removed openenv-core dep to avoid websockets conflict

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
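
The reward-mode toggle and the polling endpoint described above can also be exercised from a script. A minimal sketch, assuming the server is reachable at http://localhost:8000 (substitute the deployed Space URL):

    import requests

    BASE = "http://localhost:8000"  # assumed local run; use the Space URL when deployed

    # Switch training to uncapped (LLM-scaled) reward; send "capped" to switch back.
    resp = requests.post(f"{BASE}/api/mode", json={"mode": "uncapped"})
    print(resp.json())  # {"mode": "uncapped"}

    # Poll the same JSON snapshot the dashboard uses (an SSE stream is also at /api/stream).
    state = requests.get(f"{BASE}/api/state").json()
    print(state["reward_mode"], state["total_steps"], state["mean_reward"])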

Dockerfile CHANGED
@@ -2,21 +2,33 @@ FROM python:3.11-slim

 WORKDIR /app

-# Install system deps
 RUN apt-get update && \
     apt-get install -y --no-install-recommends git curl && \
     rm -rf /var/lib/apt/lists/*

-# Copy environment code into a proper package directory
+# Copy the package into /app/overflow_env
 COPY . /app/overflow_env

-# Install dependencies via pip using requirements.txt
-RUN pip install --no-cache-dir -r /app/overflow_env/server/requirements.txt
+# Install runtime dependencies (no openenv-core to avoid websockets conflict)
+RUN pip install --no-cache-dir \
+    --extra-index-url https://download.pytorch.org/whl/cpu \
+    "fastapi>=0.115.0" \
+    "pydantic>=2.0.0" \
+    "uvicorn[standard]>=0.24.0" \
+    "requests>=2.31.0" \
+    "torch==2.5.1+cpu" \
+    "numpy>=1.24.0" \
+    "gymnasium>=0.29.0" \
+    "matplotlib>=3.8.0" \
+    "pillow==10.4.0"

-HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
+# Make overflow_env importable as a top-level package
+ENV PYTHONPATH=/app
+ENV ENABLE_WEB_INTERFACE=true
+
+HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \
     CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')" || exit 1

 EXPOSE 8000

-ENV ENABLE_WEB_INTERFACE=true
 CMD ["uvicorn", "overflow_env.server.app:app", "--host", "0.0.0.0", "--port", "8000"]
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: Overflow Environment Server
+title: Overflow OpenENV
 emoji: 🚗
 colorFrom: red
 colorTo: yellow
__init__.py CHANGED
@@ -1,6 +1,10 @@
 """Overflow Environment — Autonomous vehicle fleet oversight for OpenEnv."""

-from .client import OverflowEnv
+try:
+    from .client import OverflowEnv
+except ImportError:
+    OverflowEnv = None  # openenv-core not installed; training-only mode
+
 from .models import (
     CarStateData,
     LaneOccupancyData,
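
With this guard, OverflowEnv is None whenever openenv-core is absent, so callers should check before constructing a client. A minimal sketch of that check (the calling code is illustrative, not part of this commit):

    import overflow_env

    if overflow_env.OverflowEnv is None:
        # openenv-core not installed -> the image runs in training-only mode
        print("OverflowEnv client unavailable; skipping client-side rollout.")
    else:
        print("OverflowEnv client available:", overflow_env.OverflowEnv)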
models.py CHANGED
@@ -12,7 +12,16 @@ from typing import Any, Dict, List, Optional

 from pydantic import BaseModel, Field

-from openenv.core.env_server.types import Action, Observation, State
+try:
+    from openenv.core.env_server.types import Action, Observation, State
+except ImportError:
+    class Action(BaseModel): pass
+    class Observation(BaseModel):
+        done: bool = False
+        reward: float = 0.0
+    class State(BaseModel):
+        episode_id: str = ""
+        step_count: int = 0

 # ── Structured sub-models (frontend-compatible) ─────────────────────────

policies/__init__.py ADDED
@@ -0,0 +1,5 @@
+from .base_policy import BasePolicy
+from .flat_mlp_policy import FlatMLPPolicy
+from .ticket_attention_policy import TicketAttentionPolicy
+
+__all__ = ["BasePolicy", "FlatMLPPolicy", "TicketAttentionPolicy"]
policies/__pycache__/__init__.cpython-314.pyc ADDED
Binary file (381 Bytes).
policies/__pycache__/base_policy.cpython-314.pyc ADDED
Binary file (4.05 kB).
policies/__pycache__/flat_mlp_policy.cpython-314.pyc ADDED
Binary file (3.71 kB).
policies/__pycache__/policy_spec.cpython-314.pyc ADDED
Binary file (18.4 kB).
policies/__pycache__/ticket_attention_policy.cpython-314.pyc ADDED
Binary file (11.3 kB).
policies/base_policy.py ADDED
@@ -0,0 +1,66 @@
+"""
+BasePolicy — abstract interface all policies implement.
+
+All policies expose the same predict() and train_step() API so the
+curriculum trainer can swap them out transparently.
+"""
+
+from __future__ import annotations
+
+import abc
+from typing import Any, Dict, Optional, Tuple
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+
+class BasePolicy(nn.Module, abc.ABC):
+    """
+    Abstract base for all driving policies.
+
+    Subclasses implement:
+        forward(obs_tensor) → action_tensor, value_tensor
+        encode_obs(obs_np) → torch.Tensor
+    """
+
+    def __init__(self, obs_dim: int, action_dim: int = 3):
+        super().__init__()
+        self.obs_dim = obs_dim
+        self.action_dim = action_dim
+
+    @abc.abstractmethod
+    def forward(
+        self, obs: torch.Tensor
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """
+        Returns:
+            action_mean — shape (B, action_dim)
+            value — shape (B, 1)
+        """
+        ...
+
+    def predict(
+        self,
+        obs: np.ndarray,
+        deterministic: bool = False,
+    ) -> np.ndarray:
+        """Numpy in, numpy out. Used by the env during rollout."""
+        self.eval()
+        with torch.no_grad():
+            t = torch.as_tensor(obs, dtype=torch.float32).unsqueeze(0)
+            mean, _ = self.forward(t)
+            if deterministic:
+                action = mean
+            else:
+                action = mean + torch.randn_like(mean) * 0.1
+        return action.squeeze(0).numpy()
+
+    @staticmethod
+    def _mlp(dims: list[int], activation=nn.Tanh) -> nn.Sequential:
+        layers = []
+        for i in range(len(dims) - 1):
+            layers.append(nn.Linear(dims[i], dims[i + 1]))
+            if i < len(dims) - 2:
+                layers.append(activation())
+        return nn.Sequential(*layers)
policies/flat_mlp_policy.py ADDED
@@ -0,0 +1,50 @@
+"""
+FlatMLPPolicy — sanity-check baseline.
+
+Concatenates the full observation (ego + all tickets flattened) and passes
+it through a standard MLP. No attention, no structure.
+
+Use this to:
+    1. Verify the reward signal and environment are working
+    2. Establish a performance floor
+    3. Confirm that TicketAttentionPolicy actually improves over this
+
+If FlatMLPPolicy can't learn Stage 1 survival, the reward or env is broken.
+"""
+
+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+from .base_policy import BasePolicy
+
+
+class FlatMLPPolicy(BasePolicy):
+    """Standard 3-layer MLP over the full flat observation."""
+
+    def __init__(self, obs_dim: int, hidden: int = 256):
+        super().__init__(obs_dim)
+
+        self.actor = nn.Sequential(
+            nn.Linear(obs_dim, hidden), nn.LayerNorm(hidden), nn.Tanh(),
+            nn.Linear(hidden, hidden), nn.Tanh(),
+            nn.Linear(hidden, hidden // 2), nn.Tanh(),
+            nn.Linear(hidden // 2, 3), nn.Tanh(),
+        )
+        self.critic = nn.Sequential(
+            nn.Linear(obs_dim, hidden), nn.Tanh(),
+            nn.Linear(hidden, hidden // 2), nn.Tanh(),
+            nn.Linear(hidden // 2, 1),
+        )
+        self._init_weights()
+
+    def _init_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Linear):
+                nn.init.orthogonal_(m.weight, gain=1.0)
+                nn.init.zeros_(m.bias)
+        nn.init.orthogonal_(self.actor[-2].weight, gain=0.01)
+
+    def forward(self, obs: torch.Tensor):
+        return self.actor(obs), self.critic(obs)
policies/policy_spec.py ADDED
@@ -0,0 +1,409 @@
1
+ """
2
+ Policy data input specifications β€” formal contracts for observation, action, and ticket data.
3
+
4
+ This module defines the exact data shapes, normalization ranges, and semantic meaning
5
+ of every field consumed by OpenENV policies. Use this as the reference when:
6
+
7
+ 1. Building a new environment that targets these policies
8
+ 2. Writing a bridge/adapter from a different simulator
9
+ 3. Implementing a new policy that must interoperate with the existing set
10
+
11
+ All policies share the same raw observation layout (EGO + ticket matrix).
12
+ Specialized policies (ThreatAvoidance, SystemFailure) select subsets internally.
13
+
14
+ Example usage:
15
+ from openenv.policies.policy_spec import ObsSpec, ActionSpec, validate_obs
16
+
17
+ spec = ObsSpec()
18
+ obs = my_env.get_observation()
19
+ validate_obs(obs, spec) # raises ValueError on shape/range mismatch
20
+ """
21
+
22
+ from __future__ import annotations
23
+
24
+ from dataclasses import dataclass, field
25
+ from typing import Any, Dict, List, Optional, Tuple
26
+
27
+ import numpy as np
28
+
29
+
30
+ # ── Ego state specification ──────────────────────────────────────────────────
31
+
32
+ EGO_STATE_DIM = 11
33
+
34
+ @dataclass(frozen=True)
35
+ class EgoField:
36
+ """Description of a single ego state field."""
37
+ index: int
38
+ name: str
39
+ unit: str
40
+ raw_range: Tuple[float, float] # physical range before normalization
41
+ norm_divisor: float # obs_value = raw_value / norm_divisor
42
+ description: str
43
+
44
+ EGO_FIELDS: List[EgoField] = [
45
+ EgoField(0, "x", "m", (-5000, 5000), 1000.0, "Forward displacement from episode start"),
46
+ EgoField(1, "y", "m", (-6.0, 6.0), 3.7, "Lateral displacement (0 = lane center, + = left)"),
47
+ EgoField(2, "z", "m", (-10, 10), 10.0, "Vertical position (flat road = 0)"),
48
+ EgoField(3, "vx", "m/s", (-20, 20), 20.0, "Forward velocity in world frame"),
49
+ EgoField(4, "vy", "m/s", (-20, 20), 20.0, "Lateral velocity in world frame"),
50
+ EgoField(5, "vz", "m/s", (0, 0), 1.0, "Vertical velocity (always 0 on flat road)"),
51
+ EgoField(6, "heading_sin", "rad", (-1, 1), 1.0, "sin(heading angle), 0 = forward"),
52
+ EgoField(7, "heading_cos", "rad", (-1, 1), 1.0, "cos(heading angle), 1 = forward"),
53
+ EgoField(8, "speed", "m/s", (0, 20), 20.0, "Scalar speed = sqrt(vx^2 + vy^2)"),
54
+ EgoField(9, "steer", "norm", (-1, 1), 1.0, "Current steering command [-1=full left, 1=full right]"),
55
+ EgoField(10, "net_drive", "norm", (-1, 1), 1.0, "throttle - brake [-1=full brake, 1=full throttle]"),
56
+ ]
57
+
58
+
59
+ # ── Ticket vector specification ──────────────────────────────────────────────
60
+
61
+ TICKET_VECTOR_DIM = 37 # 18 fixed + 14 type one-hot + 5 entity one-hot
62
+ MAX_TICKETS = 16
63
+
64
+ # Ticket types (14 total) β€” one-hot encoded starting at index 18
65
+ TICKET_TYPES = [
66
+ "collision_risk", "sudden_brake", "side_impact", "head_on",
67
+ "merge_cut", "rear_end_risk",
68
+ "pedestrian_crossing", "cyclist_lane",
69
+ "tire_blowout", "brake_fade", "steering_loss", "sensor_occlusion",
70
+ "road_hazard", "weather_visibility",
71
+ ]
72
+
73
+ # Entity types (5 total) β€” one-hot encoded after ticket types
74
+ ENTITY_TYPES = ["vehicle", "pedestrian", "cyclist", "obstacle", "system"]
75
+
76
+ # Verify dimension
77
+ assert 18 + len(TICKET_TYPES) + len(ENTITY_TYPES) == TICKET_VECTOR_DIM, (
78
+ f"Ticket vector dim mismatch: 18 + {len(TICKET_TYPES)} + {len(ENTITY_TYPES)} "
79
+ f"!= {TICKET_VECTOR_DIM}"
80
+ )
81
+
82
+ @dataclass(frozen=True)
83
+ class TicketField:
84
+ """Description of a single ticket vector field."""
85
+ offset: int # index within the TICKET_VECTOR_DIM vector
86
+ length: int # number of floats
87
+ name: str
88
+ unit: str
89
+ raw_range: Tuple[float, float]
90
+ norm_divisor: float
91
+ description: str
92
+
93
+ TICKET_FIELDS: List[TicketField] = [
94
+ TicketField(0, 1, "severity_weight", "norm", (0, 1), 1.0, "Severity: 0.25=LOW, 0.5=MED, 0.75=HIGH, 1.0=CRITICAL"),
95
+ TicketField(1, 1, "ttl_norm", "s", (0, 10), 10.0, "Time-to-live remaining, clamped to [0,1]"),
96
+ TicketField(2, 1, "pos_x", "m", (-100, 100), 100.0, "Ego-relative X (forward positive)"),
97
+ TicketField(3, 1, "pos_y", "m", (-50, 50), 50.0, "Ego-relative Y (left positive)"),
98
+ TicketField(4, 1, "pos_z", "m", (-10, 10), 10.0, "Ego-relative Z (up positive)"),
99
+ TicketField(5, 1, "vel_x", "m/s", (-30, 30), 30.0, "Entity velocity X in world frame"),
100
+ TicketField(6, 1, "vel_y", "m/s", (-30, 30), 30.0, "Entity velocity Y in world frame"),
101
+ TicketField(7, 1, "vel_z", "m/s", (-10, 10), 10.0, "Entity velocity Z in world frame"),
102
+ TicketField(8, 1, "heading_sin", "rad", (-1, 1), 1.0, "sin(entity heading relative to ego)"),
103
+ TicketField(9, 1, "heading_cos", "rad", (-1, 1), 1.0, "cos(entity heading relative to ego)"),
104
+ TicketField(10, 1, "size_length", "m", (0, 10), 10.0, "Entity bounding box length"),
105
+ TicketField(11, 1, "size_width", "m", (0, 5), 5.0, "Entity bounding box width"),
106
+ TicketField(12, 1, "size_height", "m", (0, 4), 4.0, "Entity bounding box height"),
107
+ TicketField(13, 1, "distance_norm", "m", (0, 100), 100.0, "Euclidean distance to ego, clamped to [0,1]"),
108
+ TicketField(14, 1, "ttc_norm", "s", (0, 30), 30.0, "Time-to-collision, clamped to [0,1]. 1.0 = no collision"),
109
+ TicketField(15, 1, "bearing_sin", "rad", (-1, 1), 1.0, "sin(bearing angle from ego forward axis)"),
110
+ TicketField(16, 1, "bearing_cos", "rad", (-1, 1), 1.0, "cos(bearing angle from ego forward axis)"),
111
+ TicketField(17, 1, "confidence", "norm", (0, 1), 1.0, "Perception confidence [0=unreliable, 1=certain]"),
112
+ TicketField(18, len(TICKET_TYPES), "type_onehot", "bool", (0, 1), 1.0, "One-hot ticket type"),
113
+ TicketField(18 + len(TICKET_TYPES), len(ENTITY_TYPES), "entity_onehot", "bool", (0, 1), 1.0, "One-hot entity type"),
114
+ ]
115
+
116
+
117
+ # ── Full observation specification ───────────────────────────────────────────
118
+
119
+ OBS_DIM = EGO_STATE_DIM + MAX_TICKETS * TICKET_VECTOR_DIM # 11 + 16*37 = 603
120
+
121
+ @dataclass(frozen=True)
122
+ class ObsSpec:
123
+ """Complete observation space specification."""
124
+ ego_dim: int = EGO_STATE_DIM
125
+ ticket_dim: int = TICKET_VECTOR_DIM
126
+ max_tickets: int = MAX_TICKETS
127
+ total_dim: int = OBS_DIM
128
+ dtype: str = "float32"
129
+ value_range: Tuple[float, float] = (-1.0, 1.0)
130
+
131
+ # Layout: obs[0:ego_dim] = ego state
132
+ # obs[ego_dim:] reshaped to (max_tickets, ticket_dim)
133
+ # Tickets are sorted by severity desc, distance asc. Zero-padded rows = empty slots.
134
+
135
+
136
+ # ── Action specification ─────────────────────────────────────────────────────
137
+
138
+ @dataclass(frozen=True)
139
+ class ActionField:
140
+ index: int
141
+ name: str
142
+ raw_range: Tuple[float, float]
143
+ description: str
144
+
145
+ ACTION_DIM = 3
146
+
147
+ ACTION_FIELDS: List[ActionField] = [
148
+ ActionField(0, "steer", (-1.0, 1.0), "Steering command. -1=full left, +1=full right. Scaled by MAX_STEER=0.6 rad"),
149
+ ActionField(1, "throttle", (-1.0, 1.0), "Throttle command. Only positive values used (clipped to [0,1]). Scaled by MAX_ACCEL=4.0 m/s^2"),
150
+ ActionField(2, "brake", (-1.0, 1.0), "Brake command. Only positive values used (clipped to [0,1]). Scaled by MAX_BRAKE=8.0 m/s^2"),
151
+ ]
152
+
153
+ @dataclass(frozen=True)
154
+ class ActionSpec:
155
+ """Action space specification."""
156
+ dim: int = ACTION_DIM
157
+ dtype: str = "float32"
158
+ value_range: Tuple[float, float] = (-1.0, 1.0)
159
+
160
+
161
+ # ── Policy input requirements ────────────────────────────────────────────────
162
+
163
+ @dataclass(frozen=True)
164
+ class PolicyInputSpec:
165
+ """Describes what a specific policy reads from the observation."""
166
+ name: str
167
+ reads_ego: bool
168
+ ego_indices: Tuple[int, ...] # which ego fields are used
169
+ reads_tickets: bool
170
+ ticket_filter: Optional[str] # None = all, or "kinematic" / "failure"
171
+ max_tickets_used: int # how many ticket slots the policy actually reads
172
+ requires_history: bool # whether GRU/recurrent hidden state is needed
173
+ description: str
174
+
175
+ POLICY_SPECS: Dict[str, PolicyInputSpec] = {
176
+ "SurvivalPolicy": PolicyInputSpec(
177
+ name="SurvivalPolicy",
178
+ reads_ego=True,
179
+ ego_indices=tuple(range(EGO_STATE_DIM)),
180
+ reads_tickets=False,
181
+ ticket_filter=None,
182
+ max_tickets_used=0,
183
+ requires_history=False,
184
+ description="Stage 1 baseline. Reads only ego state (first 11 dims). "
185
+ "Ticket portion of obs is ignored entirely.",
186
+ ),
187
+ "FlatMLPPolicy": PolicyInputSpec(
188
+ name="FlatMLPPolicy",
189
+ reads_ego=True,
190
+ ego_indices=tuple(range(EGO_STATE_DIM)),
191
+ reads_tickets=True,
192
+ ticket_filter=None,
193
+ max_tickets_used=MAX_TICKETS,
194
+ requires_history=False,
195
+ description="Sanity-check baseline. Reads full flat observation (ego + all tickets "
196
+ "concatenated). No attention or structure.",
197
+ ),
198
+ "TicketAttentionPolicy": PolicyInputSpec(
199
+ name="TicketAttentionPolicy",
200
+ reads_ego=True,
201
+ ego_indices=tuple(range(EGO_STATE_DIM)),
202
+ reads_tickets=True,
203
+ ticket_filter=None,
204
+ max_tickets_used=MAX_TICKETS,
205
+ requires_history=False,
206
+ description="Main policy (Stage 2+). Cross-attention: ego queries ticket set. "
207
+ "Order-invariant over tickets. Padding mask on zero-rows.",
208
+ ),
209
+ "ThreatAvoidancePolicy": PolicyInputSpec(
210
+ name="ThreatAvoidancePolicy",
211
+ reads_ego=True,
212
+ ego_indices=tuple(range(EGO_STATE_DIM)),
213
+ reads_tickets=True,
214
+ ticket_filter="kinematic",
215
+ max_tickets_used=1,
216
+ requires_history=False,
217
+ description="Specialist for kinematic threats (collision_risk, sudden_brake, "
218
+ "side_impact, head_on, merge_cut, rear_end_risk). Extracts the "
219
+ "highest-severity kinematic ticket and gates between brake/evade branches.",
220
+ ),
221
+ "SystemFailurePolicy": PolicyInputSpec(
222
+ name="SystemFailurePolicy",
223
+ reads_ego=True,
224
+ ego_indices=tuple(range(EGO_STATE_DIM)),
225
+ reads_tickets=True,
226
+ ticket_filter="failure",
227
+ max_tickets_used=1,
228
+ requires_history=False,
229
+ description="Specialist for onboard failures (tire_blowout, brake_fade, steering_loss). "
230
+ "Mixture-of-experts with one expert per failure type. Initialized with "
231
+ "domain-correct response priors.",
232
+ ),
233
+ "RecurrentPolicy": PolicyInputSpec(
234
+ name="RecurrentPolicy",
235
+ reads_ego=True,
236
+ ego_indices=tuple(range(EGO_STATE_DIM)),
237
+ reads_tickets=True,
238
+ ticket_filter=None,
239
+ max_tickets_used=MAX_TICKETS,
240
+ requires_history=True,
241
+ description="GRU-based policy for partial observability (Stage 4+). Carries hidden "
242
+ "state across timesteps. Requires h_prev to be tracked by caller.",
243
+ ),
244
+ }
245
+
246
+
247
+ # ── Validation helpers ───────────────────────────────────────────────────────
248
+
249
+ def validate_obs(obs: np.ndarray, spec: Optional[ObsSpec] = None) -> None:
250
+ """
251
+ Validate an observation array against the spec.
252
+ Raises ValueError with a descriptive message on any mismatch.
253
+ """
254
+ spec = spec or ObsSpec()
255
+ if obs.ndim != 1:
256
+ raise ValueError(f"Observation must be 1D, got shape {obs.shape}")
257
+ if obs.shape[0] != spec.total_dim:
258
+ raise ValueError(
259
+ f"Observation dim mismatch: expected {spec.total_dim}, got {obs.shape[0]}. "
260
+ f"Check ego_dim ({spec.ego_dim}) + max_tickets ({spec.max_tickets}) "
261
+ f"* ticket_dim ({spec.ticket_dim})"
262
+ )
263
+ if obs.dtype != np.float32:
264
+ raise ValueError(f"Observation dtype must be float32, got {obs.dtype}")
265
+
266
+
267
+ def validate_action(action: np.ndarray) -> None:
268
+ """Validate an action array."""
269
+ if action.shape != (ACTION_DIM,):
270
+ raise ValueError(f"Action shape mismatch: expected ({ACTION_DIM},), got {action.shape}")
271
+ if np.any(action < -1.0) or np.any(action > 1.0):
272
+ raise ValueError(f"Action values must be in [-1, 1], got min={action.min()}, max={action.max()}")
273
+
274
+
275
+ def build_obs(
276
+ ego_x: float, ego_y: float, ego_z: float,
277
+ ego_vx: float, ego_vy: float,
278
+ heading: float, speed: float,
279
+ steer: float, throttle: float, brake: float,
280
+ ticket_vectors: Optional[np.ndarray] = None,
281
+ max_tickets: int = MAX_TICKETS,
282
+ ) -> np.ndarray:
283
+ """
284
+ Build a valid observation vector from raw values.
285
+
286
+ This is the primary entry point for external environments that want to
287
+ produce observations compatible with OpenENV policies.
288
+
289
+ Parameters
290
+ ----------
291
+ ego_x : forward displacement from episode start (metres)
292
+ ego_y : lateral displacement from lane center (metres, + = left)
293
+ ego_z : vertical position (metres)
294
+ ego_vx : forward velocity (m/s)
295
+ ego_vy : lateral velocity (m/s)
296
+ heading : heading angle (radians, 0 = forward)
297
+ speed : scalar speed (m/s)
298
+ steer : current steering command [-1, 1]
299
+ throttle : current throttle command [0, 1]
300
+ brake : current brake command [0, 1]
301
+ ticket_vectors : (N, TICKET_VECTOR_DIM) array of ticket vectors, or None.
302
+ Use EventTicket.to_vector() or build_ticket_vector() to create these.
303
+ max_tickets : number of ticket slots (must match policy expectation, default 16)
304
+
305
+ Returns
306
+ -------
307
+ obs : np.ndarray of shape (EGO_STATE_DIM + max_tickets * TICKET_VECTOR_DIM,)
308
+ """
309
+ import math
310
+
311
+ ego = np.array([
312
+ ego_x / 1000.0,
313
+ ego_y / 3.7, # ROAD_HALF_WIDTH
314
+ ego_z / 10.0,
315
+ ego_vx / 20.0, # MAX_SPEED
316
+ ego_vy / 20.0,
317
+ 0.0, # vz (flat road)
318
+ math.sin(heading),
319
+ math.cos(heading),
320
+ speed / 20.0,
321
+ steer,
322
+ throttle - brake, # net drive signal
323
+ ], dtype=np.float32)
324
+
325
+ ticket_matrix = np.zeros((max_tickets, TICKET_VECTOR_DIM), dtype=np.float32)
326
+ if ticket_vectors is not None:
327
+ n = min(len(ticket_vectors), max_tickets)
328
+ ticket_matrix[:n] = ticket_vectors[:n]
329
+
330
+ return np.concatenate([ego, ticket_matrix.flatten()])
331
+
332
+
333
+ def build_ticket_vector(
334
+ severity_weight: float,
335
+ ttl: float,
336
+ pos_x: float, pos_y: float, pos_z: float,
337
+ vel_x: float, vel_y: float, vel_z: float,
338
+ heading: float,
339
+ size_length: float, size_width: float, size_height: float,
340
+ distance: float,
341
+ time_to_collision: Optional[float],
342
+ bearing: float,
343
+ ticket_type: str,
344
+ entity_type: str,
345
+ confidence: float = 1.0,
346
+ ) -> np.ndarray:
347
+ """
348
+ Build a single ticket vector from raw values without needing the full
349
+ EventTicket class. Use this when adapting a different simulator.
350
+
351
+ Parameters
352
+ ----------
353
+ severity_weight : 0.25 (LOW), 0.5 (MEDIUM), 0.75 (HIGH), 1.0 (CRITICAL)
354
+ ttl : seconds remaining until ticket expires
355
+ pos_x/y/z : ego-relative position (metres)
356
+ vel_x/y/z : entity velocity in world frame (m/s)
357
+ heading : entity heading relative to ego (radians)
358
+ size_length/width/height : entity bounding box (metres)
359
+ distance : euclidean distance to ego (metres)
360
+ time_to_collision : seconds until collision, or None if no collision course
361
+ bearing : angle from ego forward axis (radians)
362
+ ticket_type : one of TICKET_TYPES (e.g., "collision_risk")
363
+ entity_type : one of ENTITY_TYPES (e.g., "vehicle")
364
+ confidence : perception confidence [0, 1]
365
+
366
+ Returns
367
+ -------
368
+ vec : np.ndarray of shape (TICKET_VECTOR_DIM,) = (37,)
369
+ """
370
+ import math
371
+
372
+ ttc_norm = min((time_to_collision if time_to_collision is not None else 30.0) / 30.0, 1.0)
373
+
374
+ type_oh = [0.0] * len(TICKET_TYPES)
375
+ entity_oh = [0.0] * len(ENTITY_TYPES)
376
+
377
+ if ticket_type in TICKET_TYPES:
378
+ type_oh[TICKET_TYPES.index(ticket_type)] = 1.0
379
+ else:
380
+ raise ValueError(f"Unknown ticket_type '{ticket_type}'. Must be one of {TICKET_TYPES}")
381
+
382
+ if entity_type in ENTITY_TYPES:
383
+ entity_oh[ENTITY_TYPES.index(entity_type)] = 1.0
384
+ else:
385
+ raise ValueError(f"Unknown entity_type '{entity_type}'. Must be one of {ENTITY_TYPES}")
386
+
387
+ vec = [
388
+ severity_weight,
389
+ min(ttl / 10.0, 1.0),
390
+ pos_x / 100.0,
391
+ pos_y / 50.0,
392
+ pos_z / 10.0,
393
+ vel_x / 30.0,
394
+ vel_y / 30.0,
395
+ vel_z / 10.0,
396
+ math.sin(heading),
397
+ math.cos(heading),
398
+ size_length / 10.0,
399
+ size_width / 5.0,
400
+ size_height / 4.0,
401
+ min(distance / 100.0, 1.0),
402
+ ttc_norm,
403
+ math.sin(bearing),
404
+ math.cos(bearing),
405
+ confidence,
406
+ *type_oh,
407
+ *entity_oh,
408
+ ]
409
+ return np.array(vec, dtype=np.float32)
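
Taken together, build_ticket_vector, build_obs, and validate_obs let an external simulator emit observations these policies accept. A short sketch using only the helpers defined above; all numeric values are made up for illustration, and the import path assumes the package is importable as overflow_env (as arranged by PYTHONPATH=/app in the Dockerfile):

    import numpy as np
    from overflow_env.policies.policy_spec import (
        OBS_DIM, build_obs, build_ticket_vector, validate_obs,
    )

    # One lead-vehicle ticket, 22 m ahead in the same lane.
    ticket = build_ticket_vector(
        severity_weight=0.75, ttl=4.0,
        pos_x=22.0, pos_y=0.0, pos_z=0.0,
        vel_x=8.0, vel_y=0.0, vel_z=0.0,
        heading=0.0,
        size_length=4.5, size_width=1.9, size_height=1.5,
        distance=22.0, time_to_collision=3.2, bearing=0.0,
        ticket_type="collision_risk", entity_type="vehicle",
    )

    obs = build_obs(
        ego_x=120.0, ego_y=0.3, ego_z=0.0,
        ego_vx=15.0, ego_vy=0.1,
        heading=0.02, speed=15.0,
        steer=0.05, throttle=0.4, brake=0.0,
        ticket_vectors=np.stack([ticket]),
    )
    validate_obs(obs)               # raises ValueError on shape/dtype mismatch
    assert obs.shape == (OBS_DIM,)  # 11 + 16 * 37 = 603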
policies/ticket_attention_policy.py ADDED
@@ -0,0 +1,227 @@
1
+ """
2
+ TicketAttentionPolicy β€” the main policy (Stage 2+).
3
+
4
+ Architecture: two-pass "reflective" cross-attention.
5
+
6
+ Pass 1: ego queries tickets β†’ raw threat context
7
+ Pass 2: (ego + raw context) queries tickets again β†’ refined context
8
+ This forces the policy to "think twice" β€” first perceive, then plan.
9
+
10
+ [ego | refined_context] β†’ steer head β†’ steer action
11
+ β†’ drive head β†’ throttle, brake
12
+ β†’ critic head β†’ value
13
+
14
+ Why two-pass:
15
+ The first pass gathers what threats exist. The second pass re-examines
16
+ tickets knowing what the overall threat picture looks like. This prevents
17
+ the impulsive single-shot responses that cause wild oscillation.
18
+
19
+ Why separate heads:
20
+ Steering requires smooth, conservative output (off-road = death).
21
+ Throttle/brake can be more aggressive. Separate heads + separate
22
+ noise levels let each dimension learn at its own pace.
23
+ """
24
+
25
+ from __future__ import annotations
26
+
27
+ import torch
28
+ import torch.nn as nn
29
+ import torch.nn.functional as F
30
+
31
+ from .base_policy import BasePolicy
32
+ EGO_STATE_DIM = 11
33
+ MAX_TICKETS = 16
34
+ TICKET_VECTOR_DIM = 37
35
+
36
+
37
+ class TicketAttentionPolicy(BasePolicy):
38
+ """
39
+ Two-pass reflective attention policy.
40
+
41
+ Pass 1: perceive β€” what threats exist?
42
+ Pass 2: plan β€” given what I see, which threats matter most?
43
+ Output: separate steer head (conservative) + drive head (throttle/brake)
44
+ """
45
+
46
+ def __init__(
47
+ self,
48
+ obs_dim: int,
49
+ ego_embed: int = 64,
50
+ ticket_embed: int = 64,
51
+ n_heads: int = 4,
52
+ hidden: int = 256,
53
+ ):
54
+ super().__init__(obs_dim)
55
+ assert ego_embed % n_heads == 0
56
+ assert ticket_embed == ego_embed
57
+
58
+ self.ego_embed = ego_embed
59
+ self.max_tickets = MAX_TICKETS
60
+ self.ticket_dim = TICKET_VECTOR_DIM
61
+
62
+ # ── Encoders ──────────────────────────────────────────────────────
63
+ self.ego_encoder = nn.Sequential(
64
+ nn.Linear(EGO_STATE_DIM, hidden // 2),
65
+ nn.LayerNorm(hidden // 2),
66
+ nn.Tanh(),
67
+ nn.Linear(hidden // 2, ego_embed),
68
+ nn.LayerNorm(ego_embed),
69
+ )
70
+ self.ticket_encoder = nn.Sequential(
71
+ nn.Linear(TICKET_VECTOR_DIM, hidden // 2),
72
+ nn.LayerNorm(hidden // 2),
73
+ nn.ReLU(),
74
+ nn.Linear(hidden // 2, ticket_embed),
75
+ nn.LayerNorm(ticket_embed),
76
+ )
77
+
78
+ # ── Pass 1: perceive (ego queries tickets) ───────────────────────
79
+ self.attn_pass1 = nn.MultiheadAttention(
80
+ embed_dim=ego_embed, num_heads=n_heads,
81
+ dropout=0.0, batch_first=True,
82
+ )
83
+ self.norm1 = nn.LayerNorm(ego_embed)
84
+
85
+ # ── Reflection gate: fuse ego + pass1 context for second query ───
86
+ self.reflect_proj = nn.Sequential(
87
+ nn.Linear(ego_embed * 2, ego_embed),
88
+ nn.LayerNorm(ego_embed),
89
+ nn.Tanh(),
90
+ )
91
+
92
+ # ── Pass 2: plan (refined query re-attends to tickets) ───────────
93
+ self.attn_pass2 = nn.MultiheadAttention(
94
+ embed_dim=ego_embed, num_heads=n_heads,
95
+ dropout=0.0, batch_first=True,
96
+ )
97
+ self.norm2 = nn.LayerNorm(ego_embed)
98
+
99
+ # ── Fused representation ─────────────────────────────────────────
100
+ fused_dim = ego_embed + ego_embed # ego + refined context
101
+
102
+ # ── Steer head (conservative, smooth output) ─────────────────────
103
+ self.steer_head = nn.Sequential(
104
+ nn.Linear(fused_dim, hidden // 2),
105
+ nn.LayerNorm(hidden // 2),
106
+ nn.Tanh(),
107
+ nn.Linear(hidden // 2, hidden // 4),
108
+ nn.Tanh(),
109
+ nn.Linear(hidden // 4, 1),
110
+ nn.Tanh(),
111
+ )
112
+
113
+ # ── Drive head (throttle + brake) ────────────────────────────────
114
+ self.drive_head = nn.Sequential(
115
+ nn.Linear(fused_dim, hidden // 2),
116
+ nn.LayerNorm(hidden // 2),
117
+ nn.Tanh(),
118
+ nn.Linear(hidden // 2, hidden // 4),
119
+ nn.Tanh(),
120
+ nn.Linear(hidden // 4, 2),
121
+ nn.Tanh(),
122
+ )
123
+
124
+ # ── Critic head ──────────────────────────────────────────────────
125
+ self.critic = nn.Sequential(
126
+ nn.Linear(fused_dim, hidden),
127
+ nn.LayerNorm(hidden),
128
+ nn.Tanh(),
129
+ nn.Linear(hidden, hidden // 2),
130
+ nn.Tanh(),
131
+ nn.Linear(hidden // 2, 1),
132
+ )
133
+
134
+ self._init_weights()
135
+
136
+ def _init_weights(self):
137
+ for m in self.modules():
138
+ if isinstance(m, nn.Linear):
139
+ nn.init.orthogonal_(m.weight, gain=1.0)
140
+ if m.bias is not None:
141
+ nn.init.zeros_(m.bias)
142
+ # Very small initial actions β€” start by doing almost nothing
143
+ nn.init.orthogonal_(self.steer_head[-2].weight, gain=0.01)
144
+ nn.init.orthogonal_(self.drive_head[-2].weight, gain=0.01)
145
+ # Critic starts near zero
146
+ nn.init.orthogonal_(self.critic[-1].weight, gain=0.1)
147
+
148
+ def _attend(self, attn_module, norm_module, query, tk_emb, is_padding, all_empty):
149
+ """Run one attention pass with NaN-safe masking."""
150
+ B = query.shape[0]
151
+ q = query if query.dim() == 3 else query.unsqueeze(1)
152
+
153
+ if all_empty.all():
154
+ return torch.zeros(B, self.ego_embed, device=query.device)
155
+
156
+ safe_mask = is_padding.clone()
157
+ safe_mask[all_empty, 0] = False
158
+ attn_out, _ = attn_module(
159
+ query=q, key=tk_emb, value=tk_emb,
160
+ key_padding_mask=safe_mask,
161
+ )
162
+ context = attn_out.squeeze(1)
163
+ context[all_empty] = 0.0
164
+ return norm_module(context)
165
+
166
+ def forward(self, obs: torch.Tensor):
167
+ B = obs.shape[0]
168
+
169
+ # Split observation
170
+ ego_raw = obs[:, :EGO_STATE_DIM]
171
+ tk_raw = obs[:, EGO_STATE_DIM:].view(B, self.max_tickets, self.ticket_dim)
172
+
173
+ # Encode
174
+ ego_emb = self.ego_encoder(ego_raw)
175
+ tk_emb = self.ticket_encoder(tk_raw)
176
+
177
+ # Padding mask
178
+ is_padding = (tk_raw.abs().sum(dim=-1) == 0)
179
+ all_empty = is_padding.all(dim=-1)
180
+
181
+ # ── Pass 1: perceive ─────────────────────────────────────────────
182
+ ctx1 = self._attend(self.attn_pass1, self.norm1,
183
+ ego_emb, tk_emb, is_padding, all_empty)
184
+
185
+ # ── Reflect: combine ego + initial context into refined query ────
186
+ reflected = self.reflect_proj(torch.cat([ego_emb, ctx1], dim=-1))
187
+
188
+ # ── Pass 2: plan (re-attend with richer query) ───────────────────
189
+ ctx2 = self._attend(self.attn_pass2, self.norm2,
190
+ reflected, tk_emb, is_padding, all_empty)
191
+
192
+ # ── Fuse and decode ──────────────────────────────────────────────
193
+ fused = torch.cat([ego_emb, ctx2], dim=-1)
194
+
195
+ steer = self.steer_head(fused) # (B, 1)
196
+ drive = self.drive_head(fused) # (B, 2)
197
+ action = torch.cat([steer, drive], dim=-1) # (B, 3)
198
+ value = self.critic(fused) # (B, 1)
199
+
200
+ return action, value
201
+
202
+ def get_attention_weights(self, obs: torch.Tensor) -> torch.Tensor:
203
+ """Returns pass-2 attention weights for interpretability."""
204
+ B = obs.shape[0]
205
+ ego_raw = obs[:, :EGO_STATE_DIM]
206
+ tk_raw = obs[:, EGO_STATE_DIM:].view(B, self.max_tickets, self.ticket_dim)
207
+ ego_emb = self.ego_encoder(ego_raw)
208
+ tk_emb = self.ticket_encoder(tk_raw)
209
+ is_padding = (tk_raw.abs().sum(dim=-1) == 0)
210
+ all_empty = is_padding.all(dim=-1)
211
+
212
+ # Pass 1
213
+ ctx1 = self._attend(self.attn_pass1, self.norm1,
214
+ ego_emb, tk_emb, is_padding, all_empty)
215
+ reflected = self.reflect_proj(torch.cat([ego_emb, ctx1], dim=-1))
216
+
217
+ # Pass 2 β€” get weights
218
+ safe_mask = is_padding.clone()
219
+ safe_mask[all_empty, 0] = False
220
+ query = reflected.unsqueeze(1)
221
+ _, weights = self.attn_pass2(
222
+ query=query, key=tk_emb, value=tk_emb,
223
+ key_padding_mask=safe_mask,
224
+ need_weights=True, average_attn_weights=False,
225
+ )
226
+ weights[all_empty] = 0.0
227
+ return weights
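
As a quick smoke test of the policy classes added above (shapes follow policy_spec; the all-zero observation is only a placeholder, and the overflow_env import path is assumed as in the Dockerfile):

    import numpy as np
    import torch
    from overflow_env.policies import TicketAttentionPolicy

    OBS_DIM = 11 + 16 * 37  # ego state + 16 ticket slots of 37 dims = 603

    policy = TicketAttentionPolicy(obs_dim=OBS_DIM)

    # predict() is the numpy rollout path inherited from BasePolicy.
    obs = np.zeros(OBS_DIM, dtype=np.float32)
    action = policy.predict(obs, deterministic=True)
    print(action.shape)  # (3,) -> [steer, throttle, brake]

    # forward() returns (action_mean, value) for a batch.
    batch = torch.zeros(4, OBS_DIM)
    action_mean, value = policy(batch)
    print(action_mean.shape, value.shape)  # torch.Size([4, 3]) torch.Size([4, 1])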
pyproject.toml CHANGED
@@ -13,6 +13,14 @@ dependencies = [
     "pydantic>=2.0.0",
     "uvicorn[standard]>=0.24.0",
     "requests>=2.31.0",
+    "torch>=2.10.0",
+    "numpy>=2.2.6",
+    "pillow>=12.1.1",
+    "gymnasium>=1.2.3",
+    "matplotlib>=3.10.8",
+    "gradio>=6.9.0",
+    "uvloop>=0.22.1",
+    "pyaudioop",
 ]

 [project.optional-dependencies]
requirements.txt ADDED
@@ -0,0 +1,9 @@
+--extra-index-url https://download.pytorch.org/whl/cpu
+torch==2.5.1+cpu
+numpy>=1.24.0
+pillow==10.4.0
+matplotlib>=3.8.0
+pydantic>=2.0.0
+requests>=2.31.0
+gymnasium>=0.29.0
+pyaudioop>=0.1.0
server/app.py CHANGED
@@ -1,44 +1,882 @@
1
  """
2
- FastAPI application for the Overflow Environment.
3
 
4
- Exposes the OverflowEnvironment over HTTP and WebSocket endpoints.
5
 
6
- Usage:
7
- uvicorn server.app:app --reload --host 0.0.0.0 --port 8000
  """
9
 
10
- import inspect
11
 
12
- from openenv.core.env_server.http_server import create_app
13
 
14
- from ..models import OverflowAction, OverflowObservation
15
- from .overflow_environment import OverflowEnvironment
17
 
18
- def _create_overflow_app():
19
- """Build app across create_app variants that may expect a factory or an instance."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  try:
21
- first_param = next(iter(inspect.signature(create_app).parameters.values()))
22
- annotation_text = str(first_param.annotation)
23
- except (StopIteration, TypeError, ValueError):
24
- annotation_text = "typing.Callable"
25
 
26
- expects_instance = (
27
- "Environment" in annotation_text and "Callable" not in annotation_text
28
- )
29
- env_arg = OverflowEnvironment() if expects_instance else OverflowEnvironment
30
- return create_app(
31
- env_arg, OverflowAction, OverflowObservation, env_name="overflow_env"
32
- )
33
 
 
34
 
35
- app = _create_overflow_app()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
 
38
  def main():
39
- """Entry point for direct execution via uv run or python -m."""
40
  import uvicorn
41
-
42
  uvicorn.run(app, host="0.0.0.0", port=8000)
43
 
44
 
 
1
  """
2
+ Overflow OpenENV β€” Continuous PPO Training + Live Web Dashboard
3
 
4
+ Runs PPO training in a background thread and exposes:
5
+ GET / β†’ live HTML dashboard (road animation + reward charts)
6
+ GET /api/state β†’ JSON snapshot of current training state
7
+ GET /api/stream β†’ SSE stream of state updates
8
+ POST /api/mode β†’ switch reward mode {capped, uncapped}
9
 
10
+ Reward modes:
11
+ capped β€” standard shaped reward, capped at REWARD_CAP per step
12
+ uncapped β€” base reward + token_bonus (scales with LLM reasoning token count)
13
+ frontier models that produce more reasoning tokens earn more reward
14
  """
15
 
16
+ from __future__ import annotations
17
 
18
+ import asyncio
19
+ import json
20
+ import math
21
+ import sys
22
+ import threading
23
+ import time
24
+ from collections import deque
25
+ from dataclasses import asdict, dataclass, field
26
+ from pathlib import Path
27
+ from typing import Any, Deque, Dict, List, Optional
28
 
29
+ import numpy as np
30
+ import torch
31
 
32
+ # ── Absolute-import fallback ──────────────────────────────────────────────────
33
+ try:
34
+ from ..training.overflow_gym_env import OverflowGymEnv, _obs_to_vector, _action_to_decision
35
+ from ..training.curriculum import CurriculumManager, STAGES
36
+ from ..training.reward import compute_reward, compute_episode_bonus, W_COLLISION
37
+ from ..training.ppo_trainer import PPOTrainer, RolloutBuffer
38
+ from ..policies.flat_mlp_policy import FlatMLPPolicy
39
+ from ..policies.ticket_attention_policy import TicketAttentionPolicy
40
+ from ..policies.policy_spec import OBS_DIM
41
+ except ImportError:
42
+ sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
43
+ from training.overflow_gym_env import OverflowGymEnv, _obs_to_vector, _action_to_decision
44
+ from training.curriculum import CurriculumManager, STAGES
45
+ from training.reward import compute_reward, compute_episode_bonus, W_COLLISION
46
+ from training.ppo_trainer import PPOTrainer, RolloutBuffer
47
+ from policies.flat_mlp_policy import FlatMLPPolicy
48
+ from policies.ticket_attention_policy import TicketAttentionPolicy
49
+ from policies.policy_spec import OBS_DIM
50
 
51
+ from fastapi import FastAPI
52
+ from fastapi.responses import HTMLResponse, JSONResponse, StreamingResponse
53
+ from pydantic import BaseModel
54
+
55
+ # ── Reward mode config ────────────────────────────────────────────────────────
56
+
57
+ REWARD_CAP = 2.0 # per-step ceiling in capped mode
58
+ TOKEN_SCALE = 0.001 # uncapped: reward += tokens * TOKEN_SCALE
59
+ MAX_TOKEN_BONUS = 5.0 # uncapped mode bonus ceiling per step
60
+
61
+ # ── Shared training state (updated by training thread, read by API) ───────────
62
+
63
+ @dataclass
64
+ class CarSnapshot:
65
+ car_id: int
66
+ x: float
67
+ y: float
68
+ lane: int
69
+ speed: float
70
+
71
+ @dataclass
72
+ class EpisodeRecord:
73
+ episode: int
74
+ steps: int
75
+ reward: float
76
+ outcome: str # "crash" | "goal" | "timeout"
77
+ stage: int
78
+ reward_mode: str
79
+
80
+ @dataclass
81
+ class TrainingState:
82
+ # Current frame
83
+ cars: List[CarSnapshot] = field(default_factory=list)
84
+ ego_x: float = 0.0
85
+ ego_lane: int = 2
86
+ # Live metrics
87
+ total_steps: int = 0
88
+ n_updates: int = 0
89
+ n_episodes: int = 0
90
+ episode_reward: float = 0.0
91
+ episode_steps: int = 0
92
+ mean_reward_100: float = 0.0
93
+ mean_ep_len: float = 0.0
94
+ stage: int = 1
95
+ stage_name: str = "Survival"
96
+ reward_mode: str = "capped"
97
+ steps_per_sec: float = 0.0
98
+ # History (capped at 500 for the chart)
99
+ reward_history: List[float] = field(default_factory=list)
100
+ episode_history: List[Dict] = field(default_factory=list)
101
+ # PPO update metrics
102
+ last_pg_loss: float = 0.0
103
+ last_vf_loss: float = 0.0
104
+ last_entropy: float = 0.0
105
+ running: bool = True
106
+ error: Optional[str] = None
107
+
108
+
109
+ _state = TrainingState()
110
+ _state_lock = threading.Lock()
111
+ _sse_queue: Deque[str] = deque(maxlen=50)
112
+
113
+ # ── Reward mode switch (thread-safe) ─────────────────────────────────────────
114
+
115
+ def get_reward_mode() -> str:
116
+ with _state_lock:
117
+ return _state.reward_mode
118
+
119
+ def set_reward_mode(mode: str) -> None:
120
+ with _state_lock:
121
+ _state.reward_mode = mode
122
+
123
+ def apply_reward_mode(base_reward: float, token_count: int = 0) -> float:
124
+ mode = get_reward_mode()
125
+ if mode == "uncapped":
126
+ bonus = min(token_count * TOKEN_SCALE, MAX_TOKEN_BONUS)
127
+ return base_reward + bonus
128
+ else:
129
+ return min(base_reward, REWARD_CAP) if base_reward > 0 else base_reward
130
+
131
+
132
+ # ── Training thread ───────────────────────────────────────────────────────────
133
+
134
+ def _push_sse(data: dict) -> None:
135
+ _sse_queue.append(json.dumps(data))
136
+
137
+ def _snapshot_cars(overflow_obs) -> List[CarSnapshot]:
138
+ snaps = []
139
+ if not overflow_obs or not overflow_obs.cars:
140
+ return snaps
141
+ for c in overflow_obs.cars:
142
+ snaps.append(CarSnapshot(
143
+ car_id=c.carId,
144
+ x=c.position.x,
145
+ y=c.position.y if hasattr(c.position, "y") else (c.lane - 2) * 3.7,
146
+ lane=c.lane,
147
+ speed=c.speed,
148
+ ))
149
+ return snaps
150
+
151
+ def _training_loop() -> None:
152
+ global _state
153
  try:
154
+ # Build policy + env + curriculum
155
+ policy = TicketAttentionPolicy(obs_dim=OBS_DIM)
156
+ env = OverflowGymEnv()
157
+ curriculum = CurriculumManager()
158
+
159
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
160
+ policy.to(device)
161
+
162
+ optimizer = torch.optim.Adam(policy.parameters(), lr=3e-4, eps=1e-5)
163
+
164
+ # PPO hyperparams
165
+ GAMMA = 0.99
166
+ GAE_LAMBDA = 0.95
167
+ CLIP = 0.2
168
+ ENT_COEF = 0.02
169
+ VF_COEF = 0.5
170
+ MAX_GRAD = 0.5
171
+ N_STEPS = 512
172
+ BATCH_SIZE = 128
173
+ N_EPOCHS = 6
174
+
175
+ buf = RolloutBuffer(N_STEPS, OBS_DIM, device)
176
+
177
+ ep_rewards: Deque[float] = deque(maxlen=100)
178
+ ep_lengths: Deque[int] = deque(maxlen=100)
179
+
180
+ obs, _ = env.reset()
181
+ ep_reward = 0.0
182
+ ep_steps = 0
183
+ total_steps = 0
184
+ n_updates = 0
185
+ n_episodes = 0
186
+ t0 = time.time()
187
+
188
+ while True:
189
+ buf.reset()
190
+ policy.eval()
191
+
192
+ # ── Collect rollout ──────────────────────────────────────────────
193
+ for _ in range(N_STEPS):
194
+ curriculum.step(env._sim_time)
195
+
196
+ obs_t = torch.as_tensor(obs, dtype=torch.float32, device=device)
197
+ with torch.no_grad():
198
+ act_mean, val = policy(obs_t.unsqueeze(0))
199
+ act_mean = act_mean.squeeze(0)
200
+ val = val.squeeze(0)
201
+
202
+ dist = torch.distributions.Normal(act_mean, torch.ones_like(act_mean) * 0.3)
203
+ action = dist.sample().clamp(-1, 1)
204
+ logp = dist.log_prob(action).sum()
205
+
206
+ next_obs, base_reward, term, trunc, info = env.step(action.cpu().numpy())
207
+
208
+ # Apply reward mode
209
+ reward = apply_reward_mode(base_reward)
210
+
211
+ buf.add(obs, action.cpu().numpy(), reward, float(val), float(logp), float(term or trunc))
212
+
213
+ obs = next_obs
214
+ ep_reward += reward
215
+ ep_steps += 1
216
+ total_steps += 1
217
+
218
+ # Update live car positions from OverflowEnvironment._cars
219
+ cars = []
220
+ overflow_env = env._env # OverflowEnvironment instance
221
+ if hasattr(overflow_env, "_cars"):
222
+ for c in overflow_env._cars:
223
+ cars.append(CarSnapshot(
224
+ car_id=c.car_id,
225
+ x=c.position,
226
+ y=(c.lane - 2) * 3.7,
227
+ lane=c.lane,
228
+ speed=c.speed,
229
+ ))
230
+
231
+ with _state_lock:
232
+ _state.total_steps = total_steps
233
+ _state.episode_reward = ep_reward
234
+ _state.episode_steps = ep_steps
235
+ _state.steps_per_sec = total_steps / max(time.time() - t0, 1.0)
236
+ if cars:
237
+ _state.cars = cars
238
+ ego = next((c for c in cars if c.car_id == 0), None)
239
+ if ego:
240
+ _state.ego_x = ego.x
241
+ _state.ego_lane = ego.lane
242
+
243
+ if term or trunc:
244
+ bonus = compute_episode_bonus(
245
+ total_steps=ep_steps,
246
+ survived=not info.get("collision", False),
247
+ )
248
+ ep_reward += bonus
249
+ ep_rewards.append(ep_reward)
250
+ ep_lengths.append(ep_steps)
251
+ n_episodes += 1
252
+
253
+ advanced = curriculum.record_episode_reward(ep_reward)
254
+ outcome = (
255
+ "crash" if info.get("collision") else
256
+ ("goal" if info.get("goal_reached") else "timeout")
257
+ )
258
+
259
+ ep_rec = {
260
+ "episode": n_episodes,
261
+ "steps": ep_steps,
262
+ "reward": round(ep_reward, 3),
263
+ "outcome": outcome,
264
+ "stage": curriculum.current_stage,
265
+ "reward_mode": get_reward_mode(),
266
+ }
267
+
268
+ with _state_lock:
269
+ _state.n_episodes = n_episodes
270
+ _state.stage = curriculum.current_stage
271
+ _state.stage_name = curriculum.config.name
272
+ _state.mean_reward_100 = float(np.mean(ep_rewards))
273
+ _state.mean_ep_len = float(np.mean(ep_lengths))
274
+ _state.reward_history.append(round(ep_reward, 3))
275
+ if len(_state.reward_history) > 500:
276
+ _state.reward_history = _state.reward_history[-500:]
277
+ _state.episode_history.append(ep_rec)
278
+ if len(_state.episode_history) > 200:
279
+ _state.episode_history = _state.episode_history[-200:]
280
+
281
+ _push_sse({"type": "episode", "data": ep_rec})
282
+
283
+ obs, _ = env.reset()
284
+ ep_reward = 0.0
285
+ ep_steps = 0
286
+
287
+ # ── PPO update ───────────────────────────────────────────────────
288
+ with torch.no_grad():
289
+ obs_t = torch.as_tensor(obs, dtype=torch.float32, device=device)
290
+ _, last_val = policy(obs_t.unsqueeze(0))
291
+ buf.compute_returns(float(last_val), GAMMA, GAE_LAMBDA)
292
+
293
+ policy.train()
294
+
295
+ all_obs = buf.obs
296
+ all_acts = buf.acts
297
+ old_logp = buf.logp
298
+ adv = buf.ret - buf.val
299
+ adv = (adv - adv.mean()) / (adv.std() + 1e-8)
300
+ ret = buf.ret
301
+ old_val = buf.val
302
+ indices = torch.randperm(N_STEPS, device=device)
303
+
304
+ pg_losses, vf_losses, entropies = [], [], []
305
+
306
+ for _ in range(N_EPOCHS):
307
+ for start in range(0, N_STEPS, BATCH_SIZE):
308
+ idx = indices[start: start + BATCH_SIZE]
309
+ act_mean, val = policy(all_obs[idx])
310
+ val = val.squeeze(-1)
311
+
312
+ dist = torch.distributions.Normal(act_mean, torch.ones_like(act_mean) * 0.3)
313
+ logp = dist.log_prob(all_acts[idx]).sum(dim=-1)
314
+ entropy = dist.entropy().sum(dim=-1).mean()
315
+
316
+ ratio = torch.exp(logp - old_logp[idx])
317
+ pg_loss = torch.max(-adv[idx] * ratio, -adv[idx] * ratio.clamp(1 - CLIP, 1 + CLIP)).mean()
318
+
319
+ val_clip = old_val[idx] + (val - old_val[idx]).clamp(-CLIP, CLIP)
320
+ vf_loss = 0.5 * torch.max((val - ret[idx]) ** 2, (val_clip - ret[idx]) ** 2).mean()
321
+
322
+ loss = pg_loss + VF_COEF * vf_loss - ENT_COEF * entropy
323
+ optimizer.zero_grad()
324
+ loss.backward()
325
+ torch.nn.utils.clip_grad_norm_(policy.parameters(), MAX_GRAD)
326
+ optimizer.step()
327
+
328
+ pg_losses.append(float(pg_loss))
329
+ vf_losses.append(float(vf_loss))
330
+ entropies.append(float(entropy))
331
+
332
+ n_updates += 1
333
+ with _state_lock:
334
+ _state.n_updates = n_updates
335
+ _state.last_pg_loss = round(float(np.mean(pg_losses)), 5)
336
+ _state.last_vf_loss = round(float(np.mean(vf_losses)), 5)
337
+ _state.last_entropy = round(float(np.mean(entropies)), 5)
338
+
339
+ _push_sse({
340
+ "type": "update",
341
+ "data": {
342
+ "n_updates": n_updates,
343
+ "total_steps": total_steps,
344
+ "mean_reward": round(float(np.mean(ep_rewards)) if ep_rewards else 0.0, 3),
345
+ "stage": curriculum.current_stage,
346
+ "pg_loss": round(float(np.mean(pg_losses)), 5),
347
+ "vf_loss": round(float(np.mean(vf_losses)), 5),
348
+ "entropy": round(float(np.mean(entropies)), 5),
349
+ }
350
+ })
351
+
352
+ except Exception as exc:
353
+ import traceback
354
+ tb = traceback.format_exc()
355
+ with _state_lock:
356
+ _state.running = False
357
+ _state.error = f"{exc}\n\n{tb}"
358
+ print(f"[Training] ERROR: {exc}\n{tb}", flush=True)
359
+
360
 
361
+ # ── FastAPI app ───────────────────────────────────────────────────────────────
 
 
 
 
 
 
362
 
363
+ app = FastAPI(title="Overflow OpenENV")
364
 
365
+ @app.on_event("startup")
366
+ def _start_training():
367
+ t = threading.Thread(target=_training_loop, daemon=True)
368
+ t.start()
369
+
370
+
371
+ @app.get("/health")
372
+ def health():
373
+ return {"status": "ok"}
374
+
375
+
376
+ class ModeRequest(BaseModel):
377
+ mode: str # "capped" or "uncapped"
378
+
379
+ @app.post("/api/mode")
380
+ def set_mode(req: ModeRequest):
381
+ if req.mode not in ("capped", "uncapped"):
382
+ return JSONResponse({"error": "mode must be 'capped' or 'uncapped'"}, status_code=400)
383
+ set_reward_mode(req.mode)
384
+ return {"mode": req.mode}
385
+
386
+
387
+ @app.get("/api/state")
388
+ def get_state():
389
+ with _state_lock:
390
+ s = _state
391
+ return {
392
+ "total_steps": s.total_steps,
393
+ "n_updates": s.n_updates,
394
+ "n_episodes": s.n_episodes,
395
+ "episode_reward": round(s.episode_reward, 3),
396
+ "episode_steps": s.episode_steps,
397
+ "mean_reward": round(s.mean_reward_100, 3),
398
+ "mean_ep_len": round(s.mean_ep_len, 1),
399
+ "stage": s.stage,
400
+ "stage_name": s.stage_name,
401
+ "reward_mode": s.reward_mode,
402
+ "steps_per_sec": round(s.steps_per_sec, 1),
403
+ "pg_loss": s.last_pg_loss,
404
+ "vf_loss": s.last_vf_loss,
405
+ "entropy": s.last_entropy,
406
+ "reward_history": s.reward_history[-200:],
407
+ "episode_history": s.episode_history[-50:],
408
+ "cars": [asdict(c) for c in s.cars],
409
+ "ego_x": s.ego_x,
410
+ "ego_lane": s.ego_lane,
411
+ "running": s.running,
412
+ "error": s.error,
413
+ }
414
+
415
+
416
+ @app.get("/api/stream")
417
+ async def sse_stream():
418
+ async def generator():
419
+ last_idx = len(_sse_queue)
420
+ while True:
421
+ current = list(_sse_queue)
422
+ for msg in current[last_idx:]:
423
+ yield f"data: {msg}\n\n"
424
+ last_idx = len(current)
425
+ # Also send a heartbeat state snapshot every 2s
426
+ with _state_lock:
427
+ s = _state
428
+ snap = {
429
+ "type": "tick",
430
+ "data": {
431
+ "total_steps": s.total_steps,
432
+ "episode_reward": round(s.episode_reward, 3),
433
+ "episode_steps": s.episode_steps,
434
+ "stage": s.stage,
435
+ "stage_name": s.stage_name,
436
+ "reward_mode": s.reward_mode,
437
+ "cars": [asdict(c) for c in s.cars],
438
+ "ego_x": s.ego_x,
439
+ }
440
+ }
441
+ yield f"data: {json.dumps(snap)}\n\n"
442
+ await asyncio.sleep(0.5)
443
+
444
+ return StreamingResponse(generator(), media_type="text/event-stream")
445
+
446
+
447
+ # ── HTML Dashboard ────────────────────────────────────────────────────────────
448
+
449
+ DASHBOARD_HTML = r"""<!DOCTYPE html>
450
+ <html lang="en">
451
+ <head>
452
+ <meta charset="UTF-8">
453
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
454
+ <title>Overflow OpenENV β€” Live Training</title>
455
+ <style>
456
+ * { box-sizing: border-box; margin: 0; padding: 0; }
457
+ body { background: #0a0a0f; color: #e0e0e0; font-family: 'Courier New', monospace; height: 100vh; display: flex; flex-direction: column; }
458
+ header { background: #12121a; border-bottom: 1px solid #2a2a40; padding: 10px 20px; display: flex; align-items: center; gap: 16px; }
459
+ header h1 { font-size: 16px; color: #7eb8ff; letter-spacing: 2px; }
460
+ .badge { padding: 3px 10px; border-radius: 12px; font-size: 11px; font-weight: bold; }
461
+ .badge-running { background: #1a3a1a; color: #4caf50; border: 1px solid #4caf50; }
462
+ .badge-error { background: #3a1a1a; color: #f44336; border: 1px solid #f44336; }
463
+ .mode-toggle { margin-left: auto; display: flex; gap: 8px; align-items: center; }
464
+ .mode-btn { padding: 4px 14px; border-radius: 8px; border: 1px solid #444; background: #1a1a2a; color: #aaa; cursor: pointer; font-size: 12px; transition: all 0.2s; }
465
+ .mode-btn.active { background: #1a3a5a; color: #7eb8ff; border-color: #7eb8ff; }
466
+ main { flex: 1; display: flex; gap: 0; overflow: hidden; }
467
+ .left-panel { flex: 0 0 55%; display: flex; flex-direction: column; padding: 16px; gap: 12px; border-right: 1px solid #2a2a40; overflow: hidden; }
468
+ .right-panel { flex: 1; display: flex; flex-direction: column; padding: 16px; gap: 12px; overflow: hidden; }
469
+ .panel-title { font-size: 11px; color: #667; letter-spacing: 2px; text-transform: uppercase; margin-bottom: 4px; }
470
+ canvas { border-radius: 8px; }
471
+ #road-canvas { width: 100%; height: 200px; background: #111118; border: 1px solid #2a2a40; border-radius: 8px; }
472
+ .metrics-grid { display: grid; grid-template-columns: repeat(4, 1fr); gap: 8px; }
473
+ .metric-card { background: #12121a; border: 1px solid #2a2a40; border-radius: 8px; padding: 10px; }
474
+ .metric-label { font-size: 10px; color: #667; text-transform: uppercase; letter-spacing: 1px; }
475
+ .metric-value { font-size: 18px; color: #7eb8ff; font-weight: bold; margin-top: 2px; }
476
+ .metric-sub { font-size: 10px; color: #556; margin-top: 2px; }
477
+ .stage-bar { background: #12121a; border: 1px solid #2a2a40; border-radius: 8px; padding: 12px; }
478
+ .stage-stages { display: flex; gap: 6px; margin-top: 8px; }
479
+ .stage-pip { flex: 1; height: 6px; border-radius: 3px; background: #2a2a40; transition: background 0.5s; }
480
+ .stage-pip.active { background: #7eb8ff; }
481
+ .stage-pip.done { background: #4caf50; }
482
+ #reward-canvas { width: 100%; flex: 1; min-height: 160px; background: #12121a; border: 1px solid #2a2a40; border-radius: 8px; }
483
+ .episode-table { flex: 1; overflow-y: auto; background: #12121a; border: 1px solid #2a2a40; border-radius: 8px; min-height: 0; }
484
+ table { width: 100%; border-collapse: collapse; font-size: 11px; }
485
+ th { padding: 6px 10px; color: #667; text-align: left; border-bottom: 1px solid #2a2a40; position: sticky; top: 0; background: #1a1a28; }
486
+ td { padding: 5px 10px; border-bottom: 1px solid #1a1a28; }
487
+ tr:hover td { background: #1a1a2a; }
488
+ .outcome-crash { color: #f44336; }
489
+ .outcome-goal { color: #4caf50; }
490
+ .outcome-timeout { color: #ff9800; }
491
+ .ppo-row { display: flex; gap: 8px; flex-wrap: wrap; }
492
+ .ppo-stat { background: #12121a; border: 1px solid #2a2a40; border-radius: 6px; padding: 6px 12px; font-size: 11px; }
493
+ .ppo-stat span { color: #7eb8ff; }
494
+ .error-box { background: #2a0a0a; border: 1px solid #f44336; border-radius: 8px; padding: 12px; font-size: 11px; color: #f44336; white-space: pre-wrap; overflow-y: auto; max-height: 200px; }
495
+ </style>
496
+ </head>
497
+ <body>
498
+ <header>
499
+ <h1>OVERFLOW OPENENV</h1>
500
+ <span id="status-badge" class="badge badge-running">TRAINING</span>
501
+ <div class="mode-toggle">
502
+ <span style="font-size:11px;color:#667">REWARD MODE:</span>
503
+ <button class="mode-btn active" id="btn-capped" onclick="setMode('capped')">CAPPED</button>
504
+ <button class="mode-btn" id="btn-uncapped" onclick="setMode('uncapped')">UNCAPPED (LLM)</button>
505
+ </div>
506
+ </header>
507
+ <main>
508
+ <!-- LEFT: road + metrics + stage -->
509
+ <div class="left-panel">
510
+ <div>
511
+ <div class="panel-title">Road View - Live</div>
512
+ <canvas id="road-canvas"></canvas>
513
+ </div>
514
+ <div class="metrics-grid">
515
+ <div class="metric-card">
516
+ <div class="metric-label">Total Steps</div>
517
+ <div class="metric-value" id="m-steps">0</div>
518
+ <div class="metric-sub" id="m-sps">0 sps</div>
519
+ </div>
520
+ <div class="metric-card">
521
+ <div class="metric-label">Episodes</div>
522
+ <div class="metric-value" id="m-eps">0</div>
523
+ <div class="metric-sub" id="m-eplen">avg len: 0</div>
524
+ </div>
525
+ <div class="metric-card">
526
+ <div class="metric-label">Mean Reward (100)</div>
527
+ <div class="metric-value" id="m-reward">0.00</div>
528
+ <div class="metric-sub" id="m-curr-r">ep: 0.00</div>
529
+ </div>
530
+ <div class="metric-card">
531
+ <div class="metric-label">PPO Updates</div>
532
+ <div class="metric-value" id="m-updates">0</div>
533
+ <div class="metric-sub">policy gradient</div>
534
+ </div>
535
+ </div>
536
+ <div class="stage-bar">
537
+ <div class="panel-title">Curriculum Stage - <span id="stage-name">Survival</span></div>
538
+ <div class="stage-stages">
539
+ <div class="stage-pip active" id="pip-1"></div>
540
+ <div class="stage-pip" id="pip-2"></div>
541
+ <div class="stage-pip" id="pip-3"></div>
542
+ <div class="stage-pip" id="pip-4"></div>
543
+ </div>
544
+ </div>
545
+ <div>
546
+ <div class="panel-title">PPO Losses</div>
547
+ <div class="ppo-row">
548
+ <div class="ppo-stat">Policy: <span id="pg-loss">-</span></div>
549
+ <div class="ppo-stat">Value: <span id="vf-loss">-</span></div>
550
+ <div class="ppo-stat">Entropy: <span id="entropy">-</span></div>
551
+ </div>
552
+ </div>
553
+ <div id="error-section" style="display:none">
554
+ <div class="panel-title" style="color:#f44336">Error</div>
555
+ <div class="error-box" id="error-text"></div>
556
+ </div>
557
+ </div>
558
+
559
+ <!-- RIGHT: reward chart + episode table -->
560
+ <div class="right-panel">
561
+ <div style="flex:0 0 auto">
562
+ <div class="panel-title">Reward History</div>
563
+ </div>
564
+ <canvas id="reward-canvas"></canvas>
565
+ <div style="flex:0 0 auto">
566
+ <div class="panel-title">Episode Log</div>
567
+ </div>
568
+ <div class="episode-table">
569
+ <table>
570
+ <thead><tr>
571
+ <th>#</th><th>Steps</th><th>Reward</th><th>Outcome</th><th>Stage</th><th>Mode</th>
572
+ </tr></thead>
573
+ <tbody id="ep-tbody"></tbody>
574
+ </table>
575
+ </div>
576
+ </div>
577
+ </main>
578
+
579
+ <script>
580
+ // ── State ──────────────────────────────────────────────────────────────────
581
+ let state = { cars: [], reward_history: [], episode_history: [], stage: 1, reward_mode: 'capped' };
582
+
583
+ // ── Road canvas ────────────────────────────────────────────────────────────
584
+ const roadCanvas = document.getElementById('road-canvas');
585
+ const roadCtx = roadCanvas.getContext('2d');
586
+ const N_LANES = 3;
587
+ const LANE_H = 40;
588
+ const ROAD_PAD = 20;
589
+
590
+ function resizeRoad() {
591
+ roadCanvas.width = roadCanvas.offsetWidth;
592
+ roadCanvas.height = roadCanvas.offsetHeight;
593
+ }
594
+ resizeRoad();
595
+ window.addEventListener('resize', resizeRoad);
596
+
597
+ function laneY(lane) {
598
+ // lane 1..3, top = lane 1
599
+ const totalH = N_LANES * LANE_H;
600
+ const offsetY = (roadCanvas.height - totalH) / 2;
601
+ return offsetY + (lane - 1) * LANE_H + LANE_H / 2;
602
+ }
603
+
604
+ function carX(x, egoX) {
605
+ // Center ego at 30% of canvas
606
+ const w = roadCanvas.width;
607
+ const scale = 0.8; // pixels per unit
608
+ return w * 0.3 + (x - egoX) * scale;
609
+ }
610
+
611
+ function drawRoad() {
612
+ const w = roadCanvas.width, h = roadCanvas.height;
613
+ roadCtx.clearRect(0, 0, w, h);
614
+
615
+ // Road background
616
+ const totalH = N_LANES * LANE_H;
617
+ const offsetY = (h - totalH) / 2;
618
+ roadCtx.fillStyle = '#1a1a28';
619
+ roadCtx.fillRect(0, offsetY, w, totalH);
620
+
621
+ // Lane dividers
622
+ roadCtx.setLineDash([20, 15]);
623
+ roadCtx.strokeStyle = '#3a3a50';
624
+ roadCtx.lineWidth = 1;
625
+ for (let i = 1; i < N_LANES; i++) {
626
+ const y = offsetY + i * LANE_H;
627
+ roadCtx.beginPath();
628
+ roadCtx.moveTo(0, y);
629
+ roadCtx.lineTo(w, y);
630
+ roadCtx.stroke();
631
+ }
632
+ roadCtx.setLineDash([]);
633
+
634
+ // Road edges
635
+ roadCtx.strokeStyle = '#5a5a80';
636
+ roadCtx.lineWidth = 2;
637
+ roadCtx.beginPath(); roadCtx.moveTo(0, offsetY); roadCtx.lineTo(w, offsetY); roadCtx.stroke();
638
+ roadCtx.beginPath(); roadCtx.moveTo(0, offsetY + totalH); roadCtx.lineTo(w, offsetY + totalH); roadCtx.stroke();
639
+
640
+ // Cars
641
+ const egoX = state.ego_x || 0;
642
+ for (const car of state.cars || []) {
643
+ const cx = carX(car.x, egoX);
644
+ if (cx < -50 || cx > w + 50) continue;
645
+ const cy = laneY(car.lane);
646
+ const isEgo = car.car_id === 0;
647
+
648
+ // Car body
649
+ roadCtx.save();
650
+ roadCtx.translate(cx, cy);
651
+ const cw = 36, ch = 18;
652
+ roadCtx.fillStyle = isEgo ? '#2a5a9a' : '#3a2a1a';
653
+ roadCtx.strokeStyle = isEgo ? '#7eb8ff' : '#ff9800';
654
+ roadCtx.lineWidth = isEgo ? 2 : 1;
655
+ roadCtx.beginPath();
656
+ roadCtx.roundRect(-cw/2, -ch/2, cw, ch, 4);
657
+ roadCtx.fill();
658
+ roadCtx.stroke();
659
+
660
+ // Label
661
+ roadCtx.fillStyle = isEgo ? '#7eb8ff' : '#ff9800';
662
+ roadCtx.font = isEgo ? 'bold 9px Courier New' : '8px Courier New';
663
+ roadCtx.textAlign = 'center';
664
+ roadCtx.textBaseline = 'middle';
665
+ roadCtx.fillText(isEgo ? 'EGO' : `C${car.car_id}`, 0, 0);
666
+ roadCtx.restore();
667
+ }
668
+ }
669
+
670
+ // ── Reward chart ────────────────────────────────────────────────────────────
671
+ const rwCanvas = document.getElementById('reward-canvas');
672
+ const rwCtx = rwCanvas.getContext('2d');
673
+
674
+ function resizeRw() {
675
+ rwCanvas.width = rwCanvas.offsetWidth;
676
+ rwCanvas.height = rwCanvas.offsetHeight;
677
+ }
678
+ resizeRw();
679
+ window.addEventListener('resize', resizeRw);
680
+
681
+ function drawRewardChart() {
682
+ const w = rwCanvas.width, h = rwCanvas.height;
683
+ rwCtx.clearRect(0, 0, w, h);
684
+
685
+ const hist = state.reward_history || [];
686
+ if (hist.length < 2) {
687
+ rwCtx.fillStyle = '#667';
688
+ rwCtx.font = '12px Courier New';
689
+ rwCtx.textAlign = 'center';
690
+ rwCtx.fillText('Waiting for episodes...', w/2, h/2);
691
+ return;
692
+ }
693
+
694
+ const pad = { top: 10, right: 10, bottom: 30, left: 50 };
695
+ const pw = w - pad.left - pad.right;
696
+ const ph = h - pad.top - pad.bottom;
697
+
698
+ const minR = Math.min(...hist);
699
+ const maxR = Math.max(...hist);
700
+ const rangeR = maxR - minR || 1;
701
+
702
+ const xScale = pw / (hist.length - 1);
703
+ const yScale = ph / rangeR;
704
+
705
+ // Grid
706
+ rwCtx.strokeStyle = '#1e1e2e';
707
+ rwCtx.lineWidth = 1;
708
+ for (let i = 0; i <= 4; i++) {
709
+ const y = pad.top + ph * (i / 4);
710
+ rwCtx.beginPath(); rwCtx.moveTo(pad.left, y); rwCtx.lineTo(pad.left + pw, y); rwCtx.stroke();
711
+ const val = maxR - rangeR * (i / 4);
712
+ rwCtx.fillStyle = '#556';
713
+ rwCtx.font = '9px Courier New';
714
+ rwCtx.textAlign = 'right';
715
+ rwCtx.fillText(val.toFixed(1), pad.left - 4, y + 3);
716
+ }
717
+
718
+ // Zero line
719
+ if (minR < 0 && maxR > 0) {
720
+ const zy = pad.top + (maxR / rangeR) * ph;
721
+ rwCtx.strokeStyle = '#3a3a50';
722
+ rwCtx.lineWidth = 1;
723
+ rwCtx.setLineDash([4, 4]);
724
+ rwCtx.beginPath(); rwCtx.moveTo(pad.left, zy); rwCtx.lineTo(pad.left + pw, zy); rwCtx.stroke();
725
+ rwCtx.setLineDash([]);
726
+ }
727
+
728
+ // Moving average (window=10)
729
+ const MA = 10;
730
+ const ma = hist.map((_, i) => {
731
+ const sl = hist.slice(Math.max(0, i - MA + 1), i + 1);
732
+ return sl.reduce((a, b) => a + b, 0) / sl.length;
733
+ });
734
+
735
+ // Raw line
736
+ rwCtx.strokeStyle = 'rgba(126,184,255,0.25)';
737
+ rwCtx.lineWidth = 1;
738
+ rwCtx.beginPath();
739
+ hist.forEach((v, i) => {
740
+ const x = pad.left + i * xScale;
741
+ const y = pad.top + (maxR - v) * yScale;
742
+ i === 0 ? rwCtx.moveTo(x, y) : rwCtx.lineTo(x, y);
743
+ });
744
+ rwCtx.stroke();
745
+
746
+ // MA line
747
+ rwCtx.strokeStyle = '#7eb8ff';
748
+ rwCtx.lineWidth = 2;
749
+ rwCtx.beginPath();
750
+ ma.forEach((v, i) => {
751
+ const x = pad.left + i * xScale;
752
+ const y = pad.top + (maxR - v) * yScale;
753
+ i === 0 ? rwCtx.moveTo(x, y) : rwCtx.lineTo(x, y);
754
+ });
755
+ rwCtx.stroke();
756
+
757
+ // X axis label
758
+ rwCtx.fillStyle = '#556';
759
+ rwCtx.font = '9px Courier New';
760
+ rwCtx.textAlign = 'center';
761
+ rwCtx.fillText(`Episodes (${hist.length})`, pad.left + pw / 2, h - 6);
762
+ }
763
+
764
+ // ── Episode table ───────────────────────────────────────────────────────────
765
+ function updateEpisodeTable() {
766
+ const tbody = document.getElementById('ep-tbody');
767
+ const episodes = (state.episode_history || []).slice().reverse().slice(0, 50);
768
+ tbody.innerHTML = episodes.map(ep => `
769
+ <tr>
770
+ <td>${ep.episode}</td>
771
+ <td>${ep.steps}</td>
772
+ <td style="color:${ep.reward >= 0 ? '#4caf50' : '#f44336'}">${ep.reward.toFixed(2)}</td>
773
+ <td class="outcome-${ep.outcome}">${ep.outcome.toUpperCase()}</td>
774
+ <td>${ep.stage}</td>
775
+ <td style="color:#ff9800">${ep.mode || ep.reward_mode || 'capped'}</td>
776
+ </tr>
777
+ `).join('');
778
+ }
779
+
780
+ // ── Metric update ───────────────────────────────────────────────────────────
781
+ function updateMetrics(s) {
782
+ document.getElementById('m-steps').textContent = s.total_steps?.toLocaleString() || '0';
783
+ document.getElementById('m-sps').textContent = `${(s.steps_per_sec||0).toFixed(0)} sps`;
784
+ document.getElementById('m-eps').textContent = s.n_episodes || '0';
785
+ document.getElementById('m-eplen').textContent = `avg len: ${(s.mean_ep_len||0).toFixed(0)}`;
786
+ document.getElementById('m-reward').textContent = (s.mean_reward||0).toFixed(2);
787
+ document.getElementById('m-curr-r').textContent = `ep: ${(s.episode_reward||0).toFixed(2)}`;
788
+ document.getElementById('m-updates').textContent = s.n_updates || '0';
789
+ document.getElementById('pg-loss').textContent = s.pg_loss ?? '-';
790
+ document.getElementById('vf-loss').textContent = s.vf_loss ?? '-';
791
+ document.getElementById('entropy').textContent = s.entropy ?? '-';
792
+ document.getElementById('stage-name').textContent = s.stage_name || 'Survival';
793
+
794
+ const stage = s.stage || 1;
795
+ for (let i = 1; i <= 4; i++) {
796
+ const pip = document.getElementById(`pip-${i}`);
797
+ pip.className = 'stage-pip' + (i < stage ? ' done' : (i === stage ? ' active' : ''));
798
+ }
799
+
800
+ // Mode buttons
801
+ const mode = s.reward_mode || 'capped';
802
+ document.getElementById('btn-capped').className = 'mode-btn' + (mode === 'capped' ? ' active' : '');
803
+ document.getElementById('btn-uncapped').className = 'mode-btn' + (mode === 'uncapped' ? ' active' : '');
804
+
805
+ if (s.error) {
806
+ document.getElementById('error-section').style.display = 'block';
807
+ document.getElementById('error-text').textContent = s.error;
808
+ document.getElementById('status-badge').textContent = 'ERROR';
809
+ document.getElementById('status-badge').className = 'badge badge-error';
810
+ }
811
+ }
812
+
813
+ // ── Mode switch ─────────────────────────────────────────────────────────────
814
+ function setMode(mode) {
815
+ fetch('/api/mode', {
816
+ method: 'POST',
817
+ headers: { 'Content-Type': 'application/json' },
818
+ body: JSON.stringify({ mode })
819
+ }).then(r => r.json()).then(d => {
820
+ state.reward_mode = d.mode;
821
+ updateMetrics(state);
822
+ });
823
+ }
824
+
825
+ // ── Poll /api/state every 2s (SSE as supplement) ───────────────────────────
826
+ function poll() {
827
+ fetch('/api/state').then(r => r.json()).then(s => {
828
+ Object.assign(state, s);
829
+ drawRoad();
830
+ drawRewardChart();
831
+ updateMetrics(s);
832
+ updateEpisodeTable();
833
+ }).catch(() => {});
834
+ }
835
+ setInterval(poll, 2000);
836
+ poll();
837
+
838
+ // ── SSE for fast episode events ─────────────────────────────────────────────
839
+ const evtSrc = new EventSource('/api/stream');
840
+ evtSrc.onmessage = (e) => {
841
+ try {
842
+ const msg = JSON.parse(e.data);
843
+ if (msg.type === 'episode') {
844
+ if (!state.episode_history) state.episode_history = [];
845
+ state.episode_history.push(msg.data);
846
+ if (!state.reward_history) state.reward_history = [];
847
+ state.reward_history.push(msg.data.reward);
848
+ updateEpisodeTable();
849
+ drawRewardChart();
850
+ } else if (msg.type === 'tick') {
851
+ Object.assign(state, msg.data);
852
+ drawRoad();
853
+ } else if (msg.type === 'update') {
854
+ Object.assign(state, msg.data);
855
+ updateMetrics(state);
856
+ }
857
+ } catch(err) {}
858
+ };
859
+
860
+ // Render loop for smooth road animation
861
+ function renderLoop() {
862
+ drawRoad();
863
+ requestAnimationFrame(renderLoop);
864
+ }
865
+ renderLoop();
866
+ </script>
867
+ </body>
868
+ </html>
869
+ """
870
+
871
+
872
+ @app.get("/", response_class=HTMLResponse)
873
+ @app.get("/web", response_class=HTMLResponse)
874
+ def dashboard():
875
+ return HTMLResponse(content=DASHBOARD_HTML)
876
 
877
 
878
  def main():
 
879
  import uvicorn
 
880
  uvicorn.run(app, host="0.0.0.0", port=8000)
881
 
882
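For reference, the reward-mode toggle and the polled state snapshot can also be exercised outside the browser. A minimal sketch, not part of the commit, assuming the server is reachable at http://localhost:8000 and that /api/mode and /api/state respond with the fields the dashboard JS above reads (the exact response shape is an assumption):

```python
# Hedged sketch: hit the same endpoints the dashboard uses.
import requests

BASE = "http://localhost:8000"  # assumed local deployment

# Switch to uncapped (LLM-scaling) rewards, mirroring setMode() in the JS.
resp = requests.post(f"{BASE}/api/mode", json={"mode": "uncapped"})
print(resp.json())  # expected to echo the active mode

# Fetch the same snapshot the dashboard polls every 2 s.
state = requests.get(f"{BASE}/api/state").json()
print(state.get("reward_mode"), state.get("total_steps"), state.get("mean_reward"))
```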
 
server/overflow_environment.py CHANGED
@@ -15,18 +15,33 @@ from dataclasses import dataclass, field
15
  from typing import Any, List, Optional
16
  from uuid import uuid4
17
 
18
- from openenv.core.env_server.interfaces import Environment
19
- from openenv.core.env_server.types import State
20
-
21
- from ..models import (
22
- CarStateData,
23
- LaneOccupancyData,
24
- OverflowAction,
25
- OverflowObservation,
26
- OverflowState,
27
- Position,
28
- ProximityData,
29
- )
30
 
31
  # --- Constants ---
32
  NUM_LANES = 3
@@ -253,6 +268,11 @@ class OverflowEnvironment(Environment):
253
  self._cars: List[Car] = []
254
  self._rng = random.Random()
255
  self._done = False
256
 
257
  def _build_observation(
258
  self,
@@ -332,7 +352,8 @@ class OverflowEnvironment(Environment):
332
  )
333
  )
334
 
335
- return self._build_observation(incident_report="", reward=0.0)
 
336
 
337
  def step(
338
  self,
@@ -347,6 +368,18 @@ class OverflowEnvironment(Environment):
347
  reward=0.0,
348
  )
349
 
350
  self._state.step_count += 1
351
  reward = 0.0
352
  incidents = []
@@ -451,11 +484,12 @@ class OverflowEnvironment(Environment):
451
  "\n".join(incidents) if incidents else "Observer: No incidents this step."
452
  )
453
 
454
- return self._build_observation(
455
  incident_report=incident_report,
456
  reward=reward,
457
  proximities=proximity_list,
458
  )
 
459
 
460
  @property
461
  def state(self) -> OverflowState:
 
15
  from typing import Any, List, Optional
16
  from uuid import uuid4
17
 
18
+ try:
19
+ from openenv.core.env_server.interfaces import Environment
20
+ from openenv.core.env_server.types import State
21
+ except ImportError:
22
+ class Environment: # stub for training-only mode
23
+ pass
24
+ class State:
25
+ pass
26
+
27
+ try:
28
+ from ..models import (
29
+ CarStateData, LaneOccupancyData, OverflowAction,
30
+ OverflowObservation, OverflowState, Position, ProximityData,
31
+ )
32
+ from ..policies.flat_mlp_policy import FlatMLPPolicy
33
+ from ..policies.ticket_attention_policy import TicketAttentionPolicy
34
+ from ..policies.policy_spec import OBS_DIM
35
+ from .policy_adapter import overflow_obs_to_policy_obs, policy_action_to_decision
36
+ except ImportError:
37
+ from models import (
38
+ CarStateData, LaneOccupancyData, OverflowAction,
39
+ OverflowObservation, OverflowState, Position, ProximityData,
40
+ )
41
+ from policies.flat_mlp_policy import FlatMLPPolicy
42
+ from policies.ticket_attention_policy import TicketAttentionPolicy
43
+ from policies.policy_spec import OBS_DIM
44
+ from server.policy_adapter import overflow_obs_to_policy_obs, policy_action_to_decision
45
 
46
  # --- Constants ---
47
  NUM_LANES = 3
 
268
  self._cars: List[Car] = []
269
  self._rng = random.Random()
270
  self._done = False
271
+ self._last_obs: Optional[OverflowObservation] = None
272
+ self._policies = {
273
+ "flat_mlp": FlatMLPPolicy(obs_dim=OBS_DIM),
274
+ "ticket_attention": TicketAttentionPolicy(obs_dim=OBS_DIM),
275
+ }
276
 
277
  def _build_observation(
278
  self,
 
352
  )
353
  )
354
 
355
+ self._last_obs = self._build_observation(incident_report="", reward=0.0)
356
+ return self._last_obs
357
 
358
  def step(
359
  self,
 
368
  reward=0.0,
369
  )
370
 
371
+ # Policy intercept: decision="policy:flat_mlp" or "policy:ticket_attention"
372
+ if action.decision.startswith("policy:") and self._last_obs is not None:
373
+ policy_name = action.decision.split(":", 1)[1].lower()
374
+ if policy_name in self._policies:
375
+ obs_vec = overflow_obs_to_policy_obs(self._last_obs)
376
+ act_vec = self._policies[policy_name].predict(obs_vec)
377
+ decision, reasoning = policy_action_to_decision(act_vec)
378
+ action = OverflowAction(
379
+ decision=decision,
380
+ reasoning=f"[{policy_name}] {reasoning}",
381
+ )
382
+
383
  self._state.step_count += 1
384
  reward = 0.0
385
  incidents = []
 
484
  "\n".join(incidents) if incidents else "Observer: No incidents this step."
485
  )
486
 
487
+ self._last_obs = self._build_observation(
488
  incident_report=incident_report,
489
  reward=reward,
490
  proximities=proximity_list,
491
  )
492
+ return self._last_obs
493
 
494
  @property
495
  def state(self) -> OverflowState:
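The `policy:` intercept added above can be exercised directly, without the trainer. A minimal sketch, not part of the commit, assuming the package is importable as `overflow_env` (as the Dockerfile's PYTHONPATH implies) and that the policies expose the `predict()` method the intercept calls:

```python
# Hedged sketch: delegate one step to the built-in flat_mlp policy
# via the "policy:" decision prefix handled in step().
from overflow_env.server.overflow_environment import OverflowEnvironment
from overflow_env.models import OverflowAction

env = OverflowEnvironment()
obs = env.reset(seed=0)  # populates _last_obs so the intercept has an observation

obs = env.step(OverflowAction(decision="policy:flat_mlp", reasoning=""))
print(obs.reward)
print(obs.incident_report)  # observer summary for this step
```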
server/policy_adapter.py ADDED
@@ -0,0 +1,80 @@
1
+ """
2
+ Adapter between OverflowObservation (2D road grid) and the OpenENV policy
3
+ observation format (ego state + ticket matrix).
4
+
5
+ Nearby cars are converted to collision_risk tickets so TicketAttentionPolicy
6
+ can reason about them using the same mechanism it was designed for.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ import math
12
+ import numpy as np
13
+
14
+ try:
15
+ from ..policies.policy_spec import build_obs, build_ticket_vector, OBS_DIM
16
+ except ImportError:
17
+ from policies.policy_spec import build_obs, build_ticket_vector, OBS_DIM
18
+
19
+
20
+ def overflow_obs_to_policy_obs(obs) -> np.ndarray:
21
+ """OverflowObservation -> 603-dim numpy vector for our policies."""
22
+ cars = obs.cars
23
+ if not cars:
24
+ return np.zeros(OBS_DIM, dtype=np.float32)
25
+
26
+ ego = next((c for c in cars if c.carId == 0), cars[0])
27
+ ego_speed_ms = ego.speed / 4.5 # OverflowEnv speed units -> m/s
28
+ ego_x = ego.position.x
29
+ ego_y = (ego.lane - 2) * 3.7 # lane -> lateral metres
30
+
31
+ ticket_vectors = []
32
+ for car in cars:
33
+ if car.carId == 0:
34
+ continue
35
+ rel_x = car.position.x - ego.position.x
36
+ rel_y = (car.lane - ego.lane) * 3.7
37
+ car_spd = car.speed / 4.5
38
+ distance = math.sqrt(rel_x ** 2 + rel_y ** 2)
39
+ if distance > 80:
40
+ continue
41
+ closing = max(ego_speed_ms - car_spd * math.copysign(1, max(rel_x, 0.01)), 0.1)
42
+ ttc = min(distance / closing, 30.0)
43
+ severity = 1.0 if distance < 8 else (0.75 if distance < 15 else 0.5)
44
+ ticket_vectors.append(build_ticket_vector(
45
+ severity_weight=severity, ttl=5.0,
46
+ pos_x=rel_x, pos_y=rel_y, pos_z=0.0,
47
+ vel_x=car_spd, vel_y=0.0, vel_z=0.0,
48
+ heading=0.0,
49
+ size_length=4.0, size_width=2.0, size_height=1.5,
50
+ distance=distance, time_to_collision=ttc,
51
+ bearing=math.atan2(rel_y, max(rel_x, 0.01)),
52
+ ticket_type="collision_risk", entity_type="vehicle", confidence=1.0,
53
+ ))
54
+
55
+ tv = np.array(ticket_vectors, dtype=np.float32) if ticket_vectors else None
56
+ return build_obs(
57
+ ego_x=ego_x, ego_y=ego_y, ego_z=0.0,
58
+ ego_vx=ego_speed_ms, ego_vy=0.0,
59
+ heading=0.0, speed=ego_speed_ms,
60
+ steer=0.0, throttle=0.5, brake=0.0,
61
+ ticket_vectors=tv,
62
+ )
63
+
64
+
65
+ def policy_action_to_decision(action_vec: np.ndarray) -> tuple[str, str]:
66
+ """Continuous [steer, throttle, brake] -> (text decision, reasoning)."""
67
+ steer, throttle, brake = float(action_vec[0]), float(action_vec[1]), float(action_vec[2])
68
+ if abs(steer) > 0.35:
69
+ decision = "lane_change_left" if steer < 0 else "lane_change_right"
70
+ reasoning = f"steer={steer:.2f}: lateral avoidance"
71
+ elif brake > 0.25:
72
+ decision = "brake"
73
+ reasoning = f"brake={brake:.2f}: closing gap"
74
+ elif throttle > 0.20:
75
+ decision = "accelerate"
76
+ reasoning = f"throttle={throttle:.2f}: clear ahead"
77
+ else:
78
+ decision = "maintain"
79
+ reasoning = f"s={steer:.2f} t={throttle:.2f} b={brake:.2f}: holding course"
80
+ return decision, reasoning
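The thresholds in `policy_action_to_decision` are easiest to see on hand-made action vectors. A small sketch, not part of the commit, assuming the adapter is importable as `overflow_env.server.policy_adapter`:

```python
# Hedged sketch: exercise the steer/brake/throttle thresholds directly.
import numpy as np
from overflow_env.server.policy_adapter import policy_action_to_decision

print(policy_action_to_decision(np.array([-0.6, 0.0, 0.0])))  # |steer| > 0.35  -> lane_change_left
print(policy_action_to_decision(np.array([ 0.0, 0.0, 0.5])))  # brake > 0.25    -> brake
print(policy_action_to_decision(np.array([ 0.0, 0.8, 0.0])))  # throttle > 0.20 -> accelerate
print(policy_action_to_decision(np.array([ 0.1, 0.1, 0.1])))  # below all thresholds -> maintain
```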
training/__init__.py ADDED
File without changes
training/__pycache__/__init__.cpython-314.pyc ADDED
Binary file (169 Bytes).

training/__pycache__/curriculum.cpython-314.pyc ADDED
Binary file (5.69 kB).

training/__pycache__/overflow_gym_env.cpython-314.pyc ADDED
Binary file (9.75 kB).

training/__pycache__/ppo_trainer.cpython-314.pyc ADDED
Binary file (20.6 kB).

training/__pycache__/reward.cpython-314.pyc ADDED
Binary file (3.47 kB).
 
training/curriculum.py ADDED
@@ -0,0 +1,99 @@
1
+ """
2
+ CurriculumManager - ported from openenv/training/curriculum.py.
3
+
4
+ Same 4-stage progression and same reward thresholds. Adapted for
5
+ OverflowEnvironment: no ticket injection (the env has its own scripted
6
+ NPCs), stages instead control training logging and advancement criteria.
7
+
8
+ Stage 1 No extra pressure. Goal: learn basic speed + lane keeping.
9
+ Stage 2 Standard traffic. Goal: survive without crashing.
10
+ Stage 3 Evaluate more. Goal: consistent goal-reaching.
11
+ Stage 4 Full evaluation. Goal: high mean reward over long window.
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ from dataclasses import dataclass
17
+ from typing import List
18
+
19
+
20
+ @dataclass
21
+ class StageConfig:
22
+ stage: int
23
+ name: str
24
+ description: str
25
+ advance_threshold: float # mean episode reward to advance
26
+ advance_window: int # consecutive episodes required
27
+
28
+
29
+ STAGES: List[StageConfig] = [
30
+ StageConfig(
31
+ stage=1, name="Survival",
32
+ description="Learn basic speed control and lane keeping.",
33
+ advance_threshold=50.0, advance_window=8,
34
+ ),
35
+ StageConfig(
36
+ stage=2, name="Crash Avoidance",
37
+ description="Navigate traffic without colliding.",
38
+ advance_threshold=120.0, advance_window=15,
39
+ ),
40
+ StageConfig(
41
+ stage=3, name="Goal Reaching",
42
+ description="Consistently reach the goal position.",
43
+ advance_threshold=200.0, advance_window=15,
44
+ ),
45
+ StageConfig(
46
+ stage=4, name="Mastery",
47
+ description="High reward, smooth driving, minimal near-misses.",
48
+ advance_threshold=280.0, advance_window=15,
49
+ ),
50
+ ]
51
+
52
+
53
+ class CurriculumManager:
54
+ """
55
+ Tracks stage progression based on episode rewards.
56
+ Same API as openenv CurriculumManager - PPOTrainer calls it unchanged.
57
+ """
58
+
59
+ def __init__(self, seed: int = 0):
60
+ self._stage_idx = 0
61
+ self._rewards: List[float] = []
62
+ self._auto_advance = True
63
+
64
+ @property
65
+ def current_stage(self) -> int:
66
+ return STAGES[self._stage_idx].stage
67
+
68
+ @property
69
+ def config(self) -> StageConfig:
70
+ return STAGES[self._stage_idx]
71
+
72
+ def step(self, sim_time: float) -> list:
73
+ """No ticket injection in OverflowEnvironment - always returns []."""
74
+ return []
75
+
76
+ def record_episode_reward(self, reward: float) -> bool:
77
+ """Record episode reward and advance stage if threshold met."""
78
+ self._rewards.append(reward)
79
+ cfg = self.config
80
+ window = self._rewards[-cfg.advance_window:]
81
+
82
+ if (
83
+ self._auto_advance
84
+ and len(window) >= cfg.advance_window
85
+ and sum(window) / len(window) >= cfg.advance_threshold
86
+ and self._stage_idx < len(STAGES) - 1
87
+ ):
88
+ self._stage_idx += 1
89
+ self._rewards = []
90
+ print(f"[Curriculum] Advanced to Stage {self.current_stage}: {self.config.name}")
91
+ return True
92
+ return False
93
+
94
+ def force_stage(self, stage: int) -> None:
95
+ idx = stage - 1
96
+ if 0 <= idx < len(STAGES):
97
+ self._stage_idx = idx
98
+ self._rewards = []
99
+ print(f"[Curriculum] Forced to Stage {stage}: {self.config.name}")
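Stage advancement is driven purely by the rolling mean of episode rewards. A small sketch of the Stage 1 rule (threshold 50.0 over a window of 8), not part of the commit, assuming the module is importable as `overflow_env.training.curriculum`:

```python
# Hedged sketch: eight episodes above the Stage 1 threshold advance to Stage 2.
from overflow_env.training.curriculum import CurriculumManager

cm = CurriculumManager()
for r in [60.0] * 8:               # window mean 60.0 >= advance_threshold 50.0
    advanced = cm.record_episode_reward(r)
print(advanced, cm.current_stage)  # True 2 (reward history resets on advance)
```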
training/overflow_gym_env.py ADDED
@@ -0,0 +1,170 @@
1
+ """
2
+ Gymnasium wrapper around OverflowEnvironment.
3
+
4
+ Bridges the gap between OverflowEnvironment (text actions, structured obs)
5
+ and our PPO trainer (continuous actions, numeric obs vector).
6
+
7
+ Observation: 603-dim float32 vector (same layout as CarEnv3D - ego state +
8
+ collision-risk ticket matrix built from nearby cars)
9
+
10
+ Action: [steer, throttle, brake] all in [-1, 1]
11
+ -> mapped to text decision for OverflowEnvironment
12
+
13
+ This makes OverflowEnvironment a drop-in replacement for CarEnv3D so that
14
+ FlatMLPPolicy and TicketAttentionPolicy train with the exact same PPO loop.
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import math
20
+ from typing import Any, Dict, Optional, Tuple
21
+
22
+ import numpy as np
23
+ import gymnasium as gym
24
+ from gymnasium import spaces
25
+
26
+ from ..server.overflow_environment import OverflowEnvironment
27
+ from ..models import OverflowAction
28
+ from ..policies.policy_spec import (
29
+ build_obs, build_ticket_vector, OBS_DIM,
30
+ )
31
+ from .reward import compute_reward
32
+
33
+
34
+ # ── Action mapping ────────────────────────────────────────────────────────────
35
+
36
+ def _action_to_decision(action: np.ndarray) -> str:
37
+ steer, throttle, brake = float(action[0]), float(action[1]), float(action[2])
38
+ if abs(steer) > 0.35:
39
+ return "lane_change_left" if steer < 0 else "lane_change_right"
40
+ if brake > 0.25:
41
+ return "brake"
42
+ if throttle > 0.20:
43
+ return "accelerate"
44
+ return "maintain"
45
+
46
+
47
+ # ── Observation extraction ────────────────────────────────────────────────────
48
+
49
+ def _obs_to_vector(overflow_obs) -> np.ndarray:
50
+ """OverflowObservation -> 603-dim numpy vector matching policy_spec layout."""
51
+ cars = overflow_obs.cars
52
+ if not cars:
53
+ return np.zeros(OBS_DIM, dtype=np.float32)
54
+
55
+ ego = next((c for c in cars if c.carId == 0), cars[0])
56
+ ego_speed_ms = ego.speed / 4.5
57
+ ego_x = ego.position.x
58
+ ego_y = (ego.lane - 2) * 3.7
59
+
60
+ ticket_vectors = []
61
+ for car in cars:
62
+ if car.carId == 0:
63
+ continue
64
+ rel_x = car.position.x - ego.position.x
65
+ rel_y = (car.lane - ego.lane) * 3.7
66
+ car_spd = car.speed / 4.5
67
+ distance = math.sqrt(rel_x ** 2 + rel_y ** 2)
68
+ if distance > 80:
69
+ continue
70
+ closing = max(ego_speed_ms - car_spd * math.copysign(1, max(rel_x, 0.01)), 0.1)
71
+ ttc = min(distance / closing, 30.0)
72
+ severity = 1.0 if distance < 8 else (0.75 if distance < 15 else 0.5)
73
+ ticket_vectors.append(build_ticket_vector(
74
+ severity_weight=severity, ttl=5.0,
75
+ pos_x=rel_x, pos_y=rel_y, pos_z=0.0,
76
+ vel_x=car_spd, vel_y=0.0, vel_z=0.0,
77
+ heading=0.0,
78
+ size_length=4.0, size_width=2.0, size_height=1.5,
79
+ distance=distance, time_to_collision=ttc,
80
+ bearing=math.atan2(rel_y, max(rel_x, 0.01)),
81
+ ticket_type="collision_risk", entity_type="vehicle", confidence=1.0,
82
+ ))
83
+
84
+ tv = np.array(ticket_vectors, dtype=np.float32) if ticket_vectors else None
85
+ return build_obs(
86
+ ego_x=ego_x, ego_y=ego_y, ego_z=0.0,
87
+ ego_vx=ego_speed_ms, ego_vy=0.0,
88
+ heading=0.0, speed=ego_speed_ms,
89
+ steer=0.0, throttle=0.5, brake=0.0,
90
+ ticket_vectors=tv,
91
+ )
92
+
93
+
94
+ # ── Gymnasium wrapper ─────────────────────────────────────────────────────────
95
+
96
+ class OverflowGymEnv(gym.Env):
97
+ """
98
+ Gymnasium-compatible wrapper around OverflowEnvironment.
99
+
100
+ Provides the same interface as CarEnv3D so PPOTrainer works unchanged.
101
+ """
102
+
103
+ metadata = {"render_modes": []}
104
+
105
+ def __init__(self):
106
+ super().__init__()
107
+ self._env = OverflowEnvironment()
108
+ self._last_overflow_obs = None
109
+ self._prev_action = np.zeros(3, dtype=np.float32)
110
+ self._sim_time = 0.0 # incremented each step (mirrors CarEnv3D._sim_time)
111
+ self._step_dt = 0.1 # seconds per step
112
+
113
+ self.observation_space = spaces.Box(
114
+ low=-1.0, high=1.0, shape=(OBS_DIM,), dtype=np.float32
115
+ )
116
+ self.action_space = spaces.Box(
117
+ low=-1.0, high=1.0, shape=(3,), dtype=np.float32
118
+ )
119
+
120
+ def reset(
121
+ self,
122
+ seed: Optional[int] = None,
123
+ options: Optional[Dict[str, Any]] = None,
124
+ ) -> Tuple[np.ndarray, Dict]:
125
+ super().reset(seed=seed)
126
+ self._last_overflow_obs = self._env.reset(seed=seed)
127
+ self._prev_action = np.zeros(3, dtype=np.float32)
128
+ self._sim_time = 0.0
129
+ return _obs_to_vector(self._last_overflow_obs), {}
130
+
131
+ def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, bool, Dict]:
132
+ decision = _action_to_decision(action)
133
+ overflow_action = OverflowAction(decision=decision, reasoning="")
134
+ overflow_obs = self._env.step(overflow_action)
135
+ self._last_overflow_obs = overflow_obs
136
+ self._sim_time += self._step_dt
137
+
138
+ obs_vec = _obs_to_vector(overflow_obs)
139
+
140
+ # Extract signals for reward shaping
141
+ ego = next((c for c in overflow_obs.cars if c.carId == 0), None)
142
+ ego_speed_ms = (ego.speed / 4.5) if ego else 0.0
143
+ ego_y = ((ego.lane - 2) * 3.7) if ego else 0.0
144
+
145
+ collision = any("CRASH" in p for p in (overflow_obs.incident_report or "").split("\n")
146
+ if "Car 0" in p)
147
+ goal_reached = overflow_obs.done and not collision
148
+
149
+ reward = compute_reward(
150
+ ego_speed = ego_speed_ms,
151
+ ego_y = ego_y,
152
+ action = action,
153
+ prev_action = self._prev_action,
154
+ collision = collision,
155
+ goal_reached = goal_reached,
156
+ near_miss = "NEAR MISS" in (overflow_obs.incident_report or ""),
157
+ raw_reward = overflow_obs.reward or 0.0,
158
+ )
159
+
160
+ self._prev_action = action.copy()
161
+
162
+ terminated = overflow_obs.done
163
+ truncated = False
164
+ info: Dict[str, Any] = {
165
+ "collision": collision,
166
+ "goal_reached": goal_reached,
167
+ "incident": overflow_obs.incident_report,
168
+ }
169
+
170
+ return obs_vec, reward, terminated, truncated, info
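A quick way to sanity-check the wrapper's spaces and step contract is to drive it with random actions. A minimal sketch, not part of the commit, assuming the module is importable as `overflow_env.training.overflow_gym_env`:

```python
# Hedged sketch: random rollout through the Gymnasium wrapper.
from overflow_env.training.overflow_gym_env import OverflowGymEnv

env = OverflowGymEnv()
obs, info = env.reset(seed=0)
print(obs.shape, env.action_space.shape)   # (603,) and (3,) per the docstring above

for _ in range(20):
    action = env.action_space.sample()     # [steer, throttle, brake] in [-1, 1]
    obs, reward, terminated, truncated, info = env.step(action)
    if terminated or truncated:
        obs, info = env.reset()
```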
training/ppo_trainer.py ADDED
@@ -0,0 +1,329 @@
1
+ """
2
+ PPO trainer - ported directly from openenv/training/ppo_trainer.py.
3
+
4
+ Same algorithm, same hyperparameters, same GAE implementation.
5
+ Only change: uses OverflowGymEnv instead of CarEnv3D.
6
+
7
+ Usage:
8
+ from overflow_env.training.ppo_trainer import run_training
9
+ run_training(policy_type="attention", total_steps=2_000_000)
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ import time
15
+ from collections import deque
16
+ from pathlib import Path
17
+ from typing import Optional
18
+
19
+ import numpy as np
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.optim as optim
23
+
24
+ from .overflow_gym_env import OverflowGymEnv
25
+ from .curriculum import CurriculumManager
26
+ from .reward import compute_episode_bonus
27
+ from ..policies.base_policy import BasePolicy
28
+ from ..policies.policy_spec import OBS_DIM
29
+
30
+
31
+ # ── Rollout buffer ─────────────────────────────────────────────────────────────
32
+ # Identical to openenv/training/ppo_trainer.py
33
+
34
+ class RolloutBuffer:
35
+ def __init__(self, n_steps: int, obs_dim: int, device: torch.device):
36
+ self.n = n_steps
37
+ self.obs = torch.zeros(n_steps, obs_dim, device=device)
38
+ self.acts = torch.zeros(n_steps, 3, device=device)
39
+ self.rew = torch.zeros(n_steps, device=device)
40
+ self.val = torch.zeros(n_steps, device=device)
41
+ self.logp = torch.zeros(n_steps, device=device)
42
+ self.done = torch.zeros(n_steps, device=device)
43
+ self.ptr = 0
44
+
45
+ def add(self, obs, act, rew, val, logp, done):
46
+ i = self.ptr
47
+ self.obs[i] = torch.as_tensor(obs, dtype=torch.float32)
48
+ self.acts[i] = torch.as_tensor(act, dtype=torch.float32)
49
+ self.rew[i] = float(rew)
50
+ self.val[i] = float(val)
51
+ self.logp[i] = float(logp)
52
+ self.done[i] = float(done)
53
+ self.ptr += 1
54
+
55
+ def full(self) -> bool:
56
+ return self.ptr >= self.n
57
+
58
+ def reset(self):
59
+ self.ptr = 0
60
+
61
+ def compute_returns(self, last_val: float, gamma: float, gae_lambda: float):
62
+ """Generalized Advantage Estimation - identical to openenv."""
63
+ adv = torch.zeros_like(self.rew)
64
+ gae = 0.0
65
+ for t in reversed(range(self.n)):
66
+ next_val = last_val if t == self.n - 1 else float(self.val[t + 1])
67
+ delta = self.rew[t] + gamma * next_val * (1 - self.done[t]) - self.val[t]
68
+ gae = delta + gamma * gae_lambda * (1 - self.done[t]) * gae
69
+ adv[t] = gae
70
+ self.ret = adv + self.val
71
+
72
+
73
+ # ── PPO Trainer ────────────────────────────────────────────────────────────────
74
+
75
+ class PPOTrainer:
76
+ """
77
+ Identical to openenv PPOTrainer - same hyperparameters, same PPO update.
78
+ Environment is OverflowGymEnv instead of CarEnv3D.
79
+ """
80
+
81
+ def __init__(
82
+ self,
83
+ policy: BasePolicy,
84
+ env: OverflowGymEnv,
85
+ curriculum: Optional[CurriculumManager] = None,
86
+ # PPO hyperparameters - same defaults as openenv
87
+ lr: float = 3e-4,
88
+ gamma: float = 0.99,
89
+ gae_lambda: float = 0.95,
90
+ clip_range: float = 0.2,
91
+ clip_range_vf: float = 0.2,
92
+ ent_coef: float = 0.02,
93
+ vf_coef: float = 0.5,
94
+ max_grad_norm: float = 0.5,
95
+ n_steps: int = 2048,
96
+ batch_size: int = 256,
97
+ n_epochs: int = 10,
98
+ save_dir: str = "checkpoints",
99
+ log_interval: int = 10,
100
+ device: str = "auto",
101
+ ):
102
+ self.policy = policy
103
+ self.env = env
104
+ self.curriculum = curriculum or CurriculumManager()
105
+ self.gamma = gamma
106
+ self.gae_lambda = gae_lambda
107
+ self.clip = clip_range
108
+ self.clip_vf = clip_range_vf
109
+ self.ent_coef = ent_coef
110
+ self.vf_coef = vf_coef
111
+ self.max_grad = max_grad_norm
112
+ self.n_steps = n_steps
113
+ self.batch_size = batch_size
114
+ self.n_epochs = n_epochs
115
+ self.log_every = log_interval
116
+ self.save_dir = Path(save_dir)
117
+ self.save_dir.mkdir(parents=True, exist_ok=True)
118
+
119
+ if device == "auto":
120
+ device = "cuda" if torch.cuda.is_available() else \
121
+ "mps" if torch.backends.mps.is_available() else "cpu"
122
+ self.device = torch.device(device)
123
+ self.policy.to(self.device)
124
+
125
+ self.optimizer = optim.Adam(policy.parameters(), lr=lr, eps=1e-5)
126
+ self.scheduler = optim.lr_scheduler.LinearLR(
127
+ self.optimizer, start_factor=1.0, end_factor=0.1, total_iters=500,
128
+ )
129
+
130
+ self.buffer = RolloutBuffer(n_steps, OBS_DIM, self.device)
131
+
132
+ self.ep_rewards = deque(maxlen=100)
133
+ self.ep_lengths = deque(maxlen=100)
134
+ self.total_steps = 0
135
+ self.n_updates = 0
136
+
137
+ # ── Main training loop ─────────────────────────────────────────────────────
138
+
139
+ def train(self, total_steps: int = 2_000_000) -> None:
140
+ print(f"\n{'='*70}", flush=True)
141
+ print(f" OpenENV PPO Training - policy={self.policy.__class__.__name__}", flush=True)
142
+ print(f" total_steps={total_steps} n_steps={self.n_steps} lr={self.optimizer.param_groups[0]['lr']:.0e}", flush=True)
143
+ print(f" gamma={self.gamma} gae_lambda={self.gae_lambda} clip={self.clip} ent_coef={self.ent_coef}", flush=True)
144
+ print(f"{'='*70}\n", flush=True)
145
+
146
+ obs, _ = self.env.reset()
147
+ ep_reward = 0.0
148
+ ep_steps = 0
149
+ t0 = time.time()
150
+
151
+ while self.total_steps < total_steps:
152
+ self.buffer.reset()
153
+ self.policy.eval()
154
+
155
+ # ── Collect rollout ──────────────────────────────────────────────
156
+ for _ in range(self.n_steps):
157
+ # Curriculum step (returns [] for OverflowEnv - kept for API compat)
158
+ self.curriculum.step(self.env._sim_time)
159
+
160
+ obs_t = torch.as_tensor(obs, dtype=torch.float32, device=self.device)
161
+ with torch.no_grad():
162
+ act_mean, val = self.policy(obs_t.unsqueeze(0))
163
+ act_mean = act_mean.squeeze(0)
164
+ val = val.squeeze(0)
165
+
166
+ dist = torch.distributions.Normal(act_mean, torch.ones_like(act_mean) * 0.3)
167
+ action = dist.sample().clamp(-1, 1)
168
+ logp = dist.log_prob(action).sum()
169
+
170
+ next_obs, reward, term, trunc, info = self.env.step(action.cpu().numpy())
171
+
172
+ self.buffer.add(
173
+ obs, action.cpu().numpy(), reward,
174
+ float(val), float(logp), float(term or trunc),
175
+ )
176
+
177
+ obs = next_obs
178
+ ep_reward += reward
179
+ ep_steps += 1
180
+ self.total_steps += 1
181
+
182
+ if term or trunc:
183
+ bonus = compute_episode_bonus(
184
+ total_steps=ep_steps,
185
+ survived=not info.get("collision", False),
186
+ )
187
+ ep_reward += bonus
188
+ self.ep_rewards.append(ep_reward)
189
+ self.ep_lengths.append(ep_steps)
190
+ advanced = self.curriculum.record_episode_reward(ep_reward)
191
+
192
+ outcome = "CRASH" if info.get("collision") else ("GOAL" if info.get("goal_reached") else "timeout")
193
+ print(
194
+ f" ep#{len(self.ep_rewards):>4d} | "
195
+ f"steps={ep_steps:>3d} | "
196
+ f"reward={ep_reward:>8.2f} | "
197
+ f"outcome={outcome:<8} | "
198
+ f"stage={self.curriculum.current_stage} | "
199
+ f"total_steps={self.total_steps}",
200
+ flush=True,
201
+ )
202
+
203
+ obs, _ = self.env.reset()
204
+ ep_reward = 0.0
205
+ ep_steps = 0
206
+
207
+ # ── PPO update ───────────────────────────────────────────────────
208
+ with torch.no_grad():
209
+ obs_t = torch.as_tensor(obs, dtype=torch.float32, device=self.device)
210
+ _, last_val = self.policy(obs_t.unsqueeze(0))
211
+ self.buffer.compute_returns(float(last_val), self.gamma, self.gae_lambda)
212
+
213
+ self.policy.train()
214
+ self._ppo_update()
215
+ self.n_updates += 1
216
+ self.scheduler.step()
217
+
218
+ elapsed = time.time() - t0
219
+ sps = self.total_steps / max(elapsed, 1)
220
+ mean_r = np.mean(self.ep_rewards) if self.ep_rewards else 0.0
221
+ mean_l = np.mean(self.ep_lengths) if self.ep_lengths else 0.0
222
+ print(
223
+ f"\n[PPO update #{self.n_updates}] "
224
+ f"step={self.total_steps} "
225
+ f"mean_reward={mean_r:.2f} "
226
+ f"mean_ep_len={mean_l:.0f} "
227
+ f"stage={self.curriculum.current_stage} "
228
+ f"sps={sps:.0f}\n",
229
+ flush=True,
230
+ )
231
+
232
+ # ── Checkpoint ───────────────────────────────────────────────────
233
+ if self.n_updates % 50 == 0:
234
+ ckpt = self.save_dir / f"policy_step{self.total_steps}_stage{self.curriculum.current_stage}.pt"
235
+ torch.save({
236
+ "step": self.total_steps,
237
+ "stage": self.curriculum.current_stage,
238
+ "policy": self.policy.state_dict(),
239
+ "optim": self.optimizer.state_dict(),
240
+ }, ckpt)
241
+ print(f"[PPO] Saved checkpoint -> {ckpt}")
242
+
243
+ # ── PPO update pass - identical to openenv ─────────────────────────────────
244
+
245
+ def _ppo_update(self):
246
+ obs = self.buffer.obs
247
+ acts = self.buffer.acts
248
+ old_logp = self.buffer.logp
249
+ adv = self.buffer.ret - self.buffer.val
250
+ adv = (adv - adv.mean()) / (adv.std() + 1e-8)
251
+ ret = self.buffer.ret
252
+ old_val = self.buffer.val
253
+
254
+ indices = torch.randperm(self.n_steps, device=self.device)
255
+
256
+ for _ in range(self.n_epochs):
257
+ for start in range(0, self.n_steps, self.batch_size):
258
+ idx = indices[start: start + self.batch_size]
259
+
260
+ act_mean, val = self.policy(obs[idx])
261
+ val = val.squeeze(-1)
262
+
263
+ dist = torch.distributions.Normal(act_mean, torch.ones_like(act_mean) * 0.3)
264
+ logp = dist.log_prob(acts[idx]).sum(dim=-1)
265
+ entropy = dist.entropy().sum(dim=-1).mean()
266
+
267
+ ratio = torch.exp(logp - old_logp[idx])
268
+ pg_loss1 = -adv[idx] * ratio
269
+ pg_loss2 = -adv[idx] * ratio.clamp(1 - self.clip, 1 + self.clip)
270
+ pg_loss = torch.max(pg_loss1, pg_loss2).mean()
271
+
272
+ val_unclipped = (val - ret[idx]) ** 2
273
+ val_clipped = (
274
+ old_val[idx]
275
+ + (val - old_val[idx]).clamp(-self.clip_vf, self.clip_vf)
276
+ - ret[idx]
277
+ ) ** 2
278
+ vf_loss = 0.5 * torch.max(val_unclipped, val_clipped).mean()
279
+
280
+ loss = pg_loss + self.vf_coef * vf_loss - self.ent_coef * entropy
281
+
282
+ self.optimizer.zero_grad()
283
+ loss.backward()
284
+ nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad)
285
+ self.optimizer.step()
286
+
287
+
288
+ # ── Entry point ────────────────────────────────────────────────────────────────
289
+
290
+ def run_training(
291
+ policy_type: str = "attention",
292
+ total_steps: int = 2_000_000,
293
+ start_stage: int = 1,
294
+ checkpoint: Optional[str] = None,
295
+ device: str = "auto",
296
+ ) -> None:
297
+ from ..policies.ticket_attention_policy import TicketAttentionPolicy
298
+ from ..policies.flat_mlp_policy import FlatMLPPolicy
299
+
300
+ policy_map = {
301
+ "attention": lambda: TicketAttentionPolicy(obs_dim=OBS_DIM),
302
+ "mlp": lambda: FlatMLPPolicy(obs_dim=OBS_DIM),
303
+ }
304
+ policy = policy_map[policy_type]()
305
+
306
+ if checkpoint:
307
+ ckpt = torch.load(checkpoint, map_location="cpu")
308
+ policy.load_state_dict(ckpt["policy"])
309
+ print(f"[PPO] Loaded checkpoint from {checkpoint}")
310
+
311
+ env = OverflowGymEnv()
312
+ cm = CurriculumManager()
313
+ if start_stage > 1:
314
+ cm.force_stage(start_stage)
315
+
316
+ trainer = PPOTrainer(policy=policy, env=env, curriculum=cm, device=device, n_steps=512)
317
+ trainer.train(total_steps=total_steps)
318
+
319
+
320
+ if __name__ == "__main__":
321
+ import argparse
322
+ p = argparse.ArgumentParser()
323
+ p.add_argument("--policy", default="attention", choices=["attention", "mlp"])
324
+ p.add_argument("--steps", default=2_000_000, type=int)
325
+ p.add_argument("--stage", default=1, type=int)
326
+ p.add_argument("--checkpoint", default=None)
327
+ p.add_argument("--device", default="auto")
328
+ args = p.parse_args()
329
+ run_training(args.policy, args.steps, args.stage, args.checkpoint, args.device)
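The advantage targets in RolloutBuffer.compute_returns follow the standard GAE(lambda) recursion. A plain-Python rendering of the same loop on a three-step toy rollout, not part of the commit, using the trainer's default gamma=0.99 and gae_lambda=0.95:

```python
# Hedged sketch: the GAE recursion from compute_returns on a toy 3-step rollout.
gamma, lam = 0.99, 0.95
rew  = [1.0, 1.0, 1.0]
val  = [0.5, 0.5, 0.5]   # value estimates stored during the rollout
done = [0.0, 0.0, 1.0]   # episode terminates on the last step
last_val = 0.5           # bootstrap value for the state after the rollout

adv, gae = [0.0, 0.0, 0.0], 0.0
for t in reversed(range(3)):
    next_val = last_val if t == 2 else val[t + 1]
    delta = rew[t] + gamma * next_val * (1 - done[t]) - val[t]
    gae = delta + gamma * lam * (1 - done[t]) * gae
    adv[t] = gae
ret = [a + v for a, v in zip(adv, val)]   # regression targets for the value head
print(adv, ret)
```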
training/reward.py ADDED
@@ -0,0 +1,94 @@
1
+ """
2
+ Reward shaping for OverflowEnvironment - ported from openenv/training/reward.py.
3
+
4
+ Same core principle: BASE + THREAT_RESPONSE with clear gradient direction.
5
+ Adapted to OverflowEnvironment's signals (no EventTicket objects - uses
6
+ collision/near-miss flags and raw reward from the environment).
7
+
8
+ BASE: survival + speed + lane ~+0.4/step
9
+ COLLISION: -50 (terminal)
10
+ NEAR MISS: -0.8 per event
11
+ GOAL REACHED: +5.0 (terminal bonus)
12
+ SMOOTH DRIVING: small bonus when no threats
13
+ """
14
+
15
+ from __future__ import annotations
16
+
17
+ import numpy as np
18
+
19
+ # ── Same weights as openenv/training/reward.py ────────────────────────────────
20
+
21
+ W_ALIVE = 0.40
22
+ W_SPEED = 0.10
23
+ W_LANE = 0.15
24
+ W_SMOOTH = 0.03
25
+ TARGET_SPEED = 11.0 # m/s (~40 km/h)
26
+ TARGET_SPEED_TOL = 3.0
27
+
28
+ W_COLLISION = -50.0
29
+ W_NEAR_MISS = -0.8
30
+ W_GOAL = 5.0
31
+ W_SURVIVE_BONUS = 5.0
32
+
33
+ ROAD_HALF_WIDTH = 3.7 * 1.5 # ~2.5 lanes worth of tolerance
34
+
35
+
36
+ def compute_reward(
37
+ ego_speed: float,
38
+ ego_y: float,
39
+ action: np.ndarray,
40
+ prev_action: np.ndarray,
41
+ collision: bool,
42
+ goal_reached: bool,
43
+ near_miss: bool,
44
+ raw_reward: float, # OverflowEnvironment's built-in per-step reward (accepted for API parity; not folded into the shaped sum below)
45
+ ) -> float:
46
+ """
47
+ Shaped reward. Mirrors openenv reward structure:
48
+ - collision β†’ large terminal penalty
49
+ - base survival + speed + lane keeping
50
+ - near-miss penalty
51
+ - goal bonus
52
+ - smooth driving bonus when clear
53
+ """
54
+ if collision:
55
+ return W_COLLISION
56
+
57
+ reward = 0.0
58
+
59
+ # 1. Survival
60
+ reward += W_ALIVE
61
+
62
+ # 2. Speed maintenance (same formula as openenv)
63
+ speed_err = abs(ego_speed - TARGET_SPEED)
64
+ if speed_err < TARGET_SPEED_TOL:
65
+ reward += W_SPEED * (1.0 - speed_err / TARGET_SPEED_TOL)
66
+ else:
67
+ reward -= 0.03 * min(speed_err - TARGET_SPEED_TOL, 5.0)
68
+
69
+ # 3. Lane keeping
70
+ norm_y = abs(ego_y) / ROAD_HALF_WIDTH
71
+ reward += W_LANE * max(0.0, 1.0 - norm_y ** 2)
72
+
73
+ # 4. Near miss penalty
74
+ if near_miss:
75
+ reward += W_NEAR_MISS
76
+
77
+ # 5. Goal bonus
78
+ if goal_reached:
79
+ reward += W_GOAL
80
+
81
+ # 6. Smooth driving
82
+ action_delta = np.abs(action - prev_action).sum()
83
+ reward += W_SMOOTH * max(0.0, 1.0 - action_delta * 3.0)
84
+
85
+ return float(reward)
86
+
87
+
88
+ def compute_episode_bonus(total_steps: int, survived: bool) -> float:
89
+ """End-of-episode bonus β€” same as openenv."""
90
+ if not survived:
91
+ return 0.0
92
+ bonus = W_SURVIVE_BONUS
93
+ bonus += min(total_steps, 500) * 0.02 # longevity reward
94
+ return float(bonus)
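With the weights above, a clean step (at target speed, centered in the lane, steady action, no incidents) earns 0.40 + 0.10 + 0.15 + 0.03 = 0.68. A small sketch, not part of the commit, assuming the module is importable as `overflow_env.training.reward`:

```python
# Hedged sketch: one shaped-reward call with no threats present.
import numpy as np
from overflow_env.training.reward import compute_reward

r = compute_reward(
    ego_speed=11.0,           # exactly TARGET_SPEED  -> full W_SPEED  (0.10)
    ego_y=0.0,                # lane-centered         -> full W_LANE   (0.15)
    action=np.zeros(3),
    prev_action=np.zeros(3),  # zero action delta     -> full W_SMOOTH (0.03)
    collision=False,
    goal_reached=False,
    near_miss=False,
    raw_reward=0.0,
)
print(r)  # 0.68 = W_ALIVE + W_SPEED + W_LANE + W_SMOOTH
```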