V1vex commited on
Commit
625b444
·
0 Parent(s):

first-commit

Browse files
Files changed (49) hide show
  1. AI-Trainer +1 -0
  2. DesktopRL Environment.txt +8 -0
  3. README.md +83 -0
  4. __pycache__/inference.cpython-312.pyc +0 -0
  5. __pycache__/inference.cpython-314.pyc +0 -0
  6. ai_contest_arena/.dockerignore +44 -0
  7. ai_contest_arena/Dockerfile +80 -0
  8. ai_contest_arena/README.md +255 -0
  9. ai_contest_arena/__init__.py +16 -0
  10. ai_contest_arena/__pycache__/__init__.cpython-312.pyc +0 -0
  11. ai_contest_arena/__pycache__/client.cpython-312.pyc +0 -0
  12. ai_contest_arena/__pycache__/models.cpython-312.pyc +0 -0
  13. ai_contest_arena/client.py +99 -0
  14. ai_contest_arena/models.py +35 -0
  15. ai_contest_arena/openenv.yaml +7 -0
  16. ai_contest_arena/openenv_ai_contest_arena.egg-info/PKG-INFO +11 -0
  17. ai_contest_arena/openenv_ai_contest_arena.egg-info/SOURCES.txt +17 -0
  18. ai_contest_arena/openenv_ai_contest_arena.egg-info/dependency_links.txt +1 -0
  19. ai_contest_arena/openenv_ai_contest_arena.egg-info/entry_points.txt +2 -0
  20. ai_contest_arena/openenv_ai_contest_arena.egg-info/requires.txt +7 -0
  21. ai_contest_arena/openenv_ai_contest_arena.egg-info/top_level.txt +1 -0
  22. ai_contest_arena/pyproject.toml +34 -0
  23. ai_contest_arena/pyrightconfig.json +8 -0
  24. ai_contest_arena/server/__init__.py +11 -0
  25. ai_contest_arena/server/__pycache__/__init__.cpython-312.pyc +0 -0
  26. ai_contest_arena/server/__pycache__/ai_contest_arena_environment.cpython-312.pyc +0 -0
  27. ai_contest_arena/server/__pycache__/ai_contest_arena_environment.cpython-314.pyc +0 -0
  28. ai_contest_arena/server/__pycache__/app.cpython-312.pyc +0 -0
  29. ai_contest_arena/server/ai_contest_arena_environment.py +212 -0
  30. ai_contest_arena/server/app.py +56 -0
  31. ai_contest_arena/server/requirements.txt +4 -0
  32. ai_contest_arena/task.json +17 -0
  33. ai_contest_arena/uv.lock +0 -0
  34. ai_server_admin/Dockerfile +80 -0
  35. ai_server_admin/README.md +255 -0
  36. ai_server_admin/__init__.py +16 -0
  37. ai_server_admin/client.py +99 -0
  38. ai_server_admin/models.py +27 -0
  39. ai_server_admin/openenv.yaml +7 -0
  40. ai_server_admin/pyproject.toml +45 -0
  41. ai_server_admin/pyrightconfig.json +8 -0
  42. ai_server_admin/server/__init__.py +11 -0
  43. ai_server_admin/server/ai_server_admin_environment.py +62 -0
  44. ai_server_admin/server/app.py +84 -0
  45. ai_server_admin/server/requirements.txt +6 -0
  46. ai_server_admin/uv.lock +0 -0
  47. hackathon_submission.zip +0 -0
  48. inference.py +86 -0
  49. requirements.txt +3 -0
AI-Trainer ADDED
@@ -0,0 +1 @@
 
 
1
+ Subproject commit f3f4b7149046a184ad5e5495f093ff66f9ebb8bf
DesktopRL Environment.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ cd "C:\Users\Vivek\Desktop\RL Environment\ai_contest_arena"
2
+ docker build -t ai_contest_arena-env:latest .
3
+ docker run -d -p 8000:8000 --name contest_arena ai_contest_arena-env:latest
4
+
5
+ $env:API_BASE_URL = "https://router.huggingface.co/v1"
6
+ $env:HF_TOKEN = "hf_your_token_here"
7
+ $env:MODEL_NAME = "google/gemma-4-31B-it:novita"
8
+ python inference.py
README.md ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Meta OpenEnv Hackathon — AI Agent Submission
2
+
3
+ ## Overview
4
+
5
+ This project is a production-ready AI agent built for the **Meta OpenEnv Hackathon**. The agent competes in a 3-round coding challenge against an OpenAI GPT-4o-mini judge, receiving Python coding tasks and submitting solutions for evaluation.
6
+
7
+ ---
8
+
9
+ ## Agent: `inference.py`
10
+
11
+ The core agent logic lives entirely in `inference.py`. It runs a 3-round loop that:
12
+
13
+ 1. Calls the local OpenEnv server's `/reset` endpoint to fetch a new coding task
14
+ 2. Sends the task to a powerful LLM to generate a Python solution
15
+ 3. Submits the solution back to the `/step` endpoint and retrieves the score
16
+
17
+ ### API Payload Compatibility
18
+
19
+ The agent uses the exact payload format required by the Meta OpenEnv REST API:
20
+
21
+ ```json
22
+ {"action": {"answer": "<agent_answer>"}}
23
+ ```
24
+
25
+ This nested structure matches the server's strict Pydantic model validation, completely preventing `422 Unprocessable Entity` errors that arise from incorrect top-level keys or flat payload formats.
26
+
27
+ ---
28
+
29
+ ## Model: Qwen/Qwen2.5-72B-Instruct
30
+
31
+ The agent uses **`Qwen/Qwen2.5-72B-Instruct`** via the **Hugging Face Router API** (`https://router.huggingface.co/v1`), accessed through an OpenAI-compatible client.
32
+
33
+ Key reasons for this choice:
34
+
35
+ - The HF Router endpoint is the current active inference gateway — the legacy `api-inference.huggingface.co/v1` endpoint has been deprecated and returns `410 Gone` errors
36
+ - Qwen 2.5 72B delivers state-of-the-art Python code generation, significantly outperforming smaller models on structured coding tasks
37
+ - The OpenAI-compatible interface ensures zero-downtime, stable connections with clean error handling
38
+
39
+ ---
40
+
41
+ ## Task Parsing
42
+
43
+ The agent safely parses tasks from multiple possible server response shapes:
44
+
45
+ - `response["observation"]["echoed_message"]`
46
+ - `response["observation"]["task_prompt"]`
47
+ - `response["echoed_message"]`
48
+ - Raw JSON fallback via `json.dumps(resp)`
49
+
50
+ This makes the agent resilient to any OpenEnv server version or configuration.
51
+
52
+ ---
53
+
54
+ ## Setup
55
+
56
+ ### 1. Install dependencies
57
+
58
+ ```bash
59
+ pip install -r requirements.txt
60
+ ```
61
+
62
+ ### 2. Set environment variables
63
+
64
+ ```powershell
65
+ $env:HF_TOKEN = "hf_your_token_here"
66
+ $env:OPENAI_API_KEY = "sk-your_key_here"
67
+ ```
68
+
69
+ ### 3. Start the OpenEnv server, then run the agent
70
+
71
+ ```bash
72
+ python inference.py
73
+ ```
74
+
75
+ ---
76
+
77
+ ## Requirements
78
+
79
+ | Package | Version |
80
+ |--------------|----------|
81
+ | requests | 2.31.0 |
82
+ | openai | 1.14.0 |
83
+ | pydantic | 2.6.4 |
__pycache__/inference.cpython-312.pyc ADDED
Binary file (3.78 kB). View file
 
__pycache__/inference.cpython-314.pyc ADDED
Binary file (5.03 kB). View file
 
ai_contest_arena/.dockerignore ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python virtual environments (Windows & Unix)
2
+ .venv/
3
+ venv/
4
+ env/
5
+ ENV/
6
+ .env/
7
+
8
+ # uv cache
9
+ .uv/
10
+
11
+ # Python cache
12
+ __pycache__/
13
+ *.py[cod]
14
+ *.pyo
15
+ *.pyd
16
+ .Python
17
+
18
+ # Distribution / packaging
19
+ *.egg-info/
20
+ dist/
21
+ build/
22
+ *.egg
23
+
24
+ # Test & coverage artifacts
25
+ .pytest_cache/
26
+ .coverage
27
+ htmlcov/
28
+
29
+ # IDE / editor
30
+ .vscode/
31
+ .idea/
32
+ *.swp
33
+ *.swo
34
+
35
+ # OS artefacts
36
+ .DS_Store
37
+ Thumbs.db
38
+
39
+ # Git
40
+ .git/
41
+ .gitignore
42
+
43
+ # Inference script (runs outside the container)
44
+ inference.py
ai_contest_arena/Dockerfile ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # Multi-stage build using openenv-base
8
+ # This Dockerfile is flexible and works for both:
9
+ # - In-repo environments (with local OpenEnv sources)
10
+ # - Standalone environments (with openenv from PyPI/Git)
11
+ # The build script (openenv build) handles context detection and sets appropriate build args.
12
+
13
+ ARG BASE_IMAGE=ghcr.io/meta-pytorch/openenv-base:latest
14
+ FROM ${BASE_IMAGE} AS builder
15
+
16
+ WORKDIR /app
17
+
18
+ # Ensure git is available (required for installing dependencies from VCS)
19
+ RUN apt-get update && \
20
+ apt-get install -y --no-install-recommends git && \
21
+ rm -rf /var/lib/apt/lists/*
22
+
23
+ # Build argument to control whether we're building standalone or in-repo
24
+ ARG BUILD_MODE=in-repo
25
+ ARG ENV_NAME=ai_contest_arena
26
+
27
+ # Copy environment code (always at root of build context)
28
+ COPY . /app/env
29
+
30
+ # For in-repo builds, openenv is already vendored in the build context
31
+ # For standalone builds, openenv will be installed via pyproject.toml
32
+ WORKDIR /app/env
33
+
34
+ # Ensure uv is available (for local builds where base image lacks it)
35
+ RUN if ! command -v uv >/dev/null 2>&1; then \
36
+ curl -LsSf https://astral.sh/uv/install.sh | sh && \
37
+ mv /root/.local/bin/uv /usr/local/bin/uv && \
38
+ mv /root/.local/bin/uvx /usr/local/bin/uvx; \
39
+ fi
40
+
41
+ # Install dependencies using uv sync
42
+ # If uv.lock exists, use it; otherwise resolve on the fly
43
+ RUN --mount=type=cache,target=/root/.cache/uv \
44
+ if [ -f uv.lock ]; then \
45
+ uv sync --frozen --no-install-project --no-editable; \
46
+ else \
47
+ uv sync --no-install-project --no-editable; \
48
+ fi
49
+
50
+ RUN --mount=type=cache,target=/root/.cache/uv \
51
+ if [ -f uv.lock ]; then \
52
+ uv sync --frozen --no-editable; \
53
+ else \
54
+ uv sync --no-editable; \
55
+ fi
56
+
57
+ # Final runtime stage
58
+ FROM ${BASE_IMAGE}
59
+
60
+ WORKDIR /app
61
+
62
+ # Copy the virtual environment from builder
63
+ COPY --from=builder /app/env/.venv /app/.venv
64
+
65
+ # Copy the environment code
66
+ COPY --from=builder /app/env /app/env
67
+
68
+ # Set PATH to use the virtual environment
69
+ ENV PATH="/app/.venv/bin:$PATH"
70
+
71
+ # Set PYTHONPATH so imports work correctly
72
+ ENV PYTHONPATH="/app/env:$PYTHONPATH"
73
+
74
+ # Health check
75
+ HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
76
+ CMD curl -f http://localhost:8000/health || exit 1
77
+
78
+ # Run the FastAPI server
79
+ # The module path is constructed to work with the /app/env structure
80
+ CMD ["sh", "-c", "cd /app/env && uvicorn server.app:app --host 0.0.0.0 --port 8000"]
ai_contest_arena/README.md ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Ai Contest Arena Environment Server
3
+ emoji: 🎻
4
+ colorFrom: indigo
5
+ colorTo: red
6
+ sdk: docker
7
+ pinned: false
8
+ app_port: 8000
9
+ base_path: /web
10
+ tags:
11
+ - openenv
12
+ ---
13
+
14
+ # Ai Contest Arena Environment
15
+
16
+ A simple test environment that echoes back messages. Perfect for testing the env APIs as well as demonstrating environment usage patterns.
17
+
18
+ ## Quick Start
19
+
20
+ The simplest way to use the Ai Contest Arena environment is through the `AiContestArenaEnv` class:
21
+
22
+ ```python
23
+ from ai_contest_arena import AiContestArenaAction, AiContestArenaEnv
24
+
25
+ try:
26
+ # Create environment from Docker image
27
+ ai_contest_arenaenv = AiContestArenaEnv.from_docker_image("ai_contest_arena-env:latest")
28
+
29
+ # Reset
30
+ result = ai_contest_arenaenv.reset()
31
+ print(f"Reset: {result.observation.echoed_message}")
32
+
33
+ # Send multiple messages
34
+ messages = ["Hello, World!", "Testing echo", "Final message"]
35
+
36
+ for msg in messages:
37
+ result = ai_contest_arenaenv.step(AiContestArenaAction(message=msg))
38
+ print(f"Sent: '{msg}'")
39
+ print(f" → Echoed: '{result.observation.echoed_message}'")
40
+ print(f" → Length: {result.observation.message_length}")
41
+ print(f" → Reward: {result.reward}")
42
+
43
+ finally:
44
+ # Always clean up
45
+ ai_contest_arenaenv.close()
46
+ ```
47
+
48
+ That's it! The `AiContestArenaEnv.from_docker_image()` method handles:
49
+ - Starting the Docker container
50
+ - Waiting for the server to be ready
51
+ - Connecting to the environment
52
+ - Container cleanup when you call `close()`
53
+
54
+ ## Building the Docker Image
55
+
56
+ Before using the environment, you need to build the Docker image:
57
+
58
+ ```bash
59
+ # From project root
60
+ docker build -t ai_contest_arena-env:latest -f server/Dockerfile .
61
+ ```
62
+
63
+ ## Deploying to Hugging Face Spaces
64
+
65
+ You can easily deploy your OpenEnv environment to Hugging Face Spaces using the `openenv push` command:
66
+
67
+ ```bash
68
+ # From the environment directory (where openenv.yaml is located)
69
+ openenv push
70
+
71
+ # Or specify options
72
+ openenv push --namespace my-org --private
73
+ ```
74
+
75
+ The `openenv push` command will:
76
+ 1. Validate that the directory is an OpenEnv environment (checks for `openenv.yaml`)
77
+ 2. Prepare a custom build for Hugging Face Docker space (enables web interface)
78
+ 3. Upload to Hugging Face (ensuring you're logged in)
79
+
80
+ ### Prerequisites
81
+
82
+ - Authenticate with Hugging Face: The command will prompt for login if not already authenticated
83
+
84
+ ### Options
85
+
86
+ - `--directory`, `-d`: Directory containing the OpenEnv environment (defaults to current directory)
87
+ - `--repo-id`, `-r`: Repository ID in format 'username/repo-name' (defaults to 'username/env-name' from openenv.yaml)
88
+ - `--base-image`, `-b`: Base Docker image to use (overrides Dockerfile FROM)
89
+ - `--private`: Deploy the space as private (default: public)
90
+
91
+ ### Examples
92
+
93
+ ```bash
94
+ # Push to your personal namespace (defaults to username/env-name from openenv.yaml)
95
+ openenv push
96
+
97
+ # Push to a specific repository
98
+ openenv push --repo-id my-org/my-env
99
+
100
+ # Push with a custom base image
101
+ openenv push --base-image ghcr.io/meta-pytorch/openenv-base:latest
102
+
103
+ # Push as a private space
104
+ openenv push --private
105
+
106
+ # Combine options
107
+ openenv push --repo-id my-org/my-env --base-image custom-base:latest --private
108
+ ```
109
+
110
+ After deployment, your space will be available at:
111
+ `https://huggingface.co/spaces/<repo-id>`
112
+
113
+ The deployed space includes:
114
+ - **Web Interface** at `/web` - Interactive UI for exploring the environment
115
+ - **API Documentation** at `/docs` - Full OpenAPI/Swagger interface
116
+ - **Health Check** at `/health` - Container health monitoring
117
+ - **WebSocket** at `/ws` - Persistent session endpoint for low-latency interactions
118
+
119
+ ## Environment Details
120
+
121
+ ### Action
122
+ **AiContestArenaAction**: Contains a single field
123
+ - `message` (str) - The message to echo back
124
+
125
+ ### Observation
126
+ **AiContestArenaObservation**: Contains the echo response and metadata
127
+ - `echoed_message` (str) - The message echoed back
128
+ - `message_length` (int) - Length of the message
129
+ - `reward` (float) - Reward based on message length (length × 0.1)
130
+ - `done` (bool) - Always False for echo environment
131
+ - `metadata` (dict) - Additional info like step count
132
+
133
+ ### Reward
134
+ The reward is calculated as: `message_length × 0.1`
135
+ - "Hi" → reward: 0.2
136
+ - "Hello, World!" → reward: 1.3
137
+ - Empty message → reward: 0.0
138
+
139
+ ## Advanced Usage
140
+
141
+ ### Connecting to an Existing Server
142
+
143
+ If you already have an Ai Contest Arena environment server running, you can connect directly:
144
+
145
+ ```python
146
+ from ai_contest_arena import AiContestArenaEnv
147
+
148
+ # Connect to existing server
149
+ ai_contest_arenaenv = AiContestArenaEnv(base_url="<ENV_HTTP_URL_HERE>")
150
+
151
+ # Use as normal
152
+ result = ai_contest_arenaenv.reset()
153
+ result = ai_contest_arenaenv.step(AiContestArenaAction(message="Hello!"))
154
+ ```
155
+
156
+ Note: When connecting to an existing server, `ai_contest_arenaenv.close()` will NOT stop the server.
157
+
158
+ ### Using the Context Manager
159
+
160
+ The client supports context manager usage for automatic connection management:
161
+
162
+ ```python
163
+ from ai_contest_arena import AiContestArenaAction, AiContestArenaEnv
164
+
165
+ # Connect with context manager (auto-connects and closes)
166
+ with AiContestArenaEnv(base_url="http://localhost:8000") as env:
167
+ result = env.reset()
168
+ print(f"Reset: {result.observation.echoed_message}")
169
+ # Multiple steps with low latency
170
+ for msg in ["Hello", "World", "!"]:
171
+ result = env.step(AiContestArenaAction(message=msg))
172
+ print(f"Echoed: {result.observation.echoed_message}")
173
+ ```
174
+
175
+ The client uses WebSocket connections for:
176
+ - **Lower latency**: No HTTP connection overhead per request
177
+ - **Persistent session**: Server maintains your environment state
178
+ - **Efficient for episodes**: Better for many sequential steps
179
+
180
+ ### Concurrent WebSocket Sessions
181
+
182
+ The server supports multiple concurrent WebSocket connections. To enable this,
183
+ modify `server/app.py` to use factory mode:
184
+
185
+ ```python
186
+ # In server/app.py - use factory mode for concurrent sessions
187
+ app = create_app(
188
+ AiContestArenaEnvironment, # Pass class, not instance
189
+ AiContestArenaAction,
190
+ AiContestArenaObservation,
191
+ max_concurrent_envs=4, # Allow 4 concurrent sessions
192
+ )
193
+ ```
194
+
195
+ Then multiple clients can connect simultaneously:
196
+
197
+ ```python
198
+ from ai_contest_arena import AiContestArenaAction, AiContestArenaEnv
199
+ from concurrent.futures import ThreadPoolExecutor
200
+
201
+ def run_episode(client_id: int):
202
+ with AiContestArenaEnv(base_url="http://localhost:8000") as env:
203
+ result = env.reset()
204
+ for i in range(10):
205
+ result = env.step(AiContestArenaAction(message=f"Client {client_id}, step {i}"))
206
+ return client_id, result.observation.message_length
207
+
208
+ # Run 4 episodes concurrently
209
+ with ThreadPoolExecutor(max_workers=4) as executor:
210
+ results = list(executor.map(run_episode, range(4)))
211
+ ```
212
+
213
+ ## Development & Testing
214
+
215
+ ### Direct Environment Testing
216
+
217
+ Test the environment logic directly without starting the HTTP server:
218
+
219
+ ```bash
220
+ # From the server directory
221
+ python3 server/ai_contest_arena_environment.py
222
+ ```
223
+
224
+ This verifies that:
225
+ - Environment resets correctly
226
+ - Step executes actions properly
227
+ - State tracking works
228
+ - Rewards are calculated correctly
229
+
230
+ ### Running Locally
231
+
232
+ Run the server locally for development:
233
+
234
+ ```bash
235
+ uvicorn server.app:app --reload
236
+ ```
237
+
238
+ ## Project Structure
239
+
240
+ ```
241
+ ai_contest_arena/
242
+ ├── .dockerignore # Docker build exclusions
243
+ ├── __init__.py # Module exports
244
+ ├── README.md # This file
245
+ ├── openenv.yaml # OpenEnv manifest
246
+ ├── pyproject.toml # Project metadata and dependencies
247
+ ├── uv.lock # Locked dependencies (generated)
248
+ ├── client.py # AiContestArenaEnv client
249
+ ├── models.py # Action and Observation models
250
+ └── server/
251
+ ├── __init__.py # Server module exports
252
+ ├── ai_contest_arena_environment.py # Core environment logic
253
+ ├── app.py # FastAPI application (HTTP + WebSocket endpoints)
254
+ └── Dockerfile # Container image definition
255
+ ```
ai_contest_arena/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Ai Contest Arena Environment."""
8
+
9
+ from .client import AiContestArenaEnv
10
+ from .models import AiContestArenaAction, AiContestArenaObservation
11
+
12
+ __all__ = [
13
+ "AiContestArenaAction",
14
+ "AiContestArenaObservation",
15
+ "AiContestArenaEnv",
16
+ ]
ai_contest_arena/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (392 Bytes). View file
 
ai_contest_arena/__pycache__/client.cpython-312.pyc ADDED
Binary file (3.82 kB). View file
 
ai_contest_arena/__pycache__/models.cpython-312.pyc ADDED
Binary file (1.86 kB). View file
 
ai_contest_arena/client.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Ai Contest Arena Environment Client."""
8
+
9
+ from typing import Dict
10
+
11
+ from openenv.core import EnvClient
12
+ from openenv.core.client_types import StepResult
13
+ from openenv.core.env_server.types import State
14
+
15
+ from .models import AiContestArenaAction, AiContestArenaObservation
16
+
17
+
18
+ class AiContestArenaEnv(
19
+ EnvClient[AiContestArenaAction, AiContestArenaObservation, State]
20
+ ):
21
+ """
22
+ Client for the Ai Contest Arena Environment.
23
+
24
+ This client maintains a persistent WebSocket connection to the environment server,
25
+ enabling efficient multi-step interactions with lower latency.
26
+ Each client instance has its own dedicated environment session on the server.
27
+
28
+ Example:
29
+ >>> # Connect to a running server
30
+ >>> with AiContestArenaEnv(base_url="http://localhost:8000") as client:
31
+ ... result = client.reset()
32
+ ... print(result.observation.echoed_message)
33
+ ...
34
+ ... result = client.step(AiContestArenaAction(message="Hello!"))
35
+ ... print(result.observation.echoed_message)
36
+
37
+ Example with Docker:
38
+ >>> # Automatically start container and connect
39
+ >>> client = AiContestArenaEnv.from_docker_image("ai_contest_arena-env:latest")
40
+ >>> try:
41
+ ... result = client.reset()
42
+ ... result = client.step(AiContestArenaAction(message="Test"))
43
+ ... finally:
44
+ ... client.close()
45
+ """
46
+
47
+ def _step_payload(self, action: AiContestArenaAction) -> Dict:
48
+ """
49
+ Convert AiContestArenaAction to JSON payload for step message.
50
+
51
+ Args:
52
+ action: AiContestArenaAction instance
53
+
54
+ Returns:
55
+ Dictionary representation suitable for JSON encoding
56
+ """
57
+ return {
58
+ "message": action.message,
59
+ }
60
+
61
+ def _parse_result(self, payload: Dict) -> StepResult[AiContestArenaObservation]:
62
+ """
63
+ Parse server response into StepResult[AiContestArenaObservation].
64
+
65
+ Args:
66
+ payload: JSON response data from server
67
+
68
+ Returns:
69
+ StepResult with AiContestArenaObservation
70
+ """
71
+ obs_data = payload.get("observation", {})
72
+ observation = AiContestArenaObservation(
73
+ echoed_message=obs_data.get("echoed_message", ""),
74
+ message_length=obs_data.get("message_length", 0),
75
+ done=payload.get("done", False),
76
+ reward=payload.get("reward"),
77
+ metadata=obs_data.get("metadata", {}),
78
+ )
79
+
80
+ return StepResult(
81
+ observation=observation,
82
+ reward=payload.get("reward"),
83
+ done=payload.get("done", False),
84
+ )
85
+
86
+ def _parse_state(self, payload: Dict) -> State:
87
+ """
88
+ Parse server response into State object.
89
+
90
+ Args:
91
+ payload: JSON response from state request
92
+
93
+ Returns:
94
+ State object with episode_id and step_count
95
+ """
96
+ return State(
97
+ episode_id=payload.get("episode_id"),
98
+ step_count=payload.get("step_count", 0),
99
+ )
ai_contest_arena/models.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ Data models for the Multi-Agent Contest Arena Environment.
9
+
10
+ Contestant A submits an answer (Action); the environment evaluates it
11
+ against a Baseline (Contestant B) using an LLM-as-a-Judge.
12
+ """
13
+
14
+ from openenv.core.env_server.types import Action, Observation, State
15
+ from pydantic import Field
16
+
17
+
18
class AiContestArenaAction(Action):
    """Action submitted by Contestant A: an answer to the current task."""

    # Required field — a contest submission has no sensible default.
    answer: str = Field(..., description="Contestant A's solution to the current task")


class AiContestArenaObservation(Observation):
    """What the environment reports back each step: the task and the judge's verdict."""

    task_prompt: str = Field(default="", description="The task/problem to solve")
    judge_feedback: str = Field(default="", description="LLM Judge's evaluation feedback")


class AiContestArenaState(State):
    """Server-side episode state: the active task plus the most recent judge feedback."""

    current_task: str = Field(default="", description="The active task prompt")
    judge_feedback: str = Field(default="", description="Latest judge feedback")
ai_contest_arena/openenv.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ spec_version: 1
2
+ name: ai_contest_arena
3
+ type: space
4
+ runtime: fastapi
5
+ app: server.app:app
6
+ port: 8000
7
+
ai_contest_arena/openenv_ai_contest_arena.egg-info/PKG-INFO ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.4
2
+ Name: openenv-ai_contest_arena
3
+ Version: 0.1.0
4
+ Summary: Multi-Agent Contest Arena environment for OpenEnv
5
+ Requires-Python: >=3.10
6
+ Requires-Dist: openenv-core[core]>=0.2.2
7
+ Requires-Dist: openai>=1.0.0
8
+ Requires-Dist: requests>=2.28.0
9
+ Provides-Extra: dev
10
+ Requires-Dist: pytest>=8.0.0; extra == "dev"
11
+ Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
ai_contest_arena/openenv_ai_contest_arena.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ README.md
2
+ __init__.py
3
+ client.py
4
+ models.py
5
+ pyproject.toml
6
+ ./__init__.py
7
+ ./client.py
8
+ ./models.py
9
+ openenv_ai_contest_arena.egg-info/PKG-INFO
10
+ openenv_ai_contest_arena.egg-info/SOURCES.txt
11
+ openenv_ai_contest_arena.egg-info/dependency_links.txt
12
+ openenv_ai_contest_arena.egg-info/entry_points.txt
13
+ openenv_ai_contest_arena.egg-info/requires.txt
14
+ openenv_ai_contest_arena.egg-info/top_level.txt
15
+ server/__init__.py
16
+ server/ai_contest_arena_environment.py
17
+ server/app.py
ai_contest_arena/openenv_ai_contest_arena.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
 
 
1
+
ai_contest_arena/openenv_ai_contest_arena.egg-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [console_scripts]
2
+ server = ai_contest_arena.server.app:main
ai_contest_arena/openenv_ai_contest_arena.egg-info/requires.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ openenv-core[core]>=0.2.2
2
+ openai>=1.0.0
3
+ requests>=2.28.0
4
+
5
+ [dev]
6
+ pytest>=8.0.0
7
+ pytest-cov>=4.0.0
ai_contest_arena/openenv_ai_contest_arena.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ ai_contest_arena
ai_contest_arena/pyproject.toml ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ [build-system]
8
+ requires = ["setuptools>=45", "wheel"]
9
+ build-backend = "setuptools.build_meta"
10
+
11
+ [project]
12
+ name = "openenv-ai_contest_arena"
13
+ version = "0.1.0"
14
+ description = "Multi-Agent Contest Arena environment for OpenEnv"
15
+ requires-python = ">=3.10"
16
+ dependencies = [
17
+ "openenv-core[core]>=0.2.2",
18
+ "openai>=1.0.0",
19
+ "requests>=2.28.0",
20
+ ]
21
+
22
+ [project.optional-dependencies]
23
+ dev = [
24
+ "pytest>=8.0.0",
25
+ "pytest-cov>=4.0.0",
26
+ ]
27
+
28
+ [project.scripts]
29
+ server = "ai_contest_arena.server.app:main"
30
+
31
+ [tool.setuptools]
32
+ include-package-data = true
33
+ packages = ["ai_contest_arena", "ai_contest_arena.server"]
34
+ package-dir = { "ai_contest_arena" = ".", "ai_contest_arena.server" = "server" }
ai_contest_arena/pyrightconfig.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "venvPath": ".",
3
+ "venv": ".venv",
4
+ "pythonVersion": "3.10",
5
+ "typeCheckingMode": "basic",
6
+ "reportMissingImports": "warning",
7
+ "reportMissingModuleSource": "none"
8
+ }
ai_contest_arena/server/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Ai Contest Arena environment server components."""
8
+
9
+ from .ai_contest_arena_environment import AiContestArenaEnvironment
10
+
11
+ __all__ = ["AiContestArenaEnvironment"]
ai_contest_arena/server/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (346 Bytes). View file
 
ai_contest_arena/server/__pycache__/ai_contest_arena_environment.cpython-312.pyc ADDED
Binary file (7.89 kB). View file
 
ai_contest_arena/server/__pycache__/ai_contest_arena_environment.cpython-314.pyc ADDED
Binary file (8.8 kB). View file
 
ai_contest_arena/server/__pycache__/app.cpython-312.pyc ADDED
Binary file (2.02 kB). View file
 
ai_contest_arena/server/ai_contest_arena_environment.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ Multi-Agent Contest Arena Environment.
9
+
10
+ Contestant A submits an answer; the LLM Judge evaluates it against
11
+ Contestant B's pre-defined baseline answer and returns a reward in [0, 1].
12
+ """
13
+
14
+ import os
15
+ import random
16
+ import re
17
+ from uuid import uuid4
18
+
19
+ from openenv.core.env_server.interfaces import Environment
20
+ from openai import OpenAI
21
+
22
+ try:
23
+ from ..models import AiContestArenaAction, AiContestArenaObservation, AiContestArenaState
24
+ except ImportError:
25
+ from models import AiContestArenaAction, AiContestArenaObservation, AiContestArenaState
26
+
27
+ JUDGE_API_BASE_URL = os.environ.get(
28
+ "JUDGE_API_BASE_URL",
29
+ os.environ.get("API_BASE_URL", "https://router.huggingface.co/v1"),
30
+ )
31
+ JUDGE_API_KEY = os.environ.get("JUDGE_API_KEY") or os.environ.get("HF_TOKEN")
32
+ JUDGE_MODEL_NAME = os.environ.get(
33
+ "JUDGE_MODEL_NAME",
34
+ os.environ.get("MODEL_NAME", "google/gemma-4-31B-it:novita"),
35
+ )
36
+
37
+
38
+ TASKS = [
39
+ {
40
+ "difficulty": "Easy",
41
+ "prompt": "Write a Python function that returns the factorial of a non-negative integer n using recursion.",
42
+ "baseline": (
43
+ "def factorial(n):\n"
44
+ " if n == 0:\n"
45
+ " return 1\n"
46
+ " return n * factorial(n - 1)"
47
+ ),
48
+ },
49
+ {
50
+ "difficulty": "Medium",
51
+ "prompt": (
52
+ "Write a Python function that finds all pairs in a list of integers that sum to a given target. "
53
+ "Return a list of tuples. Each pair should appear only once."
54
+ ),
55
+ "baseline": (
56
+ "def find_pairs(nums, target):\n"
57
+ " seen, pairs = set(), []\n"
58
+ " for n in nums:\n"
59
+ " complement = target - n\n"
60
+ " if complement in seen:\n"
61
+ " pairs.append((complement, n))\n"
62
+ " seen.add(n)\n"
63
+ " return pairs"
64
+ ),
65
+ },
66
+ {
67
+ "difficulty": "Hard",
68
+ "prompt": (
69
+ "Implement a Python class `LRUCache` with a fixed capacity that supports "
70
+ "`get(key)` and `put(key, value)` operations in O(1) time."
71
+ ),
72
+ "baseline": (
73
+ "from collections import OrderedDict\n\n"
74
+ "class LRUCache:\n"
75
+ " def __init__(self, capacity):\n"
76
+ " self.cache = OrderedDict()\n"
77
+ " self.capacity = capacity\n\n"
78
+ " def get(self, key):\n"
79
+ " if key not in self.cache:\n"
80
+ " return -1\n"
81
+ " self.cache.move_to_end(key)\n"
82
+ " return self.cache[key]\n\n"
83
+ " def put(self, key, value):\n"
84
+ " if key in self.cache:\n"
85
+ " self.cache.move_to_end(key)\n"
86
+ " self.cache[key] = value\n"
87
+ " if len(self.cache) > self.capacity:\n"
88
+ " self.cache.popitem(last=False)"
89
+ ),
90
+ },
91
+ ]
92
+
93
+ JUDGE_PROMPT = """\
94
+ You are an impartial programming contest judge. Evaluate two solutions to the problem below.
95
+
96
+ ## Problem
97
+ {problem}
98
+
99
+ ## Contestant A's Answer
100
+ {answer_a}
101
+
102
+ ## Contestant B's Answer (Baseline)
103
+ {answer_b}
104
+
105
+ Score Contestant A's answer out of 10 based on:
106
+ 1. Correctness — does it solve the problem accurately?
107
+ 2. Reasoning — is the logic sound and efficient?
108
+ 3. Formatting — is the code clean and readable?
109
+
110
+ Respond in this exact format:
111
+ Score: <integer 0-10>
112
+ Feedback: <one concise paragraph>
113
+ """
114
+
115
+
116
def _get_judge_client() -> OpenAI:
    """Build an OpenAI-compatible client pointed at the judge endpoint.

    Raises:
        RuntimeError: when neither JUDGE_API_KEY nor HF_TOKEN provided a key.
    """
    if JUDGE_API_KEY:
        return OpenAI(base_url=JUDGE_API_BASE_URL, api_key=JUDGE_API_KEY)
    raise RuntimeError(
        "Missing judge API key for judge evaluation. "
        "Set JUDGE_API_KEY or HF_TOKEN in the environment for the ai_contest_arena server process."
    )
124
+
125
+
126
def _call_judge(problem: str, answer_a: str, answer_b: str) -> tuple[float, str]:
    """Ask the LLM judge to score Contestant A's answer against the baseline.

    Returns:
        A ``(reward, feedback)`` pair, where ``reward`` lies strictly inside
        ``(0.0, 1.0)``.

    Raises:
        RuntimeError: when the judge API call itself fails.
    """
    judge = _get_judge_client()
    rendered = JUDGE_PROMPT.format(problem=problem, answer_a=answer_a, answer_b=answer_b)
    try:
        completion = judge.chat.completions.create(
            model=JUDGE_MODEL_NAME,
            messages=[{"role": "user", "content": rendered}],
            max_tokens=512,
            temperature=0.2,
        )
    except Exception as err:
        raise RuntimeError(
            "LLM judge call failed. Check API_BASE_URL and HF_TOKEN for the ai_contest_arena server process. "
            f"Original error: {err}"
        ) from err

    reply = completion.choices[0].message.content or ""

    # Pull out the integer score; fall back to the midpoint (5) when missing.
    score_match = re.search(r"Score:\s*(\d+)", reply)
    score = int(score_match.group(1)) if score_match else 5
    score = min(10, max(0, score))

    # Map 0..10 onto the open interval (0, 1): never exactly 0.0 or 1.0.
    reward = (score + 0.5) / 11.0

    fb_match = re.search(r"Feedback:\s*(.+)", reply, re.DOTALL)
    feedback = fb_match.group(1).strip() if fb_match else reply.strip()

    return reward, feedback
156
+
157
+
158
class AiContestArenaEnvironment(Environment):
    """
    Multi-Agent Contest Arena.

    reset() samples a random task (Easy / Medium / Hard); step(action) has the
    LLM judge score Contestant A's answer against that task's baseline and
    returns a reward strictly inside (0.0, 1.0). Episodes last one step.
    """

    SUPPORTS_CONCURRENT_SESSIONS: bool = True

    def __init__(self):
        # No task until reset(); step() samples one lazily if needed.
        self._task: dict = {}
        self._state = AiContestArenaState(episode_id=str(uuid4()), step_count=0)

    def reset(self) -> AiContestArenaObservation:
        """Start a fresh episode with a newly sampled task."""
        self._task = random.choice(TASKS)
        self._state = AiContestArenaState(
            episode_id=str(uuid4()),
            step_count=0,
            current_task=self._task["prompt"],
            judge_feedback="",
        )
        return AiContestArenaObservation(
            task_prompt=self._task["prompt"],
            judge_feedback="",
            done=False,
            reward=0.0,
        )

    def step(self, action: AiContestArenaAction) -> AiContestArenaObservation:
        """Judge the submitted answer; the episode terminates afterwards."""
        self._state.step_count += 1

        # Guard against step() arriving before reset(): sample a task on demand.
        if not getattr(self, "_task", None) or "prompt" not in self._task:
            self._task = random.choice(TASKS)

        reward, feedback = _call_judge(
            problem=self._task["prompt"],
            answer_a=action.answer,
            answer_b=self._task["baseline"],
        )
        self._state.judge_feedback = feedback

        return AiContestArenaObservation(
            task_prompt=self._task["prompt"],
            judge_feedback=feedback,
            done=True,
            reward=reward,
            metadata={"difficulty": self._task["difficulty"], "raw_reward": reward},
        )

    @property
    def state(self) -> AiContestArenaState:
        """Current episode state (id, step count, task, last feedback)."""
        return self._state
ai_contest_arena/server/app.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ FastAPI application for the Multi-Agent Contest Arena Environment.
9
+
10
+ Endpoints:
11
+ POST /reset — randomly select a task (Easy / Medium / Hard)
12
+ POST /step — submit Contestant A's answer; LLM Judge scores it vs baseline
13
+ GET /state — current task and latest judge feedback
14
+ GET /schema — action/observation schemas
15
+ WS /ws — WebSocket endpoint for persistent sessions
16
+
17
+ Usage:
18
+ uvicorn server.app:app --reload --host 0.0.0.0 --port 8000
19
+ """
20
+
21
+ try:
22
+ from openenv.core.env_server.http_server import create_app
23
+ except Exception as e: # pragma: no cover
24
+ raise ImportError(
25
+ "openenv is required. Install dependencies with '\n uv sync\n'"
26
+ ) from e
27
+
28
+ try:
29
+ from ..models import AiContestArenaAction, AiContestArenaObservation
30
+ from .ai_contest_arena_environment import AiContestArenaEnvironment
31
+ except ImportError:
32
+ from models import AiContestArenaAction, AiContestArenaObservation
33
+ from server.ai_contest_arena_environment import AiContestArenaEnvironment
34
+
35
+
36
+ app = create_app(
37
+ AiContestArenaEnvironment,
38
+ AiContestArenaAction,
39
+ AiContestArenaObservation,
40
+ env_name="ai_contest_arena",
41
+ max_concurrent_envs=1,
42
+ )
43
+
44
+
45
+ def main(host: str = "0.0.0.0", port: int = 8000):
46
+ import uvicorn
47
+ uvicorn.run(app, host=host, port=port)
48
+
49
+
50
+ if __name__ == "__main__":
51
+ import argparse
52
+
53
+ parser = argparse.ArgumentParser()
54
+ parser.add_argument("--port", type=int, default=8000)
55
+ args = parser.parse_args()
56
+ main(port=args.port)
ai_contest_arena/server/requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ openenv-core[core]
2
+ fastapi
3
+ uvicorn
4
+ openai
ai_contest_arena/task.json ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "id": "task_1_coding",
4
+ "prompt": "Write a Python function named `is_even(n)` that returns True if a number is even, and False if it is odd. Only output the code.",
5
+ "rubric": "The answer must contain a valid Python function named is_even. Return 1.0 if correct, else 0.0."
6
+ },
7
+ {
8
+ "id": "task_2_logic",
9
+ "prompt": "Explain the core concept of 'Quantum Computing' in exactly one simple sentence.",
10
+ "rubric": "The answer must be exactly one sentence and mention qubits or superposition/entanglement. Return 1.0 for perfect, 0.5 for okay, 0.0 for wrong."
11
+ },
12
+ {
13
+ "id": "task_3_general",
14
+ "prompt": "What is the capital city of France? Answer in one word.",
15
+ "rubric": "The answer must be 'Paris'. Return 1.0 if correct, else 0.0."
16
+ }
17
+ ]
ai_contest_arena/uv.lock ADDED
The diff for this file is too large to render. See raw diff
 
ai_server_admin/Dockerfile ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # Multi-stage build using openenv-base
8
+ # This Dockerfile is flexible and works for both:
9
+ # - In-repo environments (with local OpenEnv sources)
10
+ # - Standalone environments (with openenv from PyPI/Git)
11
+ # The build script (openenv build) handles context detection and sets appropriate build args.
12
+
13
+ ARG BASE_IMAGE=ghcr.io/meta-pytorch/openenv-base:latest
14
+ FROM ${BASE_IMAGE} AS builder
15
+
16
+ WORKDIR /app
17
+
18
+ # Ensure git is available (required for installing dependencies from VCS)
19
+ RUN apt-get update && \
20
+ apt-get install -y --no-install-recommends git && \
21
+ rm -rf /var/lib/apt/lists/*
22
+
23
+ # Build argument to control whether we're building standalone or in-repo
24
+ ARG BUILD_MODE=in-repo
25
+ ARG ENV_NAME=ai_server_admin
26
+
27
+ # Copy environment code (always at root of build context)
28
+ COPY . /app/env
29
+
30
+ # For in-repo builds, openenv is already vendored in the build context
31
+ # For standalone builds, openenv will be installed via pyproject.toml
32
+ WORKDIR /app/env
33
+
34
+ # Ensure uv is available (for local builds where base image lacks it)
35
+ RUN if ! command -v uv >/dev/null 2>&1; then \
36
+ curl -LsSf https://astral.sh/uv/install.sh | sh && \
37
+ mv /root/.local/bin/uv /usr/local/bin/uv && \
38
+ mv /root/.local/bin/uvx /usr/local/bin/uvx; \
39
+ fi
40
+
41
+ # Install dependencies using uv sync
42
+ # If uv.lock exists, use it; otherwise resolve on the fly
43
+ RUN --mount=type=cache,target=/root/.cache/uv \
44
+ if [ -f uv.lock ]; then \
45
+ uv sync --frozen --no-install-project --no-editable; \
46
+ else \
47
+ uv sync --no-install-project --no-editable; \
48
+ fi
49
+
50
+ RUN --mount=type=cache,target=/root/.cache/uv \
51
+ if [ -f uv.lock ]; then \
52
+ uv sync --frozen --no-editable; \
53
+ else \
54
+ uv sync --no-editable; \
55
+ fi
56
+
57
+ # Final runtime stage
58
+ FROM ${BASE_IMAGE}
59
+
60
+ WORKDIR /app
61
+
62
+ # Copy the virtual environment from builder
63
+ COPY --from=builder /app/env/.venv /app/.venv
64
+
65
+ # Copy the environment code
66
+ COPY --from=builder /app/env /app/env
67
+
68
+ # Set PATH to use the virtual environment
69
+ ENV PATH="/app/.venv/bin:$PATH"
70
+
71
+ # Set PYTHONPATH so imports work correctly
72
+ ENV PYTHONPATH="/app/env:$PYTHONPATH"
73
+
74
+ # Health check
75
+ HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
76
+ CMD curl -f http://localhost:8000/health || exit 1
77
+
78
+ # Run the FastAPI server
79
+ # The module path is constructed to work with the /app/env structure
80
+ CMD ["sh", "-c", "cd /app/env && uvicorn server.app:app --host 0.0.0.0 --port 8000"]
ai_server_admin/README.md ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Ai Server Admin Environment Server
3
+ emoji: 🖥️
4
+ colorFrom: green
5
+ colorTo: yellow
6
+ sdk: docker
7
+ pinned: false
8
+ app_port: 8000
9
+ base_path: /web
10
+ tags:
11
+ - openenv
12
+ ---
13
+
14
+ # Ai Server Admin Environment
15
+
16
+ A task-based environment for AI server administration: on reset the server presents a task prompt, and on step an LLM judge scores the agent's answer against the task's rubric. (Class and field names below follow the echo-environment template.)
17
+
18
+ ## Quick Start
19
+
20
+ The simplest way to use the Ai Server Admin environment is through the `AiServerAdminEnv` class:
21
+
22
+ ```python
23
+ from ai_server_admin import AiServerAdminAction, AiServerAdminEnv
24
+
25
+ try:
26
+ # Create environment from Docker image
27
+ ai_server_adminenv = AiServerAdminEnv.from_docker_image("ai_server_admin-env:latest")
28
+
29
+ # Reset
30
+ result = ai_server_adminenv.reset()
31
+ print(f"Reset: {result.observation.echoed_message}")
32
+
33
+ # Send multiple messages
34
+ messages = ["Hello, World!", "Testing echo", "Final message"]
35
+
36
+ for msg in messages:
37
+ result = ai_server_adminenv.step(AiServerAdminAction(message=msg))
38
+ print(f"Sent: '{msg}'")
39
+ print(f" → Echoed: '{result.observation.echoed_message}'")
40
+ print(f" → Length: {result.observation.message_length}")
41
+ print(f" → Reward: {result.reward}")
42
+
43
+ finally:
44
+ # Always clean up
45
+ ai_server_adminenv.close()
46
+ ```
47
+
48
+ That's it! The `AiServerAdminEnv.from_docker_image()` method handles:
49
+ - Starting the Docker container
50
+ - Waiting for the server to be ready
51
+ - Connecting to the environment
52
+ - Container cleanup when you call `close()`
53
+
54
+ ## Building the Docker Image
55
+
56
+ Before using the environment, you need to build the Docker image:
57
+
58
+ ```bash
59
+ # From project root
60
+ docker build -t ai_server_admin-env:latest -f server/Dockerfile .
61
+ ```
62
+
63
+ ## Deploying to Hugging Face Spaces
64
+
65
+ You can easily deploy your OpenEnv environment to Hugging Face Spaces using the `openenv push` command:
66
+
67
+ ```bash
68
+ # From the environment directory (where openenv.yaml is located)
69
+ openenv push
70
+
71
+ # Or specify options
72
+ openenv push --namespace my-org --private
73
+ ```
74
+
75
+ The `openenv push` command will:
76
+ 1. Validate that the directory is an OpenEnv environment (checks for `openenv.yaml`)
77
+ 2. Prepare a custom build for Hugging Face Docker space (enables web interface)
78
+ 3. Upload to Hugging Face (ensuring you're logged in)
79
+
80
+ ### Prerequisites
81
+
82
+ - Authenticate with Hugging Face: The command will prompt for login if not already authenticated
83
+
84
+ ### Options
85
+
86
+ - `--directory`, `-d`: Directory containing the OpenEnv environment (defaults to current directory)
87
+ - `--repo-id`, `-r`: Repository ID in format 'username/repo-name' (defaults to 'username/env-name' from openenv.yaml)
88
+ - `--base-image`, `-b`: Base Docker image to use (overrides Dockerfile FROM)
89
+ - `--private`: Deploy the space as private (default: public)
90
+
91
+ ### Examples
92
+
93
+ ```bash
94
+ # Push to your personal namespace (defaults to username/env-name from openenv.yaml)
95
+ openenv push
96
+
97
+ # Push to a specific repository
98
+ openenv push --repo-id my-org/my-env
99
+
100
+ # Push with a custom base image
101
+ openenv push --base-image ghcr.io/meta-pytorch/openenv-base:latest
102
+
103
+ # Push as a private space
104
+ openenv push --private
105
+
106
+ # Combine options
107
+ openenv push --repo-id my-org/my-env --base-image custom-base:latest --private
108
+ ```
109
+
110
+ After deployment, your space will be available at:
111
+ `https://huggingface.co/spaces/<repo-id>`
112
+
113
+ The deployed space includes:
114
+ - **Web Interface** at `/web` - Interactive UI for exploring the environment
115
+ - **API Documentation** at `/docs` - Full OpenAPI/Swagger interface
116
+ - **Health Check** at `/health` - Container health monitoring
117
+ - **WebSocket** at `/ws` - Persistent session endpoint for low-latency interactions
118
+
119
+ ## Environment Details
120
+
121
+ ### Action
122
+ **AiServerAdminAction**: Contains a single field
123
+ - `message` (str) - The message to echo back
124
+
125
+ ### Observation
126
+ **AiServerAdminObservation**: Contains the echo response and metadata
127
+ - `echoed_message` (str) - The message echoed back
128
+ - `message_length` (int) - Length of the message
129
+ - `reward` (float) - Reward based on message length (length × 0.1)
130
+ - `done` (bool) - Always False for echo environment
131
+ - `metadata` (dict) - Additional info like step count
132
+
133
+ ### Reward
134
+ The reward is calculated as: `message_length × 0.1`
135
+ - "Hi" → reward: 0.2
136
+ - "Hello, World!" → reward: 1.3
137
+ - Empty message → reward: 0.0
138
+
139
+ ## Advanced Usage
140
+
141
+ ### Connecting to an Existing Server
142
+
143
+ If you already have a Ai Server Admin environment server running, you can connect directly:
144
+
145
+ ```python
146
+ from ai_server_admin import AiServerAdminEnv
147
+
148
+ # Connect to existing server
149
+ ai_server_adminenv = AiServerAdminEnv(base_url="<ENV_HTTP_URL_HERE>")
150
+
151
+ # Use as normal
152
+ result = ai_server_adminenv.reset()
153
+ result = ai_server_adminenv.step(AiServerAdminAction(message="Hello!"))
154
+ ```
155
+
156
+ Note: When connecting to an existing server, `ai_server_adminenv.close()` will NOT stop the server.
157
+
158
+ ### Using the Context Manager
159
+
160
+ The client supports context manager usage for automatic connection management:
161
+
162
+ ```python
163
+ from ai_server_admin import AiServerAdminAction, AiServerAdminEnv
164
+
165
+ # Connect with context manager (auto-connects and closes)
166
+ with AiServerAdminEnv(base_url="http://localhost:8000") as env:
167
+ result = env.reset()
168
+ print(f"Reset: {result.observation.echoed_message}")
169
+ # Multiple steps with low latency
170
+ for msg in ["Hello", "World", "!"]:
171
+ result = env.step(AiServerAdminAction(message=msg))
172
+ print(f"Echoed: {result.observation.echoed_message}")
173
+ ```
174
+
175
+ The client uses WebSocket connections for:
176
+ - **Lower latency**: No HTTP connection overhead per request
177
+ - **Persistent session**: Server maintains your environment state
178
+ - **Efficient for episodes**: Better for many sequential steps
179
+
180
+ ### Concurrent WebSocket Sessions
181
+
182
+ The server supports multiple concurrent WebSocket connections. To enable this,
183
+ modify `server/app.py` to use factory mode:
184
+
185
+ ```python
186
+ # In server/app.py - use factory mode for concurrent sessions
187
+ app = create_app(
188
+ AiServerAdminEnvironment, # Pass class, not instance
189
+ AiServerAdminAction,
190
+ AiServerAdminObservation,
191
+ max_concurrent_envs=4, # Allow 4 concurrent sessions
192
+ )
193
+ ```
194
+
195
+ Then multiple clients can connect simultaneously:
196
+
197
+ ```python
198
+ from ai_server_admin import AiServerAdminAction, AiServerAdminEnv
199
+ from concurrent.futures import ThreadPoolExecutor
200
+
201
+ def run_episode(client_id: int):
202
+ with AiServerAdminEnv(base_url="http://localhost:8000") as env:
203
+ result = env.reset()
204
+ for i in range(10):
205
+ result = env.step(AiServerAdminAction(message=f"Client {client_id}, step {i}"))
206
+ return client_id, result.observation.message_length
207
+
208
+ # Run 4 episodes concurrently
209
+ with ThreadPoolExecutor(max_workers=4) as executor:
210
+ results = list(executor.map(run_episode, range(4)))
211
+ ```
212
+
213
+ ## Development & Testing
214
+
215
+ ### Direct Environment Testing
216
+
217
+ Test the environment logic directly without starting the HTTP server:
218
+
219
+ ```bash
220
+ # From the server directory
221
+ python3 server/ai_server_admin_environment.py
222
+ ```
223
+
224
+ This verifies that:
225
+ - Environment resets correctly
226
+ - Step executes actions properly
227
+ - State tracking works
228
+ - Rewards are calculated correctly
229
+
230
+ ### Running Locally
231
+
232
+ Run the server locally for development:
233
+
234
+ ```bash
235
+ uvicorn server.app:app --reload
236
+ ```
237
+
238
+ ## Project Structure
239
+
240
+ ```
241
+ ai_server_admin/
242
+ ├── .dockerignore # Docker build exclusions
243
+ ├── __init__.py # Module exports
244
+ ├── README.md # This file
245
+ ├── openenv.yaml # OpenEnv manifest
246
+ ├── pyproject.toml # Project metadata and dependencies
247
+ ├── uv.lock # Locked dependencies (generated)
248
+ ├── client.py # AiServerAdminEnv client
249
+ ├── models.py # Action and Observation models
250
+ └── server/
251
+ ├── __init__.py # Server module exports
252
+ ├── ai_server_admin_environment.py # Core environment logic
253
+ ├── app.py # FastAPI application (HTTP + WebSocket endpoints)
254
+ └── Dockerfile # Container image definition
255
+ ```
ai_server_admin/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Ai Server Admin Environment."""
8
+
9
+ from .client import AiServerAdminEnv
10
+ from .models import AiServerAdminAction, AiServerAdminObservation
11
+
12
+ __all__ = [
13
+ "AiServerAdminAction",
14
+ "AiServerAdminObservation",
15
+ "AiServerAdminEnv",
16
+ ]
ai_server_admin/client.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Ai Server Admin Environment Client."""
8
+
9
+ from typing import Dict
10
+
11
+ from openenv.core import EnvClient
12
+ from openenv.core.client_types import StepResult
13
+ from openenv.core.env_server.types import State
14
+
15
+ from .models import AiServerAdminAction, AiServerAdminObservation
16
+
17
+
18
class AiServerAdminEnv(
    EnvClient[AiServerAdminAction, AiServerAdminObservation, State]
):
    """
    WebSocket client for the Ai Server Admin Environment.

    Keeps a persistent connection to the environment server, so each client
    instance owns a dedicated server-side session and multi-step episodes
    run with low per-request overhead.

    Example:
        >>> # Connect to a running server
        >>> with AiServerAdminEnv(base_url="http://localhost:8000") as client:
        ...     result = client.reset()
        ...     print(result.observation.echoed_message)
        ...
        ...     result = client.step(AiServerAdminAction(message="Hello!"))
        ...     print(result.observation.echoed_message)

    Example with Docker:
        >>> # Automatically start container and connect
        >>> client = AiServerAdminEnv.from_docker_image("ai_server_admin-env:latest")
        >>> try:
        ...     result = client.reset()
        ...     result = client.step(AiServerAdminAction(message="Test"))
        ... finally:
        ...     client.close()
    """

    def _step_payload(self, action: AiServerAdminAction) -> Dict:
        """Serialize *action* into the JSON body of a step message."""
        return {"message": action.message}

    def _parse_result(self, payload: Dict) -> StepResult[AiServerAdminObservation]:
        """Deserialize a server step/reset response into a StepResult."""
        obs_payload = payload.get("observation", {})
        done = payload.get("done", False)
        reward = payload.get("reward")

        observation = AiServerAdminObservation(
            echoed_message=obs_payload.get("echoed_message", ""),
            message_length=obs_payload.get("message_length", 0),
            done=done,
            reward=reward,
            metadata=obs_payload.get("metadata", {}),
        )
        return StepResult(observation=observation, reward=reward, done=done)

    def _parse_state(self, payload: Dict) -> State:
        """Deserialize a state response into a State object."""
        return State(
            episode_id=payload.get("episode_id"),
            step_count=payload.get("step_count", 0),
        )
ai_server_admin/models.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ Data models for the Ai Server Admin Environment.
9
+
10
+ The ai_server_admin environment is a simple test environment that echoes back messages.
11
+ """
12
+
13
+ from openenv.core.env_server.types import Action, Observation
14
+ from pydantic import Field
15
+
16
+
17
class AiServerAdminAction(Action):
    """Agent action: the single text payload submitted to the environment."""

    message: str = Field(..., description="Message to echo back")
21
+
22
+
23
class AiServerAdminObservation(Observation):
    """Environment observation: the echoed text plus its character count."""

    echoed_message: str = Field(default="", description="The echoed message")
    message_length: int = Field(default=0, description="Length of the echoed message")
ai_server_admin/openenv.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ spec_version: 1
2
+ name: ai_server_admin
3
+ type: space
4
+ runtime: fastapi
5
+ app: server.app:app
6
+ port: 8000
7
+
ai_server_admin/pyproject.toml ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ [build-system]
8
+ requires = ["setuptools>=45", "wheel"]
9
+ build-backend = "setuptools.build_meta"
10
+
11
+ [project]
12
+ name = "openenv-ai_server_admin"
13
+ version = "0.1.0"
14
+ description = "Ai Server Admin environment for OpenEnv"
15
+ requires-python = ">=3.10"
16
+ dependencies = [
17
+ # Core OpenEnv runtime (provides FastAPI server + HTTP client types)
18
+ # install from github
19
+ # "openenv-core[core] @ git+https://github.com/meta-pytorch/OpenEnv.git",
20
+ "openenv-core[core]>=0.2.2",
21
+ # Environment-specific dependencies
22
+ # Add all dependencies needed for your environment here
23
+ # Examples:
24
+ # "numpy>=1.19.0",
25
+ # "torch>=2.0.0",
26
+ # "gymnasium>=0.29.0",
27
+ # "openspiel>=1.0.0",
28
+ # "smolagents>=1.22.0,<2",
29
+ ]
30
+
31
+ [project.optional-dependencies]
32
+ dev = [
33
+ "pytest>=8.0.0",
34
+ "pytest-cov>=4.0.0",
35
+ ]
36
+
37
+ [project.scripts]
38
+ # Server entry point - enables running via: uv run --project . server
39
+ # or: python -m ai_server_admin.server.app
40
+ server = "ai_server_admin.server.app:main"
41
+
42
+ [tool.setuptools]
43
+ include-package-data = true
44
+ packages = ["ai_server_admin", "ai_server_admin.server"]
45
+ package-dir = { "ai_server_admin" = ".", "ai_server_admin.server" = "server" }
ai_server_admin/pyrightconfig.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "venvPath": ".",
3
+ "venv": ".venv",
4
+ "pythonVersion": "3.10",
5
+ "typeCheckingMode": "basic",
6
+ "reportMissingImports": "warning",
7
+ "reportMissingModuleSource": "none"
8
+ }
ai_server_admin/server/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Ai Server Admin environment server components."""
8
+
9
+ from .ai_server_admin_environment import AiServerAdminEnvironment
10
+
11
+ __all__ = ["AiServerAdminEnvironment"]
ai_server_admin/server/ai_server_admin_environment.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os, json, random, requests
2
+ from uuid import uuid4
3
+ from openenv.core.env_server.interfaces import Environment
4
+ from openenv.core.env_server.types import State
5
+
6
+ try:
7
+ from ..models import AiServerAdminAction, AiServerAdminObservation
8
+ except ImportError:
9
+ from models import AiServerAdminAction, AiServerAdminObservation
10
+
11
class AiServerAdminEnvironment(Environment):
    """AI server-admin task environment.

    reset() samples a task from tasks.json and presents its prompt;
    step() forwards the agent's answer to an OpenAI judge (gpt-4o-mini),
    which scores it against the task's rubric. Rewards are clamped to
    [0.0, 1.0] and episodes end after a single step.
    """

    SUPPORTS_CONCURRENT_SESSIONS: bool = True

    def __init__(self):
        self._state = State(episode_id=str(uuid4()), step_count=0)
        self.current_task = None
        # Tasks live next to the package root; fall back to the current
        # working directory so the server also runs when launched from
        # inside the environment folder.
        tasks_path = os.path.join(os.path.dirname(__file__), "..", "tasks.json")
        try:
            with open(tasks_path, "r") as f:
                self.tasks = json.load(f)
        except Exception:
            with open("tasks.json", "r") as f:
                self.tasks = json.load(f)

    def reset(self) -> AiServerAdminObservation:
        """Begin a new episode with a randomly chosen task."""
        self._state = State(episode_id=str(uuid4()), step_count=0)
        self.current_task = random.choice(self.tasks)
        return AiServerAdminObservation(
            echoed_message=f"[NEW TASK]: {self.current_task['prompt']}",
            message_length=0,
            done=False,
            reward=0.0,
        )

    def step(self, action: AiServerAdminAction) -> AiServerAdminObservation:
        """Judge the agent's answer; the episode terminates afterwards."""
        self._state.step_count += 1
        # Bug fix: step() before reset() used to crash with a TypeError
        # because current_task was None — sample a task on demand instead.
        if self.current_task is None:
            self.current_task = random.choice(self.tasks)
        agent_answer = action.message
        reward = self._judge_code(agent_answer)
        return AiServerAdminObservation(
            echoed_message="Evaluation Complete.",
            message_length=len(agent_answer),
            done=True,
            reward=reward,
        )

    def _judge_code(self, agent_answer: str) -> float:
        """Score *agent_answer* against the current task's rubric via OpenAI.

        Returns a neutral 0.5 when no OPENAI_API_KEY is configured, and 0.0
        when the judge call fails or yields no parseable score.
        """
        import re  # local import keeps the module's top-level imports unchanged

        openai_key = os.environ.get("OPENAI_API_KEY", "")
        if not openai_key:
            return 0.5

        headers = {"Authorization": f"Bearer {openai_key}", "Content-Type": "application/json"}
        payload = {
            "model": "gpt-4o-mini",
            "messages": [
                {"role": "system", "content": "You are a strict AI Judge. Evaluate the answer based on the rubric. Output ONLY a single float number between 0.0 and 1.0. No extra text."},
                {"role": "user", "content": f"Task: {self.current_task['prompt']}\nRubric: {self.current_task['rubric']}\nAgent Answer: {agent_answer}"},
            ],
        }

        try:
            # Bug fix: a timeout prevents a wedged judge endpoint from
            # hanging the environment server forever.
            resp = requests.post(
                "https://api.openai.com/v1/chat/completions",
                json=payload,
                headers=headers,
                timeout=60,
            )
            resp.raise_for_status()
            content = resp.json()["choices"][0]["message"]["content"].strip()
            # Robustness: tolerate chatter around the number by extracting
            # the first float instead of float()-ing the whole reply.
            match = re.search(r"-?\d+(?:\.\d+)?", content)
            if match is None:
                return 0.0
            return min(max(float(match.group(0)), 0.0), 1.0)
        except Exception:
            # Best-effort judging: any network/parse failure scores 0.0.
            return 0.0

    @property
    def state(self) -> State:
        """Current episode state (id and step count)."""
        return self._state
ai_server_admin/server/app.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ FastAPI application for the Ai Server Admin Environment.
9
+
10
+ This module creates an HTTP server that exposes the AiServerAdminEnvironment
11
+ over HTTP and WebSocket endpoints, compatible with EnvClient.
12
+
13
+ Endpoints:
14
+ - POST /reset: Reset the environment
15
+ - POST /step: Execute an action
16
+ - GET /state: Get current environment state
17
+ - GET /schema: Get action/observation schemas
18
+ - WS /ws: WebSocket endpoint for persistent sessions
19
+
20
+ Usage:
21
+ # Development (with auto-reload):
22
+ uvicorn server.app:app --reload --host 0.0.0.0 --port 8000
23
+
24
+ # Production:
25
+ uvicorn server.app:app --host 0.0.0.0 --port 8000 --workers 4
26
+
27
+ # Or run directly:
28
+ python -m server.app
29
+ """
30
+
31
+ try:
32
+ from openenv.core.env_server.http_server import create_app
33
+ except Exception as e: # pragma: no cover
34
+ raise ImportError(
35
+ "openenv is required for the web interface. Install dependencies with '\n uv sync\n'"
36
+ ) from e
37
+
38
+ try:
39
+ from ..models import AiServerAdminAction, AiServerAdminObservation
40
+ from .ai_server_admin_environment import AiServerAdminEnvironment
41
+ except ModuleNotFoundError:
42
+ from models import AiServerAdminAction, AiServerAdminObservation
43
+ from server.ai_server_admin_environment import AiServerAdminEnvironment
44
+
45
+
46
# Create the app with web interface and README integration.
# create_app wires the environment, action, and observation classes into a
# FastAPI app exposing /reset, /step, /state, /schema and the /ws WebSocket
# (see the module docstring above for the endpoint list).
app = create_app(
    AiServerAdminEnvironment,
    AiServerAdminAction,
    AiServerAdminObservation,
    env_name="ai_server_admin",
    max_concurrent_envs=1,  # increase this number to allow more concurrent WebSocket sessions
)
54
+
55
+
56
def main(host: str = "0.0.0.0", port: int = 8000):
    """Run the server directly with uvicorn (no Docker required).

    Invocation options:
        uv run --project . server
        uv run --project . server --port 8001
        python -m ai_server_admin.server.app

    Args:
        host: Host address to bind to (default: "0.0.0.0")
        port: Port number to listen on (default: 8000)

    For production deployments, prefer invoking uvicorn with multiple
    workers:
        uvicorn ai_server_admin.server.app:app --workers 4
    """
    # Imported lazily so merely importing this module never requires uvicorn.
    import uvicorn

    server_kwargs = {"host": host, "port": port}
    uvicorn.run(app, **server_kwargs)
76
+
77
+
78
if __name__ == "__main__":
    # CLI entry point: only --port is configurable from the command line;
    # the host stays at main()'s default of 0.0.0.0.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=8000)
    args = parser.parse_args()
    main(port=args.port)
ai_server_admin/server/requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ openenv[core]>=0.2.0
2
+ fastapi>=0.115.0
3
+ uvicorn>=0.24.0
4
+
5
+
6
+
ai_server_admin/uv.lock ADDED
The diff for this file is too large to render. See raw diff
 
hackathon_submission.zip ADDED
Binary file (3 kB). View file
 
inference.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os, sys, requests, json, time
2
+ from openai import OpenAI
3
+
4
+ ENV_BASE_URL = "http://localhost:8000"
5
+
6
def play_round(round_number):
    """Play one round: fetch a task from the env server, answer it with the
    Qwen agent via the HF router, submit the answer, and return the score.

    Args:
        round_number: 1-based round index, used only for logging.

    Returns:
        The judge's score for this round (float, 0.0 on any failure).
    """
    print(f"\n{'='*50}\n🏁 ROUND {round_number} STARTS!\n{'='*50}")

    # 1. Get Task safely
    try:
        resp = requests.post(f"{ENV_BASE_URL}/reset", timeout=120).json()
    except Exception as e:
        sys.exit(f"🚨 Error: Cannot connect to OpenEnv Server. {e}")

    # The reset payload shape varies across server versions; probe the known
    # layouts before falling back to the raw JSON.
    task = ""
    if isinstance(resp, dict):
        obs = resp.get("observation")
        if isinstance(obs, dict) and "echoed_message" in obs:
            task = obs["echoed_message"]
        elif isinstance(obs, dict) and "task_prompt" in obs:
            task = obs["task_prompt"]
        elif "echoed_message" in resp:
            task = resp["echoed_message"]
        else:
            task = json.dumps(resp)
    else:
        task = str(resp)

    print(f"🔥 JUDGE ASKS:\n{task}\n")

    # 2. Qwen Agent API Call (Working on HF Router)
    print("🤖 Agent is thinking (Using Qwen 2.5)...")
    hf_token = os.environ.get("HF_TOKEN", "")
    if not hf_token:
        print("🚨 WARNING: HF_TOKEN is missing. Please set it in your environment variables.")

    client = OpenAI(
        base_url="https://router.huggingface.co/v1",
        api_key=hf_token
    )

    try:
        completion = client.chat.completions.create(
            model="Qwen/Qwen2.5-72B-Instruct",
            messages=[
                {"role": "system", "content": "You are a Python expert. Output ONLY valid Python code. No explanations, no markdown blocks like ```python."},
                {"role": "user", "content": task}
            ],
        )
        # Strip any markdown fencing the model adds despite the prompt.
        agent_answer = completion.choices[0].message.content.replace("```python", "").replace("```", "").strip()
    except Exception as e:
        print(f"🚨 HF API Error: {e}")
        agent_answer = "def generic_answer(): pass"

    print(f"🗣️ AGENT'S ANSWER (Snippet):\n{agent_answer[:150]}...\n")

    # 3. Submit to Server (Direct Payload)
    print("⚖️ Submitting to Judge...")
    # BUG FIX: the environment's step() reads `action.message`, so the action
    # field must be named "message" — the previous "answer" key did not match
    # the action model and the submission was never graded correctly.
    payload = {"action": {"message": agent_answer}}
    try:
        step_resp = requests.post(f"{ENV_BASE_URL}/step", json=payload, timeout=120)

        if step_resp.status_code == 200:
            result = step_resp.json()
            score = result.get("observation", {}).get("reward", result.get("reward", 0.0))
        else:
            print(f"🚨 Server Error! Status: {step_resp.status_code}")
            print(f"🚨 Details: {step_resp.text}")
            score = 0.0

    except Exception as e:
        print(f"🚨 Server Communication Error: {e}")
        score = 0.0

    print(f"🏆 ROUND {round_number} SCORE : {score} / 1.0")
    return score
76
+
77
def main():
    """Run a three-round match and print the cumulative score."""
    # BUG FIX: the banner previously said "GEMMA AGENT", but the agent model
    # used in play_round() is Qwen 2.5.
    print("🚀 [START] QWEN AGENT vs OPENAI JUDGE")
    total_score = 0
    for i in range(1, 4):
        total_score += play_round(i)
        time.sleep(2)  # brief pause between rounds to avoid hammering the server
    print(f"\n🎉🎉 MATCH FINISHED! FINAL TOTAL SCORE: {total_score} / 3.0 🎉🎉")
84
+
85
+ if __name__ == "__main__":
86
+ main()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ requests==2.32.3
2
+ openai==2.7.2
3
+ pydantic==2.11.7