burtenshaw HF Staff commited on
Commit
59d7f2d
·
verified ·
1 Parent(s): 8293196

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. src/core/openenv/__init__.py +23 -0
  2. src/core/openenv/auto/__init__.py +39 -0
  3. src/core/openenv/auto/_discovery.py +584 -0
  4. src/core/openenv/auto/auto_action.py +276 -0
  5. src/core/openenv/auto/auto_env.py +896 -0
  6. src/core/openenv/cli/__init__.py +9 -0
  7. src/core/openenv/cli/__main__.py +62 -0
  8. src/core/openenv/cli/_cli_utils.py +79 -0
  9. src/core/openenv/cli/_validation.py +162 -0
  10. src/core/openenv/cli/commands/__init__.py +11 -0
  11. src/core/openenv/cli/commands/build.py +461 -0
  12. src/core/openenv/cli/commands/fork.py +197 -0
  13. src/core/openenv/cli/commands/init.py +500 -0
  14. src/core/openenv/cli/commands/push.py +718 -0
  15. src/core/openenv/cli/commands/serve.py +94 -0
  16. src/core/openenv/cli/commands/validate.py +108 -0
  17. src/core/openenv/cli/templates/__init__.py +7 -0
  18. src/core/openenv/cli/templates/openenv_env/.dockerignore +15 -0
  19. src/core/openenv/cli/templates/openenv_env/README.md +255 -0
  20. src/core/openenv/cli/templates/openenv_env/__init__.py +16 -0
  21. src/core/openenv/cli/templates/openenv_env/client.py +99 -0
  22. src/core/openenv/cli/templates/openenv_env/models.py +28 -0
  23. src/core/openenv/cli/templates/openenv_env/openenv.yaml +7 -0
  24. src/core/openenv/cli/templates/openenv_env/pyproject.toml +45 -0
  25. src/core/openenv/cli/templates/openenv_env/server/Dockerfile +80 -0
  26. src/core/openenv/cli/templates/openenv_env/server/__ENV_NAME___environment.py +101 -0
  27. src/core/openenv/cli/templates/openenv_env/server/__init__.py +11 -0
  28. src/core/openenv/cli/templates/openenv_env/server/app.py +81 -0
  29. src/core/openenv/cli/templates/openenv_env/server/requirements.txt +6 -0
  30. src/core/openenv/core/README.md +212 -0
  31. src/core/openenv/core/__init__.py +24 -0
  32. src/core/openenv/core/client_types.py +23 -0
  33. src/core/openenv/core/containers/__init__.py +7 -0
  34. src/core/openenv/core/containers/images/Dockerfile +64 -0
  35. src/core/openenv/core/containers/images/README.md +92 -0
  36. src/core/openenv/core/containers/runtime/__init__.py +25 -0
  37. src/core/openenv/core/containers/runtime/daytona_provider.py +572 -0
  38. src/core/openenv/core/containers/runtime/providers.py +667 -0
  39. src/core/openenv/core/containers/runtime/uv_provider.py +224 -0
  40. src/core/openenv/core/containers/test_local_docker_provider.py +261 -0
  41. src/core/openenv/core/env_client.py +483 -0
  42. src/core/openenv/core/env_server/__init__.py +140 -0
  43. src/core/openenv/core/env_server/base_transforms.py +29 -0
  44. src/core/openenv/core/env_server/exceptions.py +105 -0
  45. src/core/openenv/core/env_server/gradio_theme.py +128 -0
  46. src/core/openenv/core/env_server/gradio_ui.py +240 -0
  47. src/core/openenv/core/env_server/http_server.py +1396 -0
  48. src/core/openenv/core/env_server/interfaces.py +295 -0
  49. src/core/openenv/core/env_server/mcp_environment.py +610 -0
  50. src/core/openenv/core/env_server/mcp_types.py +321 -0
src/core/openenv/__init__.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Unified OpenEnv package bundling the CLI and core runtime."""

from importlib import metadata

from .auto import AutoAction, AutoEnv
from .core import GenericEnvClient, GenericAction, SyncEnvClient

# Resolve the installed distribution version; fall back for source checkouts
# where the "openenv" distribution metadata is not available.
try:
    __version__ = metadata.version("openenv")  # type: ignore[arg-type]
except metadata.PackageNotFoundError:  # pragma: no cover - local dev
    __version__ = "0.0.0"

# Public API surface: the two submodules plus the re-exported client classes.
__all__ = [
    "core",
    "cli",
    "AutoEnv",
    "AutoAction",
    "GenericEnvClient",
    "GenericAction",
    "SyncEnvClient",
]
src/core/openenv/auto/__init__.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

"""
Auto-discovery entry points for OpenEnv.

Re-exports the two HuggingFace-style factory classes so environments can be
used without manual imports:

    >>> from openenv import AutoEnv, AutoAction
    >>>
    >>> # Load an environment from an installed package or HuggingFace Hub
    >>> env = AutoEnv.from_name("coding-env")
    >>>
    >>> # Resolve the matching action class
    >>> CodeAction = AutoAction.from_name("coding")
    >>> action = CodeAction(code="print('Hello!')")

Classes:
    AutoEnv: Automatic environment client selection and instantiation
    AutoAction: Automatic action class selection

Under the hood, discovery scans installed ``openenv-*`` packages via
``importlib.metadata``, reads each package's ``openenv.yaml`` manifest,
supports HuggingFace Hub repositories for remote environments, and caches
results for performance.
"""

from .auto_action import AutoAction
from .auto_env import AutoEnv

__all__ = ["AutoEnv", "AutoAction"]
src/core/openenv/auto/_discovery.py ADDED
@@ -0,0 +1,584 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ Environment Auto-Discovery System
9
+ ==================================
10
+
11
+ This module provides automatic discovery of OpenEnv environments by:
12
+ 1. Discovering installed openenv-* packages using importlib.metadata
13
+ 2. Loading manifests (openenv.yaml) from package resources
14
+ 3. Caching results for performance
15
+ 4. Supporting HuggingFace Hub downloads
16
+
17
+ This enables AutoEnv to work without coupling to src/envs/ directory.
18
+ """
19
+
20
+ import importlib
21
+ import importlib.metadata
22
+ import importlib.resources
23
+ import json
24
+ import logging
25
+ import re
26
+ import tempfile
27
+ from dataclasses import dataclass, asdict
28
+ from pathlib import Path
29
+ from typing import Dict, Optional, Type, Any
30
+
31
+ import yaml
32
+
33
+ logger = logging.getLogger(__name__)
34
+
35
+
36
@dataclass
class EnvironmentInfo:
    """
    Rich information about a discovered environment.

    Attributes:
        env_key: Environment key (e.g., "echo", "coding")
        name: Full environment name (e.g., "echo_env")
        package_name: Package name (e.g., "openenv-echo_env")
        version: Version string
        description: Human-readable description
        client_module_path: Full module path to client (e.g., "echo_env.client")
        client_class_name: Client class name (e.g., "EchoEnv")
        action_class_name: Action class name (e.g., "EchoAction")
        observation_class_name: Observation class name (e.g., "EchoObservation")
        default_image: Default Docker image name (e.g., "echo-env:latest")
        spec_version: OpenEnv spec version (from openenv.yaml)
        manifest: Original manifest data
    """

    env_key: str
    name: str
    package_name: str
    version: str
    description: str
    client_module_path: str
    client_class_name: str
    action_class_name: str
    observation_class_name: str
    default_image: str
    spec_version: Optional[int] = None
    manifest: Optional[Dict[str, Any]] = None

    def _import_class(self, class_name: str) -> Type:
        """
        Import ``self.client_module_path`` and return the named attribute.

        Shared implementation for the three public accessors below, so the
        import/error-handling logic lives in exactly one place.

        Args:
            class_name: Attribute to fetch from the client module

        Raises:
            ImportError: If the module or the class cannot be imported
        """
        try:
            module = importlib.import_module(self.client_module_path)
            return getattr(module, class_name)
        except ImportError as e:
            raise ImportError(
                f"Failed to import {class_name} from {self.client_module_path}: {e}\n"
                f"Make sure the package '{self.package_name}' is installed: "
                f"pip install {self.package_name}"
            ) from e
        except AttributeError as e:
            # Module imported fine but the expected class is missing; surface
            # it as ImportError to keep a single exception type for callers.
            raise ImportError(
                f"Class {class_name} not found in {self.client_module_path}: {e}"
            ) from e

    def get_client_class(self) -> Type:
        """
        Dynamically import and return the client class.

        Returns:
            Client class (e.g., EchoEnv)

        Raises:
            ImportError: If module or class cannot be imported
        """
        return self._import_class(self.client_class_name)

    def get_action_class(self) -> Type:
        """
        Dynamically import and return the action class.

        Returns:
            Action class (e.g., EchoAction)

        Raises:
            ImportError: If module or class cannot be imported
        """
        return self._import_class(self.action_class_name)

    def get_observation_class(self) -> Type:
        """
        Dynamically import and return the observation class.

        Returns:
            Observation class (e.g., EchoObservation)

        Raises:
            ImportError: If module or class cannot be imported
        """
        return self._import_class(self.observation_class_name)
139
+ ) from e
140
+
141
+
142
+ def _normalize_env_name(name: str) -> str:
143
+ """
144
+ Normalize environment name to standard format.
145
+
146
+ Args:
147
+ name: Input name (e.g., "echo", "echo-env", "echo_env")
148
+
149
+ Returns:
150
+ Normalized name (e.g., "echo_env")
151
+
152
+ Examples:
153
+ >>> _normalize_env_name("echo")
154
+ 'echo_env'
155
+ >>> _normalize_env_name("echo-env")
156
+ 'echo_env'
157
+ >>> _normalize_env_name("echo_env")
158
+ 'echo_env'
159
+ """
160
+ # Remove common suffixes
161
+ name = re.sub(r"[-_]env$", "", name)
162
+ # Convert hyphens to underscores
163
+ name = name.replace("-", "_")
164
+ # Add _env suffix if not present
165
+ if not name.endswith("_env"):
166
+ name = f"{name}_env"
167
+ return name
168
+
169
+
170
+ def _is_hub_url(name: str) -> bool:
171
+ """
172
+ Check if name is a HuggingFace Hub URL or repo ID.
173
+
174
+ Args:
175
+ name: Input name
176
+
177
+ Returns:
178
+ True if it looks like a Hub URL
179
+
180
+ Examples:
181
+ >>> _is_hub_url("meta-pytorch/echo_env")
182
+ True
183
+ >>> _is_hub_url("https://huggingface.co/meta-pytorch/echo_env")
184
+ True
185
+ >>> _is_hub_url("echo")
186
+ False
187
+ """
188
+ # Contains org/repo pattern or huggingface.co domain
189
+ return "/" in name or "huggingface.co" in name
190
+
191
+
192
+ def _infer_class_name(env_name: str, class_type: str) -> str:
193
+ """
194
+ Infer class name from environment name using simple conventions.
195
+
196
+ Args:
197
+ env_name: Environment name (e.g., "echo_env")
198
+ class_type: Type of class ("client", "action", "observation")
199
+
200
+ Returns:
201
+ Inferred class name
202
+
203
+ Examples:
204
+ >>> _infer_class_name("echo_env", "client")
205
+ 'EchoEnv'
206
+ >>> _infer_class_name("echo_env", "action")
207
+ 'EchoAction'
208
+ """
209
+ # Remove _env suffix for base name
210
+ base_name = env_name.replace("_env", "")
211
+
212
+ # Convert to PascalCase
213
+ pascal_name = "".join(word.capitalize() for word in base_name.split("_"))
214
+
215
+ # Add suffix based on type
216
+ if class_type == "client":
217
+ return f"{pascal_name}Env"
218
+ elif class_type == "action":
219
+ return f"{pascal_name}Action"
220
+ elif class_type == "observation":
221
+ return f"{pascal_name}Observation"
222
+ else:
223
+ raise ValueError(f"Unknown class type: {class_type}")
224
+
225
+
226
+ def _load_manifest_from_package(
227
+ package_name: str, module_name: str
228
+ ) -> Optional[Dict[str, Any]]:
229
+ """
230
+ Load openenv.yaml manifest from an installed package.
231
+
232
+ Args:
233
+ package_name: Package name (e.g., "openenv-echo_env")
234
+ module_name: Module name (e.g., "echo_env")
235
+
236
+ Returns:
237
+ Parsed manifest dictionary, or None if not found
238
+
239
+ """
240
+ try:
241
+ # Try to read openenv.yaml from package
242
+ if hasattr(importlib.resources, "files"):
243
+ # Python 3.9+
244
+ package_files = importlib.resources.files(module_name)
245
+ if (package_files / "openenv.yaml").is_file():
246
+ manifest_text = (package_files / "openenv.yaml").read_text()
247
+ return yaml.safe_load(manifest_text)
248
+ else:
249
+ # Python 3.7-3.8 fallback
250
+ with importlib.resources.open_text(module_name, "openenv.yaml") as f:
251
+ return yaml.safe_load(f)
252
+ except (FileNotFoundError, ModuleNotFoundError, AttributeError):
253
+ logger.debug(f"No openenv.yaml found in {module_name}")
254
+ return None
255
+ except Exception as e:
256
+ logger.warning(f"Failed to load openenv.yaml from {module_name}: {e}")
257
+ return None
258
+
259
+
260
def _create_env_info_from_package(
    package_name: str, module_name: str, version: str
) -> Optional[EnvironmentInfo]:
    """
    Build an EnvironmentInfo record for one installed package.

    Args:
        package_name: Package name (e.g., "openenv-echo_env")
        module_name: Module name (e.g., "echo_env")
        version: Package version

    Returns:
        EnvironmentInfo instance, or None if invalid
    """
    manifest = _load_manifest_from_package(package_name, module_name)

    # Prefer the manifest's declared name; otherwise fall back to the module
    # name. Either way, guarantee the canonical "_env" suffix.
    env_name = manifest["name"] if manifest and "name" in manifest else module_name
    if not env_name.endswith("_env"):
        env_name = f"{env_name}_env"

    # Lookup key drops the suffix ("echo_env" -> "echo").
    env_key = env_name.replace("_env", "") if env_name.endswith("_env") else env_name

    # Human-readable description, with a generic default.
    description = f"{env_name} environment"
    if manifest:
        description = manifest.get("description", description)

    spec_version = manifest.get("spec_version") if manifest else None

    # Class names start from naming conventions; a manifest that declares
    # both "action" and "observation" (custom format) overrides those two.
    client_class_name = _infer_class_name(env_name, "client")
    action_class_name = _infer_class_name(env_name, "action")
    observation_class_name = _infer_class_name(env_name, "observation")
    if manifest and "action" in manifest and "observation" in manifest:
        action_class_name = manifest["action"]
        observation_class_name = manifest["observation"]

    return EnvironmentInfo(
        env_key=env_key,
        name=env_name,
        package_name=package_name,
        version=version,
        description=description,
        # By convention the client lives in <module>.client.
        client_module_path=f"{module_name}.client",
        client_class_name=client_class_name,
        action_class_name=action_class_name,
        observation_class_name=observation_class_name,
        # Docker images use hyphens where module names use underscores.
        default_image=f"{env_name.replace('_', '-')}:latest",
        spec_version=spec_version,
        manifest=manifest,
    )
339
+
340
+
341
+ class EnvironmentDiscovery:
342
+ """
343
+ Auto-discovery system for OpenEnv environments using installed packages.
344
+
345
+ This class discovers installed openenv-* packages and loads their metadata.
346
+ """
347
+
348
+ def __init__(self):
349
+ """Initialize discovery system."""
350
+ self._cache: Optional[Dict[str, EnvironmentInfo]] = None
351
+ self._cache_file = Path(tempfile.gettempdir()) / "openenv_discovery_cache.json"
352
+
353
+ def _discover_installed_packages(self) -> Dict[str, EnvironmentInfo]:
354
+ """
355
+ Discover all installed openenv-* packages.
356
+
357
+ Returns:
358
+ Dictionary mapping env_key to EnvironmentInfo
359
+ """
360
+ environments = {}
361
+
362
+ # Invalidate import caches to ensure we pick up newly installed packages
363
+ importlib.invalidate_caches()
364
+
365
+ # Get all installed packages
366
+ try:
367
+ distributions = importlib.metadata.distributions()
368
+ except Exception as e:
369
+ logger.warning(f"Failed to get installed packages: {e}")
370
+ return environments
371
+
372
+ # Filter for openenv-* packages (exclude openenv-core)
373
+ for dist in distributions:
374
+ package_name = dist.metadata["Name"]
375
+
376
+ if not package_name.startswith("openenv-"):
377
+ continue
378
+
379
+ if package_name == "openenv-core":
380
+ continue
381
+
382
+ # Get module name (e.g., "openenv-echo_env" → "echo_env")
383
+ module_name = package_name.replace("openenv-", "").replace("-", "_")
384
+
385
+ # Get version
386
+ version = dist.version
387
+
388
+ try:
389
+ # Create environment info
390
+ env_info = _create_env_info_from_package(
391
+ package_name, module_name, version
392
+ )
393
+
394
+ if env_info:
395
+ environments[env_info.env_key] = env_info
396
+ logger.debug(
397
+ f"Discovered environment: {env_info.env_key} ({package_name})"
398
+ )
399
+
400
+ except Exception as e:
401
+ logger.warning(f"Failed to load environment from {package_name}: {e}")
402
+ continue
403
+
404
+ return environments
405
+
406
+ def _load_cache(self) -> Optional[Dict[str, EnvironmentInfo]]:
407
+ """
408
+ Load cached discovery results.
409
+
410
+ Returns:
411
+ Dictionary of env_key -> EnvironmentInfo, or None if cache invalid
412
+ """
413
+ if not self._cache_file.exists():
414
+ return None
415
+
416
+ try:
417
+ with open(self._cache_file, "r") as f:
418
+ cache_data = json.load(f)
419
+
420
+ # Reconstruct EnvironmentInfo objects
421
+ cache = {}
422
+ for env_key, env_data in cache_data.items():
423
+ cache[env_key] = EnvironmentInfo(**env_data)
424
+
425
+ return cache
426
+ except Exception as e:
427
+ logger.warning(f"Failed to load discovery cache: {e}")
428
+ return None
429
+
430
+ def _save_cache(self, environments: Dict[str, EnvironmentInfo]) -> None:
431
+ """
432
+ Save discovery results to cache.
433
+
434
+ Args:
435
+ environments: Dictionary of env_key -> EnvironmentInfo
436
+ """
437
+ try:
438
+ cache_data = {}
439
+ for env_key, env_info in environments.items():
440
+ cache_data[env_key] = asdict(env_info)
441
+
442
+ with open(self._cache_file, "w") as f:
443
+ json.dump(cache_data, f, indent=2)
444
+
445
+ except Exception as e:
446
+ logger.warning(f"Failed to save discovery cache: {e}")
447
+
448
+ def discover(self, use_cache: bool = True) -> Dict[str, EnvironmentInfo]:
449
+ """
450
+ Discover all installed OpenEnv environments.
451
+
452
+ Args:
453
+ use_cache: If True, try to load from cache first
454
+
455
+ Returns:
456
+ Dictionary mapping env_key to EnvironmentInfo
457
+
458
+ Examples:
459
+ >>> discovery = EnvironmentDiscovery()
460
+ >>> envs = discovery.discover()
461
+ >>> print(envs.keys())
462
+ dict_keys(['echo', 'coding', ...])
463
+ """
464
+ # Try to load from memory cache first
465
+ if use_cache and self._cache is not None:
466
+ return self._cache
467
+
468
+ # Try to load from file cache
469
+ if use_cache:
470
+ cached = self._load_cache()
471
+ if cached is not None:
472
+ self._cache = cached
473
+ return self._cache
474
+
475
+ # Discover from installed packages
476
+ environments = self._discover_installed_packages()
477
+
478
+ # Save to cache
479
+ self._save_cache(environments)
480
+ self._cache = environments
481
+
482
+ return environments
483
+
484
+ def get_environment(self, env_key: str) -> Optional[EnvironmentInfo]:
485
+ """
486
+ Get information about a specific environment.
487
+
488
+ Args:
489
+ env_key: Environment key (e.g., "echo", "coding")
490
+
491
+ Returns:
492
+ EnvironmentInfo if found, None otherwise
493
+
494
+ Examples:
495
+ >>> discovery = EnvironmentDiscovery()
496
+ >>> env = discovery.get_environment("echo")
497
+ >>> print(env.client_class_name)
498
+ 'EchoEnv'
499
+ """
500
+ environments = self.discover()
501
+ return environments.get(env_key)
502
+
503
+ def get_environment_by_name(self, name: str) -> Optional[EnvironmentInfo]:
504
+ """
505
+ Get environment info by flexible name matching.
506
+
507
+ Args:
508
+ name: Environment name (e.g., "echo", "echo-env", "echo_env")
509
+
510
+ Returns:
511
+ EnvironmentInfo if found, None otherwise
512
+ """
513
+ # Normalize name to env_key
514
+ normalized = _normalize_env_name(name)
515
+ env_key = normalized.replace("_env", "")
516
+
517
+ return self.get_environment(env_key)
518
+
519
+ def list_environments(self) -> None:
520
+ """
521
+ Print a formatted list of all discovered environments.
522
+
523
+ Examples:
524
+ >>> discovery = EnvironmentDiscovery()
525
+ >>> discovery.list_environments()
526
+ Available OpenEnv Environments:
527
+ ----------------------------------------------------------------------
528
+ echo : Echo Environment (v0.1.0) - openenv-echo_env
529
+ coding : Coding Environment (v0.1.0) - openenv-coding_env
530
+ ...
531
+ """
532
+ environments = self.discover()
533
+
534
+ print("Available OpenEnv Environments:")
535
+ print("-" * 70)
536
+
537
+ if not environments:
538
+ print(" No OpenEnv environments found.")
539
+ print(" Install environments with: pip install openenv-<env-name>")
540
+ else:
541
+ for env_key in sorted(environments.keys()):
542
+ env = environments[env_key]
543
+ print(f" {env_key:<15}: {env.description} (v{env.version})")
544
+ print(f" Package: {env.package_name}")
545
+
546
+ print("-" * 70)
547
+ print(f"Total: {len(environments)} environments")
548
+
549
+ def clear_cache(self) -> None:
550
+ """Clear the discovery cache."""
551
+ if self._cache_file.exists():
552
+ self._cache_file.unlink()
553
+ self._cache = None
554
+
555
+
556
# Process-wide singleton so repeated lookups share one cache.
_global_discovery: Optional[EnvironmentDiscovery] = None


def get_discovery() -> EnvironmentDiscovery:
    """
    Return the global EnvironmentDiscovery, creating it on first use.

    Returns:
        Global EnvironmentDiscovery instance

    Examples:
        >>> discovery = get_discovery()
        >>> envs = discovery.discover()
    """
    global _global_discovery
    if _global_discovery is None:
        _global_discovery = EnvironmentDiscovery()
    return _global_discovery


def reset_discovery() -> None:
    """Reset the global discovery instance and its caches (useful for testing)."""
    global _global_discovery
    if _global_discovery is not None:
        _global_discovery.clear_cache()
    _global_discovery = None
src/core/openenv/auto/auto_action.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ AutoAction - Automatic Action Class Selection
9
+ ==============================================
10
+
11
+ AutoAction provides a HuggingFace-style API for automatically retrieving the
12
+ correct Action class from installed packages or HuggingFace Hub.
13
+
14
+ This module simplifies working with environment actions by automatically
15
+ detecting and returning the appropriate Action class without requiring
16
+ manual imports.
17
+
18
+ Example:
19
+ >>> from openenv import AutoEnv, AutoAction
20
+ >>>
21
+ >>> # Get Action class from environment name
22
+ >>> CodeAction = AutoAction.from_env("coding")
23
+ >>> action = CodeAction(code="print('Hello!')")
24
+ >>>
25
+ >>> # From HuggingFace Hub
26
+ >>> CodeAction = AutoAction.from_env("meta-pytorch/coding-env")
27
+ >>>
28
+ >>> # Use with AutoEnv
29
+ >>> env = AutoEnv.from_env("coding-env")
30
+ >>> result = env.step(action)
31
+ """
32
+
33
+ from __future__ import annotations
34
+
35
+ import logging
36
+ from typing import Type, Dict, Any
37
+
38
+ from ._discovery import get_discovery, _is_hub_url
39
+ from .auto_env import AutoEnv
40
+
41
+ logger = logging.getLogger(__name__)
42
+
43
+
44
class AutoAction:
    """
    Factory for resolving Action classes by environment name or Hub repo.

    Follows the HuggingFace AutoModel pattern: the classmethods look up the
    right Action class for an environment and return the *class* (never an
    instance) for the caller to instantiate.

    Example:
        >>> # From installed package
        >>> CodeAction = AutoAction.from_env("coding")
        >>> action = CodeAction(code="print('test')")
        >>>
        >>> # From HuggingFace Hub
        >>> CodeAction = AutoAction.from_env("meta-pytorch/coding-env")
        >>> action = CodeAction(code="print('test')")
        >>>
        >>> # Combined with AutoEnv
        >>> env = AutoEnv.from_env("coding-env")
        >>> ActionClass = AutoAction.from_env("coding-env")
        >>> result = env.step(ActionClass(code="print('Hello, AutoAction!')"))

    Note:
        AutoAction is not meant to be instantiated directly; use from_env().
    """

    def __init__(self):
        """Disallowed — AutoAction only exposes classmethod factories."""
        raise TypeError(
            "AutoAction is a factory class and should not be instantiated directly. "
            "Use AutoAction.from_hub() or AutoAction.from_env() instead."
        )

    @classmethod
    def from_env(cls, name: str, skip_install: bool = False) -> Type:
        """
        Resolve the Action class for an environment name or Hub repository.

        Resolution order:
        1. ``skip_install=True`` short-circuits to GenericAction (no lookup).
        2. Hub URLs / repo IDs are downloaded and installed via AutoEnv.
        3. The installed openenv-* package is located and its Action class
           imported and returned.

        Args:
            name: Environment name or HuggingFace Hub repo ID
                Examples:
                    - "coding" / "coding-env" / "coding_env"
                    - "meta-pytorch/coding-env" (Hub repo ID)
                    - "https://huggingface.co/meta-pytorch/coding-env" (Hub URL)
            skip_install: If True, skip package installation and return the
                GenericAction class instead (for use with GenericEnvClient).

        Returns:
            Action class (not an instance!); GenericAction when
            skip_install=True.

        Raises:
            ValueError: If environment not found (only when skip_install=False)
            ImportError: If environment package is not installed (only when
                skip_install=False)

        Examples:
            >>> CodeAction = AutoAction.from_env("coding-env")
            >>> action = CodeAction(code="print('Hello!')")
            >>> ActionClass = AutoAction.from_env("user/repo", skip_install=True)
        """
        if skip_install:
            # No package work at all: hand back the generic action type.
            from openenv.core.generic_client import GenericAction

            logger.info(
                f"Returning GenericAction for '{name}' (skip_install=True). "
                f"Use keyword arguments to create actions: GenericAction(code='...')"
            )
            return GenericAction

        # Hub references are installed up front; plain names pass through.
        env_name = AutoEnv._ensure_package_from_hub(name) if _is_hub_url(name) else name

        discovery = get_discovery()
        env_info = discovery.get_environment_by_name(env_name)

        if not env_info:
            known = discovery.discover()

            if not known:
                raise ValueError(
                    "No OpenEnv environments found.\n"
                    "Install an environment with: pip install openenv-<env-name>\n"
                    "Or specify a HuggingFace Hub repository: AutoAction.from_env('openenv/echo_env')"
                )

            # Offer close matches to help with typos.
            from difflib import get_close_matches

            keys = list(known.keys())
            near = get_close_matches(env_name, keys, n=3, cutoff=0.6)

            message = f"Unknown environment '{env_name}'.\n"
            if near:
                message += f"Did you mean: {', '.join(near)}?\n"
            message += f"Available environments: {', '.join(sorted(keys))}"
            raise ValueError(message)

        try:
            return env_info.get_action_class()
        except ImportError as e:
            raise ImportError(
                f"Failed to import action class for '{env_name}'.\n"
                f"Package '{env_info.package_name}' appears to be installed but the module cannot be imported.\n"
                f"Try reinstalling: pip install --force-reinstall {env_info.package_name}\n"
                f"Original error: {e}"
            ) from e

    @classmethod
    def from_hub(cls, env_name: str, skip_install: bool = False) -> Type:
        """
        Alias of from_env(), kept for backward compatibility and clarity.

        Args:
            env_name: Environment name (e.g., "coding", "echo")
            skip_install: Forwarded to from_env()

        Returns:
            Action class (not an instance!)

        Examples:
            >>> CodeAction = AutoAction.from_hub("coding")
            >>> action = CodeAction(code="print('Hello!')")
        """
        return cls.from_env(env_name, skip_install=skip_install)

    @classmethod
    def get_action_info(cls, name: str) -> Dict[str, Any]:
        """
        Return metadata about the action class for an environment.

        Args:
            name: Environment name

        Returns:
            Dictionary with action class metadata

        Raises:
            ValueError: If environment not found

        Examples:
            >>> info = AutoAction.get_action_info("coding")
            >>> print(info['action_class'])
            'CodingAction'
        """
        env_info = get_discovery().get_environment_by_name(name)

        if not env_info:
            raise ValueError(f"Unknown environment: {name}")

        return {
            "env_key": env_info.env_key,
            "env_name": env_info.name,
            "package": env_info.package_name,
            "action_class": env_info.action_class_name,
            "observation_class": env_info.observation_class_name,
            "module": env_info.client_module_path,
        }

    @classmethod
    def list_actions(cls) -> None:
        """
        Print a formatted list of all available action classes.

        Discovers all installed openenv-* packages and displays their action
        class information in a user-friendly format.
        """
        environments = get_discovery().discover()

        print("Available Action Classes:")
        print("-" * 70)

        if not environments:
            print(" No OpenEnv environments found.")
            print(" Install environments with: pip install openenv-<env-name>")
        else:
            for key in sorted(environments):
                env = environments[key]
                print(f" {key:<15}: {env.action_class_name}")
                print(f" Package: {env.package_name}")

        print("-" * 70)
        print(f"Total: {len(environments)} action classes")
src/core/openenv/auto/auto_env.py ADDED
@@ -0,0 +1,896 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ AutoEnv - Automatic Environment Selection
9
+ ==========================================
10
+
11
+ AutoEnv provides a HuggingFace-style API for automatically selecting and
12
+ instantiating the correct environment client from installed packages or
13
+ HuggingFace Hub.
14
+
15
+ This module simplifies environment creation by automatically detecting the
16
+ environment type from the name and instantiating the appropriate client class.
17
+
18
+ Example:
19
+ >>> from openenv import AutoEnv, AutoAction
20
+ >>>
21
+ >>> # From installed package
22
+ >>> env = AutoEnv.from_env("coding-env")
23
+ >>>
24
+ >>> # From HuggingFace Hub
25
+ >>> env = AutoEnv.from_env("meta-pytorch/coding-env")
26
+ >>>
27
+ >>> # With configuration
28
+ >>> env = AutoEnv.from_env("coding", env_vars={"DEBUG": "1"})
29
+ """
30
+
31
+ from __future__ import annotations
32
+
33
+ import importlib
34
+ import logging
35
+ import os
36
+ import shutil
37
+ import subprocess
38
+ import sys
39
+ import requests
40
+ from typing import Any, Optional, TYPE_CHECKING, Dict
41
+
42
+ from ._discovery import get_discovery, _is_hub_url
43
+ from openenv.core.utils import run_async_safely
44
+
45
+
46
+ if TYPE_CHECKING:
47
+ from openenv.core.containers.runtime import ContainerProvider
48
+ from openenv.core.env_client import EnvClient
49
+
50
+ logger = logging.getLogger(__name__)
51
+
52
+ # Cache for repo ID → env_name mapping to avoid redundant downloads
53
+ _hub_env_name_cache: Dict[str, str] = {}
54
+
55
+ # Environment variable to skip user confirmation for remote installs
56
+ OPENENV_TRUST_REMOTE_CODE = "OPENENV_TRUST_REMOTE_CODE"
57
+
58
+
59
+ def _has_uv() -> bool:
60
+ """Check if uv is available in the system."""
61
+ return shutil.which("uv") is not None
62
+
63
+
64
+ def _get_pip_command() -> list[str]:
65
+ """
66
+ Get the appropriate pip command (uv pip or pip).
67
+
68
+ Returns:
69
+ List of command parts for pip installation
70
+ """
71
+ if _has_uv():
72
+ return ["uv", "pip"]
73
+ return [sys.executable, "-m", "pip"]
74
+
75
+
76
+ def _confirm_remote_install(repo_id: str) -> bool:
77
+ """
78
+ Ask user for confirmation before installing remote code.
79
+
80
+ This is a security measure since we're executing code from the internet.
81
+
82
+ Args:
83
+ repo_id: The HuggingFace repo ID being installed
84
+
85
+ Returns:
86
+ True if user confirms, False otherwise
87
+ """
88
+ # Check environment variable for automated/CI environments
89
+ if os.environ.get(OPENENV_TRUST_REMOTE_CODE, "").lower() in ("1", "true", "yes"):
90
+ logger.info("Skipping confirmation (OPENENV_TRUST_REMOTE_CODE is set)")
91
+ return True
92
+
93
+ # Check if we're in an interactive terminal
94
+ if not sys.stdin.isatty():
95
+ logger.warning(
96
+ "Cannot prompt for confirmation in non-interactive mode. "
97
+ "Set OPENENV_TRUST_REMOTE_CODE=1 to allow remote installs."
98
+ )
99
+ return False
100
+
101
+ print(f"\n{'=' * 60}")
102
+ print("⚠️ SECURITY WARNING: Remote Code Installation")
103
+ print(f"{'=' * 60}")
104
+ print("You are about to install code from a remote repository:")
105
+ print(f" Repository: {repo_id}")
106
+ print(f" Source: https://huggingface.co/spaces/{repo_id}")
107
+ print("\nThis will execute code from the internet on your machine.")
108
+ print("Only proceed if you trust the source.")
109
+ print(f"{'=' * 60}\n")
110
+
111
+ try:
112
+ response = input("Do you want to proceed? [y/N]: ").strip().lower()
113
+ return response in ("y", "yes")
114
+ except (EOFError, KeyboardInterrupt):
115
+ print("\nInstallation cancelled.")
116
+ return False
117
+
118
+
119
+ class AutoEnv:
120
+ """
121
+ AutoEnv automatically selects and instantiates the correct environment client
122
+ based on environment names or HuggingFace Hub repositories.
123
+
124
+ This class follows the HuggingFace AutoModel pattern, making it easy to work
125
+ with different environments without needing to import specific client classes.
126
+
127
+ The class provides factory methods that:
128
+ 1. Check if name is a HuggingFace Hub URL/repo ID
129
+ 2. If Hub: download and install the environment package
130
+ 3. If local: look up the installed openenv-* package
131
+ 4. Import and instantiate the client class
132
+
133
+ Example:
134
+ >>> # From installed package
135
+ >>> env = AutoEnv.from_env("coding-env")
136
+ >>>
137
+ >>> # From HuggingFace Hub
138
+ >>> env = AutoEnv.from_env("meta-pytorch/coding-env")
139
+ >>>
140
+ >>> # List available environments
141
+ >>> AutoEnv.list_environments()
142
+
143
+ Note:
144
+ AutoEnv is not meant to be instantiated directly. Use the class method
145
+ from_env() instead.
146
+ """
147
+
148
+ def __init__(self):
149
+ """AutoEnv should not be instantiated directly. Use class methods instead."""
150
+ raise TypeError(
151
+ "AutoEnv is a factory class and should not be instantiated directly. "
152
+ "Use AutoEnv.from_hub() or AutoEnv.from_env() instead."
153
+ )
154
+
155
+ @classmethod
156
+ def _resolve_space_url(cls, repo_id: str) -> str:
157
+ """
158
+ Resolve HuggingFace Space repo ID to Space URL.
159
+
160
+ Args:
161
+ repo_id: HuggingFace repo ID (e.g., "wukaixingxp/coding-env-test")
162
+
163
+ Returns:
164
+ Space URL (e.g., "https://wukaixingxp-coding-env-test.hf.space")
165
+
166
+ Examples:
167
+ >>> AutoEnv._resolve_space_url("wukaixingxp/coding-env-test")
168
+ 'https://wukaixingxp-coding-env-test.hf.space'
169
+ """
170
+ # Clean up repo_id if it's a full URL
171
+ if "huggingface.co" in repo_id:
172
+ # Extract org/repo from URL
173
+ # https://huggingface.co/wukaixingxp/coding-env-test -> wukaixingxp/coding-env-test
174
+ parts = repo_id.split("/")
175
+ if len(parts) >= 2:
176
+ repo_id = f"{parts[-2]}/{parts[-1]}"
177
+
178
+ # Convert user/space-name to user-space-name.hf.space
179
+ space_slug = repo_id.replace("/", "-")
180
+ return f"https://{space_slug}.hf.space"
181
+
182
+ @classmethod
183
+ def _is_local_url(cls, url: str) -> bool:
184
+ """
185
+ Check if a URL points to a local server.
186
+
187
+ Args:
188
+ url: URL to check
189
+
190
+ Returns:
191
+ True if URL is localhost or 127.0.0.1, False otherwise
192
+
193
+ Examples:
194
+ >>> AutoEnv._is_local_url("http://localhost:8000")
195
+ True
196
+ >>> AutoEnv._is_local_url("http://127.0.0.1:8000")
197
+ True
198
+ >>> AutoEnv._is_local_url("https://example.com")
199
+ False
200
+ """
201
+ url_lower = url.lower()
202
+ return "localhost" in url_lower or "127.0.0.1" in url_lower
203
+
204
+ @classmethod
205
+ def _check_server_availability(cls, base_url: str, timeout: float = 2.0) -> bool:
206
+ """
207
+ Check if a server at the given URL is running and accessible.
208
+
209
+ Args:
210
+ base_url: Server base URL to check
211
+ timeout: Request timeout in seconds
212
+
213
+ Returns:
214
+ True if server is accessible, False otherwise
215
+
216
+ Examples:
217
+ >>> AutoEnv._check_server_availability("http://localhost:8000")
218
+ True # if server is running
219
+ """
220
+ try:
221
+ # Bypass proxy for localhost to avoid proxy issues
222
+ proxies = None
223
+ if cls._is_local_url(base_url):
224
+ proxies = {"http": None, "https": None}
225
+
226
+ # Try to access the health endpoint
227
+ response = requests.get(
228
+ f"{base_url}/health", timeout=timeout, proxies=proxies
229
+ )
230
+ if response.status_code == 200:
231
+ return True
232
+
233
+ # If health endpoint doesn't exist, try root endpoint
234
+ response = requests.get(base_url, timeout=timeout, proxies=proxies)
235
+ return response.status_code == 200
236
+ except (requests.RequestException, Exception) as e:
237
+ logger.debug(f"Server {base_url} not accessible: {e}")
238
+ return False
239
+
240
+ @classmethod
241
+ def _check_space_availability(cls, space_url: str, timeout: float = 5.0) -> bool:
242
+ """
243
+ Check if HuggingFace Space is running and accessible.
244
+
245
+ Args:
246
+ space_url: Space URL to check
247
+ timeout: Request timeout in seconds
248
+
249
+ Returns:
250
+ True if Space is accessible, False otherwise
251
+
252
+ Examples:
253
+ >>> AutoEnv._check_space_availability("https://wukaixingxp-coding-env-test.hf.space")
254
+ True
255
+ """
256
+ try:
257
+ # Try to access the health endpoint
258
+ response = requests.get(f"{space_url}/health", timeout=timeout)
259
+ if response.status_code == 200:
260
+ return True
261
+
262
+ # If health endpoint doesn't exist, try root endpoint
263
+ response = requests.get(space_url, timeout=timeout)
264
+ return response.status_code == 200
265
+ except (requests.RequestException, Exception) as e:
266
+ logger.debug(f"Space {space_url} not accessible: {e}")
267
+ return False
268
+
269
+ @classmethod
270
+ def _get_hub_git_url(cls, repo_id: str) -> str:
271
+ """
272
+ Get the git URL for a HuggingFace Space.
273
+
274
+ Args:
275
+ repo_id: HuggingFace repo ID (e.g., "wukaixingxp/coding-env-test")
276
+
277
+ Returns:
278
+ Git URL for pip installation (e.g., "git+https://huggingface.co/spaces/wukaixingxp/coding-env-test")
279
+ """
280
+ # Clean up repo_id if it's a full URL
281
+ if "huggingface.co" in repo_id:
282
+ parts = repo_id.split("/")
283
+ if len(parts) >= 2:
284
+ repo_id = f"{parts[-2]}/{parts[-1]}"
285
+
286
+ return f"git+https://huggingface.co/spaces/{repo_id}"
287
+
288
    @classmethod
    def _install_from_hub(cls, repo_id: str, trust_remote_code: bool = False) -> str:
        """
        Install environment package directly from HuggingFace Hub using git+.

        This is the preferred method as it avoids downloading the entire repo
        and uses pip/uv's native git support.

        Args:
            repo_id: HuggingFace repo ID (e.g., "wukaixingxp/coding-env-test")
            trust_remote_code: If True, skip user confirmation

        Returns:
            Package name that was installed

        Raises:
            ValueError: If installation fails or user declines
        """
        # Security check - confirm with user before installing remote code
        if not trust_remote_code and not _confirm_remote_install(repo_id):
            raise ValueError(
                "Installation cancelled by user.\n"
                "To allow remote installs without prompting, set OPENENV_TRUST_REMOTE_CODE=1"
            )

        git_url = cls._get_hub_git_url(repo_id)
        pip_cmd = _get_pip_command()
        # Human-readable tool name, for logging only.
        pip_name = "uv pip" if pip_cmd[0] == "uv" else "pip"

        logger.info(f"Installing from HuggingFace Space using {pip_name}: {repo_id}")
        logger.info(f"Command: {' '.join(pip_cmd)} install {git_url}")

        try:
            # check=True raises CalledProcessError on a non-zero exit, which
            # is converted into a ValueError below.
            result = subprocess.run(
                [*pip_cmd, "install", git_url],
                check=True,
                capture_output=True,
                text=True,
            )

            # Try to extract package name from pip output
            # Look for "Successfully installed <package-name>-<version>"
            for line in result.stdout.split("\n"):
                if "Successfully installed" in line:
                    # Parse package name from the line
                    parts = line.replace("Successfully installed", "").strip().split()
                    for part in parts:
                        if part.startswith("openenv-"):
                            # Remove version suffix (e.g., "openenv-coding_env-0.1.0" -> "openenv-coding_env")
                            # Check if last segment looks like a version number.
                            # NOTE(review): pre-release versions such as
                            # "0.1.0rc1" are not digits-and-dots, so the
                            # suffix would be kept in that case - confirm
                            # whether that matters for callers.
                            last_segment = part.rsplit("-", 1)[-1]
                            if last_segment.replace(".", "").isdigit():
                                package_name = "-".join(part.rsplit("-", 1)[:-1])
                            else:
                                package_name = part
                            logger.info(f"Successfully installed: {package_name}")
                            return package_name

            # Fallback: try to determine package name from repo_id
            # Convention: repo name like "coding-env-test" -> package "openenv-coding_env"
            env_name = repo_id.split("/")[-1]  # Get repo name from "user/repo"
            env_name = env_name.replace("-", "_")
            if not env_name.endswith("_env"):
                env_name = f"{env_name}_env"
            package_name = f"openenv-{env_name}"

            logger.info(f"Installed (inferred package name): {package_name}")
            return package_name

        except subprocess.CalledProcessError as e:
            # Surface whatever output pip produced; stderr is usually the
            # useful channel but uv sometimes writes to stdout.
            error_msg = e.stderr or e.stdout or str(e)
            raise ValueError(
                f"Failed to install environment from HuggingFace Space: {repo_id}\n"
                f"Command: {' '.join(pip_cmd)} install {git_url}\n"
                f"Error: {error_msg}\n"
                f"Make sure the repository exists and contains a valid Python package."
            ) from e
365
+
366
+ @classmethod
367
+ def _is_package_installed(cls, package_name: str) -> bool:
368
+ """
369
+ Check if a package is already installed.
370
+
371
+ Args:
372
+ package_name: Package name (e.g., "openenv-coding_env")
373
+
374
+ Returns:
375
+ True if installed, False otherwise
376
+ """
377
+ try:
378
+ import importlib.metadata
379
+
380
+ importlib.metadata.distribution(package_name)
381
+ return True
382
+ except importlib.metadata.PackageNotFoundError:
383
+ return False
384
+
385
    @classmethod
    def _ensure_package_from_hub(
        cls, name: str, trust_remote_code: bool = False
    ) -> str:
        """
        Ensure package from HuggingFace Hub is installed.

        Uses git+ URLs for direct installation without downloading the entire repo.
        Prompts user for confirmation before installing remote code.

        Args:
            name: HuggingFace repo ID (e.g., "wukaixingxp/coding-env-test")
            trust_remote_code: If True, skip user confirmation

        Returns:
            Environment name usable with the discovery registry.
            NOTE(review): on the discovery paths this is the discovered
            env *key* (e.g. "coding"), while the last-resort fallback
            strips "_env" to approximate a key - confirm both forms are
            accepted by get_environment_by_name().
        """
        global _hub_env_name_cache

        # Check if we already resolved this repo ID
        if name in _hub_env_name_cache:
            env_name = _hub_env_name_cache[name]
            logger.debug(f"Using cached env name for {name}: {env_name}")
            return env_name

        # Try to infer expected package name from repo ID
        # Convention: repo "user/coding-env" -> package "openenv-coding_env"
        repo_name = name.split("/")[-1] if "/" in name else name
        expected_env_name = repo_name.replace("-", "_")
        if not expected_env_name.endswith("_env"):
            expected_env_name = f"{expected_env_name}_env"
        expected_package_name = f"openenv-{expected_env_name}"

        # Check if already installed
        if cls._is_package_installed(expected_package_name):
            logger.info(f"Package already installed: {expected_package_name}")
            # Clear and refresh discovery cache to make sure it's detected
            get_discovery().clear_cache()
            get_discovery().discover(use_cache=False)
            # Cache the result
            _hub_env_name_cache[name] = expected_env_name
            return expected_env_name

        # Not installed, install using git+ URL
        logger.info(f"Package not found locally, installing from Hub: {name}")

        # Track existing packages before installation so the new one can be
        # identified by set difference afterwards.
        get_discovery().clear_cache()
        existing_envs = set(get_discovery().discover(use_cache=False).keys())

        # Install the package
        cls._install_from_hub(name, trust_remote_code=trust_remote_code)

        # Clear discovery cache to pick up the newly installed package;
        # invalidate_caches() lets importlib see modules installed mid-run.
        try:
            importlib.invalidate_caches()
        except Exception:
            pass
        get_discovery().clear_cache()
        discovered_envs = get_discovery().discover(use_cache=False)

        # Find the newly installed environment by comparing before/after
        new_envs = set(discovered_envs.keys()) - existing_envs

        if new_envs:
            # Use the first newly discovered environment.
            # NOTE(review): if an install pulls in several openenv packages,
            # which one is "first" is arbitrary (set order) - confirm.
            env_name = next(iter(new_envs))
            logger.info(f"Found newly installed environment: '{env_name}'")
        else:
            # Fallback: try to find by matching module patterns
            # Look for any env that might match the repo name pattern
            repo_name = name.split("/")[-1] if "/" in name else name
            repo_base = (
                repo_name.replace("-", "_").replace("_env", "").replace("_test", "")
            )

            env_name = None
            for env_key, env_info in discovered_envs.items():
                # Check if env_key is a prefix/substring match
                if env_key in repo_base or repo_base.startswith(env_key):
                    env_name = env_key
                    logger.info(
                        f"Found matching environment '{env_name}' for repo '{name}'"
                    )
                    break

            if env_name is None:
                # Last resort: use inferred name from repo
                env_name = repo_name.replace("-", "_")
                if not env_name.endswith("_env"):
                    env_name = f"{env_name}_env"
                # Strip to get env_key
                env_name = env_name.replace("_env", "")
                logger.warning(
                    f"Could not find newly installed environment for repo '{name}', "
                    f"using inferred name: {env_name}"
                )

        # Cache the result to avoid redundant installs
        _hub_env_name_cache[name] = env_name

        return env_name
487
+
488
    @classmethod
    def from_env(
        cls,
        name: str,
        base_url: Optional[str] = None,
        docker_image: Optional[str] = None,
        container_provider: Optional[ContainerProvider] = None,
        wait_timeout: float = 30.0,
        env_vars: Optional[Dict[str, str]] = None,
        trust_remote_code: bool = False,
        skip_install: bool = False,
        **kwargs: Any,
    ) -> "EnvClient":
        """
        Create an environment client from a name or HuggingFace Hub repository.

        This method automatically:
        1. Checks if the name is a HuggingFace Hub URL/repo ID
        2. If Hub: installs the environment package using git+ URL
        3. If local: looks up the installed openenv-* package
        4. Imports the client class and instantiates it

        Args:
            name: Environment name or HuggingFace Hub repo ID
                Examples:
                - "coding" / "coding-env" / "coding_env"
                - "meta-pytorch/coding-env" (Hub repo ID)
                - "https://huggingface.co/meta-pytorch/coding-env" (Hub URL)
            base_url: Optional base URL for HTTP connection
            docker_image: Optional Docker image name (overrides default)
            container_provider: Optional container provider
            wait_timeout: Timeout for container startup (seconds)
            env_vars: Optional environment variables for the container
            trust_remote_code: If True, skip user confirmation when installing
                from HuggingFace Hub. Can also be set via OPENENV_TRUST_REMOTE_CODE
                environment variable.
            skip_install: If True, skip package installation and return a
                GenericEnvClient for remote environments. Useful when you only
                want to connect to a running server without installing any
                remote code. When True:
                - If base_url is provided: connects directly using GenericEnvClient
                - If HF Space is running: connects to Space using GenericEnvClient
                - If HF Space is not running: uses Docker from HF registry
            **kwargs: Additional arguments passed to the client class

        Returns:
            Instance of the environment client class

        Raises:
            ValueError: If environment not found or cannot be loaded
            ImportError: If environment package is not installed
            ConnectionError: If skip_install=True and the server at
                base_url is unreachable

        Examples:
            >>> env = AutoEnv.from_env("coding-env")
            >>> env = AutoEnv.from_env("meta-pytorch/coding-env")
            >>> env = AutoEnv.from_env("coding", docker_image="my-coding-env:v2")
            >>> env = AutoEnv.from_env("dipg", env_vars={"DIPG_DATASET_PATH": "/data/dipg"})
            >>> env = AutoEnv.from_env("user/my-env", skip_install=True)
        """
        # Imported lazily to avoid a circular import at module load time.
        from openenv.core import GenericEnvClient

        # --- Mode 1: skip_install - never install a package, use the
        # generic (schema-agnostic) client instead. ---------------------
        if skip_install:
            # If base_url is provided, connect directly
            if base_url:
                if cls._check_server_availability(base_url):
                    logger.info(
                        f"Using GenericEnvClient for {base_url} (skip_install=True)"
                    )
                    return GenericEnvClient(base_url=base_url, **kwargs)
                else:
                    raise ConnectionError(
                        f"Server not available at {base_url}. "
                        f"Please ensure the server is running."
                    )

            # If it's a Hub URL, try to connect to Space or use Docker
            if _is_hub_url(name):
                space_url = cls._resolve_space_url(name)
                logger.info(f"Checking if HuggingFace Space is accessible: {space_url}")

                if cls._check_space_availability(space_url):
                    logger.info(
                        f"Using GenericEnvClient for Space {space_url} (skip_install=True)"
                    )
                    return GenericEnvClient(base_url=space_url, **kwargs)
                else:
                    # Space not running, use Docker from HF registry
                    logger.info(
                        f"Space not running at {space_url}, "
                        f"using GenericEnvClient with HF Docker registry"
                    )
                    return run_async_safely(
                        GenericEnvClient.from_env(
                            name,
                            use_docker=True,
                            provider=container_provider,
                            env_vars=env_vars or {},
                            **kwargs,
                        )
                    )

            # For local environments with skip_install, we need docker_image
            if docker_image:
                logger.info(
                    f"Using GenericEnvClient with Docker image {docker_image} "
                    f"(skip_install=True)"
                )
                return run_async_safely(
                    GenericEnvClient.from_docker_image(
                        image=docker_image,
                        provider=container_provider,
                        wait_timeout=wait_timeout,
                        env_vars=env_vars or {},
                        **kwargs,
                    )
                )
            else:
                raise ValueError(
                    f"Cannot use skip_install=True for local environment '{name}' "
                    f"without providing base_url or docker_image. "
                    f"For local environments, either:\n"
                    f" 1. Provide base_url to connect to a running server\n"
                    f" 2. Provide docker_image to start a container\n"
                    f" 3. Set skip_install=False to use the installed package"
                )

        # --- Mode 2: normal flow - resolve the env-specific client. -----
        # Check if it's a HuggingFace Hub URL or repo ID
        if _is_hub_url(name):
            # Try to connect to Space directly first
            space_url = cls._resolve_space_url(name)
            logger.info(f"Checking if HuggingFace Space is accessible: {space_url}")

            space_is_available = cls._check_space_availability(space_url)

            if space_is_available and base_url is None:
                # Space is accessible! We'll connect directly without Docker
                logger.info(f"Space is accessible at: {space_url}")
                logger.info("Installing package for client code (no Docker needed)...")

                # Ensure package is installed (uses git+ URL); the package is
                # still needed locally for the typed client classes.
                env_name = cls._ensure_package_from_hub(
                    name, trust_remote_code=trust_remote_code
                )

                # Set base_url to connect to remote Space
                base_url = space_url
                logger.info("Will connect to remote Space (no local Docker)")
            else:
                # Space not accessible or user provided explicit base_url
                if not space_is_available:
                    logger.info(f"Space not accessible at {space_url}")
                    logger.info("Falling back to local Docker mode...")

                # Ensure package is installed (uses git+ URL)
                env_name = cls._ensure_package_from_hub(
                    name, trust_remote_code=trust_remote_code
                )
        else:
            env_name = name

        # Get environment info from discovery
        discovery = get_discovery()
        env_info = discovery.get_environment_by_name(env_name)

        if not env_info:
            # Environment not found - provide helpful error message
            available_envs = discovery.discover()

            if not available_envs:
                raise ValueError(
                    "No OpenEnv environments found.\n"
                    "Install an environment with: pip install openenv-<env-name>\n"
                    "Or specify a HuggingFace Hub repository: AutoEnv.from_env('openenv/echo_env')"
                )

            # Try to suggest similar environment names
            from difflib import get_close_matches

            env_keys = list(available_envs.keys())
            suggestions = get_close_matches(env_name, env_keys, n=3, cutoff=0.6)

            error_msg = f"Unknown environment '{env_name}'.\n"
            if suggestions:
                error_msg += f"Did you mean: {', '.join(suggestions)}?\n"
            error_msg += f"Available environments: {', '.join(sorted(env_keys))}"

            raise ValueError(error_msg)

        # Get the client class
        try:
            client_class = env_info.get_client_class()
        except ImportError as e:
            raise ImportError(
                f"Failed to import environment client for '{env_name}'.\n"
                f"Package '{env_info.package_name}' appears to be installed but the module cannot be imported.\n"
                f"Try reinstalling: pip install --force-reinstall {env_info.package_name}\n"
                f"Original error: {e}"
            ) from e

        # Determine Docker image to use
        if docker_image is None:
            docker_image = env_info.default_image

        # Create client instance
        try:
            if base_url:
                # Check if the server at base_url is available
                is_local = cls._is_local_url(base_url)
                server_available = cls._check_server_availability(base_url)

                if server_available:
                    # Server is running, connect directly
                    logger.info(
                        f"✅ Server available at {base_url}, connecting directly"
                    )
                    return client_class(base_url=base_url, provider=None, **kwargs)
                elif is_local:
                    # Local server not running, auto-start Docker container
                    logger.info(f"❌ Server not available at {base_url}")
                    logger.info(f"🐳 Auto-starting Docker container: {docker_image}")
                    return run_async_safely(
                        client_class.from_docker_image(
                            image=docker_image,
                            provider=container_provider,
                            wait_timeout=wait_timeout,
                            env_vars=env_vars or {},
                            **kwargs,
                        )
                    )
                else:
                    # Remote server not available, cannot auto-start.
                    # NOTE(review): this ConnectionError is raised inside the
                    # try block, so the broad `except Exception` below rewraps
                    # it as a ValueError - callers cannot catch
                    # ConnectionError on this path. Confirm whether that is
                    # intended (the skip_install branch raises it directly).
                    raise ConnectionError(
                        f"Remote server not available at {base_url}. "
                        f"Please ensure the server is running."
                    )
            else:
                # No base_url provided, start new Docker container
                return run_async_safely(
                    client_class.from_docker_image(
                        image=docker_image,
                        provider=container_provider,
                        wait_timeout=wait_timeout,
                        env_vars=env_vars or {},
                        **kwargs,
                    )
                )
        except Exception as e:
            # Wrap any startup/connection failure with full context.
            raise ValueError(
                f"Failed to create environment client for '{env_name}'.\n"
                f"Client class: {client_class.__name__}\n"
                f"Docker image: {docker_image}\n"
                f"Error: {e}"
            ) from e
757
+
758
+ @classmethod
759
+ def from_hub(
760
+ cls,
761
+ name: str,
762
+ base_url: Optional[str] = None,
763
+ docker_image: Optional[str] = None,
764
+ container_provider: Optional["ContainerProvider"] = None,
765
+ wait_timeout: float = 30.0,
766
+ env_vars: Optional[Dict[str, str]] = None,
767
+ trust_remote_code: bool = False,
768
+ skip_install: bool = False,
769
+ **kwargs: Any,
770
+ ) -> "EnvClient":
771
+ """
772
+ Create an environment client from a name or HuggingFace Hub repository.
773
+
774
+ This is an alias for from_env() for backward compatibility.
775
+
776
+ Args:
777
+ name: Environment name or HuggingFace Hub repo ID
778
+ base_url: Optional base URL for HTTP connection
779
+ docker_image: Optional Docker image name (overrides default)
780
+ container_provider: Optional container provider
781
+ wait_timeout: Timeout for container startup (seconds)
782
+ env_vars: Optional environment variables for the container
783
+ trust_remote_code: If True, skip user confirmation when installing
784
+ from HuggingFace Hub
785
+ skip_install: If True, skip package installation and return a
786
+ GenericEnvClient for remote environments
787
+ **kwargs: Additional arguments passed to the client class
788
+
789
+ Returns:
790
+ Instance of the environment client class
791
+
792
+ Examples:
793
+ >>> env = AutoEnv.from_hub("coding-env")
794
+ >>> env = AutoEnv.from_hub("meta-pytorch/coding-env")
795
+ """
796
+ return cls.from_env(
797
+ name=name,
798
+ base_url=base_url,
799
+ docker_image=docker_image,
800
+ container_provider=container_provider,
801
+ wait_timeout=wait_timeout,
802
+ env_vars=env_vars,
803
+ trust_remote_code=trust_remote_code,
804
+ skip_install=skip_install,
805
+ **kwargs,
806
+ )
807
+
808
+ @classmethod
809
+ def get_env_class(cls, name: str):
810
+ """
811
+ Get the environment client class without instantiating it.
812
+
813
+ Args:
814
+ name: Environment name
815
+
816
+ Returns:
817
+ The environment client class
818
+
819
+ Raises:
820
+ ValueError: If environment not found
821
+
822
+ Examples:
823
+ >>> CodingEnv = AutoEnv.get_env_class("coding")
824
+ >>> # Now you can instantiate it yourself
825
+ >>> env = CodingEnv(base_url="http://localhost:8000")
826
+ """
827
+ discovery = get_discovery()
828
+ env_info = discovery.get_environment_by_name(name)
829
+
830
+ if not env_info:
831
+ raise ValueError(f"Unknown environment: {name}")
832
+
833
+ return env_info.get_client_class()
834
+
835
+ @classmethod
836
+ def get_env_info(cls, name: str) -> Dict[str, Any]:
837
+ """
838
+ Get detailed information about an environment.
839
+
840
+ Args:
841
+ name: Environment name
842
+
843
+ Returns:
844
+ Dictionary with environment metadata
845
+
846
+ Raises:
847
+ ValueError: If environment not found
848
+
849
+ Examples:
850
+ >>> info = AutoEnv.get_env_info("coding")
851
+ >>> print(info['description'])
852
+ 'Coding environment for OpenEnv'
853
+ >>> print(info['default_image'])
854
+ 'coding-env:latest'
855
+ """
856
+ discovery = get_discovery()
857
+ env_info = discovery.get_environment_by_name(name)
858
+
859
+ if not env_info:
860
+ raise ValueError(f"Unknown environment: {name}")
861
+
862
+ return {
863
+ "env_key": env_info.env_key,
864
+ "name": env_info.name,
865
+ "package": env_info.package_name,
866
+ "version": env_info.version,
867
+ "description": env_info.description,
868
+ "env_class": env_info.client_class_name,
869
+ "action_class": env_info.action_class_name,
870
+ "observation_class": env_info.observation_class_name,
871
+ "module": env_info.client_module_path,
872
+ "default_image": env_info.default_image,
873
+ "spec_version": env_info.spec_version,
874
+ }
875
+
876
+ @classmethod
877
+ def list_environments(cls) -> None:
878
+ """
879
+ Print a formatted list of all available environments.
880
+
881
+ This discovers all installed openenv-* packages and displays
882
+ their metadata in a user-friendly format.
883
+
884
+ Examples:
885
+ >>> AutoEnv.list_environments()
886
+ Available OpenEnv Environments:
887
+ ----------------------------------------------------------------------
888
+ echo : Echo Environment (v0.1.0)
889
+ Package: openenv-echo-env
890
+ coding : Coding Environment (v0.1.0)
891
+ Package: openenv-coding_env
892
+ ----------------------------------------------------------------------
893
+ Total: 2 environments
894
+ """
895
+ discovery = get_discovery()
896
+ discovery.list_environments()
src/core/openenv/cli/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """OpenEnv CLI package."""
8
+
9
+ __version__ = "0.1.0"
src/core/openenv/cli/__main__.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ OpenEnv CLI entry point.
9
+
10
+ This module provides the main entry point for the OpenEnv command-line interface,
11
+ following the Hugging Face CLI pattern.
12
+ """
13
+
14
+ import sys
15
+
16
+ import typer
17
+
18
+ from openenv.cli.commands import build, fork, init, push, serve, validate
19
+
20
+ # Create the main CLI app
21
+ app = typer.Typer(
22
+ name="openenv",
23
+ help="OpenEnv - An e2e framework for creating, deploying and using isolated execution environments for agentic RL training",
24
+ no_args_is_help=True,
25
+ )
26
+
27
+ # Register commands
28
+ app.command(name="init", help="Initialize a new OpenEnv environment")(init.init)
29
+ app.command(name="build", help="Build Docker images for OpenEnv environments")(
30
+ build.build
31
+ )
32
+ app.command(
33
+ name="validate", help="Validate environment structure and deployment readiness"
34
+ )(validate.validate)
35
+ app.command(
36
+ name="push",
37
+ help="Push an OpenEnv environment to Hugging Face Spaces or custom registry",
38
+ )(push.push)
39
+ app.command(name="serve", help="Serve environments locally (TODO: Phase 4)")(
40
+ serve.serve
41
+ )
42
+ app.command(
43
+ name="fork",
44
+ help="Fork (duplicate) a Hugging Face Space to your account",
45
+ )(fork.fork)
46
+
47
+
48
+ # Entry point for setuptools
49
+ def main() -> None:
50
+ """Main entry point for the CLI."""
51
+ try:
52
+ app()
53
+ except KeyboardInterrupt:
54
+ print("\nOperation cancelled by user.")
55
+ sys.exit(130)
56
+ except Exception as e:
57
+ print(f"Error: {e}", file=sys.stderr)
58
+ sys.exit(1)
59
+
60
+
61
+ if __name__ == "__main__":
62
+ main()
src/core/openenv/cli/_cli_utils.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """CLI utilities for OpenEnv command-line interface."""
8
+
9
+ from pathlib import Path
10
+ from typing import List
11
+
12
+ from rich.console import Console
13
+
14
+ # Create a console instance for CLI output
15
+ console = Console()
16
+
17
+
18
+ def validate_env_structure(env_dir: Path, strict: bool = False) -> List[str]:
19
+ """
20
+ Validate that the directory follows OpenEnv environment structure.
21
+
22
+ Args:
23
+ env_dir: Path to environment directory
24
+ strict: If True, enforce all optional requirements
25
+
26
+ Returns:
27
+ List of validation warnings (empty if all checks pass)
28
+
29
+ Raises:
30
+ FileNotFoundError: If required files are missing
31
+ """
32
+ warnings = []
33
+
34
+ # Required files
35
+ required_files = [
36
+ "openenv.yaml",
37
+ "__init__.py",
38
+ "client.py",
39
+ "models.py",
40
+ "README.md",
41
+ ]
42
+
43
+ for file in required_files:
44
+ if not (env_dir / file).exists():
45
+ raise FileNotFoundError(f"Required file missing: {file}")
46
+
47
+ # Dockerfile: must exist in server/ or at env root
48
+ has_root_dockerfile = (env_dir / "Dockerfile").exists()
49
+ has_server_dockerfile = (env_dir / "server" / "Dockerfile").exists()
50
+
51
+ if not has_root_dockerfile and not has_server_dockerfile:
52
+ raise FileNotFoundError(
53
+ "Required file missing: server/Dockerfile or Dockerfile at env root"
54
+ )
55
+
56
+ # When no root Dockerfile, require the traditional server/ layout
57
+ if not has_root_dockerfile:
58
+ server_dir = env_dir / "server"
59
+ if not server_dir.exists() or not server_dir.is_dir():
60
+ raise FileNotFoundError("Required directory missing: server/")
61
+
62
+ for file in ["server/__init__.py", "server/app.py"]:
63
+ if not (env_dir / file).exists():
64
+ raise FileNotFoundError(f"Required file missing: {file}")
65
+
66
+ # Check for dependency management (pyproject.toml required)
67
+ has_pyproject = (env_dir / "pyproject.toml").exists()
68
+
69
+ if not has_pyproject:
70
+ raise FileNotFoundError(
71
+ "No dependency specification found. 'pyproject.toml' is required."
72
+ )
73
+
74
+ # Warnings for recommended structure
75
+
76
+ if not (env_dir / "outputs").exists():
77
+ warnings.append("Recommended directory missing: outputs/")
78
+
79
+ return warnings
src/core/openenv/cli/_validation.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ Validation utilities for multi-mode deployment readiness.
9
+
10
+ This module provides functions to check if environments are properly
11
+ configured for multi-mode deployment (Docker, direct Python, notebooks, clusters).
12
+ """
13
+
14
+ import subprocess
15
+ from pathlib import Path
16
+
17
+ try:
18
+ import tomllib
19
+ except ModuleNotFoundError:
20
+ import tomli as tomllib
21
+
22
+
23
+ def validate_multi_mode_deployment(env_path: Path) -> tuple[bool, list[str]]:
24
+ """
25
+ Validate that an environment is ready for multi-mode deployment.
26
+
27
+ Checks:
28
+ 1. pyproject.toml exists
29
+ 2. uv.lock exists and is up-to-date
30
+ 3. pyproject.toml has [project.scripts] with server entry point
31
+ 4. server/app.py has a main() function
32
+ 5. Required dependencies are present
33
+
34
+ Returns:
35
+ Tuple of (is_valid, list of issues found)
36
+ """
37
+ issues = []
38
+
39
+ # Check pyproject.toml exists
40
+ pyproject_path = env_path / "pyproject.toml"
41
+ if not pyproject_path.exists():
42
+ issues.append("Missing pyproject.toml")
43
+ return False, issues
44
+
45
+ # Check uv.lock exists
46
+ lockfile_path = env_path / "uv.lock"
47
+ if not lockfile_path.exists():
48
+ issues.append("Missing uv.lock - run 'uv lock' to generate it")
49
+ else:
50
+ # Check if uv.lock is up-to-date (optional, can be expensive)
51
+ # We can add a check using `uv lock --check` if needed
52
+ try:
53
+ result = subprocess.run(
54
+ ["uv", "lock", "--check", "--directory", str(env_path)],
55
+ capture_output=True,
56
+ text=True,
57
+ timeout=5,
58
+ )
59
+ if result.returncode != 0:
60
+ issues.append(
61
+ "uv.lock is out of date with pyproject.toml - run 'uv lock' to update"
62
+ )
63
+ except (subprocess.TimeoutExpired, FileNotFoundError):
64
+ # If uv is not available or times out, skip this check
65
+ pass
66
+
67
+ # Parse pyproject.toml
68
+ try:
69
+ with open(pyproject_path, "rb") as f:
70
+ pyproject = tomllib.load(f)
71
+ except Exception as e:
72
+ issues.append(f"Failed to parse pyproject.toml: {e}")
73
+ return False, issues
74
+
75
+ # Check [project.scripts] section
76
+ scripts = pyproject.get("project", {}).get("scripts", {})
77
+ if "server" not in scripts:
78
+ issues.append("Missing [project.scripts] server entry point")
79
+
80
+ # Check server entry point format
81
+ server_entry = scripts.get("server", "")
82
+ if server_entry and ":main" not in server_entry:
83
+ issues.append(
84
+ f"Server entry point should reference main function, got: {server_entry}"
85
+ )
86
+
87
+ # Check required dependencies
88
+ deps = [dep.lower() for dep in pyproject.get("project", {}).get("dependencies", [])]
89
+ has_openenv = any(
90
+ dep.startswith("openenv") and not dep.startswith("openenv-core") for dep in deps
91
+ )
92
+ has_legacy_core = any(dep.startswith("openenv-core") for dep in deps)
93
+
94
+ if not (has_openenv or has_legacy_core):
95
+ issues.append(
96
+ "Missing required dependency: openenv-core>=0.2.0 (or openenv>=0.2.0)"
97
+ )
98
+
99
+ # Check server/app.py exists
100
+ server_app = env_path / "server" / "app.py"
101
+ if not server_app.exists():
102
+ issues.append("Missing server/app.py")
103
+ else:
104
+ # Check for main() function (flexible - with or without parameters)
105
+ app_content = server_app.read_text(encoding="utf-8")
106
+ if "def main(" not in app_content:
107
+ issues.append("server/app.py missing main() function")
108
+
109
+ # Check if main() is callable
110
+ if "__name__" not in app_content or "main()" not in app_content:
111
+ issues.append(
112
+ "server/app.py main() function not callable (missing if __name__ == '__main__')"
113
+ )
114
+
115
+ return len(issues) == 0, issues
116
+
117
+
118
+ def get_deployment_modes(env_path: Path) -> dict[str, bool]:
119
+ """
120
+ Check which deployment modes are supported by the environment.
121
+
122
+ Returns:
123
+ Dictionary with deployment mode names and whether they're supported
124
+ """
125
+ modes = {
126
+ "docker": False,
127
+ "openenv_serve": False,
128
+ "uv_run": False,
129
+ "python_module": False,
130
+ }
131
+
132
+ # Check Docker (Dockerfile may be in server/ or at env root)
133
+ modes["docker"] = (env_path / "server" / "Dockerfile").exists() or (
134
+ env_path / "Dockerfile"
135
+ ).exists()
136
+
137
+ # Check multi-mode deployment readiness
138
+ is_valid, _ = validate_multi_mode_deployment(env_path)
139
+ if is_valid:
140
+ modes["openenv_serve"] = True
141
+ modes["uv_run"] = True
142
+ modes["python_module"] = True
143
+
144
+ return modes
145
+
146
+
147
+ def format_validation_report(env_name: str, is_valid: bool, issues: list[str]) -> str:
148
+ """
149
+ Format a validation report for display.
150
+
151
+ Returns:
152
+ Formatted report string
153
+ """
154
+ if is_valid:
155
+ return f"[OK] {env_name}: Ready for multi-mode deployment"
156
+
157
+ report = [f"[FAIL] {env_name}: Not ready for multi-mode deployment", ""]
158
+ report.append("Issues found:")
159
+ for issue in issues:
160
+ report.append(f" - {issue}")
161
+
162
+ return "\n".join(report)
src/core/openenv/cli/commands/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """OpenEnv CLI commands."""
8
+
9
+ from . import build, fork, init, push, serve, validate
10
+
11
+ __all__ = ["build", "fork", "init", "push", "serve", "validate"]
src/core/openenv/cli/commands/build.py ADDED
@@ -0,0 +1,461 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Build Docker images for OpenEnv environments."""
8
+
9
+ from __future__ import annotations
10
+
11
+ import shutil
12
+ import subprocess
13
+ import tempfile
14
+ import sys
15
+ from pathlib import Path
16
+ from typing import Annotated
17
+
18
+ import typer
19
+
20
+ from .._cli_utils import console
21
+
22
+ app = typer.Typer(help="Build Docker images for OpenEnv environments")
23
+
24
+
25
+ def _detect_build_context(env_path: Path) -> tuple[str, Path, Path | None]:
26
+ """
27
+ Detect whether we're building a standalone or in-repo environment.
28
+
29
+ Returns:
30
+ tuple: (build_mode, build_context_path, repo_root)
31
+ - build_mode: "standalone" or "in-repo"
32
+ - build_context_path: Path to use as Docker build context
33
+ - repo_root: Path to repo root (None for standalone)
34
+ """
35
+ # Ensure env_path is absolute for proper comparison
36
+ env_path = env_path.absolute()
37
+
38
+ # Check if we're in a git repository
39
+ current = env_path
40
+ repo_root = None
41
+
42
+ # Walk up to find .git directory
43
+ for parent in [current] + list(current.parents):
44
+ if (parent / ".git").exists():
45
+ repo_root = parent
46
+ break
47
+
48
+ if repo_root is None:
49
+ # Not in a git repo = standalone
50
+ return "standalone", env_path, None
51
+
52
+ # Check if environment is under envs/ (in-repo pattern)
53
+ try:
54
+ rel_path = env_path.relative_to(repo_root)
55
+ rel_str = str(rel_path)
56
+ if (
57
+ rel_str.startswith("envs/")
58
+ or rel_str.startswith("envs\\")
59
+ or rel_str.startswith("envs/")
60
+ ):
61
+ # In-repo environment
62
+ return "in-repo", repo_root, repo_root
63
+ except ValueError:
64
+ pass
65
+
66
+ # Otherwise, it's standalone (environment outside repo structure)
67
+ return "standalone", env_path, None
68
+
69
+
70
+ def _prepare_standalone_build(env_path: Path, temp_dir: Path) -> Path:
71
+ """
72
+ Prepare a standalone environment for building.
73
+
74
+ For standalone builds:
75
+ 1. Copy environment to temp directory
76
+ 2. Ensure pyproject.toml depends on openenv
77
+
78
+ Returns:
79
+ Path to the prepared build directory
80
+ """
81
+ console.print("[cyan]Preparing standalone build...[/cyan]")
82
+
83
+ # Copy environment to temp directory
84
+ build_dir = temp_dir / env_path.name
85
+ shutil.copytree(env_path, build_dir, symlinks=True)
86
+
87
+ console.print(f"[cyan]Copied environment to:[/cyan] {build_dir}")
88
+
89
+ # Check if pyproject.toml has openenv dependency
90
+ pyproject_path = build_dir / "pyproject.toml"
91
+ if pyproject_path.exists():
92
+ with open(pyproject_path, "rb") as f:
93
+ try:
94
+ import tomli
95
+
96
+ pyproject = tomli.load(f)
97
+ deps = pyproject.get("project", {}).get("dependencies", [])
98
+
99
+ # Check if openenv dependency is declared
100
+ has_openenv = any(dep.startswith("openenv") for dep in deps)
101
+
102
+ if not has_openenv:
103
+ console.print(
104
+ "[yellow]Warning:[/yellow] pyproject.toml doesn't list the openenv dependency",
105
+ )
106
+ console.print(
107
+ "[yellow]You may need to add:[/yellow] openenv>=0.2.0",
108
+ )
109
+ except ImportError:
110
+ console.print(
111
+ "[yellow]Warning:[/yellow] tomli not available, skipping dependency check",
112
+ )
113
+
114
+ return build_dir
115
+
116
+
117
+ def _prepare_inrepo_build(env_path: Path, repo_root: Path, temp_dir: Path) -> Path:
118
+ """
119
+ Prepare an in-repo environment for building.
120
+
121
+ For in-repo builds:
122
+ 1. Create temp directory with environment and core
123
+ 2. Set up structure that matches expected layout
124
+
125
+ Returns:
126
+ Path to the prepared build directory
127
+ """
128
+ console.print("[cyan]Preparing in-repo build...[/cyan]")
129
+
130
+ # Copy environment to temp directory
131
+ build_dir = temp_dir / env_path.name
132
+ shutil.copytree(env_path, build_dir, symlinks=True)
133
+
134
+ # Copy OpenEnv package metadata + sources to temp directory.
135
+ # Keep the src/ layout since pyproject.toml uses package-dir = {"" = "src"}.
136
+ package_src = repo_root / "src" / "openenv"
137
+ package_dest = build_dir / "openenv"
138
+ if package_src.exists():
139
+ package_dest.mkdir(parents=True, exist_ok=True)
140
+ shutil.copytree(package_src, package_dest / "src" / "openenv", symlinks=True)
141
+
142
+ for filename in ("pyproject.toml", "README.md"):
143
+ src_file = repo_root / filename
144
+ if src_file.exists():
145
+ shutil.copy2(src_file, package_dest / filename)
146
+
147
+ console.print(f"[cyan]Copied OpenEnv package to:[/cyan] {package_dest}")
148
+
149
+ # Update pyproject.toml to reference local OpenEnv copy
150
+ pyproject_path = build_dir / "pyproject.toml"
151
+ if pyproject_path.exists():
152
+ with open(pyproject_path, "rb") as f:
153
+ try:
154
+ import tomli
155
+
156
+ pyproject = tomli.load(f)
157
+ deps = pyproject.get("project", {}).get("dependencies", [])
158
+
159
+ # Replace openenv/openenv-core with local reference
160
+ new_deps = []
161
+ for dep in deps:
162
+ if (
163
+ dep.startswith("openenv-core")
164
+ or dep.startswith("openenv_core")
165
+ or dep.startswith("openenv")
166
+ ):
167
+ # Skip - we'll use local core
168
+ continue
169
+ new_deps.append(dep)
170
+
171
+ # Write back with local core reference
172
+ pyproject["project"]["dependencies"] = new_deps + [
173
+ "openenv-core @ file:///app/env/openenv"
174
+ ]
175
+
176
+ # Write updated pyproject.toml
177
+ with open(pyproject_path, "wb") as out_f:
178
+ import tomli_w
179
+
180
+ tomli_w.dump(pyproject, out_f)
181
+
182
+ console.print(
183
+ "[cyan]Updated pyproject.toml to use local core[/cyan]"
184
+ )
185
+
186
+ # Remove old lockfile since dependencies changed
187
+ lockfile = build_dir / "uv.lock"
188
+ if lockfile.exists():
189
+ lockfile.unlink()
190
+ console.print("[cyan]Removed outdated uv.lock[/cyan]")
191
+
192
+ except ImportError:
193
+ console.print(
194
+ "[yellow]Warning:[/yellow] tomli/tomli_w not available, using pyproject.toml as-is",
195
+ )
196
+ else:
197
+ console.print(
198
+ "[yellow]Warning:[/yellow] OpenEnv package not found, building without it"
199
+ )
200
+
201
+ console.print(f"[cyan]Build directory prepared:[/cyan] {build_dir}")
202
+ return build_dir
203
+
204
+
205
+ def _run_command(
206
+ cmd: list[str],
207
+ cwd: Path | None = None,
208
+ check: bool = True,
209
+ ) -> subprocess.CompletedProcess:
210
+ """Run a shell command and handle errors."""
211
+ console.print(f"[bold cyan]Running:[/bold cyan] {' '.join(cmd)}")
212
+ try:
213
+ result = subprocess.run(
214
+ cmd, cwd=cwd, check=check, capture_output=True, text=True
215
+ )
216
+ if result.stdout:
217
+ console.print(result.stdout)
218
+ if result.stderr:
219
+ print(result.stderr, file=sys.stderr)
220
+ return result
221
+ except subprocess.CalledProcessError as e:
222
+ print(f"Error running command: {e}", file=sys.stderr)
223
+ if e.stdout:
224
+ console.print(e.stdout)
225
+ if e.stderr:
226
+ print(e.stderr, file=sys.stderr)
227
+ if check:
228
+ raise typer.Exit(1) from e
229
+ return e
230
+
231
+
232
+ def _build_docker_image(
233
+ env_path: Path,
234
+ tag: str | None = None,
235
+ context_path: Path | None = None,
236
+ dockerfile: Path | None = None,
237
+ build_args: dict[str, str] | None = None,
238
+ no_cache: bool = False,
239
+ ) -> bool:
240
+ """Build Docker image for the environment with smart context detection."""
241
+
242
+ # Detect build context (standalone vs in-repo)
243
+ build_mode, detected_context, repo_root = _detect_build_context(env_path)
244
+
245
+ console.print(f"[bold cyan]Build mode detected:[/bold cyan] {build_mode}")
246
+
247
+ # Use detected context unless explicitly overridden
248
+ if context_path is None:
249
+ context_path = detected_context
250
+
251
+ # Create temporary build directory
252
+ with tempfile.TemporaryDirectory() as temp_dir_str:
253
+ temp_dir = Path(temp_dir_str)
254
+
255
+ # Prepare build directory based on mode
256
+ if build_mode == "standalone":
257
+ build_dir = _prepare_standalone_build(env_path, temp_dir)
258
+ else: # in-repo
259
+ build_dir = _prepare_inrepo_build(env_path, repo_root, temp_dir)
260
+
261
+ # Determine Dockerfile path
262
+ if dockerfile is None:
263
+ # Look for Dockerfile in server/ subdirectory
264
+ dockerfile = build_dir / "server" / "Dockerfile"
265
+ if not dockerfile.exists():
266
+ # Fallback to root of build directory
267
+ dockerfile = build_dir / "Dockerfile"
268
+
269
+ if not dockerfile.exists():
270
+ console.print(
271
+ f"[bold red]Error:[/bold red] Dockerfile not found at {dockerfile}",
272
+ )
273
+ return False
274
+
275
+ # Generate tag if not provided
276
+ if tag is None:
277
+ env_name = env_path.name
278
+ if env_name.endswith("_env"):
279
+ env_name = env_name[:-4]
280
+ tag = f"openenv-{env_name}"
281
+
282
+ console.print(f"[bold cyan]Building Docker image:[/bold cyan] {tag}")
283
+ console.print(f"[bold cyan]Build context:[/bold cyan] {build_dir}")
284
+ console.print(f"[bold cyan]Dockerfile:[/bold cyan] {dockerfile}")
285
+
286
+ # Prepare build args
287
+ if build_args is None:
288
+ build_args = {}
289
+
290
+ # Add build mode and env name to build args
291
+ build_args["BUILD_MODE"] = build_mode
292
+ build_args["ENV_NAME"] = env_path.name.replace("_env", "")
293
+
294
+ # Build Docker command
295
+ cmd = ["docker", "build", "-t", tag, "-f", str(dockerfile)]
296
+
297
+ if no_cache:
298
+ cmd.append("--no-cache")
299
+
300
+ for key, value in build_args.items():
301
+ cmd.extend(["--build-arg", f"{key}={value}"])
302
+
303
+ cmd.append(str(build_dir))
304
+
305
+ result = _run_command(cmd, check=False)
306
+ return result.returncode == 0
307
+
308
+
309
+ def _push_docker_image(tag: str, registry: str | None = None) -> bool:
310
+ """Push Docker image to registry."""
311
+ if registry:
312
+ full_tag = f"{registry}/{tag}"
313
+ console.print(f"[bold cyan]Tagging image as {full_tag}[/bold cyan]")
314
+ _run_command(["docker", "tag", tag, full_tag])
315
+ tag = full_tag
316
+
317
+ console.print(f"[bold cyan]Pushing image:[/bold cyan] {tag}")
318
+ result = _run_command(["docker", "push", tag], check=False)
319
+ return result.returncode == 0
320
+
321
+
322
+ @app.command()
323
+ def build(
324
+ env_path: Annotated[
325
+ str | None,
326
+ typer.Argument(
327
+ help="Path to the environment directory (default: current directory)"
328
+ ),
329
+ ] = None,
330
+ tag: Annotated[
331
+ str | None,
332
+ typer.Option(
333
+ "--tag",
334
+ "-t",
335
+ help="Docker image tag (default: openenv-<env_name>)",
336
+ ),
337
+ ] = None,
338
+ context: Annotated[
339
+ str | None,
340
+ typer.Option(
341
+ "--context",
342
+ "-c",
343
+ help="Build context path (default: <env_path>/server)",
344
+ ),
345
+ ] = None,
346
+ dockerfile: Annotated[
347
+ str | None,
348
+ typer.Option(
349
+ "--dockerfile",
350
+ "-f",
351
+ help="Path to Dockerfile (default: <context>/Dockerfile)",
352
+ ),
353
+ ] = None,
354
+ no_cache: Annotated[
355
+ bool,
356
+ typer.Option(
357
+ "--no-cache",
358
+ help="Build without using cache",
359
+ ),
360
+ ] = False,
361
+ build_arg: Annotated[
362
+ list[str] | None,
363
+ typer.Option(
364
+ "--build-arg",
365
+ help="Build arguments (can be used multiple times, format: KEY=VALUE)",
366
+ ),
367
+ ] = None,
368
+ ) -> None:
369
+ """
370
+ Build Docker images for OpenEnv environments.
371
+
372
+ This command builds Docker images using the environment's pyproject.toml
373
+ and uv for dependency management. Run from the environment root directory.
374
+
375
+ Examples:
376
+ # Build from environment root (recommended)
377
+ $ cd my_env
378
+ $ openenv build
379
+
380
+ # Build with custom tag
381
+ $ openenv build -t my-custom-tag
382
+
383
+ # Build without cache
384
+ $ openenv build --no-cache
385
+
386
+ # Build with custom build arguments
387
+ $ openenv build --build-arg VERSION=1.0 --build-arg ENV=prod
388
+
389
+ # Build from different directory
390
+ $ openenv build envs/echo_env
391
+ """
392
+ # Determine environment path (default to current directory)
393
+ if env_path is None:
394
+ env_path_obj = Path.cwd()
395
+ else:
396
+ env_path_obj = Path(env_path)
397
+
398
+ # Validate environment path
399
+ if not env_path_obj.exists():
400
+ print(
401
+ f"Error: Environment path does not exist: {env_path_obj}",
402
+ file=sys.stderr,
403
+ )
404
+ raise typer.Exit(1)
405
+
406
+ if not env_path_obj.is_dir():
407
+ print(
408
+ f"Error: Environment path is not a directory: {env_path_obj}",
409
+ file=sys.stderr,
410
+ )
411
+ raise typer.Exit(1)
412
+
413
+ # Check for openenv.yaml to confirm this is an environment directory
414
+ openenv_yaml = env_path_obj / "openenv.yaml"
415
+ if not openenv_yaml.exists():
416
+ print(
417
+ f"Error: Not an OpenEnv environment directory (missing openenv.yaml): {env_path_obj}",
418
+ file=sys.stderr,
419
+ )
420
+ print(
421
+ "Hint: Run this command from the environment root directory or specify the path",
422
+ file=sys.stderr,
423
+ )
424
+ raise typer.Exit(1)
425
+
426
+ console.print(f"[bold]Building Docker image for:[/bold] {env_path_obj.name}")
427
+ console.print("=" * 60)
428
+
429
+ # Parse build args
430
+ build_args = {}
431
+ if build_arg:
432
+ for arg in build_arg:
433
+ if "=" in arg:
434
+ key, value = arg.split("=", 1)
435
+ build_args[key] = value
436
+ else:
437
+ print(
438
+ f"Warning: Invalid build arg format: {arg}",
439
+ file=sys.stderr,
440
+ )
441
+
442
+ # Convert string paths to Path objects
443
+ context_path_obj = Path(context) if context else None
444
+ dockerfile_path_obj = Path(dockerfile) if dockerfile else None
445
+
446
+ # Build Docker image
447
+ success = _build_docker_image(
448
+ env_path=env_path_obj,
449
+ tag=tag,
450
+ context_path=context_path_obj,
451
+ dockerfile=dockerfile_path_obj,
452
+ build_args=build_args if build_args else None,
453
+ no_cache=no_cache,
454
+ )
455
+
456
+ if not success:
457
+ print("✗ Docker build failed", file=sys.stderr)
458
+ raise typer.Exit(1)
459
+
460
+ console.print("[bold green]✓ Docker build successful[/bold green]")
461
+ console.print("\n[bold green]Done![/bold green]")
src/core/openenv/cli/commands/fork.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Fork (duplicate) a Hugging Face Space using the Hub API."""
8
+
9
+ from __future__ import annotations
10
+
11
+ from typing import Annotated
12
+
13
+ import typer
14
+ from huggingface_hub import HfApi, login, whoami
15
+
16
+ from .._cli_utils import console
17
+
18
+ app = typer.Typer(
19
+ help="Fork (duplicate) an OpenEnv environment on Hugging Face to your account"
20
+ )
21
+
22
+
23
+ def _parse_key_value(s: str) -> tuple[str, str]:
24
+ """Parse KEY=VALUE string. Raises BadParameter if no '='."""
25
+ if "=" not in s:
26
+ raise typer.BadParameter(
27
+ f"Expected KEY=VALUE format, got: {s!r}. "
28
+ "Use --set-env KEY=VALUE or --set-secret KEY=VALUE"
29
+ )
30
+ key, _, value = s.partition("=")
31
+ key = key.strip()
32
+ if not key:
33
+ raise typer.BadParameter(f"Empty key in: {s!r}")
34
+ return key, value.strip()
35
+
36
+
37
+ def _ensure_hf_authenticated() -> str:
38
+ """Ensure user is authenticated with Hugging Face. Returns username."""
39
+ try:
40
+ user_info = whoami()
41
+ if isinstance(user_info, dict):
42
+ username = (
43
+ user_info.get("name")
44
+ or user_info.get("fullname")
45
+ or user_info.get("username")
46
+ )
47
+ else:
48
+ username = (
49
+ getattr(user_info, "name", None)
50
+ or getattr(user_info, "fullname", None)
51
+ or getattr(user_info, "username", None)
52
+ )
53
+ if not username:
54
+ raise ValueError("Could not extract username from whoami response")
55
+ console.print(f"[bold green]✓[/bold green] Authenticated as: {username}")
56
+ return username
57
+ except Exception:
58
+ console.print(
59
+ "[bold yellow]Not authenticated with Hugging Face. Please login...[/bold yellow]"
60
+ )
61
+ try:
62
+ login()
63
+ user_info = whoami()
64
+ if isinstance(user_info, dict):
65
+ username = (
66
+ user_info.get("name")
67
+ or user_info.get("fullname")
68
+ or user_info.get("username")
69
+ )
70
+ else:
71
+ username = (
72
+ getattr(user_info, "name", None)
73
+ or getattr(user_info, "fullname", None)
74
+ or getattr(user_info, "username", None)
75
+ )
76
+ if not username:
77
+ raise ValueError("Could not extract username from whoami response")
78
+ console.print(f"[bold green]✓[/bold green] Authenticated as: {username}")
79
+ return username
80
+ except Exception as e:
81
+ raise typer.BadParameter(
82
+ f"Hugging Face authentication failed: {e}. Please run login manually."
83
+ ) from e
84
+
85
+
86
+ @app.command()
87
+ def fork(
88
+ source_space: Annotated[
89
+ str,
90
+ typer.Argument(
91
+ help="Source Space ID in format 'owner/space-name' (e.g. org/my-openenv-space)"
92
+ ),
93
+ ],
94
+ repo_id: Annotated[
95
+ str | None,
96
+ typer.Option(
97
+ "--repo-id",
98
+ "-r",
99
+ help="Target repo ID for the fork (default: created under your account with same name)",
100
+ ),
101
+ ] = None,
102
+ private: Annotated[
103
+ bool,
104
+ typer.Option("--private", help="Create the forked Space as private"),
105
+ ] = False,
106
+ set_env: Annotated[
107
+ list[str],
108
+ typer.Option(
109
+ "--set-env",
110
+ "-e",
111
+ help="Set Space variable (public). Can be repeated. Format: KEY=VALUE",
112
+ ),
113
+ ] = [],
114
+ set_secret: Annotated[
115
+ list[str],
116
+ typer.Option(
117
+ "--set-secret",
118
+ "--secret",
119
+ "-s",
120
+ help="Set Space secret. Can be repeated. Format: KEY=VALUE",
121
+ ),
122
+ ] = [],
123
+ hardware: Annotated[
124
+ str | None,
125
+ typer.Option(
126
+ "--hardware",
127
+ "-H",
128
+ help="Request hardware (e.g. t4-medium, cpu-basic). See Hub docs for options.",
129
+ ),
130
+ ] = None,
131
+ ) -> None:
132
+ """
133
+ Fork (duplicate) a Hugging Face Space to your account using the Hub API.
134
+
135
+ Uses the Hugging Face duplicate_space API. You can set environment variables
136
+ and secrets, and request hardware/storage/sleep time at creation time.
137
+
138
+ Examples:
139
+ $ openenv fork owner/source-space
140
+ $ openenv fork owner/source-space --private
141
+ $ openenv fork owner/source-space --repo-id myuser/my-fork
142
+ $ openenv fork owner/source-space --set-env MODEL_ID=user/model --set-secret HF_TOKEN=hf_xxx
143
+ $ openenv fork owner/source-space --hardware t4-medium
144
+ """
145
+ if "/" not in source_space or source_space.count("/") != 1:
146
+ raise typer.BadParameter(
147
+ f"Invalid source Space ID: {source_space!r}. Expected format: 'owner/space-name'"
148
+ )
149
+
150
+ _ensure_hf_authenticated()
151
+ api = HfApi()
152
+
153
+ # Build kwargs for duplicate_space (only pass what we have)
154
+ dup_kwargs: dict = {
155
+ "from_id": source_space,
156
+ "private": private,
157
+ }
158
+ if set_env:
159
+ dup_kwargs["variables"] = [
160
+ {"key": k, "value": v} for k, v in (_parse_key_value(x) for x in set_env)
161
+ ]
162
+ if set_secret:
163
+ dup_kwargs["secrets"] = [
164
+ {"key": k, "value": v} for k, v in (_parse_key_value(x) for x in set_secret)
165
+ ]
166
+ # HF API requires hardware when duplicating; default to free cpu-basic
167
+ dup_kwargs["hardware"] = hardware if hardware is not None else "cpu-basic"
168
+ if repo_id is not None:
169
+ if "/" not in repo_id or repo_id.count("/") != 1:
170
+ raise typer.BadParameter(
171
+ f"Invalid --repo-id: {repo_id!r}. Expected format: 'username/repo-name'"
172
+ )
173
+ dup_kwargs["to_id"] = repo_id
174
+
175
+ console.print(f"[bold cyan]Forking Space {source_space}...[/bold cyan]")
176
+ try:
177
+ result = api.duplicate_space(**dup_kwargs)
178
+ except Exception as e:
179
+ console.print(f"[bold red]✗[/bold red] Fork failed: {e}")
180
+ raise typer.Exit(1) from e
181
+
182
+ # result is RepoUrl (str-like) or similar; get repo_id for display
183
+ if hasattr(result, "repo_id"):
184
+ new_repo_id = result.repo_id
185
+ elif isinstance(result, str):
186
+ # URL like https://huggingface.co/spaces/owner/name -> owner/name
187
+ if "/spaces/" in result:
188
+ new_repo_id = result.split("/spaces/")[-1].rstrip("/")
189
+ else:
190
+ new_repo_id = result
191
+ else:
192
+ new_repo_id = getattr(result, "repo_id", str(result))
193
+
194
+ console.print("[bold green]✓[/bold green] Space forked successfully")
195
+ console.print(
196
+ f"[bold]Space URL:[/bold] https://huggingface.co/spaces/{new_repo_id}"
197
+ )
src/core/openenv/cli/commands/init.py ADDED
@@ -0,0 +1,500 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Initialize a new OpenEnv environment."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import random
6
+ import shutil
7
+ import subprocess
8
+ from importlib import resources
9
+ from pathlib import Path
10
+ from typing import Annotated, Dict, List, Tuple
11
+
12
+ import typer
13
+
14
+ from .._cli_utils import console
15
+
16
+ app = typer.Typer(help="Initialize a new OpenEnv environment")
17
+
18
+
19
+ def _snake_to_pascal(snake_str: str) -> str:
20
+ """Convert snake_case to PascalCase (e.g., 'my_env' -> 'MyEnv')."""
21
+ return "".join(word.capitalize() for word in snake_str.split("_"))
22
+
23
+
24
def _get_env_prefix(env_name: str) -> str:
    """Extract the class-name prefix (e.g., 'my_env' -> 'My', 'my_game_env' -> 'MyGame').

    The trailing '_env' suffix (if any) is dropped; the remainder is
    PascalCased and used to build class names like '<Prefix>Environment'.
    """
    # Remove trailing '_env' if present
    if env_name.endswith("_env"):
        base = env_name[:-4]  # Remove '_env'
    else:
        base = env_name

    # If empty or just one part, use the whole thing
    if not base or "_" not in base:
        return base.capitalize() if base else env_name.capitalize()

    # PascalCase every remaining part (e.g. 'my_game' -> 'MyGame')
    parts = base.split("_")
    return "".join(word.capitalize() for word in parts)
39
+
40
+
41
+ def _snake_to_camel(snake_str: str) -> str:
42
+ """Convert snake_case to camelCase (e.g., 'my_env' -> 'myEnv')."""
43
+ parts = snake_str.split("_")
44
+ return parts[0] + "".join(word.capitalize() for word in parts[1:])
45
+
46
+
47
+ def _snake_to_title(snake_str: str) -> str:
48
+ """Convert snake_case to Title Case (e.g., 'my_env' -> 'My Env')."""
49
+ return " ".join(word.capitalize() for word in snake_str.split("_"))
50
+
51
+
52
+ def _validate_env_name(name: str) -> str:
53
+ """Validate environment name (must be valid Python identifier in snake_case)."""
54
+ if not name:
55
+ raise typer.BadParameter("Environment name cannot be empty")
56
+
57
+ # Check if it's a valid Python identifier
58
+ if not name.isidentifier():
59
+ raise typer.BadParameter(
60
+ f"Environment name '{name}' is not a valid Python identifier. Use snake_case (e.g., 'my_env', 'game_env')."
61
+ )
62
+
63
+ # Check if it starts with a number
64
+ if name[0].isdigit():
65
+ raise typer.BadParameter(
66
+ f"Environment name '{name}' cannot start with a number."
67
+ )
68
+
69
+ return name
70
+
71
+
72
+ def _get_random_hf_space_config() -> Dict[str, str]:
73
+ """
74
+ Get random Hugging Face Space configuration values.
75
+
76
+ Returns:
77
+ Dictionary with 'emoji', 'colorFrom', and 'colorTo' keys
78
+ """
79
+ # Valid emojis (emoji-only characters)
80
+ emojis = [
81
+ "🎮",
82
+ "🎯",
83
+ "🚀",
84
+ "🌟",
85
+ "🎨",
86
+ "🎪",
87
+ "🎭",
88
+ "🎬",
89
+ "🎤",
90
+ "🎧",
91
+ "🎵",
92
+ "🎶",
93
+ "🎸",
94
+ "🎹",
95
+ "🥁",
96
+ "🎺",
97
+ "🎻",
98
+ "🎼",
99
+ "🎯",
100
+ "🎲",
101
+ "🎳",
102
+ "🎰",
103
+ "🎴",
104
+ "🃏",
105
+ "🀄",
106
+ "🎴",
107
+ "🎨",
108
+ "🖼️",
109
+ "🎬",
110
+ "🎭",
111
+ "🎪",
112
+ "🎤",
113
+ "🎧",
114
+ "🎵",
115
+ "🎶",
116
+ "🎸",
117
+ "🎹",
118
+ "🎺",
119
+ "🎻",
120
+ "🥁",
121
+ "🎯",
122
+ "🎲",
123
+ "🎳",
124
+ "🎰",
125
+ "🏀",
126
+ "⚽",
127
+ "🏈",
128
+ "⚾",
129
+ "🎾",
130
+ "🏐",
131
+ "🏉",
132
+ "🎱",
133
+ "🏓",
134
+ "🏸",
135
+ "🥅",
136
+ "🏒",
137
+ "🏑",
138
+ "🏏",
139
+ "⛳",
140
+ "🏹",
141
+ "🎣",
142
+ "🥊",
143
+ "🥋",
144
+ "🎽",
145
+ "🏅",
146
+ "🎖️",
147
+ "🏆",
148
+ "🥇",
149
+ "🥈",
150
+ "🥉",
151
+ "🔊",
152
+ "🔉",
153
+ "🔈",
154
+ "🔇",
155
+ "📢",
156
+ "📣",
157
+ "📯",
158
+ "🔔",
159
+ "🔕",
160
+ "📻",
161
+ "📡",
162
+ "💻",
163
+ "🖥️",
164
+ "🖨️",
165
+ "⌨️",
166
+ "🖱️",
167
+ "🖲️",
168
+ "🕹️",
169
+ "🗜️",
170
+ "💾",
171
+ "💿",
172
+ "📀",
173
+ "📼",
174
+ "📷",
175
+ "📸",
176
+ "📹",
177
+ "🎥",
178
+ "📽️",
179
+ "🎞️",
180
+ "📞",
181
+ "☎️",
182
+ "📟",
183
+ "📠",
184
+ "📺",
185
+ "📻",
186
+ "🎙️",
187
+ "🎚️",
188
+ "🎛️",
189
+ "⏱️",
190
+ "⏲️",
191
+ "⏰",
192
+ "🕰️",
193
+ "⌚",
194
+ "📱",
195
+ "📲",
196
+ "💻",
197
+ "⌨️",
198
+ "🖥️",
199
+ "🖨️",
200
+ "🖱️",
201
+ ]
202
+
203
+ # Valid colors from HF Spaces config reference
204
+ colors = ["red", "yellow", "green", "blue", "indigo", "purple", "pink", "gray"]
205
+
206
+ return {
207
+ "emoji": random.choice(emojis),
208
+ "colorFrom": random.choice(colors),
209
+ "colorTo": random.choice(colors),
210
+ }
211
+
212
+
213
def _create_template_replacements(env_name: str) -> Dict[str, str]:
    """
    Create comprehensive template replacement dictionary.

    Supports all naming conventions:
    - PascalCase for class names
    - camelCase for variable names
    - snake_case for module names, file paths

    Args:
        env_name: Validated snake_case environment name.

    Returns:
        Mapping of template placeholder -> concrete value. Longer placeholders
        (e.g. '__ENV_CLASS_NAME__Environment') take precedence over the bare
        '__ENV_CLASS_NAME__' because `_replace_in_content` applies
        replacements longest-key-first, regardless of dict order.
    """
    env_prefix = _get_env_prefix(env_name)
    env_camel = _snake_to_camel(env_name)
    env_title = _snake_to_title(env_name)

    # Get random HF Space config values (emoji + gradient colors)
    hf_config = _get_random_hf_space_config()

    replacements = {
        # Full class-name placeholders (longest; must win over the bare
        # '__ENV_CLASS_NAME__' — guaranteed by longest-first ordering in
        # `_replace_in_content`, not by dict insertion order)
        "__ENV_CLASS_NAME__Environment": f"{env_prefix}Environment",
        "__ENV_CLASS_NAME__Action": f"{env_prefix}Action",
        "__ENV_CLASS_NAME__Observation": f"{env_prefix}Observation",
        "__ENV_CLASS_NAME__Env": f"{env_prefix}Env",
        # Partial placeholders
        "__ENV_NAME__": env_name,
        "__ENV_CLASS_NAME__": env_prefix,  # Use prefix, not full PascalCase
        "__ENV_TITLE_NAME__": env_title,
        "__ENV_CAMEL_NAME__": env_camel,
        # Hugging Face Space config placeholders
        "__HF_EMOJI__": hf_config["emoji"],
        "__HF_COLOR_FROM__": hf_config["colorFrom"],
        "__HF_COLOR_TO__": hf_config["colorTo"],
    }

    return replacements
247
+
248
+
249
+ def _replace_in_content(content: str, replacements: Dict[str, str]) -> str:
250
+ """Replace all occurrences in content using case-sensitive replacements."""
251
+ result = content
252
+ # Sort by length (longest first) to avoid partial replacements
253
+ for old, new in sorted(replacements.items(), key=lambda x: len(x[0]), reverse=True):
254
+ result = result.replace(old, new)
255
+ return result
256
+
257
+
258
+ def _should_rename_file(filename: str, env_name: str) -> Tuple[bool, str]:
259
+ """
260
+ Check if a file should be renamed and return the new name.
261
+
262
+ Handles template placeholders in filenames like:
263
+ - `__ENV_NAME___environment.py` → `<env_name>_environment.py`
264
+ """
265
+ # Check for template placeholder
266
+ if "__ENV_NAME__" in filename:
267
+ new_name = filename.replace("__ENV_NAME__", env_name)
268
+ return True, new_name
269
+
270
+ return False, filename
271
+
272
+
273
def _copy_and_template_file(
    src_path: Path,
    dest_path: Path,
    replacements: Dict[str, str],
) -> None:
    """Copy one template file, applying text replacements when it is UTF-8.

    Text files get their line endings normalized to LF before the
    placeholder replacements are applied; files that fail UTF-8 decoding
    are treated as binary and copied verbatim.

    Raises:
        RuntimeError: If reading or writing fails.
    """
    dest_path.parent.mkdir(parents=True, exist_ok=True)

    try:
        raw = src_path.read_bytes()
        try:
            text = raw.decode("utf-8")
        except UnicodeDecodeError:
            # Binary payload: copy the bytes untouched.
            dest_path.write_bytes(raw)
            return
        # Normalize CRLF/CR to LF so replacements and output are consistent
        # across platforms.
        normalized = text.replace("\r\n", "\n").replace("\r", "\n")
        templated = _replace_in_content(normalized, replacements)
        dest_path.write_text(templated, encoding="utf-8", newline="\n")
    except Exception as e:
        raise RuntimeError(
            f"Failed to copy template file {src_path} to {dest_path}: {e}"
        ) from e
299
+
300
+
301
def _copy_template_directory(
    template_pkg: str,
    template_dir: str,
    dest_dir: Path,
    replacements: Dict[str, str],
    env_name: str,
) -> List[Path]:
    """Recursively copy a packaged template directory into *dest_dir*,
    applying placeholder replacements to file contents and filenames.

    Args:
        template_pkg: Dotted package path holding the template files.
        template_dir: Optional subdirectory inside the template package.
        dest_dir: Destination root for the generated files.
        replacements: Placeholder -> value mapping for content templating.
        env_name: Environment name used for `__ENV_NAME__` filename renames.

    Returns:
        List of destination paths that were created.

    Raises:
        FileNotFoundError: If the template directory cannot be located.
    """
    created_files: List[Path] = []

    # Locate the template directory on disk WITHOUT importing the template
    # package itself (template modules contain placeholders and may not be
    # importable). Instead, import the parent package and resolve the last
    # path segment relative to its __file__.
    import importlib

    try:
        # Import the parent package (not the template package itself)
        if "." in template_pkg:
            parent_pkg = ".".join(template_pkg.split(".")[:-1])
            pkg = importlib.import_module(parent_pkg)
            template_path = Path(pkg.__file__).parent / template_pkg.split(".")[-1]
        else:
            # NOTE(review): with no dot, this branch imports template_pkg
            # itself and appends its own last segment — looks identical to
            # the branch above; confirm single-segment packages resolve.
            pkg = importlib.import_module(template_pkg.split(".")[0])
            template_path = Path(pkg.__file__).parent / template_pkg.split(".")[-1]
    except Exception:
        # Fallback: resolve via importlib.resources (e.g. namespace packages
        # or packages without a plain __file__). Any failure here is reported
        # as a missing template directory.
        try:
            base = resources.files(template_pkg.split(".")[0])
            template_path = base.joinpath(*template_pkg.split(".")[1:])
            if not template_path.exists():
                raise FileNotFoundError(f"Template directory not found: {template_pkg}")
        except Exception as e:
            raise FileNotFoundError(
                f"Template directory not found: {template_pkg}"
            ) from e

    if template_dir:
        template_path = template_path / template_dir

    if not template_path.exists() or not template_path.is_dir():
        raise FileNotFoundError(
            f"Template directory not found: {template_pkg}.{template_dir}"
        )

    # Walk every file under the template root; directories are created
    # implicitly by _copy_and_template_file.
    for item in template_path.rglob("*"):
        if item.is_file():
            rel_path = item.relative_to(template_path)
            dest_path = dest_dir / rel_path

            # Apply filename templating (__ENV_NAME__ placeholders)
            should_rename, new_name = _should_rename_file(dest_path.name, env_name)
            if should_rename:
                dest_path = dest_path.parent / new_name

            # Copy and apply content replacements
            _copy_and_template_file(item, dest_path, replacements)
            created_files.append(dest_path)

    return created_files
360
+
361
+
362
def _generate_uv_lock(env_dir: Path) -> bool:
    """Generate uv.lock for the new environment by running `uv lock`.

    Returns True on success; returns False (after printing a warning) when
    pyproject.toml is missing, the `uv` binary is not installed, or the
    command exits non-zero.
    """
    if not (env_dir / "pyproject.toml").exists():
        return False

    command = [
        "uv",
        "lock",
        "--directory",
        str(env_dir),
    ]

    try:
        completed = subprocess.run(command, capture_output=True, text=True, check=True)
    except subprocess.CalledProcessError as e:
        console.print(
            f"[yellow]Warning: Could not generate uv.lock: {e.stderr}[/yellow]"
        )
        return False
    except FileNotFoundError:
        console.print(
            "[yellow]Warning: 'uv' not found. Install it to generate uv.lock[/yellow]"
        )
        return False

    if completed.stdout:
        console.print(completed.stdout)
    return True
394
+
395
+
396
@app.command()
def init(
    env_name: Annotated[
        str,
        typer.Argument(
            help="Name of the environment to create (snake_case, e.g., 'my_env')"
        ),
    ],
    output_dir: Annotated[
        str | None,
        typer.Option(
            "--output-dir",
            "-o",
            help="Output directory (defaults to current working directory)",
        ),
    ] = None,
) -> None:
    """
    Initialize a new OpenEnv environment.

    Creates a new directory with the environment name and generates all necessary
    files based on the OpenEnv template structure. On any failure the partially
    created directory is removed (best effort) and the command exits non-zero.

    Example:
        $ openenv init my_game_env
        $ openenv init my_env --output-dir /path/to/projects
    """
    # Validate environment name (must be a valid Python identifier)
    env_name = _validate_env_name(env_name)

    # Determine output directory
    base_dir = Path(output_dir).resolve() if output_dir else Path.cwd().resolve()
    env_dir = base_dir / env_name

    # Refuse to clobber an existing file or a non-empty directory
    if env_dir.exists():
        if env_dir.is_file():
            raise typer.BadParameter(f"Path '{env_dir}' exists and is a file")
        if any(env_dir.iterdir()):
            raise typer.BadParameter(
                f"Directory '{env_dir}' already exists and is not empty. "
                "Please choose a different name or remove the existing directory."
            )

    try:
        # Create template replacements (class names, titles, HF Space config)
        replacements = _create_template_replacements(env_name)

        # Create environment directory
        env_dir.mkdir(parents=True, exist_ok=True)

        console.print(
            f"[bold cyan]Creating OpenEnv environment '{env_name}'...[/bold cyan]"
        )

        # Copy template files from the packaged template structure
        template_pkg = "openenv.cli.templates.openenv_env"
        created_files = _copy_template_directory(
            template_pkg,
            "",
            env_dir,
            replacements,
            env_name,
        )

        console.print(f"[bold green]✓[/bold green] Created {len(created_files)} files")

        # Generate uv.lock (best effort; the project is usable without it)
        console.print("\n[bold]Generating uv.lock...[/bold]")
        if _generate_uv_lock(env_dir):
            console.print("[green]✓[/green] Generated uv.lock")
        else:
            console.print("[yellow]⚠[/yellow] Could not generate uv.lock automatically")
            console.print("  You can generate it manually with:")
            console.print(f"    cd {env_dir} && uv lock")

        console.print(
            f"\n[bold green]Environment created successfully at: {env_dir}[/bold green]"
        )
        console.print("\n[bold]Next steps:[/bold]")
        console.print(f"  cd {env_dir}")
        console.print(
            f"  # Edit your environment implementation in server/{env_name}_environment.py"
        )
        console.print("  # Edit your models in models.py")
        console.print("  # Install dependencies: uv sync")
        console.print("\n  # To integrate into OpenEnv repo:")
        console.print(f"  # 1. Copy this directory to <repo_root>/envs/{env_name}_env")
        console.print(
            f"  # 2. Build from repo root: docker build -t {env_name}_env:latest -f envs/{env_name}_env/server/Dockerfile ."
        )
        console.print(
            f"  # 3. Run your image: docker run -p 8000:8000 {env_name}_env:latest"
        )

    except Exception as e:
        # Cleanup on error: remove the partially created directory (best effort)
        if env_dir.exists() and env_dir.is_dir():
            try:
                shutil.rmtree(env_dir)
            except Exception:
                pass

        console.print(f"[bold red]Error:[/bold red] {e}")
        raise typer.Exit(1) from e
src/core/openenv/cli/commands/push.py ADDED
@@ -0,0 +1,718 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Push an OpenEnv environment to Hugging Face Spaces."""
8
+
9
+ from __future__ import annotations
10
+
11
+ import shutil
12
+ import sys
13
+ import tempfile
14
+ from fnmatch import fnmatch
15
+ from pathlib import Path
16
+ from typing import Annotated
17
+
18
+ import typer
19
+ import yaml
20
+ from huggingface_hub import HfApi, login, whoami
21
+
22
+ from .._cli_utils import console, validate_env_structure
23
+
24
+ app = typer.Typer(help="Push an OpenEnv environment to Hugging Face Spaces")
25
+
26
+
27
+ DEFAULT_PUSH_IGNORE_PATTERNS = [".*", "__pycache__", "*.pyc"]
28
+
29
+
30
+ def _path_matches_pattern(relative_path: Path, pattern: str) -> bool:
31
+ """Return True if a relative path matches an exclude pattern."""
32
+ normalized_pattern = pattern.strip()
33
+ if normalized_pattern.startswith("!"):
34
+ return False
35
+
36
+ while normalized_pattern.startswith("./"):
37
+ normalized_pattern = normalized_pattern[2:]
38
+
39
+ if normalized_pattern.startswith("/"):
40
+ normalized_pattern = normalized_pattern[1:]
41
+
42
+ if not normalized_pattern:
43
+ return False
44
+
45
+ posix_path = relative_path.as_posix()
46
+ pattern_candidates = [normalized_pattern]
47
+ if normalized_pattern.startswith("**/"):
48
+ # Gitignore-style "**/" can also match directly at the root.
49
+ pattern_candidates.append(normalized_pattern[3:])
50
+
51
+ # Support directory patterns such as "artifacts/" and "**/outputs/".
52
+ if normalized_pattern.endswith("/"):
53
+ dir_pattern_candidates: list[str] = []
54
+ for candidate in pattern_candidates:
55
+ base = candidate.rstrip("/")
56
+ if not base:
57
+ continue
58
+ dir_pattern_candidates.extend([base, f"{base}/*"])
59
+
60
+ return any(
61
+ fnmatch(posix_path, candidate) for candidate in dir_pattern_candidates
62
+ )
63
+
64
+ # Match both full relative path and basename for convenience.
65
+ return any(
66
+ fnmatch(posix_path, candidate) for candidate in pattern_candidates
67
+ ) or any(fnmatch(relative_path.name, candidate) for candidate in pattern_candidates)
68
+
69
+
70
def _should_exclude_path(relative_path: Path, ignore_patterns: list[str]) -> bool:
    """Return True when the path should be excluded from staging/upload."""
    for pattern in ignore_patterns:
        if _path_matches_pattern(relative_path, pattern):
            return True
    return False
75
+
76
+
77
+ def _read_ignore_file(ignore_path: Path) -> tuple[list[str], int]:
78
+ """Read ignore patterns from a file and return (patterns, ignored_negations)."""
79
+ patterns: list[str] = []
80
+ ignored_negations = 0
81
+
82
+ for line in ignore_path.read_text().splitlines():
83
+ stripped = line.strip()
84
+ if not stripped or stripped.startswith("#"):
85
+ continue
86
+ if stripped.startswith("!"):
87
+ ignored_negations += 1
88
+ continue
89
+ patterns.append(stripped)
90
+
91
+ return patterns, ignored_negations
92
+
93
+
94
def _load_ignore_patterns(env_dir: Path, exclude_file: str | None) -> list[str]:
    """Combine default ignore patterns with an optional user-supplied file.

    A relative --exclude path is resolved against the environment directory.
    Negated ('!') patterns are counted and reported but never applied.

    Raises:
        typer.BadParameter: If the exclude file is missing or not a file.
    """
    patterns = list(DEFAULT_PUSH_IGNORE_PATTERNS)
    skipped_negations = 0

    # Optional source: explicit exclude file from CLI.
    if exclude_file:
        ignore_path = Path(exclude_file)
        if not ignore_path.is_absolute():
            ignore_path = env_dir / ignore_path
        ignore_path = ignore_path.resolve()

        if not (ignore_path.exists() and ignore_path.is_file()):
            raise typer.BadParameter(
                f"Exclude file not found or not a file: {ignore_path}"
            )

        file_patterns, negations = _read_ignore_file(ignore_path)
        patterns.extend(file_patterns)
        skipped_negations += negations
        console.print(
            f"[bold green]✓[/bold green] Loaded {len(file_patterns)} ignore patterns from --exclude: {ignore_path}"
        )

    # Keep stable order while removing duplicates.
    patterns = list(dict.fromkeys(patterns))

    if skipped_negations > 0:
        console.print(
            f"[bold yellow]⚠[/bold yellow] Skipped {skipped_negations} negated ignore patterns ('!') because negation is not supported for push excludes"
        )

    return patterns
131
+
132
+
133
def _copytree_ignore_factory(env_dir: Path, ignore_patterns: list[str]):
    """Build a shutil.copytree ignore callback from path-based patterns."""

    def _ignore(path: str, names: list[str]) -> set[str]:
        parent = Path(path)
        excluded: set[str] = set()

        for name in names:
            try:
                rel = (parent / name).relative_to(env_dir)
            except ValueError:
                # Entry is not under env_dir (e.g. symlink target, or a
                # copytree root that differs from env_dir); leave it alone.
                continue
            if _should_exclude_path(rel, ignore_patterns):
                excluded.add(name)

        return excluded

    return _ignore
154
+
155
+
156
def _validate_openenv_directory(directory: Path) -> tuple[str, dict]:
    """
    Validate that the directory is an OpenEnv environment.

    Runs the shared structural validation (printing any warnings it returns),
    then parses ``openenv.yaml`` and checks it declares a ``name``.

    Args:
        directory: Root of the environment to validate.

    Returns:
        Tuple of (env_name, manifest_data)

    Raises:
        typer.BadParameter: If the structure is invalid, the manifest cannot
            be parsed, is not a mapping, or lacks a ``name`` field.
    """
    # Use the comprehensive validation function; warnings are non-fatal
    try:
        warnings = validate_env_structure(directory)
        for warning in warnings:
            console.print(f"[bold yellow]⚠[/bold yellow] {warning}")
    except FileNotFoundError as e:
        raise typer.BadParameter(f"Invalid OpenEnv environment structure: {e}") from e

    # Load and validate manifest
    manifest_path = directory / "openenv.yaml"
    try:
        with open(manifest_path, "r") as f:
            manifest = yaml.safe_load(f)
    except Exception as e:
        raise typer.BadParameter(f"Failed to parse openenv.yaml: {e}") from e

    if not isinstance(manifest, dict):
        raise typer.BadParameter("openenv.yaml must be a YAML dictionary")

    env_name = manifest.get("name")
    if not env_name:
        raise typer.BadParameter("openenv.yaml must contain a 'name' field")

    return env_name, manifest
187
+
188
+
189
def _extract_username(user_info) -> str:
    """Pull a display username out of a `whoami()` response (dict or object).

    Tries 'name', then 'fullname', then 'username'.

    Raises:
        ValueError: If no usable username field is present.
    """
    if isinstance(user_info, dict):
        username = (
            user_info.get("name")
            or user_info.get("fullname")
            or user_info.get("username")
        )
    else:
        username = (
            getattr(user_info, "name", None)
            or getattr(user_info, "fullname", None)
            or getattr(user_info, "username", None)
        )

    if not username:
        raise ValueError("Could not extract username from whoami response")
    return username


def _ensure_hf_authenticated() -> str:
    """
    Ensure user is authenticated with Hugging Face.

    If `whoami()` fails (not logged in), an interactive `login()` is
    attempted and the identity re-checked.

    Returns:
        Username of authenticated user.

    Raises:
        typer.BadParameter: If authentication cannot be established.
    """
    try:
        # Try to get current user
        username = _extract_username(whoami())
        console.print(f"[bold green]✓[/bold green] Authenticated as: {username}")
        return username
    except Exception:
        # Not authenticated, prompt for login
        console.print(
            "[bold yellow]Not authenticated with Hugging Face. Please login...[/bold yellow]"
        )

        try:
            login()
            # Verify login worked
            username = _extract_username(whoami())
            console.print(f"[bold green]✓[/bold green] Authenticated as: {username}")
            return username
        except Exception as e:
            raise typer.BadParameter(
                f"Hugging Face authentication failed: {e}. Please run login manually."
            ) from e
252
+
253
+
254
def _prepare_staging_directory(
    env_dir: Path,
    env_name: str,
    staging_dir: Path,
    ignore_patterns: list[str],
    base_image: str | None = None,
    enable_interface: bool = True,
) -> None:
    """
    Prepare files for deployment to a Hugging Face Space.

    This includes:
    - Copying env files into *staging_dir* (honoring ignore patterns)
    - Moving server/Dockerfile to the repo root if needed (HF requirement)
    - Optionally injecting ENV ENABLE_WEB_INTERFACE=true and overriding the
      Dockerfile's first FROM with *base_image*
    - Ensuring README has proper HF frontmatter (if interface enabled)
    """
    # Create staging directory structure
    staging_dir.mkdir(parents=True, exist_ok=True)

    # Copy all files from env directory, filtering both top-level entries
    # and nested entries (via the copytree ignore callback)
    copy_ignore = _copytree_ignore_factory(env_dir, ignore_patterns)
    for item in env_dir.iterdir():
        relative_path = item.relative_to(env_dir)
        if _should_exclude_path(relative_path, ignore_patterns):
            continue

        dest = staging_dir / item.name
        if item.is_dir():
            shutil.copytree(item, dest, dirs_exist_ok=True, ignore=copy_ignore)
        else:
            shutil.copy2(item, dest)

    # Dockerfile must be at repo root for Hugging Face. Prefer root if present
    # (it was copied there); otherwise move server/Dockerfile to root.
    dockerfile_server_path = staging_dir / "server" / "Dockerfile"
    dockerfile_root_path = staging_dir / "Dockerfile"
    dockerfile_path: Path | None = None

    if dockerfile_root_path.exists():
        dockerfile_path = dockerfile_root_path
    elif dockerfile_server_path.exists():
        dockerfile_server_path.rename(dockerfile_root_path)
        console.print(
            "[bold cyan]Moved Dockerfile to repository root for deployment[/bold cyan]"
        )
        dockerfile_path = dockerfile_root_path

    # Rewrite the Dockerfile line by line: swap the first FROM when
    # base_image is given, and insert the web-interface ENV before the first
    # CMD (skipping CMD lines that are HEALTHCHECK continuations).
    if dockerfile_path and dockerfile_path.exists():
        dockerfile_content = dockerfile_path.read_text()
        lines = dockerfile_content.split("\n")
        new_lines = []
        cmd_found = False
        base_image_updated = False
        # Respect an existing ENABLE_WEB_INTERFACE setting anywhere in the file
        web_interface_env_exists = "ENABLE_WEB_INTERFACE" in dockerfile_content
        last_instruction = None

        for line in lines:
            stripped = line.strip()
            token = stripped.split(maxsplit=1)[0] if stripped else ""
            current_instruction = token.upper()

            # "CMD" immediately after HEALTHCHECK belongs to the healthcheck,
            # not the container entrypoint
            is_healthcheck_continuation = last_instruction == "HEALTHCHECK"

            # Update base image if specified (first FROM only)
            if base_image and stripped.startswith("FROM") and not base_image_updated:
                new_lines.append(f"FROM {base_image}")
                base_image_updated = True
                last_instruction = "FROM"
                continue

            if (
                stripped.startswith("CMD")
                and not cmd_found
                and not web_interface_env_exists
                and enable_interface
                and not is_healthcheck_continuation
            ):
                new_lines.append("ENV ENABLE_WEB_INTERFACE=true")
                cmd_found = True

            new_lines.append(line)

            if current_instruction:
                last_instruction = current_instruction

        # No CMD seen: append the ENV at the end as a fallback
        if not cmd_found and not web_interface_env_exists and enable_interface:
            new_lines.append("ENV ENABLE_WEB_INTERFACE=true")

        # base_image requested but no FROM found: prepend one
        if base_image and not base_image_updated:
            new_lines.insert(0, f"FROM {base_image}")

        dockerfile_path.write_text("\n".join(new_lines))

        changes = []
        if base_image and base_image_updated:
            changes.append("updated base image")
        if enable_interface and not web_interface_env_exists:
            changes.append("enabled web interface")
        if changes:
            console.print(
                f"[bold green]✓[/bold green] Updated Dockerfile: {', '.join(changes)}"
            )
    else:
        console.print(
            "[bold yellow]⚠[/bold yellow] No Dockerfile at server/ or repo root"
        )

    # Ensure README has proper HF frontmatter (only if interface enabled)
    if enable_interface:
        readme_path = staging_dir / "README.md"
        if readme_path.exists():
            readme_content = readme_path.read_text()
            if "base_path: /web" not in readme_content:
                # Check if frontmatter exists
                if readme_content.startswith("---"):
                    # Add base_path to existing frontmatter, just before the
                    # closing '---' (and only if no base_path is present yet)
                    lines = readme_content.split("\n")
                    new_lines = []
                    _in_frontmatter = True  # NOTE(review): set but never read
                    for i, line in enumerate(lines):
                        new_lines.append(line)
                        if line.strip() == "---" and i > 0:
                            # End of frontmatter, add base_path before this line
                            if "base_path:" not in "\n".join(new_lines):
                                new_lines.insert(-1, "base_path: /web")
                            _in_frontmatter = False
                    readme_path.write_text("\n".join(new_lines))
                else:
                    # No frontmatter, add a complete HF Space header
                    frontmatter = f"""---
title: {env_name.replace("_", " ").title()} Environment Server
emoji: 🔊
colorFrom: '#00C9FF'
colorTo: '#1B2845'
sdk: docker
pinned: false
app_port: 8000
base_path: /web
tags:
- openenv
---

"""
                    readme_path.write_text(frontmatter + readme_content)
                console.print(
                    "[bold green]✓[/bold green] Updated README with HF Space frontmatter"
                )
        else:
            console.print("[bold yellow]⚠[/bold yellow] No README.md found")
405
+
406
def _create_hf_space(
    repo_id: str,
    api: HfApi,
    private: bool = False,
) -> None:
    """Create a Hugging Face Space if it doesn't exist."""
    console.print(f"[bold cyan]Creating/verifying space: {repo_id}[/bold cyan]")

    try:
        api.create_repo(
            repo_id=repo_id,
            repo_type="space",
            space_sdk="docker",
            private=private,
            exist_ok=True,
        )
    except Exception as e:
        # exist_ok=True already tolerates an existing Space, so any error
        # here is surfaced as a warning rather than a hard failure.
        console.print(f"[bold yellow]⚠[/bold yellow] Space creation: {e}")
    else:
        console.print(f"[bold green]✓[/bold green] Space {repo_id} is ready")
427
+
428
+
429
def _upload_to_hf_space(
    repo_id: str,
    staging_dir: Path,
    api: HfApi,
    ignore_patterns: list[str],
    private: bool = False,
    create_pr: bool = False,
    commit_message: str | None = None,
) -> None:
    """Upload staged files to a Hugging Face Space, optionally via a PR.

    NOTE: `private` is accepted for signature parity but is not used here
    (visibility is set when the Space is created).

    Raises:
        typer.Exit: With code 1 when the upload fails.
    """
    if create_pr:
        console.print(
            f"[bold cyan]Uploading files to {repo_id} (will open a Pull Request)...[/bold cyan]"
        )
    else:
        console.print(f"[bold cyan]Uploading files to {repo_id}...[/bold cyan]")

    upload_kwargs: dict = {
        "folder_path": str(staging_dir),
        "repo_id": repo_id,
        "repo_type": "space",
        "create_pr": create_pr,
        "ignore_patterns": ignore_patterns,
    }
    if commit_message:
        upload_kwargs["commit_message"] = commit_message

    try:
        result = api.upload_folder(**upload_kwargs)
        console.print("[bold green]✓[/bold green] Upload completed successfully")
        if create_pr and result is not None and hasattr(result, "pr_url"):
            console.print(f"[bold]Pull request:[/bold] {result.pr_url}")
        console.print(
            f"[bold]Space URL:[/bold] https://huggingface.co/spaces/{repo_id}"
        )
    except Exception as e:
        console.print(f"[bold red]✗[/bold red] Upload failed: {e}")
        raise typer.Exit(1) from e
467
+
468
+
469
+ @app.command()
470
+ def push(
471
+ directory: Annotated[
472
+ str | None,
473
+ typer.Argument(
474
+ help="Directory containing the OpenEnv environment (default: current directory)"
475
+ ),
476
+ ] = None,
477
+ repo_id: Annotated[
478
+ str | None,
479
+ typer.Option(
480
+ "--repo-id",
481
+ "-r",
482
+ help="Repository ID in format 'username/repo-name' (defaults to 'username/env-name' from openenv.yaml)",
483
+ ),
484
+ ] = None,
485
+ base_image: Annotated[
486
+ str | None,
487
+ typer.Option(
488
+ "--base-image",
489
+ "-b",
490
+ help="Base Docker image to use (overrides Dockerfile FROM)",
491
+ ),
492
+ ] = None,
493
+ interface: Annotated[
494
+ bool,
495
+ typer.Option(
496
+ "--interface",
497
+ help="Enable web interface (default: True if no registry specified)",
498
+ ),
499
+ ] = None,
500
+ no_interface: Annotated[
501
+ bool,
502
+ typer.Option(
503
+ "--no-interface",
504
+ help="Disable web interface",
505
+ ),
506
+ ] = False,
507
+ registry: Annotated[
508
+ str | None,
509
+ typer.Option(
510
+ "--registry",
511
+ help="Custom registry URL (e.g., docker.io/username). Disables web interface by default.",
512
+ ),
513
+ ] = None,
514
+ private: Annotated[
515
+ bool,
516
+ typer.Option(
517
+ "--private",
518
+ help="Deploy the space as private",
519
+ ),
520
+ ] = False,
521
+ create_pr: Annotated[
522
+ bool,
523
+ typer.Option(
524
+ "--create-pr",
525
+ help="Create a Pull Request instead of pushing to the default branch",
526
+ ),
527
+ ] = False,
528
+ exclude: Annotated[
529
+ str | None,
530
+ typer.Option(
531
+ "--exclude",
532
+ help="Optional additional ignore file with newline-separated glob patterns to exclude from Hugging Face uploads",
533
+ ),
534
+ ] = None,
535
+ ) -> None:
536
+ """
537
+ Push an OpenEnv environment to Hugging Face Spaces or a custom Docker registry.
538
+
539
+ This command:
540
+ 1. Validates that the directory is an OpenEnv environment (openenv.yaml present)
541
+ 2. Builds and pushes to Hugging Face Spaces or custom Docker registry
542
+ 3. Optionally enables web interface for deployment
543
+
544
+ The web interface is enabled by default when pushing to HuggingFace Spaces,
545
+ but disabled by default when pushing to a custom Docker registry.
546
+
547
+ Examples:
548
+ # Push to HuggingFace Spaces from current directory (web interface enabled)
549
+ $ cd my_env
550
+ $ openenv push
551
+
552
+ # Push to HuggingFace repo and open a Pull Request
553
+ $ openenv push my-org/my-env --create-pr
554
+ $ openenv push --repo-id my-org/my-env --create-pr
555
+
556
+ # Push to HuggingFace without web interface
557
+ $ openenv push --no-interface
558
+
559
+ # Push to Docker Hub
560
+ $ openenv push --registry docker.io/myuser
561
+
562
+ # Push to GitHub Container Registry
563
+ $ openenv push --registry ghcr.io/myorg
564
+
565
+ # Push to custom registry with web interface
566
+ $ openenv push --registry myregistry.io/path1/path2 --interface
567
+
568
+ # Push to specific HuggingFace repo
569
+ $ openenv push --repo-id my-org/my-env
570
+
571
+ # Push privately with custom base image
572
+ $ openenv push --private --base-image ghcr.io/meta-pytorch/openenv-base:latest
573
+ """
574
+ # Handle interface flag logic
575
+ if no_interface and interface:
576
+ console.print(
577
+ "[bold red]Error:[/bold red] Cannot specify both --interface and --no-interface",
578
+ file=sys.stderr,
579
+ )
580
+ raise typer.Exit(1)
581
+
582
+ # Determine if web interface should be enabled
583
+ if no_interface:
584
+ enable_interface = False
585
+ elif interface is not None:
586
+ enable_interface = interface
587
+ elif registry is not None:
588
+ # Custom registry: disable interface by default
589
+ enable_interface = False
590
+ else:
591
+ # HuggingFace: enable interface by default
592
+ enable_interface = True
593
+
594
+ # Determine directory
595
+ if directory:
596
+ env_dir = Path(directory).resolve()
597
+ else:
598
+ env_dir = Path.cwd().resolve()
599
+
600
+ if not env_dir.exists() or not env_dir.is_dir():
601
+ raise typer.BadParameter(f"Directory does not exist: {env_dir}")
602
+
603
+ # Check for openenv.yaml to confirm this is an environment directory
604
+ openenv_yaml = env_dir / "openenv.yaml"
605
+ if not openenv_yaml.exists():
606
+ console.print(
607
+ f"[bold red]Error:[/bold red] Not an OpenEnv environment directory (missing openenv.yaml): {env_dir}",
608
+ )
609
+ console.print(
610
+ "[yellow]Hint:[/yellow] Run this command from the environment root directory",
611
+ )
612
+ raise typer.Exit(1)
613
+
614
+ # Validate OpenEnv environment
615
+ console.print(
616
+ f"[bold cyan]Validating OpenEnv environment in {env_dir}...[/bold cyan]"
617
+ )
618
+ env_name, manifest = _validate_openenv_directory(env_dir)
619
+ console.print(f"[bold green]✓[/bold green] Found OpenEnv environment: {env_name}")
620
+
621
+ # Handle custom registry push
622
+ if registry:
623
+ console.print("[bold cyan]Preparing to push to custom registry...[/bold cyan]")
624
+ if enable_interface:
625
+ console.print("[bold cyan]Web interface will be enabled[/bold cyan]")
626
+
627
+ # Import build functions
628
+ from .build import _build_docker_image, _push_docker_image
629
+
630
+ # Prepare build args for custom registry deployment
631
+ build_args = {}
632
+ if enable_interface:
633
+ build_args["ENABLE_WEB_INTERFACE"] = "true"
634
+
635
+ # Build Docker image from the environment directory
636
+ tag = f"{registry}/{env_name}"
637
+ console.print(f"[bold cyan]Building Docker image: {tag}[/bold cyan]")
638
+
639
+ success = _build_docker_image(
640
+ env_path=env_dir,
641
+ tag=tag,
642
+ build_args=build_args if build_args else None,
643
+ )
644
+
645
+ if not success:
646
+ console.print("[bold red]✗ Docker build failed[/bold red]")
647
+ raise typer.Exit(1)
648
+
649
+ console.print("[bold green]✓ Docker build successful[/bold green]")
650
+
651
+ # Push to registry
652
+ console.print(f"[bold cyan]Pushing to registry: {registry}[/bold cyan]")
653
+
654
+ success = _push_docker_image(
655
+ tag, registry=None
656
+ ) # Tag already includes registry
657
+
658
+ if not success:
659
+ console.print("[bold red]✗ Docker push failed[/bold red]")
660
+ raise typer.Exit(1)
661
+
662
+ console.print("\n[bold green]✓ Deployment complete![/bold green]")
663
+ console.print(f"[bold]Image:[/bold] {tag}")
664
+ return
665
+
666
+ ignore_patterns = _load_ignore_patterns(env_dir, exclude)
667
+
668
+ # Ensure authentication for HuggingFace
669
+ username = _ensure_hf_authenticated()
670
+
671
+ # Determine repo_id
672
+ if not repo_id:
673
+ repo_id = f"{username}/{env_name}"
674
+
675
+ # Validate repo_id format
676
+ if "/" not in repo_id or repo_id.count("/") != 1:
677
+ raise typer.BadParameter(
678
+ f"Invalid repo-id format: {repo_id}. Expected format: 'username/repo-name'"
679
+ )
680
+
681
+ # Initialize Hugging Face API
682
+ api = HfApi()
683
+
684
+ # Prepare staging directory
685
+ deployment_type = (
686
+ "with web interface" if enable_interface else "without web interface"
687
+ )
688
+ console.print(
689
+ f"[bold cyan]Preparing files for Hugging Face deployment ({deployment_type})...[/bold cyan]"
690
+ )
691
+ with tempfile.TemporaryDirectory() as tmpdir:
692
+ staging_dir = Path(tmpdir) / "staging"
693
+ _prepare_staging_directory(
694
+ env_dir,
695
+ env_name,
696
+ staging_dir,
697
+ ignore_patterns=ignore_patterns,
698
+ base_image=base_image,
699
+ enable_interface=enable_interface,
700
+ )
701
+
702
+ # Create/verify space (no-op if exists; needed when pushing to own new repo)
703
+ if not create_pr:
704
+ _create_hf_space(repo_id, api, private=private)
705
+ # When create_pr we rely on upload_folder to create branch and PR
706
+
707
+ # Upload files
708
+ _upload_to_hf_space(
709
+ repo_id,
710
+ staging_dir,
711
+ api,
712
+ private=private,
713
+ create_pr=create_pr,
714
+ ignore_patterns=ignore_patterns,
715
+ )
716
+
717
+ console.print("\n[bold green]✓ Deployment complete![/bold green]")
718
+ console.print(f"Visit your space at: https://huggingface.co/spaces/{repo_id}")
src/core/openenv/cli/commands/serve.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Serve OpenEnv environments locally (TO BE IMPLEMENTED)."""
8
+
9
+ from __future__ import annotations
10
+
11
+ from pathlib import Path
12
+ from typing import Annotated
13
+
14
+ import typer
15
+
16
+ from .._cli_utils import console
17
+
18
+ app = typer.Typer(help="Serve OpenEnv environments locally")
19
+
20
+
21
+ @app.command()
22
+ def serve(
23
+ env_path: Annotated[
24
+ str | None,
25
+ typer.Argument(
26
+ help="Path to the environment directory (default: current directory)"
27
+ ),
28
+ ] = None,
29
+ port: Annotated[
30
+ int,
31
+ typer.Option("--port", "-p", help="Port to serve on"),
32
+ ] = 8000,
33
+ host: Annotated[
34
+ str,
35
+ typer.Option("--host", help="Host to bind to"),
36
+ ] = "0.0.0.0",
37
+ reload: Annotated[
38
+ bool,
39
+ typer.Option("--reload", help="Enable auto-reload on code changes"),
40
+ ] = False,
41
+ ) -> None:
42
+ """
43
+ Serve an OpenEnv environment locally.
44
+
45
+ TODO: This command is currently not implemented and has been deferred for later.
46
+
47
+ Planned functionality:
48
+ - Run environment server locally without Docker
49
+ - Support multiple deployment modes (local, notebook, cluster)
50
+ - Auto-reload for development
51
+ - Integration with environment's [project.scripts] entry point
52
+
53
+ For now, use Docker-based serving:
54
+ 1. Build the environment: openenv build
55
+ 2. Run the container: docker run -p 8000:8000 <image-name>
56
+
57
+ Or use uv directly:
58
+ uv run --project . server --port 8000
59
+ """
60
+ console.print("[bold yellow]⚠ This command is not yet implemented[/bold yellow]\n")
61
+
62
+ console.print(
63
+ "The [bold cyan]openenv serve[/bold cyan] command has been deferred for later."
64
+ )
65
+
66
+ console.print("[bold]Alternative approaches:[/bold]\n")
67
+
68
+ console.print("[cyan]Option 1: Docker-based serving (recommended)[/cyan]")
69
+ console.print(" 1. Build the environment:")
70
+ console.print(" [dim]$ openenv build[/dim]")
71
+ console.print(" 2. Run the Docker container:")
72
+ console.print(
73
+ f" [dim]$ docker run -p {port}:{port} openenv-<env-name>:latest[/dim]\n"
74
+ )
75
+
76
+ console.print("[cyan]Option 2: Direct execution with uv[/cyan]")
77
+
78
+ # Determine environment path
79
+ if env_path is None:
80
+ env_path_obj = Path.cwd()
81
+ else:
82
+ env_path_obj = Path(env_path)
83
+
84
+ # Check for openenv.yaml
85
+ openenv_yaml = env_path_obj / "openenv.yaml"
86
+ if openenv_yaml.exists():
87
+ console.print(" From your environment directory:")
88
+ console.print(f" [dim]$ cd {env_path_obj}[/dim]")
89
+ console.print(f" [dim]$ uv run --project . server --port {port}[/dim]\n")
90
+ else:
91
+ console.print(" From an environment directory with pyproject.toml:")
92
+ console.print(f" [dim]$ uv run --project . server --port {port}[/dim]\n")
93
+
94
+ raise typer.Exit(0)
src/core/openenv/cli/commands/validate.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ OpenEnv validate command.
9
+
10
+ This module provides the 'openenv validate' command to check if environments
11
+ are properly configured for multi-mode deployment.
12
+ """
13
+
14
+ from pathlib import Path
15
+
16
+ import typer
17
+
18
+ from openenv.cli._validation import (
19
+ format_validation_report,
20
+ get_deployment_modes,
21
+ validate_multi_mode_deployment,
22
+ )
23
+
24
+
25
+ def validate(
26
+ env_path: str | None = typer.Argument(
27
+ None, help="Path to the environment directory (default: current directory)"
28
+ ),
29
+ verbose: bool = typer.Option(
30
+ False, "--verbose", "-v", help="Show detailed information"
31
+ ),
32
+ ) -> None:
33
+ """
34
+ Validate an environment for standardized structure and deployment readiness.
35
+
36
+ This command checks if an environment is properly configured with:
37
+ - Required files (pyproject.toml, openenv.yaml, server/app.py, etc.)
38
+ - Docker deployment support
39
+ - uv run server capability
40
+ - python -m module execution
41
+
42
+ Examples:
43
+ # Validate current directory (recommended)
44
+ $ cd my_env
45
+ $ openenv validate
46
+
47
+ # Validate with detailed output
48
+ $ openenv validate --verbose
49
+
50
+ # Validate specific environment
51
+ $ openenv validate envs/echo_env
52
+ """
53
+ # Determine environment path (default to current directory)
54
+ if env_path is None:
55
+ env_path_obj = Path.cwd()
56
+ else:
57
+ env_path_obj = Path(env_path)
58
+
59
+ if not env_path_obj.exists():
60
+ typer.echo(f"Error: Path does not exist: {env_path_obj}", err=True)
61
+ raise typer.Exit(1)
62
+
63
+ if not env_path_obj.is_dir():
64
+ typer.echo(f"Error: Path is not a directory: {env_path_obj}", err=True)
65
+ raise typer.Exit(1)
66
+
67
+ # Check for openenv.yaml to confirm this is an environment directory
68
+ openenv_yaml = env_path_obj / "openenv.yaml"
69
+ if not openenv_yaml.exists():
70
+ typer.echo(
71
+ f"Error: Not an OpenEnv environment directory (missing openenv.yaml): {env_path_obj}",
72
+ err=True,
73
+ )
74
+ typer.echo(
75
+ "Hint: Run this command from the environment root directory or specify the path",
76
+ err=True,
77
+ )
78
+ raise typer.Exit(1)
79
+
80
+ env_name = env_path_obj.name
81
+ if env_name.endswith("_env"):
82
+ base_name = env_name[:-4]
83
+ else:
84
+ base_name = env_name
85
+
86
+ # Run validation
87
+ is_valid, issues = validate_multi_mode_deployment(env_path_obj)
88
+
89
+ # Show validation report
90
+ report = format_validation_report(base_name, is_valid, issues)
91
+ typer.echo(report)
92
+
93
+ # Show deployment modes if verbose
94
+ if verbose:
95
+ typer.echo("\nSupported deployment modes:")
96
+ modes = get_deployment_modes(env_path_obj)
97
+ for mode, supported in modes.items():
98
+ status = "[YES]" if supported else "[NO]"
99
+ typer.echo(f" {status} {mode}")
100
+
101
+ if is_valid:
102
+ typer.echo("\nUsage examples:")
103
+ typer.echo(f" cd {env_path_obj.name} && uv run server")
104
+ typer.echo(f" cd {env_path_obj.name} && openenv build")
105
+ typer.echo(f" cd {env_path_obj.name} && openenv push")
106
+
107
+ if not is_valid:
108
+ raise typer.Exit(1)
src/core/openenv/cli/templates/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """OpenEnv CLI templates package."""
src/core/openenv/cli/templates/openenv_env/.dockerignore ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .venv
2
+ .git
3
+ .gitignore
4
+ .env
5
+ __pycache__/
6
+ *.pyc
7
+ *.pyo
8
+ *.pyd
9
+ *.pyw
10
+ *.pyz
11
+ *.egg-info/
12
+ .pytest_cache/
13
+ .mypy_cache/
14
+
15
+
src/core/openenv/cli/templates/openenv_env/README.md ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: __ENV_TITLE_NAME__ Environment Server
3
+ emoji: __HF_EMOJI__
4
+ colorFrom: __HF_COLOR_FROM__
5
+ colorTo: __HF_COLOR_TO__
6
+ sdk: docker
7
+ pinned: false
8
+ app_port: 8000
9
+ base_path: /web
10
+ tags:
11
+ - openenv
12
+ ---
13
+
14
+ # __ENV_TITLE_NAME__ Environment
15
+
16
+ A simple test environment that echoes back messages. Perfect for testing the env APIs as well as demonstrating environment usage patterns.
17
+
18
+ ## Quick Start
19
+
20
+ The simplest way to use the __ENV_TITLE_NAME__ environment is through the `__ENV_CLASS_NAME__Env` class:
21
+
22
+ ```python
23
+ from __ENV_NAME__ import __ENV_CLASS_NAME__Action, __ENV_CLASS_NAME__Env
24
+
25
+ try:
26
+ # Create environment from Docker image
27
+ __ENV_NAME__env = __ENV_CLASS_NAME__Env.from_docker_image("__ENV_NAME__-env:latest")
28
+
29
+ # Reset
30
+ result = __ENV_NAME__env.reset()
31
+ print(f"Reset: {result.observation.echoed_message}")
32
+
33
+ # Send multiple messages
34
+ messages = ["Hello, World!", "Testing echo", "Final message"]
35
+
36
+ for msg in messages:
37
+ result = __ENV_NAME__env.step(__ENV_CLASS_NAME__Action(message=msg))
38
+ print(f"Sent: '{msg}'")
39
+ print(f" → Echoed: '{result.observation.echoed_message}'")
40
+ print(f" → Length: {result.observation.message_length}")
41
+ print(f" → Reward: {result.reward}")
42
+
43
+ finally:
44
+ # Always clean up
45
+ __ENV_NAME__env.close()
46
+ ```
47
+
48
+ That's it! The `__ENV_CLASS_NAME__Env.from_docker_image()` method handles:
49
+ - Starting the Docker container
50
+ - Waiting for the server to be ready
51
+ - Connecting to the environment
52
+ - Container cleanup when you call `close()`
53
+
54
+ ## Building the Docker Image
55
+
56
+ Before using the environment, you need to build the Docker image:
57
+
58
+ ```bash
59
+ # From project root
60
+ docker build -t __ENV_NAME__-env:latest -f server/Dockerfile .
61
+ ```
62
+
63
+ ## Deploying to Hugging Face Spaces
64
+
65
+ You can easily deploy your OpenEnv environment to Hugging Face Spaces using the `openenv push` command:
66
+
67
+ ```bash
68
+ # From the environment directory (where openenv.yaml is located)
69
+ openenv push
70
+
71
+ # Or specify options
72
+ openenv push --repo-id my-org/my-env --private
73
+ ```
74
+
75
+ The `openenv push` command will:
76
+ 1. Validate that the directory is an OpenEnv environment (checks for `openenv.yaml`)
77
+ 2. Prepare a custom build for Hugging Face Docker space (enables web interface)
78
+ 3. Upload to Hugging Face (ensuring you're logged in)
79
+
80
+ ### Prerequisites
81
+
82
+ - Authenticate with Hugging Face: The command will prompt for login if not already authenticated
83
+
84
+ ### Options
85
+
86
+ - `directory` (positional): Directory containing the OpenEnv environment (defaults to current directory)
87
+ - `--repo-id`, `-r`: Repository ID in format 'username/repo-name' (defaults to 'username/env-name' from openenv.yaml)
88
+ - `--base-image`, `-b`: Base Docker image to use (overrides Dockerfile FROM)
89
+ - `--private`: Deploy the space as private (default: public)
90
+
91
+ ### Examples
92
+
93
+ ```bash
94
+ # Push to your personal namespace (defaults to username/env-name from openenv.yaml)
95
+ openenv push
96
+
97
+ # Push to a specific repository
98
+ openenv push --repo-id my-org/my-env
99
+
100
+ # Push with a custom base image
101
+ openenv push --base-image ghcr.io/meta-pytorch/openenv-base:latest
102
+
103
+ # Push as a private space
104
+ openenv push --private
105
+
106
+ # Combine options
107
+ openenv push --repo-id my-org/my-env --base-image custom-base:latest --private
108
+ ```
109
+
110
+ After deployment, your space will be available at:
111
+ `https://huggingface.co/spaces/<repo-id>`
112
+
113
+ The deployed space includes:
114
+ - **Web Interface** at `/web` - Interactive UI for exploring the environment
115
+ - **API Documentation** at `/docs` - Full OpenAPI/Swagger interface
116
+ - **Health Check** at `/health` - Container health monitoring
117
+ - **WebSocket** at `/ws` - Persistent session endpoint for low-latency interactions
118
+
119
+ ## Environment Details
120
+
121
+ ### Action
122
+ **__ENV_CLASS_NAME__Action**: Contains a single field
123
+ - `message` (str) - The message to echo back
124
+
125
+ ### Observation
126
+ **__ENV_CLASS_NAME__Observation**: Contains the echo response and metadata
127
+ - `echoed_message` (str) - The message echoed back
128
+ - `message_length` (int) - Length of the message
129
+ - `reward` (float) - Reward based on message length (length × 0.1)
130
+ - `done` (bool) - Always False for echo environment
131
+ - `metadata` (dict) - Additional info like step count
132
+
133
+ ### Reward
134
+ The reward is calculated as: `message_length × 0.1`
135
+ - "Hi" → reward: 0.2
136
+ - "Hello, World!" → reward: 1.3
137
+ - Empty message → reward: 0.0
138
+
139
+ ## Advanced Usage
140
+
141
+ ### Connecting to an Existing Server
142
+
143
+ If you already have a __ENV_TITLE_NAME__ environment server running, you can connect directly:
144
+
145
+ ```python
146
+ from __ENV_NAME__ import __ENV_CLASS_NAME__Env
147
+
148
+ # Connect to existing server
149
+ __ENV_NAME__env = __ENV_CLASS_NAME__Env(base_url="<ENV_HTTP_URL_HERE>")
150
+
151
+ # Use as normal
152
+ result = __ENV_NAME__env.reset()
153
+ result = __ENV_NAME__env.step(__ENV_CLASS_NAME__Action(message="Hello!"))
154
+ ```
155
+
156
+ Note: When connecting to an existing server, `__ENV_NAME__env.close()` will NOT stop the server.
157
+
158
+ ### Using the Context Manager
159
+
160
+ The client supports context manager usage for automatic connection management:
161
+
162
+ ```python
163
+ from __ENV_NAME__ import __ENV_CLASS_NAME__Action, __ENV_CLASS_NAME__Env
164
+
165
+ # Connect with context manager (auto-connects and closes)
166
+ with __ENV_CLASS_NAME__Env(base_url="http://localhost:8000") as env:
167
+ result = env.reset()
168
+ print(f"Reset: {result.observation.echoed_message}")
169
+ # Multiple steps with low latency
170
+ for msg in ["Hello", "World", "!"]:
171
+ result = env.step(__ENV_CLASS_NAME__Action(message=msg))
172
+ print(f"Echoed: {result.observation.echoed_message}")
173
+ ```
174
+
175
+ The client uses WebSocket connections for:
176
+ - **Lower latency**: No HTTP connection overhead per request
177
+ - **Persistent session**: Server maintains your environment state
178
+ - **Efficient for episodes**: Better for many sequential steps
179
+
180
+ ### Concurrent WebSocket Sessions
181
+
182
+ The server supports multiple concurrent WebSocket connections. To enable this,
183
+ modify `server/app.py` to use factory mode:
184
+
185
+ ```python
186
+ # In server/app.py - use factory mode for concurrent sessions
187
+ app = create_app(
188
+ __ENV_CLASS_NAME__Environment, # Pass class, not instance
189
+ __ENV_CLASS_NAME__Action,
190
+ __ENV_CLASS_NAME__Observation,
191
+ max_concurrent_envs=4, # Allow 4 concurrent sessions
192
+ )
193
+ ```
194
+
195
+ Then multiple clients can connect simultaneously:
196
+
197
+ ```python
198
+ from __ENV_NAME__ import __ENV_CLASS_NAME__Action, __ENV_CLASS_NAME__Env
199
+ from concurrent.futures import ThreadPoolExecutor
200
+
201
+ def run_episode(client_id: int):
202
+ with __ENV_CLASS_NAME__Env(base_url="http://localhost:8000") as env:
203
+ result = env.reset()
204
+ for i in range(10):
205
+ result = env.step(__ENV_CLASS_NAME__Action(message=f"Client {client_id}, step {i}"))
206
+ return client_id, result.observation.message_length
207
+
208
+ # Run 4 episodes concurrently
209
+ with ThreadPoolExecutor(max_workers=4) as executor:
210
+ results = list(executor.map(run_episode, range(4)))
211
+ ```
212
+
213
+ ## Development & Testing
214
+
215
+ ### Direct Environment Testing
216
+
217
+ Test the environment logic directly without starting the HTTP server:
218
+
219
+ ```bash
220
+ # From the server directory
221
+ python3 server/__ENV_NAME___environment.py
222
+ ```
223
+
224
+ This verifies that:
225
+ - Environment resets correctly
226
+ - Step executes actions properly
227
+ - State tracking works
228
+ - Rewards are calculated correctly
229
+
230
+ ### Running Locally
231
+
232
+ Run the server locally for development:
233
+
234
+ ```bash
235
+ uvicorn server.app:app --reload
236
+ ```
237
+
238
+ ## Project Structure
239
+
240
+ ```
241
+ __ENV_NAME__/
242
+ ├── .dockerignore # Docker build exclusions
243
+ ├── __init__.py # Module exports
244
+ ├── README.md # This file
245
+ ├── openenv.yaml # OpenEnv manifest
246
+ ├── pyproject.toml # Project metadata and dependencies
247
+ ├── uv.lock # Locked dependencies (generated)
248
+ ├── client.py # __ENV_CLASS_NAME__Env client
249
+ ├── models.py # Action and Observation models
250
+ └── server/
251
+ ├── __init__.py # Server module exports
252
+ ├── __ENV_NAME___environment.py # Core environment logic
253
+ ├── app.py # FastAPI application (HTTP + WebSocket endpoints)
254
+ └── Dockerfile # Container image definition
255
+ ```
src/core/openenv/cli/templates/openenv_env/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """__ENV_TITLE_NAME__ Environment."""
8
+
9
+ from .client import __ENV_CLASS_NAME__Env
10
+ from .models import __ENV_CLASS_NAME__Action, __ENV_CLASS_NAME__Observation
11
+
12
+ __all__ = [
13
+ "__ENV_CLASS_NAME__Action",
14
+ "__ENV_CLASS_NAME__Observation",
15
+ "__ENV_CLASS_NAME__Env",
16
+ ]
src/core/openenv/cli/templates/openenv_env/client.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """__ENV_TITLE_NAME__ Environment Client."""
8
+
9
+ from typing import Dict
10
+
11
+ from openenv.core.client_types import StepResult
12
+ from openenv.core.env_server.types import State
13
+ from openenv.core import EnvClient
14
+
15
+ from .models import __ENV_CLASS_NAME__Action, __ENV_CLASS_NAME__Observation
16
+
17
+
18
+ class __ENV_CLASS_NAME__Env(
19
+ EnvClient[__ENV_CLASS_NAME__Action, __ENV_CLASS_NAME__Observation]
20
+ ):
21
+ """
22
+ Client for the __ENV_TITLE_NAME__ Environment.
23
+
24
+ This client maintains a persistent WebSocket connection to the environment server,
25
+ enabling efficient multi-step interactions with lower latency.
26
+ Each client instance has its own dedicated environment session on the server.
27
+
28
+ Example:
29
+ >>> # Connect to a running server
30
+ >>> with __ENV_CLASS_NAME__Env(base_url="http://localhost:8000") as client:
31
+ ... result = client.reset()
32
+ ... print(result.observation.echoed_message)
33
+ ...
34
+ ... result = client.step(__ENV_CLASS_NAME__Action(message="Hello!"))
35
+ ... print(result.observation.echoed_message)
36
+
37
+ Example with Docker:
38
+ >>> # Automatically start container and connect
39
+ >>> client = __ENV_CLASS_NAME__Env.from_docker_image("__ENV_NAME__-env:latest")
40
+ >>> try:
41
+ ... result = client.reset()
42
+ ... result = client.step(__ENV_CLASS_NAME__Action(message="Test"))
43
+ ... finally:
44
+ ... client.close()
45
+ """
46
+
47
+ def _step_payload(self, action: __ENV_CLASS_NAME__Action) -> Dict:
48
+ """
49
+ Convert __ENV_CLASS_NAME__Action to JSON payload for step message.
50
+
51
+ Args:
52
+ action: __ENV_CLASS_NAME__Action instance
53
+
54
+ Returns:
55
+ Dictionary representation suitable for JSON encoding
56
+ """
57
+ return {
58
+ "message": action.message,
59
+ }
60
+
61
+ def _parse_result(self, payload: Dict) -> StepResult[__ENV_CLASS_NAME__Observation]:
62
+ """
63
+ Parse server response into StepResult[__ENV_CLASS_NAME__Observation].
64
+
65
+ Args:
66
+ payload: JSON response data from server
67
+
68
+ Returns:
69
+ StepResult with __ENV_CLASS_NAME__Observation
70
+ """
71
+ obs_data = payload.get("observation", {})
72
+ observation = __ENV_CLASS_NAME__Observation(
73
+ echoed_message=obs_data.get("echoed_message", ""),
74
+ message_length=obs_data.get("message_length", 0),
75
+ done=payload.get("done", False),
76
+ reward=payload.get("reward"),
77
+ metadata=obs_data.get("metadata", {}),
78
+ )
79
+
80
+ return StepResult(
81
+ observation=observation,
82
+ reward=payload.get("reward"),
83
+ done=payload.get("done", False),
84
+ )
85
+
86
+ def _parse_state(self, payload: Dict) -> State:
87
+ """
88
+ Parse server response into State object.
89
+
90
+ Args:
91
+ payload: JSON response from state request
92
+
93
+ Returns:
94
+ State object with episode_id and step_count
95
+ """
96
+ return State(
97
+ episode_id=payload.get("episode_id"),
98
+ step_count=payload.get("step_count", 0),
99
+ )
src/core/openenv/cli/templates/openenv_env/models.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ Data models for the __ENV_TITLE_NAME__ Environment.
9
+
10
+ The __ENV_NAME__ environment is a simple test environment that echoes back messages.
11
+ """
12
+
13
+ from pydantic import Field
14
+
15
+ from openenv.core.env_server.types import Action, Observation
16
+
17
+
18
+ class __ENV_CLASS_NAME__Action(Action):
19
+ """Action for the __ENV_TITLE_NAME__ environment - just a message to echo."""
20
+
21
+ message: str = Field(..., description="Message to echo back")
22
+
23
+
24
+ class __ENV_CLASS_NAME__Observation(Observation):
25
+ """Observation from the __ENV_TITLE_NAME__ environment - the echoed message."""
26
+
27
+ echoed_message: str = Field(default="", description="The echoed message")
28
+ message_length: int = Field(default=0, description="Length of the echoed message")
src/core/openenv/cli/templates/openenv_env/openenv.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ spec_version: 1
2
+ name: __ENV_NAME__
3
+ type: space
4
+ runtime: fastapi
5
+ app: server.app:app
6
+ port: 8000
7
+
src/core/openenv/cli/templates/openenv_env/pyproject.toml ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ [build-system]
8
+ requires = ["setuptools>=45", "wheel"]
9
+ build-backend = "setuptools.build_meta"
10
+
11
+ [project]
12
+ name = "openenv-__ENV_NAME__"
13
+ version = "0.1.0"
14
+ description = "__ENV_TITLE_NAME__ environment for OpenEnv"
15
+ requires-python = ">=3.10"
16
+ dependencies = [
17
+ # Core OpenEnv runtime (provides FastAPI server + HTTP client types)
18
+ # install from github
19
+ # "openenv-core[core] @ git+https://github.com/meta-pytorch/OpenEnv.git",
20
+ "openenv-core[core]>=0.2.1",
21
+ # Environment-specific dependencies
22
+ # Add all dependencies needed for your environment here
23
+ # Examples:
24
+ # "numpy>=1.19.0",
25
+ # "torch>=2.0.0",
26
+ # "gymnasium>=0.29.0",
27
+ # "openspiel>=1.0.0",
28
+ # "smolagents>=1.22.0,<2",
29
+ ]
30
+
31
+ [project.optional-dependencies]
32
+ dev = [
33
+ "pytest>=8.0.0",
34
+ "pytest-cov>=4.0.0",
35
+ ]
36
+
37
+ [project.scripts]
38
+ # Server entry point - enables running via: uv run --project . server
39
+ # or: python -m __ENV_NAME__.server.app
40
+ server = "__ENV_NAME__.server.app:main"
41
+
42
+ [tool.setuptools]
43
+ include-package-data = true
44
+ packages = ["__ENV_NAME__", "__ENV_NAME__.server"]
45
+ package-dir = { "__ENV_NAME__" = ".", "__ENV_NAME__.server" = "server" }
src/core/openenv/cli/templates/openenv_env/server/Dockerfile ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # Multi-stage build using openenv-base
8
+ # This Dockerfile is flexible and works for both:
9
+ # - In-repo environments (with local OpenEnv sources)
10
+ # - Standalone environments (with openenv from PyPI/Git)
11
+ # The build script (openenv build) handles context detection and sets appropriate build args.
12
+
13
+ ARG BASE_IMAGE=ghcr.io/meta-pytorch/openenv-base:latest
14
+ FROM ${BASE_IMAGE} AS builder
15
+
16
+ WORKDIR /app
17
+
18
+ # Ensure git is available (required for installing dependencies from VCS)
19
+ RUN apt-get update && \
20
+ apt-get install -y --no-install-recommends git && \
21
+ rm -rf /var/lib/apt/lists/*
22
+
23
+ # Build argument to control whether we're building standalone or in-repo
24
+ ARG BUILD_MODE=in-repo
25
+ ARG ENV_NAME=__ENV_NAME__
26
+
27
+ # Copy environment code (always at root of build context)
28
+ COPY . /app/env
29
+
30
+ # For in-repo builds, openenv is already vendored in the build context
31
+ # For standalone builds, openenv will be installed via pyproject.toml
32
+ WORKDIR /app/env
33
+
34
+ # Ensure uv is available (for local builds where base image lacks it)
35
+ RUN if ! command -v uv >/dev/null 2>&1; then \
36
+ curl -LsSf https://astral.sh/uv/install.sh | sh && \
37
+ mv /root/.local/bin/uv /usr/local/bin/uv && \
38
+ mv /root/.local/bin/uvx /usr/local/bin/uvx; \
39
+ fi
40
+
41
+ # Install dependencies using uv sync
42
+ # If uv.lock exists, use it; otherwise resolve on the fly
43
+ RUN --mount=type=cache,target=/root/.cache/uv \
44
+ if [ -f uv.lock ]; then \
45
+ uv sync --frozen --no-install-project --no-editable; \
46
+ else \
47
+ uv sync --no-install-project --no-editable; \
48
+ fi
49
+
50
+ RUN --mount=type=cache,target=/root/.cache/uv \
51
+ if [ -f uv.lock ]; then \
52
+ uv sync --frozen --no-editable; \
53
+ else \
54
+ uv sync --no-editable; \
55
+ fi
56
+
57
+ # Final runtime stage
58
+ FROM ${BASE_IMAGE}
59
+
60
+ WORKDIR /app
61
+
62
+ # Copy the virtual environment from builder
63
+ COPY --from=builder /app/env/.venv /app/.venv
64
+
65
+ # Copy the environment code
66
+ COPY --from=builder /app/env /app/env
67
+
68
+ # Set PATH to use the virtual environment
69
+ ENV PATH="/app/.venv/bin:$PATH"
70
+
71
+ # Set PYTHONPATH so imports work correctly
72
+ ENV PYTHONPATH="/app/env:$PYTHONPATH"
73
+
74
+ # Health check
75
+ HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
76
+ CMD curl -f http://localhost:8000/health || exit 1
77
+
78
+ # Run the FastAPI server
79
+ # The module path is constructed to work with the /app/env structure
80
+ CMD ["sh", "-c", "cd /app/env && uvicorn server.app:app --host 0.0.0.0 --port 8000"]
src/core/openenv/cli/templates/openenv_env/server/__ENV_NAME___environment.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ __ENV_TITLE_NAME__ Environment Implementation.
9
+
10
+ A simple test environment that echoes back messages sent to it.
11
+ Perfect for testing HTTP server infrastructure.
12
+ """
13
+
14
+ from uuid import uuid4
15
+
16
+ from openenv.core.env_server.interfaces import Environment
17
+ from openenv.core.env_server.types import State
18
+
19
+ from models import __ENV_CLASS_NAME__Action, __ENV_CLASS_NAME__Observation
20
+
21
+
22
+ class __ENV_CLASS_NAME__Environment(Environment):
23
+ """
24
+ A simple echo environment that echoes back messages.
25
+
26
+ This environment is designed for testing the HTTP server infrastructure.
27
+ It maintains minimal state and simply echoes back whatever message it receives.
28
+
29
+ Example:
30
+ >>> env = __ENV_CLASS_NAME__Environment()
31
+ >>> obs = env.reset()
32
+ >>> print(obs.echoed_message) # "__ENV_TITLE_NAME__ environment ready!"
33
+ >>>
34
+ >>> obs = env.step(__ENV_CLASS_NAME__Action(message="Hello"))
35
+ >>> print(obs.echoed_message) # "Hello"
36
+ >>> print(obs.message_length) # 5
37
+ """
38
+
39
+ # Enable concurrent WebSocket sessions.
40
+ # Set to True if your environment isolates state between instances.
41
+ # When True, multiple WebSocket clients can connect simultaneously, each
42
+ # getting their own environment instance (when using factory mode in app.py).
43
+ SUPPORTS_CONCURRENT_SESSIONS: bool = True
44
+
45
+ def __init__(self):
46
+ """Initialize the __ENV_NAME__ environment."""
47
+ self._state = State(episode_id=str(uuid4()), step_count=0)
48
+ self._reset_count = 0
49
+
50
+ def reset(self) -> __ENV_CLASS_NAME__Observation:
51
+ """
52
+ Reset the environment.
53
+
54
+ Returns:
55
+ __ENV_CLASS_NAME__Observation with a ready message
56
+ """
57
+ self._state = State(episode_id=str(uuid4()), step_count=0)
58
+ self._reset_count += 1
59
+
60
+ return __ENV_CLASS_NAME__Observation(
61
+ echoed_message="__ENV_TITLE_NAME__ environment ready!",
62
+ message_length=0,
63
+ done=False,
64
+ reward=0.0,
65
+ )
66
+
67
+ def step(self, action: __ENV_CLASS_NAME__Action) -> __ENV_CLASS_NAME__Observation: # type: ignore[override]
68
+ """
69
+ Execute a step in the environment by echoing the message.
70
+
71
+ Args:
72
+ action: __ENV_CLASS_NAME__Action containing the message to echo
73
+
74
+ Returns:
75
+ __ENV_CLASS_NAME__Observation with the echoed message and its length
76
+ """
77
+ self._state.step_count += 1
78
+
79
+ message = action.message
80
+ length = len(message)
81
+
82
+ # Simple reward: longer messages get higher rewards
83
+ reward = length * 0.1
84
+
85
+ return __ENV_CLASS_NAME__Observation(
86
+ echoed_message=message,
87
+ message_length=length,
88
+ done=False,
89
+ reward=reward,
90
+ metadata={"original_message": message, "step": self._state.step_count},
91
+ )
92
+
93
+ @property
94
+ def state(self) -> State:
95
+ """
96
+ Get the current environment state.
97
+
98
+ Returns:
99
+ Current State with episode_id and step_count
100
+ """
101
+ return self._state
src/core/openenv/cli/templates/openenv_env/server/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """__ENV_TITLE_NAME__ environment server components."""
8
+
9
+ from .__ENV_NAME___environment import __ENV_CLASS_NAME__Environment
10
+
11
+ __all__ = ["__ENV_CLASS_NAME__Environment"]
src/core/openenv/cli/templates/openenv_env/server/app.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ FastAPI application for the __ENV_TITLE_NAME__ Environment.
9
+
10
+ This module creates an HTTP server that exposes the __ENV_CLASS_NAME__Environment
11
+ over HTTP and WebSocket endpoints, compatible with EnvClient.
12
+
13
+ Endpoints:
14
+ - POST /reset: Reset the environment
15
+ - POST /step: Execute an action
16
+ - GET /state: Get current environment state
17
+ - GET /schema: Get action/observation schemas
18
+ - WS /ws: WebSocket endpoint for persistent sessions
19
+
20
+ Usage:
21
+ # Development (with auto-reload):
22
+ uvicorn server.app:app --reload --host 0.0.0.0 --port 8000
23
+
24
+ # Production:
25
+ uvicorn server.app:app --host 0.0.0.0 --port 8000 --workers 4
26
+
27
+ # Or run directly:
28
+ python -m server.app
29
+ """
30
+
31
+ try:
32
+ from openenv.core.env_server.http_server import create_app
33
+ except Exception as e: # pragma: no cover
34
+ raise ImportError(
35
+ "openenv is required for the web interface. Install dependencies with '\n uv sync\n'"
36
+ ) from e
37
+
38
+ # Import from local models.py (PYTHONPATH includes /app/env in Docker)
39
+ from models import __ENV_CLASS_NAME__Action, __ENV_CLASS_NAME__Observation
40
+ from .__ENV_NAME___environment import __ENV_CLASS_NAME__Environment
41
+
42
+
43
+ # Create the app with web interface and README integration
44
+ app = create_app(
45
+ __ENV_CLASS_NAME__Environment,
46
+ __ENV_CLASS_NAME__Action,
47
+ __ENV_CLASS_NAME__Observation,
48
+ env_name="__ENV_NAME__",
49
+ max_concurrent_envs=1, # increase this number to allow more concurrent WebSocket sessions
50
+ )
51
+
52
+
53
+ def main(host: str = "0.0.0.0", port: int = 8000):
54
+ """
55
+ Entry point for direct execution via uv run or python -m.
56
+
57
+ This function enables running the server without Docker:
58
+ uv run --project . server
59
+ uv run --project . server --port 8001
60
+ python -m __ENV_NAME__.server.app
61
+
62
+ Args:
63
+ host: Host address to bind to (default: "0.0.0.0")
64
+ port: Port number to listen on (default: 8000)
65
+
66
+ For production deployments, consider using uvicorn directly with
67
+ multiple workers:
68
+ uvicorn __ENV_NAME__.server.app:app --workers 4
69
+ """
70
+ import uvicorn
71
+
72
+ uvicorn.run(app, host=host, port=port)
73
+
74
+
75
+ if __name__ == "__main__":
76
+ import argparse
77
+
78
+ parser = argparse.ArgumentParser()
79
+ parser.add_argument("--port", type=int, default=8000)
80
+ args = parser.parse_args()
81
+ main(port=args.port)
src/core/openenv/cli/templates/openenv_env/server/requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ openenv[core]>=0.2.0
2
+ fastapi>=0.115.0
3
+ uvicorn>=0.24.0
4
+
5
+
6
+
src/core/openenv/core/README.md ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # <img width="35" height="35" alt="image" src="https://github.com/user-attachments/assets/2700a971-e5d6-4036-b03f-2f89c9791609" /> OpenEnv: Agentic Execution Environments
2
+
3
+ An end-to-end framework for creating, deploying, and using isolated execution environments for agentic RL training. OpenEnv provides a standard for interacting with these environments through simple, Gymnasium-style APIs — step(), reset(), and state() — which users can call from their RL training loops.
4
+
5
+ In addition to serving researchers and RL framework authors, we provide tooling for environment creators, making it easier to build richer environments and expose them over familiar protocols like HTTP, packaged with canonical technologies like Docker. Environment creators can use the OpenEnv framework to build environments that are isolated, secure, and easy to deploy and use.
6
+
7
+
8
+ ## Overview
9
+ `openenv.core` provides the foundational building blocks for creating and interacting with containerized environments over HTTP. It enables you to build agent environments that can be deployed as Docker containers and accessed via a simple HTTP API.
10
+
11
+ > ⚠️ **Early Development Warning** OpenEnv is currently in an experimental
12
+ > stage. You should expect bugs, incomplete features, and APIs that may change
13
+ > in future versions. The project welcomes bugfixes, but to make sure things are
14
+ > well coordinated you should discuss any significant change before starting the
15
+ > work. It's recommended that you signal your intention to contribute in the
16
+ > issue tracker, either by filing a new issue or by claiming an existing one.
17
+
18
+
19
+ # OpenEnv Core
20
+
21
+ Core components for OpenEnv - a framework for building HTTP-based agentic environments.
22
+
23
+ ## Features
24
+
25
+ - **EnvClient**: Async-first client for interacting with remote environments
26
+ - **SyncEnvClient**: Synchronous wrapper via `.sync()` for sync codebases
27
+ - **HTTPEnvServer**: FastAPI-based server wrapper for exposing environments over HTTP/WebSocket
28
+ - **Container Providers**: Pluggable architecture for running containers (Docker, Kubernetes, etc.)
29
+ - **Type System**: Strongly-typed Action/Observation/State interfaces
30
+ - **Web Interface**: Optional web UI for interacting with environments
31
+
32
+ ## Installation
33
+
34
+ ```bash
35
+ pip install "openenv[core]"
36
+ ```
37
+
38
+ For development (editable install from a source checkout):
39
+ ```bash
40
+ pip install -e ".[core]"
41
+ ```
42
+
43
+ ## Quick Start
44
+
45
+ ### Creating an Environment Client
46
+
47
+ EnvClient is **async by default**. Use `async with` and `await` for all operations:
48
+
49
+ ```python
50
+ import asyncio
51
+ from openenv.core import EnvClient, StepResult
52
+ from dataclasses import dataclass
53
+ from typing import Any
54
+
55
+ @dataclass
56
+ class MyAction:
57
+ text: str
58
+
59
+ @dataclass
60
+ class MyObservation:
61
+ response: str
62
+
63
+ class MyEnvClient(EnvClient[MyAction, MyObservation, Any]):
64
+ def _step_payload(self, action: MyAction) -> dict:
65
+ return {"text": action.text}
66
+
67
+ def _parse_result(self, payload: dict) -> StepResult[MyObservation]:
68
+ obs_data = payload["observation"]
69
+ return StepResult(
70
+ observation=MyObservation(**obs_data),
71
+ reward=payload.get("reward"),
72
+ done=payload.get("done", False)
73
+ )
74
+
75
+ def _parse_state(self, payload: dict) -> Any:
76
+ return payload
77
+
78
+ # Async usage (recommended)
79
+ async def main():
80
+ client = await MyEnvClient.from_docker_image("my-env:latest")
81
+ async with client:
82
+ result = await client.reset()
83
+ step_result = await client.step(MyAction(text="hello"))
84
+
85
+ asyncio.run(main())
86
+
87
+ # Sync usage (via .sync() wrapper)
88
+ with MyEnvClient(base_url="http://localhost:8000").sync() as client:
89
+ result = client.reset()
90
+ step_result = client.step(MyAction(text="hello"))
91
+ ```
92
+
93
+ ### Creating an Environment Server
94
+
95
+ ```python
96
+ from openenv.core.env_server import Environment, HTTPEnvServer, create_app
97
+ from dataclasses import dataclass
98
+
99
+ @dataclass
100
+ class MyAction:
101
+ text: str
102
+
103
+ @dataclass
104
+ class MyObservation:
105
+ response: str
106
+ reward: float = 0.0
107
+ done: bool = False
108
+
109
+ class MyEnvironment(Environment):
110
+ def reset(self) -> MyObservation:
111
+ return MyObservation(response="Ready")
112
+
113
+ def step(self, action: MyAction) -> MyObservation:
114
+ return MyObservation(
115
+ response=f"Echo: {action.text}",
116
+ reward=1.0,
117
+ done=False
118
+ )
119
+
120
+ # Create FastAPI app
121
+ env = MyEnvironment()
122
+ app = create_app(env, MyAction, MyObservation)
123
+
124
+ # Run with: uvicorn module:app --host 0.0.0.0 --port 8000
125
+ ```
126
+
127
+ ## Container Providers
128
+
129
+ OpenEnv Core supports multiple container providers:
130
+
131
+ ### Local Docker Provider
132
+
133
+ ```python
134
+ from openenv.core.containers.runtime import LocalDockerProvider
135
+
136
+ provider = LocalDockerProvider()
137
+ base_url = provider.start_container("my-env:latest")
138
+ provider.wait_for_ready(base_url)
139
+ # Use environment...
140
+ provider.stop_container()
141
+ ```
142
+
143
+ ### Kubernetes Provider (Coming Soon)
144
+
145
+ ```python
146
+ from openenv.core.containers.runtime import KubernetesProvider
147
+
148
+ provider = KubernetesProvider(namespace="envs")
149
+ base_url = provider.start_container("my-env:latest")
150
+ # Use environment...
151
+ provider.stop_container()
152
+ ```
153
+
154
+
155
+ ## API Reference
156
+
157
+ ### EnvClient
158
+
159
+ Async base class for environment clients. Key methods:
160
+
161
+ - `async connect()`: Establish WebSocket connection
162
+ - `async reset(**kwargs)`: Reset environment
163
+ - `async step(action)`: Execute action
164
+ - `async state()`: Get current state
165
+ - `async close()`: Close connection and cleanup
166
+ - `sync()`: Return a SyncEnvClient wrapper for synchronous usage
167
+
168
+ Abstract methods to implement:
169
+ - `_step_payload(action)`: Convert action to JSON
170
+ - `_parse_result(payload)`: Parse response to StepResult
171
+ - `_parse_state(payload)`: Parse state response
172
+
173
+ ### SyncEnvClient
174
+
175
+ Synchronous wrapper around EnvClient. Use `client.sync()` to get one:
176
+
177
+ ```python
178
+ sync_client = async_client.sync()
179
+ with sync_client:
180
+ result = sync_client.reset()
181
+ result = sync_client.step(action)
182
+ ```
183
+
184
+ ### HTTPEnvServer
185
+
186
+ Server wrapper with these methods:
187
+
188
+ - `register_routes(app)`: Register endpoints on FastAPI app
189
+ - `_deserialize_action(data)`: Convert JSON to Action
190
+ - `_serialize_observation(obs)`: Convert Observation to JSON
191
+
192
+ ### Environment Interface
193
+
194
+ Base interface for environment implementations:
195
+
196
+ - `reset()`: Reset environment and return initial observation
197
+ - `step(action)`: Execute action and return observation
198
+ - `state`: Property returning current environment state
199
+
200
+ ## License
201
+
202
+ This project is licensed under the BSD-3-Clause License - see the LICENSE file for details.
203
+
204
+ ## Contributing
205
+
206
+ Contributions are welcome! Please see the main OpenEnv repository for contribution guidelines.
207
+
208
+ ## Links
209
+
210
+ - **Homepage**: https://github.com/meta-pytorch/OpenEnv
211
+ - **Documentation**: https://github.com/meta-pytorch/OpenEnv/blob/main/README.md
212
+ - **Bug Tracker**: https://github.com/meta-pytorch/OpenEnv/issues
src/core/openenv/core/__init__.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Core components for agentic environments."""
8
+
9
+ # Re-export main components from submodules for convenience
10
+ from .env_server import * # noqa: F403
11
+ from . import env_server
12
+ from .env_client import EnvClient
13
+ from .sync_client import SyncEnvClient
14
+ from .generic_client import GenericEnvClient, GenericAction
15
+ from .mcp_client import MCPClientBase, MCPToolClient
16
+
17
+ __all__ = [
18
+ "EnvClient",
19
+ "SyncEnvClient",
20
+ "GenericEnvClient",
21
+ "GenericAction",
22
+ "MCPClientBase",
23
+ "MCPToolClient",
24
+ ] + env_server.__all__ # type: ignore
src/core/openenv/core/client_types.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Type definitions for EnvTorch
2
+ from dataclasses import dataclass
3
+ from typing import Generic, Optional, TypeVar
4
+
5
+ # Generic type for observations
6
+ ObsT = TypeVar("ObsT")
7
+ StateT = TypeVar("StateT")
8
+
9
+
10
+ @dataclass
11
+ class StepResult(Generic[ObsT]):
12
+ """
13
+ Represents the result of one environment step.
14
+
15
+ Attributes:
16
+ observation: The environment's observation after the action.
17
+ reward: Scalar reward for this step (optional).
18
+ done: Whether the episode is finished.
19
+ """
20
+
21
+ observation: ObsT
22
+ reward: Optional[float] = None
23
+ done: bool = False
src/core/openenv/core/containers/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Container management for environment servers."""
src/core/openenv/core/containers/images/Dockerfile ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ #
8
+ # OpenEnv Base Image
9
+ #
10
+ # This is the standard base image for all OpenEnv environment servers.
11
+ # It includes the minimal dependencies needed to run HTTP environment servers
12
+ # and uv for fast dependency management.
13
+ #
14
+ # Build from repo root: docker build -t openenv-base:latest -f src/openenv/core/containers/images/Dockerfile .
15
+ # Tag: docker tag openenv-base:latest openenv-base:0.2.0
16
+ #
17
+
18
+ FROM ghcr.io/astral-sh/uv:0.5.27-python3.11-bookworm-slim AS builder
19
+
20
+ # Set working directory
21
+ WORKDIR /app
22
+
23
+ # Copy core pyproject.toml and lockfile for dependency installation
24
+ COPY pyproject.toml uv.lock* ./
25
+
26
+ # Install core dependencies using uv with cache mount
27
+ RUN --mount=type=cache,target=/root/.cache/uv \
28
+ uv pip install --system -r pyproject.toml
29
+
30
+ # Final runtime stage
31
+ FROM python:3.11-slim
32
+
33
+ # Set metadata
34
+ LABEL maintainer="OpenEnv Team"
35
+ LABEL description="Base image for OpenEnv based environment servers with uv"
36
+ LABEL version="0.2.0"
37
+
38
+ # Install system dependencies
39
+ RUN apt-get update && apt-get install -y --no-install-recommends \
40
+ curl \
41
+ ca-certificates \
42
+ && rm -rf /var/lib/apt/lists/*
43
+
44
+ # Copy uv from builder
45
+ COPY --from=builder /usr/local/bin/uv /usr/local/bin/uvx /usr/local/bin/
46
+
47
+ # Copy installed Python packages from builder
48
+ COPY --from=builder /usr/local/lib/python3.11/site-packages /usr/local/lib/python3.11/site-packages
49
+
50
+ # Copy console scripts installed by pip (uvicorn, fastapi, etc.)
51
+ COPY --from=builder /usr/local/bin/uvicorn /usr/local/bin/fastapi /usr/local/bin/
52
+
53
+ # Set working directory
54
+ WORKDIR /app
55
+
56
+ # Default environment variables
57
+ ENV PYTHONPATH=/app/src
58
+ ENV PYTHONUNBUFFERED=1
59
+ ENV UV_SYSTEM_PYTHON=1
60
+
61
+ # Default expose port (can be overridden)
62
+ EXPOSE 8000
63
+
64
+ # Note: CMD should be specified in child Dockerfiles
src/core/openenv/core/containers/images/README.md ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # OpenEnv Base Image
2
+
3
+ Standard base image for all OpenEnv environment servers.
4
+
5
+ ## What's Included
6
+
7
+ | Layer | Size | Contents |
8
+ |-------|------|----------|
9
+ | python:3.11-slim | 200 MB | Base Python runtime |
10
+ | + Dependencies | 100 MB | FastAPI, uvicorn, requests |
11
+ | **Total** | **~300 MB** | Ready for environment servers |
12
+
13
+ ## Image Sizes
14
+
15
+ ### Without Base Images (❌ Problem)
16
+ ```
17
+ openenv-base:latest 300 MB (python + fastapi + uvicorn)
18
+ echo-env:latest 500 MB (python + fastapi + uvicorn + app)
19
+ coding-env:latest 520 MB (python + fastapi + uvicorn + app + tools)
20
+ another-env:latest 510 MB (python + fastapi + uvicorn + app)
21
+ ---
22
+ Total: 1.5 GB (with lots of duplication)
23
+ ```
24
+
25
+ ### With Base Images (✅ Solution)
26
+ ```
27
+ openenv-base:latest 300 MB (python + fastapi + uvicorn)
28
+ echo-env:latest 50 MB (app only, uses base)
29
+ coding-env:latest 70 MB (app + tools, uses base)
30
+ another-env:latest 45 MB (app only, uses base)
31
+ ---
32
+ Total: 465 MB (base shared, minimal duplication)
33
+ ```
34
+
35
+ ## Building the Base Image
36
+
37
+ ```bash
38
+ # From project root
39
+ docker build -t openenv-base:latest -f src/openenv/core/containers/images/Dockerfile .
40
+ ```
41
+
42
+ ## Usage in Environment Dockerfiles
43
+
44
+ Each environment Dockerfile should start with:
45
+
46
+ ```dockerfile
47
+ FROM openenv-base:latest
48
+
49
+ # Copy only environment-specific files
50
+ COPY src/openenv/core/ /app/src/openenv/core/
51
+ COPY envs/my_env/ /app/envs/my_env/
52
+
53
+ # Run the server
54
+ CMD ["uvicorn", "envs.my_env.server.app:app", "--host", "0.0.0.0", "--port", "8000"]
55
+ ```
56
+
57
+ ## Base Image Contents
58
+
59
+ - Python 3.11-slim
60
+ - FastAPI >= 0.104.0
61
+ - Uvicorn >= 0.24.0
62
+ - Requests >= 2.25.0
63
+ - curl (for health checks)
64
+
65
+ ## Example: Building Echo Environment
66
+
67
+ ```bash
68
+ # Step 1: Build base image (do this once)
69
+ docker build -t openenv-base:latest -f src/openenv/core/containers/images/Dockerfile .
70
+
71
+ # Step 2: Build echo environment (uses base)
72
+ docker build -t echo-env:latest -f envs/echo_env/server/Dockerfile .
73
+
74
+ # Step 3: Run echo environment
75
+ docker run -p 8000:8000 echo-env:latest
76
+ ```
77
+
78
+ ## Updating the Base
79
+
80
+ When dependencies need updating:
81
+
82
+ 1. Update `src/openenv/core/containers/images/Dockerfile`
83
+ 2. Rebuild base image
84
+ 3. Rebuild all environment images (they'll use new base)
85
+
86
+ ```bash
87
+ # Update base
88
+ docker build -t openenv-base:latest -f src/openenv/core/containers/images/Dockerfile .
89
+
90
+ # Rebuild environments (they automatically use new base)
91
+ docker build -t echo-env:latest -f envs/echo_env/server/Dockerfile .
92
+ ```
src/core/openenv/core/containers/runtime/__init__.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Container runtime providers."""
8
+
9
+ from .providers import (
10
+ ContainerProvider,
11
+ DockerSwarmProvider,
12
+ KubernetesProvider,
13
+ LocalDockerProvider,
14
+ RuntimeProvider,
15
+ )
16
+ from .uv_provider import UVProvider
17
+
18
+ __all__ = [
19
+ "ContainerProvider",
20
+ "DockerSwarmProvider",
21
+ "LocalDockerProvider",
22
+ "KubernetesProvider",
23
+ "RuntimeProvider",
24
+ "UVProvider",
25
+ ]
src/core/openenv/core/containers/runtime/daytona_provider.py ADDED
@@ -0,0 +1,572 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ Daytona container provider for running OpenEnv environments in Daytona cloud sandboxes.
9
+
10
+ Requires the ``daytona`` SDK: ``pip install daytona>=0.10``
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ import json
16
+ import os
17
+ import shlex
18
+ import time
19
+ from typing import Any, Callable, Dict, Optional
20
+
21
+ import yaml
22
+
23
+ from .providers import ContainerProvider
24
+
25
+
26
+ class DaytonaProvider(ContainerProvider):
27
+ """
28
+ Container provider that runs environments in Daytona cloud sandboxes.
29
+
30
+ Example:
31
+ >>> provider = DaytonaProvider(api_key="your-key")
32
+ >>> image = DaytonaProvider.image_from_dockerfile("envs/echo_env/server/Dockerfile")
33
+ >>> base_url = provider.start_container(image)
34
+ >>> provider.wait_for_ready(base_url)
35
+ >>> provider.stop_container()
36
+ """
37
+
38
+ _dockerfile_registry: Dict[str, Dict[str, Any]] = {}
39
+
40
+ def __init__(
41
+ self,
42
+ *,
43
+ api_key: Optional[str] = None,
44
+ public: bool = False,
45
+ resources: Optional[Any] = None,
46
+ auto_stop_interval: int = 15,
47
+ target: Optional[str] = None,
48
+ on_snapshot_create_logs: Optional[Callable[[str], None]] = None,
49
+ cmd: Optional[str] = None,
50
+ create_timeout: float = 300,
51
+ ):
52
+ """
53
+ Args:
54
+ api_key: Daytona API key. Falls back to ``DAYTONA_API_KEY`` env var.
55
+ public: If True, the sandbox preview is publicly accessible.
56
+ resources: Optional ``daytona.Resources`` instance for CPU/memory.
57
+ auto_stop_interval: Minutes of inactivity before auto-stop (0 disables).
58
+ target: Daytona target region (e.g. "us").
59
+ on_snapshot_create_logs: Callback for snapshot build log lines.
60
+ cmd: Shell command to start the server inside the sandbox.
61
+ create_timeout: Seconds to wait for sandbox creation (default 300).
62
+ Heavy images (e.g. with Playwright/Chromium) may need more.
63
+ """
64
+ from daytona import Daytona, DaytonaConfig
65
+
66
+ config_kwargs: Dict[str, Any] = {}
67
+ resolved_key = api_key or os.environ.get("DAYTONA_API_KEY")
68
+ if resolved_key:
69
+ config_kwargs["api_key"] = resolved_key
70
+ if target:
71
+ config_kwargs["target"] = target
72
+
73
+ self._daytona = Daytona(DaytonaConfig(**config_kwargs))
74
+ self._public = public
75
+ self._resources = resources
76
+ self._auto_stop_interval = auto_stop_interval
77
+ self._on_snapshot_create_logs = on_snapshot_create_logs
78
+ self._cmd = cmd
79
+ self._create_timeout = create_timeout
80
+ self._sandbox: Any = None
81
+ self._preview_url: Optional[str] = None
82
+
83
+ def _discover_server_cmd(self, sandbox: Any, port: int = 8000) -> str:
84
+ """Discover the server command from ``openenv.yaml`` inside *sandbox*.
85
+
86
+ Finds the file, reads the ``app`` field, and constructs a command
87
+ of the form ``cd <env_root> && python -m uvicorn <app> --host 0.0.0.0 --port <port>``.
88
+
89
+ Raises:
90
+ ValueError: If ``openenv.yaml`` is not found or lacks an ``app`` field.
91
+ """
92
+ yaml_path = self._find_openenv_yaml(sandbox)
93
+ if yaml_path is None:
94
+ raise ValueError(
95
+ "Could not find openenv.yaml inside the sandbox. "
96
+ "Pass an explicit cmd= to DaytonaProvider or start_container()."
97
+ )
98
+
99
+ cat_resp = sandbox.process.exec(f"cat {shlex.quote(yaml_path)}", timeout=10)
100
+ content = cat_resp.result if hasattr(cat_resp, "result") else str(cat_resp)
101
+ app = self._parse_app_field(content)
102
+ if app is None:
103
+ raise ValueError(
104
+ f"openenv.yaml at {yaml_path} does not contain an 'app' field. "
105
+ "Pass an explicit cmd= to DaytonaProvider or start_container()."
106
+ )
107
+
108
+ # The directory containing openenv.yaml is the env root
109
+ env_root = yaml_path.rsplit("/", 1)[0]
110
+ return (
111
+ f"cd {shlex.quote(env_root)} && "
112
+ f"python -m uvicorn {shlex.quote(app)} --host 0.0.0.0 --port {port}"
113
+ )
114
+
115
+ def _find_openenv_yaml(self, sandbox: Any) -> Optional[str]:
116
+ """Locate ``openenv.yaml`` inside the sandbox.
117
+
118
+ Tries the modern layout path ``/app/env/openenv.yaml`` first,
119
+ then falls back to a ``find`` command for the old layout.
120
+ """
121
+ # Fast path: modern Dockerfile layout
122
+ resp = sandbox.process.exec(
123
+ "test -f /app/env/openenv.yaml && echo found", timeout=10
124
+ )
125
+ out = resp.result if hasattr(resp, "result") else str(resp)
126
+ if "found" in (out or ""):
127
+ return "/app/env/openenv.yaml"
128
+
129
+ # Fallback: search for it (redirect stderr so error messages
130
+ # like "No such file or directory" don't get mistaken for paths).
131
+ resp = sandbox.process.exec(
132
+ "find /app -maxdepth 4 -name openenv.yaml -print -quit 2>/dev/null",
133
+ timeout=10,
134
+ )
135
+ path = (resp.result if hasattr(resp, "result") else str(resp) or "").strip()
136
+ if path and path.startswith("/"):
137
+ return path
138
+
139
+ return None
140
+
141
+ @staticmethod
142
+ def _parse_app_field(yaml_content: str) -> Optional[str]:
143
+ """Extract the ``app`` value from raw openenv.yaml content.
144
+
145
+ Uses PyYAML to handle comments, quotes, and nested keys correctly.
146
+ """
147
+ try:
148
+ data = yaml.safe_load(yaml_content) or {}
149
+ except Exception:
150
+ return None
151
+
152
+ if not isinstance(data, dict):
153
+ return None
154
+
155
+ value = data.get("app")
156
+ if isinstance(value, str):
157
+ value = value.strip()
158
+ return value if value else None
159
+ return None
160
+
161
+ @staticmethod
162
+ def _parse_dockerfile_cmd(dockerfile_content: str) -> Optional[str]:
163
+ """Extract the server command from the last ``CMD`` in a Dockerfile.
164
+
165
+ Handles exec form (``CMD ["prog", "arg"]``) and shell form
166
+ (``CMD prog arg``). When a Dockerfile has multiple ``CMD``
167
+ instructions (e.g. multi-stage builds), the last one wins - same
168
+ semantics as Docker itself. Lines where ``CMD`` appears inside a
169
+ comment are ignored.
170
+
171
+ Returns:
172
+ The command as a single string, or ``None`` if no ``CMD`` found.
173
+ """
174
+ import re
175
+
176
+ last_cmd: Optional[str] = None
177
+ for line in dockerfile_content.splitlines():
178
+ stripped = line.strip()
179
+ # Skip comments
180
+ if stripped.startswith("#"):
181
+ continue
182
+ match = re.match(r"CMD\s+(.+)", stripped, flags=re.IGNORECASE)
183
+ if match:
184
+ last_cmd = match.group(1).strip()
185
+
186
+ if last_cmd is None:
187
+ return None
188
+
189
+ # Exec form: CMD ["executable", "param1", ...]
190
+ if last_cmd.startswith("["):
191
+ try:
192
+ parts = json.loads(last_cmd)
193
+ if isinstance(parts, list) and all(isinstance(p, str) for p in parts):
194
+ return " ".join(parts)
195
+ except (json.JSONDecodeError, TypeError):
196
+ pass
197
+
198
+ # Shell form: CMD executable param1 ...
199
+ return last_cmd if last_cmd else None
200
+
201
    @staticmethod
    def strip_buildkit_syntax(dockerfile_content: str) -> str:
        """Remove BuildKit ``--mount=...`` flags from ``RUN`` instructions.

        Handles single-line flags, multi-line continuations, and multiple
        ``--mount`` flags spread across continuation lines. Only leading
        ``--mount`` flags are removed (before the actual command starts).

        Daytona's ``Image.from_dockerfile`` does not support BuildKit
        ``--mount`` syntax. This helper strips the flags so that standard
        Dockerfiles (like the ones generated by ``openenv build``) can
        be used directly.

        Args:
            dockerfile_content: Full text of a Dockerfile.

        Returns:
            The same Dockerfile text with leading ``--mount=...`` flags
            removed from each RUN instruction; all other lines unchanged.
        """
        import re

        def strip_leading_mounts(text: str) -> str:
            # Repeatedly strip one leading "--mount=<value>" (plus surrounding
            # whitespace) until the text no longer starts with one.
            remaining = text
            while True:
                match = re.match(r"\s*--mount=\S+\s*", remaining)
                if not match:
                    return remaining
                remaining = remaining[match.end() :]

        lines = dockerfile_content.split("\n")
        result: list[str] = []
        # State machine over lines:
        #   in_run          -> currently inside a (possibly continued) RUN
        #   in_mount_prefix -> still in the flags region before the command
        in_run = False
        in_mount_prefix = False

        for line in lines:
            line_out = line
            run_start = False
            # A new RUN instruction (case-insensitive) resets both states.
            if re.match(r"\s*RUN(\s+|$)", line, flags=re.IGNORECASE):
                in_run = True
                in_mount_prefix = True
                run_start = True

            if in_run and in_mount_prefix:
                # Remember whether this physical line continued onto the next
                # one, so a continuation backslash eaten together with a
                # stripped flag can be restored below.
                original_ends_with_slash = line_out.rstrip().endswith("\\")
                if run_start:
                    # Separate the "RUN " prefix from the rest so only the
                    # remainder is scanned for --mount flags.
                    match = re.match(r"(\s*RUN\s+)(.*)$", line_out, flags=re.IGNORECASE)
                    if match:
                        run_prefix, remainder = match.group(1), match.group(2)
                    else:
                        # Bare "RUN" with no arguments on this line.
                        run_prefix, remainder = line_out, ""
                    new_remainder = strip_leading_mounts(remainder)
                    line_out = run_prefix + new_remainder
                    content_for_check = new_remainder
                else:
                    # Continuation line: the whole line is candidate flags.
                    new_remainder = strip_leading_mounts(line_out)
                    line_out = new_remainder
                    content_for_check = new_remainder

                # Re-append the continuation backslash if stripping removed it.
                if original_ends_with_slash and not line_out.rstrip().endswith("\\"):
                    line_out = line_out.rstrip() + " \\"

                # Once any real command text appears, stop stripping flags
                # for the rest of this RUN instruction.
                if content_for_check.strip() not in ("", "\\"):
                    in_mount_prefix = False

            # A line without a trailing continuation ends the RUN instruction.
            if in_run and not line_out.rstrip().endswith("\\"):
                in_run = False
                in_mount_prefix = False

            result.append(line_out)

        return "\n".join(result)
+ @classmethod
268
+ def image_from_dockerfile(
269
+ cls,
270
+ dockerfile_path: str,
271
+ context_dir: str | None = None,
272
+ ) -> str:
273
+ """Validate a Dockerfile and return a ``dockerfile:`` URI for
274
+ :meth:`start_container`.
275
+
276
+ Eagerly validates the Dockerfile (existence, COPY sources,
277
+ BuildKit stripping) and stores the processed content in an
278
+ internal registry. The actual ``daytona.Image`` is created
279
+ later inside ``start_container``.
280
+
281
+ Args:
282
+ dockerfile_path: Path to the Dockerfile on disk.
283
+ context_dir: Build context directory. Defaults to the
284
+ Dockerfile's grandparent directory, matching the
285
+ ``openenv init`` convention where Dockerfiles live in
286
+ ``<env>/server/Dockerfile`` and the build context is
287
+ ``<env>/``. Pass explicitly for non-standard layouts
288
+ (e.g. ``context_dir="."`` for repo-root contexts).
289
+
290
+ Returns:
291
+ A ``"dockerfile:<abs_path>"`` string to pass to
292
+ ``start_container``.
293
+
294
+ Raises:
295
+ FileNotFoundError: If *dockerfile_path* does not exist.
296
+ ValueError: If *context_dir* is given but does not exist,
297
+ or if COPY sources in the Dockerfile cannot be found
298
+ under the resolved context directory.
299
+ """
300
+ import pathlib
301
+ import re
302
+
303
+ src = pathlib.Path(dockerfile_path).resolve()
304
+ if not src.is_file():
305
+ raise FileNotFoundError(f"Dockerfile not found: {dockerfile_path}")
306
+
307
+ if context_dir is not None:
308
+ ctx = pathlib.Path(context_dir)
309
+ if not ctx.is_dir():
310
+ raise ValueError(f"context_dir does not exist: {context_dir}")
311
+ else:
312
+ # Default: grandparent of the Dockerfile, matching the
313
+ # openenv init layout (<env>/server/Dockerfile -> <env>/).
314
+ ctx = src.parent.parent
315
+
316
+ content = src.read_text()
317
+ stripped = cls.strip_buildkit_syntax(content)
318
+
319
+ # Validate that COPY sources exist under the context directory.
320
+ # This catches mismatches early (e.g. a Dockerfile expecting repo
321
+ # root as context when we defaulted to the env directory).
322
+ for line in stripped.splitlines():
323
+ m = re.match(r"^\s*COPY\s+(?!--from=)(\S+)\s+", line, re.IGNORECASE)
324
+ if not m:
325
+ continue
326
+ copy_src = m.group(1)
327
+ if copy_src.startswith("/"):
328
+ continue
329
+ resolved = ctx / copy_src
330
+ if not resolved.exists() and not any(ctx.glob(copy_src)):
331
+ raise ValueError(
332
+ f"Dockerfile COPY source '{copy_src}' not found "
333
+ f"under context_dir '{ctx}'. This Dockerfile may "
334
+ f"expect a different build context (e.g. the repo "
335
+ f"root). Pass context_dir explicitly."
336
+ )
337
+
338
+ # Parse CMD from the original Dockerfile so start_container can
339
+ # use it as a fallback when openenv.yaml is unavailable.
340
+ parsed_cmd = cls._parse_dockerfile_cmd(content)
341
+
342
+ cls._dockerfile_registry[str(src)] = {
343
+ "stripped_content": stripped,
344
+ "context_dir": str(ctx),
345
+ "server_cmd": parsed_cmd,
346
+ }
347
+
348
+ return f"dockerfile:{src}"
349
+
350
    def start_container(
        self,
        image: str,
        port: Optional[int] = None,
        env_vars: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> str:
        """
        Create a Daytona sandbox from a Docker image or snapshot.

        Daytona does not execute the image's CMD (known bug — ENTRYPOINT
        runs, CMD does not). The server command is resolved in order:

        1. Explicit ``cmd`` passed to the constructor.
        2. ``cmd`` key in ``**kwargs`` (popped before forwarding).
        3. Auto-discovered from ``openenv.yaml`` inside the sandbox.
        4. ``CMD`` parsed from the Dockerfile (when *image* came from
           ``image_from_dockerfile``).

        Args:
            image: Docker image name (e.g. ``"echo-env:latest"``),
                ``"snapshot:<name>"`` to create from a pre-built snapshot,
                or ``"dockerfile:<path>"`` returned by
                :meth:`image_from_dockerfile`.
            port: Must be ``None`` or ``8000``. Daytona exposes port 8000
                via its preview proxy; other ports raise ``ValueError``.
            env_vars: Environment variables forwarded to the sandbox.
            **kwargs: ``cmd`` (str) to override the server command;
                remaining kwargs passed through to ``Daytona.create()``.

        Returns:
            HTTPS preview URL for the sandbox (base_url).

        Raises:
            ValueError: If *port* is not 8000/None, or a ``dockerfile:``
                image was not registered via ``image_from_dockerfile``, or
                no server command could be resolved.
        """
        if port is not None and port != 8000:
            raise ValueError(
                f"DaytonaProvider only supports port 8000 (got {port}). "
                "The Daytona preview proxy routes to port 8000 inside the sandbox."
            )

        # Resolve the server command (may be None; discovery happens after
        # sandbox creation when we can inspect the filesystem).
        cmd = kwargs.pop("cmd", None) or self._cmd

        # CMD parsed from Dockerfile (populated for "dockerfile:" images).
        parsed_cmd: Optional[str] = None

        # Build creation params shared by all image kinds. The
        # auto_stop_interval is only forwarded when it differs from the
        # Daytona default (15), so the SDK default applies otherwise.
        create_kwargs: Dict[str, Any] = {}
        if env_vars:
            create_kwargs["env_vars"] = env_vars
        if self._public:
            create_kwargs["public"] = True
        if self._auto_stop_interval != 15:
            create_kwargs["auto_stop_interval"] = self._auto_stop_interval

        if image.startswith("snapshot:"):
            from daytona import CreateSandboxFromSnapshotParams

            snapshot_name = image[len("snapshot:") :]
            params = CreateSandboxFromSnapshotParams(
                snapshot=snapshot_name, **create_kwargs
            )
        elif image.startswith("dockerfile:"):
            from daytona import CreateSandboxFromImageParams, Image

            dockerfile_path = image[len("dockerfile:") :]
            meta = self._dockerfile_registry.get(dockerfile_path)
            if meta is None:
                raise ValueError(
                    f"No registered Dockerfile metadata for {dockerfile_path}. "
                    "Call DaytonaProvider.image_from_dockerfile() first."
                )

            parsed_cmd = meta.get("server_cmd")

            # Build the daytona Image from the pre-stripped content.
            import pathlib
            import uuid

            # The stripped Dockerfile is written as a throwaway file inside
            # the build context (Image.from_dockerfile needs a real path),
            # then removed even if Image construction fails.
            ctx = pathlib.Path(meta["context_dir"])
            tmp_name = f".daytona-{uuid.uuid4().hex[:8]}.dockerfile"
            tmp_path = ctx / tmp_name
            try:
                tmp_path.write_text(meta["stripped_content"])
                daytona_image = Image.from_dockerfile(str(tmp_path))
            finally:
                tmp_path.unlink(missing_ok=True)

            img_kwargs: Dict[str, Any] = {
                "image": daytona_image,
                **create_kwargs,
            }
            if self._resources is not None:
                img_kwargs["resources"] = self._resources
            params = CreateSandboxFromImageParams(**img_kwargs)
        else:
            from daytona import CreateSandboxFromImageParams

            img_kwargs = {"image": image, **create_kwargs}
            if self._resources is not None:
                img_kwargs["resources"] = self._resources
            params = CreateSandboxFromImageParams(**img_kwargs)

        # Create sandbox; remaining kwargs are forwarded verbatim.
        extra: Dict[str, Any] = dict(kwargs)
        if self._on_snapshot_create_logs is not None:
            extra["on_snapshot_create_logs"] = self._on_snapshot_create_logs

        self._sandbox = self._daytona.create(
            params, timeout=self._create_timeout, **extra
        )

        try:
            # Discover server command from openenv.yaml if not explicitly set.
            if cmd is None:
                try:
                    cmd = self._discover_server_cmd(self._sandbox)
                except ValueError:
                    # Fall back to CMD parsed from Dockerfile (if available).
                    if parsed_cmd:
                        cmd = parsed_cmd
                    else:
                        raise

            # Wrap in bash -c so compound commands (cd ... && uvicorn ...)
            # are handled correctly by nohup. Write PID so we can check
            # if the process crashed later in wait_for_ready().
            escaped_cmd = shlex.quote(cmd)
            self._sandbox.process.exec(
                f"nohup bash -c {escaped_cmd} > /tmp/openenv-server.log 2>&1 &"
                " echo $! > /tmp/openenv-server.pid",
                timeout=10,
            )

            # Get a signed preview URL for port 8000. The token is
            # embedded in the URL itself so no extra headers are needed.
            signed = self._sandbox.create_signed_preview_url(
                8000, expires_in_seconds=86400
            )
            self._preview_url = signed.url
        except Exception:
            # Any failure after the sandbox exists must tear it down so we
            # don't leak billing sandboxes; the original error propagates.
            self.stop_container()
            raise

        return self._preview_url
+ def refresh_preview_url(self) -> str:
497
+ """Get a fresh signed preview URL (valid for 24h).
498
+
499
+ Daytona signed URLs expire after at most 24 hours. Call this to
500
+ get a new one for long-running sessions. The returned URL points
501
+ to the same sandbox — clients will need to reconnect using it.
502
+ """
503
+ if self._sandbox is None:
504
+ raise RuntimeError("No active sandbox to refresh URL for.")
505
+ signed = self._sandbox.create_signed_preview_url(8000, expires_in_seconds=86400)
506
+ self._preview_url = signed.url
507
+ return self._preview_url
508
+
509
+ def stop_container(self) -> None:
510
+ """Delete the Daytona sandbox."""
511
+ if self._sandbox is None:
512
+ return
513
+
514
+ try:
515
+ self._daytona.delete(self._sandbox)
516
+ finally:
517
+ self._sandbox = None
518
+ self._preview_url = None
519
+
520
+ def wait_for_ready(self, base_url: str, timeout_s: float = 120.0) -> None:
521
+ """
522
+ Poll the /health endpoint until the sandbox is ready.
523
+
524
+ Uses a longer default timeout (120s) than Docker providers because
525
+ Daytona sandboxes may have cold-start latency.
526
+
527
+ Args:
528
+ base_url: Preview URL returned by ``start_container()``.
529
+ timeout_s: Maximum seconds to wait.
530
+
531
+ Raises:
532
+ TimeoutError: If the sandbox doesn't become ready in time.
533
+ RuntimeError: If the server process died (detected via PID check).
534
+ """
535
+ import requests
536
+
537
+ health_url = f"{base_url}/health"
538
+
539
+ deadline = time.time() + timeout_s
540
+ while time.time() < deadline:
541
+ try:
542
+ response = requests.get(health_url, timeout=5.0)
543
+ if response.status_code == 200:
544
+ return
545
+ except requests.RequestException:
546
+ pass
547
+
548
+ # Early exit: if the server process died, raise immediately
549
+ # instead of waiting for the full health-check timeout.
550
+ if self._sandbox is not None:
551
+ resp = self._sandbox.process.exec(
552
+ "kill -0 $(cat /tmp/openenv-server.pid) 2>/dev/null"
553
+ " && echo RUNNING || echo DEAD",
554
+ timeout=10,
555
+ )
556
+ out = resp.result if hasattr(resp, "result") else str(resp)
557
+ if "DEAD" in (out or ""):
558
+ log_resp = self._sandbox.process.exec(
559
+ "cat /tmp/openenv-server.log 2>/dev/null", timeout=10
560
+ )
561
+ log = (
562
+ log_resp.result
563
+ if hasattr(log_resp, "result")
564
+ else str(log_resp)
565
+ )
566
+ raise RuntimeError(f"Server process died.\nLog:\n{log}")
567
+
568
+ time.sleep(1.0)
569
+
570
+ raise TimeoutError(
571
+ f"Daytona sandbox at {base_url} did not become ready within {timeout_s}s"
572
+ )
src/core/openenv/core/containers/runtime/providers.py ADDED
@@ -0,0 +1,667 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ Container provider abstractions for running environment servers.
9
+
10
+ This module provides a pluggable architecture for different container providers
11
+ (local Docker, Kubernetes, cloud providers, etc.) to be used with EnvClient.
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ from abc import ABC, abstractmethod
17
+ from typing import Any, Dict, Optional, Sequence
18
+
19
+
20
class ContainerProvider(ABC):
    """
    Abstract interface for pluggable container backends.

    Concrete implementations target different platforms:
    - LocalDockerProvider: local Docker daemon
    - KubernetesProvider: Kubernetes cluster
    - FargateProvider: AWS Fargate
    - CloudRunProvider: Google Cloud Run

    A provider owns the lifecycle of exactly one container and exposes the
    base URL clients use to reach it.

    Example:
        >>> provider = LocalDockerProvider()
        >>> base_url = provider.start_container("echo-env:latest")
        >>> print(base_url)  # http://localhost:8000
        >>> # Use the environment via base_url
        >>> provider.stop_container()
    """

    @abstractmethod
    def start_container(
        self,
        image: str,
        port: Optional[int] = None,
        env_vars: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> str:
        """
        Launch a container from *image*.

        Args:
            image: Container image name (e.g., "echo-env:latest").
            port: Port to expose; the provider picks one when ``None``.
            env_vars: Environment variables to pass to the container.
            **kwargs: Provider-specific options.

        Returns:
            Base URL for the container (e.g., "http://localhost:8000").

        Raises:
            RuntimeError: If the container fails to start.
        """
        ...

    @abstractmethod
    def stop_container(self) -> None:
        """
        Stop and remove the container created by :meth:`start_container`.
        """
        ...

    @abstractmethod
    def wait_for_ready(self, base_url: str, timeout_s: float = 30.0) -> None:
        """
        Block until the container accepts requests.

        Typically implemented by polling the /health endpoint until it
        returns 200.

        Args:
            base_url: Base URL of the container.
            timeout_s: Maximum time to wait.

        Raises:
            TimeoutError: If the container doesn't become ready in time.
        """
        ...
class LocalDockerProvider(ContainerProvider):
    """
    Container provider for local Docker daemon.

    This provider runs containers on the local machine using Docker.
    Useful for development and testing.

    Example:
        >>> provider = LocalDockerProvider()
        >>> base_url = provider.start_container("echo-env:latest")
        >>> # Container running on http://localhost:<random-port>
        >>> provider.stop_container()
    """

    def __init__(self):
        """Initialize the provider and verify the docker CLI is usable.

        Raises:
            RuntimeError: If the docker CLI is missing or unresponsive.
        """
        self._container_id: Optional[str] = None
        self._container_name: Optional[str] = None

        # Check if Docker is available
        import subprocess

        try:
            subprocess.run(
                ["docker", "version"],
                check=True,
                capture_output=True,
                timeout=5,
            )
        except (
            subprocess.CalledProcessError,
            FileNotFoundError,
            subprocess.TimeoutExpired,
        ):
            raise RuntimeError(
                "Docker is not available. Please install Docker Desktop or Docker Engine."
            )

    def start_container(
        self,
        image: str,
        port: Optional[int] = None,
        env_vars: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> str:
        """
        Start a Docker container locally.

        Args:
            image: Docker image name.
            port: Host port to publish (an available one is chosen when None).
            env_vars: Environment variables for the container.
            **kwargs: Accepted for interface compatibility; currently unused.

        Returns:
            Base URL to connect to the container.

        Raises:
            RuntimeError: If ``docker run`` fails.
        """
        import subprocess
        import time

        # Find available port if not specified
        if port is None:
            port = self._find_available_port()

        # Generate container name
        self._container_name = self._generate_container_name(image)

        # Build docker run command; the container always listens on 8000.
        cmd = [
            "docker",
            "run",
            "-d",  # Detached
            "--name",
            self._container_name,
            "-p",
            f"{port}:8000",  # Map port
        ]

        # Add environment variables
        if env_vars:
            for key, value in env_vars.items():
                cmd.extend(["-e", f"{key}={value}"])

        # Add image
        cmd.append(image)

        # Run container
        try:
            result = subprocess.run(cmd, capture_output=True, text=True, check=True)
            self._container_id = result.stdout.strip()
        except subprocess.CalledProcessError as e:
            error_msg = f"Failed to start Docker container.\nCommand: {' '.join(cmd)}\nExit code: {e.returncode}\nStderr: {e.stderr}\nStdout: {e.stdout}"
            raise RuntimeError(error_msg) from e

        # Wait a moment for container to start; callers should still use
        # wait_for_ready() before issuing requests.
        time.sleep(1)

        base_url = f"http://localhost:{port}"
        return base_url

    def stop_container(self) -> None:
        """
        Stop and remove the Docker container (best effort).

        Local bookkeeping is cleared even if the docker commands fail, so
        the provider can be reused.
        """
        if self._container_id is None:
            return

        import subprocess

        try:
            # Stop container
            subprocess.run(
                ["docker", "stop", self._container_id],
                capture_output=True,
                check=True,
                timeout=10,
            )

            # Remove container
            subprocess.run(
                ["docker", "rm", self._container_id],
                capture_output=True,
                check=True,
                timeout=10,
            )
        except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
            # BUGFIX: TimeoutExpired is now swallowed too. Previously a slow
            # daemon raised through this best-effort cleanup even though the
            # finally block had already reset local state.
            pass
        finally:
            self._container_id = None
            self._container_name = None

    def wait_for_ready(self, base_url: str, timeout_s: float = 30.0) -> None:
        """
        Wait for container to be ready by polling /health endpoint.

        Args:
            base_url: Base URL of the container.
            timeout_s: Maximum time to wait.

        Raises:
            TimeoutError: If container doesn't become ready.
        """
        import time
        import requests

        start_time = time.time()
        health_url = f"{base_url}/health"

        # Bypass proxy for localhost to avoid proxy issues
        proxies = {"http": None, "https": None}

        while time.time() - start_time < timeout_s:
            try:
                response = requests.get(health_url, timeout=2.0, proxies=proxies)
                if response.status_code == 200:
                    return
            except requests.RequestException:
                pass

            time.sleep(0.5)

        raise TimeoutError(
            f"Container at {base_url} did not become ready within {timeout_s}s"
        )

    def _find_available_port(self) -> int:
        """
        Find an available port on localhost.

        NOTE: inherently racy — the port may be taken between this probe
        and the ``docker run`` call; acceptable for local development.

        Returns:
            An available port number.
        """
        import socket

        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(("", 0))  # port 0 lets the OS pick a free port
            s.listen(1)
            port = s.getsockname()[1]
        return port

    def _generate_container_name(self, image: str) -> str:
        """
        Generate a unique container name based on image name and timestamp.

        Args:
            image: Docker image name.

        Returns:
            A unique container name, e.g. ``echo-env-1712345678901``.
        """
        import time

        # Strip registry/repo prefix and the tag: "reg/org/img:tag" -> "img".
        clean_image = image.split("/")[-1].split(":")[0]
        timestamp = int(time.time() * 1000)
        return f"{clean_image}-{timestamp}"
+ class DockerSwarmProvider(ContainerProvider):
291
+ """
292
+ Container provider that uses Docker Swarm services for local concurrency.
293
+
294
+ This provider creates a replicated Swarm service backed by the local Docker
295
+ engine. The built-in load-balancer fans requests across the replicas,
296
+ allowing multiple container instances to run concurrently on the developer
297
+ workstation (mirroring the workflow described in the Docker stack docs).
298
+ """
299
+
300
+ def __init__(
301
+ self,
302
+ *,
303
+ auto_init_swarm: bool = True,
304
+ overlay_network: Optional[str] = None,
305
+ ):
306
+ """
307
+ Args:
308
+ auto_init_swarm: Whether to call ``docker swarm init`` when Swarm
309
+ is not active. Otherwise, user must manually initialize Swarm.
310
+ overlay_network: Optional overlay network name for the service.
311
+ When provided, the network is created with
312
+ ``docker network create --driver overlay --attachable`` if it
313
+ does not already exist.
314
+ """
315
+ self._service_name: Optional[str] = None
316
+ self._service_id: Optional[str] = None
317
+ self._published_port: Optional[int] = None
318
+ self._overlay_network = overlay_network
319
+ self._auto_init_swarm = auto_init_swarm
320
+
321
+ self._ensure_docker_available()
322
+ self._ensure_swarm_initialized()
323
+ if self._overlay_network:
324
+ self._ensure_overlay_network(self._overlay_network)
325
+
326
    def start_container(
        self,
        image: str,
        port: Optional[int] = None,
        env_vars: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> str:
        """
        Start (or scale) a Swarm service for the given image.

        The service publishes host *port* to container port 8000 and relies
        on Swarm's ingress load balancer to spread requests across replicas.

        Supported kwargs:
            replicas (int): Number of container replicas (default: 2).
            cpu_limit (float | str): CPU limit passed to ``--limit-cpu``.
            memory_limit (str): Memory limit passed to ``--limit-memory``.
            constraints (Sequence[str]): Placement constraints.
            labels (Dict[str, str]): Service labels.
            command (Sequence[str] | str): Override container command.

        Returns:
            Base URL of the published service (http://localhost:<port>).

        Raises:
            ValueError: On unrecognized kwargs.
            RuntimeError: If ``docker service create`` fails.
        """
        import shlex
        import subprocess
        import time

        # Reject typos early instead of silently ignoring unknown options.
        allowed_kwargs = {
            "replicas",
            "cpu_limit",
            "memory_limit",
            "constraints",
            "labels",
            "command",
        }
        unknown = set(kwargs) - allowed_kwargs
        if unknown:
            raise ValueError(f"Unsupported kwargs for DockerSwarmProvider: {unknown}")

        replicas = int(kwargs.get("replicas", 2))
        cpu_limit = kwargs.get("cpu_limit")
        memory_limit = kwargs.get("memory_limit")
        constraints: Optional[Sequence[str]] = kwargs.get("constraints")
        labels: Optional[Dict[str, str]] = kwargs.get("labels")
        command_override = kwargs.get("command")

        if port is None:
            port = self._find_available_port()

        self._service_name = self._generate_service_name(image)
        self._published_port = port

        # Assemble the docker service create invocation piece by piece;
        # at least one replica is always requested.
        cmd = [
            "docker",
            "service",
            "create",
            "--detach",
            "--name",
            self._service_name,
            "--replicas",
            str(max(1, replicas)),
            "--publish",
            f"{port}:8000",
        ]

        if self._overlay_network:
            cmd.extend(["--network", self._overlay_network])

        if env_vars:
            for key, value in env_vars.items():
                cmd.extend(["--env", f"{key}={value}"])

        if cpu_limit is not None:
            cmd.extend(["--limit-cpu", str(cpu_limit)])

        if memory_limit is not None:
            cmd.extend(["--limit-memory", str(memory_limit)])

        if constraints:
            for constraint in constraints:
                cmd.extend(["--constraint", constraint])

        if labels:
            for key, value in labels.items():
                cmd.extend(["--label", f"{key}={value}"])

        cmd.append(image)

        # The command override must follow the image name in the CLI; a
        # string form is tokenized shell-style.
        if command_override:
            if isinstance(command_override, str):
                cmd.extend(shlex.split(command_override))
            else:
                cmd.extend(command_override)

        try:
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                check=True,
            )
            self._service_id = result.stdout.strip()
        except subprocess.CalledProcessError as e:
            error_msg = (
                "Failed to start Docker Swarm service.\n"
                f"Command: {' '.join(cmd)}\n"
                f"Exit code: {e.returncode}\n"
                f"Stdout: {e.stdout}\n"
                f"Stderr: {e.stderr}"
            )
            raise RuntimeError(error_msg) from e

        # Give Swarm a brief moment to schedule the tasks.
        time.sleep(1.0)

        return f"http://localhost:{port}"
+ def stop_container(self) -> None:
439
+ """
440
+ Remove the Swarm service (and keep the Swarm manager running).
441
+ """
442
+ if not self._service_name:
443
+ return
444
+
445
+ import subprocess
446
+
447
+ try:
448
+ subprocess.run(
449
+ ["docker", "service", "rm", self._service_name],
450
+ capture_output=True,
451
+ check=True,
452
+ timeout=10,
453
+ )
454
+ except subprocess.CalledProcessError:
455
+ # Service may already be gone; ignore.
456
+ pass
457
+ finally:
458
+ self._service_name = None
459
+ self._service_id = None
460
+ self._published_port = None
461
+
462
+ def wait_for_ready(self, base_url: str, timeout_s: float = 30.0) -> None:
463
+ """
464
+ Wait for at least one replica to become healthy by polling /health.
465
+
466
+ Note: With Swarm's load balancer, requests round-robin across replicas,
467
+ so this only verifies that at least one replica is responding. Some
468
+ replicas may still be starting when this returns.
469
+ """
470
+ import time
471
+ import requests
472
+
473
+ deadline = time.time() + timeout_s
474
+ health_url = f"{base_url}/health"
475
+
476
+ # Bypass proxy for localhost to avoid proxy issues
477
+ proxies = {"http": None, "https": None}
478
+
479
+ while time.time() < deadline:
480
+ try:
481
+ response = requests.get(health_url, timeout=2.0, proxies=proxies)
482
+ if response.status_code == 200:
483
+ return
484
+ except requests.RequestException:
485
+ pass
486
+
487
+ time.sleep(0.5)
488
+
489
+ raise TimeoutError(
490
+ f"Swarm service at {base_url} did not become ready within {timeout_s}s"
491
+ )
492
+
493
+ def _ensure_docker_available(self) -> None:
494
+ import subprocess
495
+
496
+ try:
497
+ subprocess.run(
498
+ ["docker", "version"],
499
+ check=True,
500
+ capture_output=True,
501
+ timeout=5,
502
+ )
503
+ except (
504
+ subprocess.CalledProcessError,
505
+ FileNotFoundError,
506
+ subprocess.TimeoutExpired,
507
+ ) as exc:
508
+ raise RuntimeError(
509
+ "Docker is not available. Please install Docker Desktop or Docker Engine."
510
+ ) from exc
511
+
512
+ def _ensure_swarm_initialized(self) -> None:
513
+ import subprocess
514
+
515
+ try:
516
+ result = subprocess.run(
517
+ ["docker", "info", "--format", "{{.Swarm.LocalNodeState}}"],
518
+ capture_output=True,
519
+ text=True,
520
+ check=True,
521
+ timeout=5,
522
+ )
523
+ state = result.stdout.strip().lower()
524
+ if state == "active":
525
+ return
526
+ except subprocess.CalledProcessError:
527
+ state = "unknown"
528
+
529
+ if not self._auto_init_swarm:
530
+ raise RuntimeError(
531
+ f"Docker Swarm is not active (state={state}). Enable Swarm manually or pass auto_init_swarm=True."
532
+ )
533
+
534
+ try:
535
+ subprocess.run(
536
+ ["docker", "swarm", "init"],
537
+ check=True,
538
+ capture_output=True,
539
+ timeout=10,
540
+ )
541
+ except subprocess.CalledProcessError as e:
542
+ raise RuntimeError("Failed to initialize Docker Swarm") from e
543
+
544
+ def _ensure_overlay_network(self, network: str) -> None:
545
+ import subprocess
546
+
547
+ inspect = subprocess.run(
548
+ ["docker", "network", "inspect", network],
549
+ capture_output=True,
550
+ text=True,
551
+ check=False,
552
+ )
553
+ if inspect.returncode == 0:
554
+ return
555
+
556
+ try:
557
+ subprocess.run(
558
+ [
559
+ "docker",
560
+ "network",
561
+ "create",
562
+ "--driver",
563
+ "overlay",
564
+ "--attachable",
565
+ network,
566
+ ],
567
+ check=True,
568
+ capture_output=True,
569
+ timeout=10,
570
+ )
571
+ except subprocess.CalledProcessError as e:
572
+ raise RuntimeError(f"Failed to create overlay network '{network}'") from e
573
+
574
+ def _find_available_port(self) -> int:
575
+ import socket
576
+
577
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
578
+ s.bind(("", 0))
579
+ s.listen(1)
580
+ port = s.getsockname()[1]
581
+ return port
582
+
583
+ def _generate_service_name(self, image: str) -> str:
584
+ import time
585
+
586
+ clean_image = image.split("/")[-1].split(":")[0]
587
+ timestamp = int(time.time() * 1000)
588
+ return f"{clean_image}-swarm-{timestamp}"
589
+
590
+
591
class KubernetesProvider(ContainerProvider):
    """
    Container provider targeting Kubernetes clusters.

    Placeholder: the class body is currently empty. The intent (per the
    example below) is to schedule pods in a cluster and expose them via a
    Service or port-forwarding.

    Example:
        >>> provider = KubernetesProvider(namespace="envtorch-dev")
        >>> base_url = provider.start_container("echo-env:latest")
        >>> # Pod running in k8s, accessible via service or port-forward
        >>> provider.stop_container()
    """

    pass
606
+
607
+
608
class RuntimeProvider(ABC):
    """
    Abstract base class for runtime providers that are not container providers.

    Providers implement this interface to support different runtime platforms:
    - UVProvider: Runs environments via `uv run`

    The provider manages a single runtime lifecycle and provides the base URL
    for connecting to it. Instances are also usable as context managers: the
    runtime is started on ``__enter__`` and stopped on ``__exit__``.

    Example:
        >>> provider = UVProvider(project_path="/path/to/env")
        >>> base_url = provider.start()
        >>> print(base_url)  # http://localhost:8000
        >>> provider.stop()
    """

    @abstractmethod
    def start(
        self,
        port: Optional[int] = None,
        env_vars: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> str:
        """
        Start the runtime.

        Args:
            port: Port to expose (if None, provider chooses)
            env_vars: Environment variables for the runtime
            **kwargs: Additional runtime options

        Returns:
            Base URL of the running server (e.g. ``http://localhost:8000``).
        """
        # NOTE: the previous docstring documented an `image` argument that is
        # not part of this signature (copy-paste from ContainerProvider).

    @abstractmethod
    def stop(self) -> None:
        """Stop the runtime and release any resources it holds."""

    @abstractmethod
    def wait_for_ready(self, timeout_s: float = 30.0) -> None:
        """Block until the runtime accepts requests or ``timeout_s`` elapses."""

    def __enter__(self) -> "RuntimeProvider":
        """Start the runtime on context entry and return self."""
        self.start()
        return self

    def __exit__(self, exc_type, exc, tb) -> None:
        """Stop the runtime on context exit; exceptions are never suppressed."""
        self.stop()
        return False
src/core/openenv/core/containers/runtime/uv_provider.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Providers for launching ASGI applications via ``uv run``."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import os
6
+ import socket
7
+ import subprocess
8
+ import time
9
+ from typing import Dict, Optional
10
+
11
+ import requests
12
+
13
+ from .providers import RuntimeProvider
14
+
15
+
16
+ def _check_uv_installed() -> None:
17
+ try:
18
+ subprocess.check_output(["uv", "--version"])
19
+ except FileNotFoundError as exc:
20
+ raise RuntimeError(
21
+ "`uv` executable not found. Install uv from https://docs.astral.sh and ensure it is on PATH."
22
+ ) from exc
23
+
24
+
25
+ def _find_free_port() -> int:
26
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
27
+ sock.bind(("", 0))
28
+ sock.listen(1)
29
+ return sock.getsockname()[1]
30
+
31
+
32
+ def _create_uv_command(
33
+ *,
34
+ host: str,
35
+ port: int,
36
+ reload: bool,
37
+ workers: int,
38
+ app: str,
39
+ project_path: str,
40
+ ) -> list[str]:
41
+ command: list[str] = ["uv", "run", "--isolated", "--project", project_path]
42
+
43
+ command.append("--")
44
+ command.extend(
45
+ [
46
+ "uvicorn",
47
+ app,
48
+ "--host",
49
+ host,
50
+ "--port",
51
+ str(port),
52
+ "--workers",
53
+ str(workers),
54
+ ]
55
+ )
56
+
57
+ if reload:
58
+ command.append("--reload")
59
+
60
+ return command
61
+
62
+
63
def _poll_health(health_url: str, timeout_s: float) -> None:
    """Poll a health endpoint until it returns HTTP 200 or times out.

    Args:
        health_url: Fully-qualified URL of the health endpoint.
        timeout_s: Overall deadline in seconds.

    Raises:
        TimeoutError: If no HTTP 200 response arrives before the deadline.
    """

    deadline = time.time() + timeout_s
    while time.time() < deadline:
        try:
            # Never let a single request outlive the overall deadline.
            timeout = max(0.0001, min(deadline - time.time(), 2.0))
            response = requests.get(health_url, timeout=timeout)
            if response.status_code == 200:
                return
        except requests.RequestException:
            # Server not up yet (e.g. connection refused) — fall through to
            # the sleep below. The previous version `continue`d here, which
            # skipped the sleep and busy-looped when connections failed
            # instantly.
            pass

        time.sleep(0.5)

    raise TimeoutError(f"Server did not become ready within {timeout_s:.1f} seconds")
79
+
80
+
81
class UVProvider(RuntimeProvider):
    """
    RuntimeProvider implementation backed by ``uv run``.

    Spawns ``uvicorn`` inside an isolated uv project environment as a child
    process and tracks its lifecycle (start / wait_for_ready / stop).

    Args:
        project_path: Local path to a uv project (passed to ``uv run --project``)
        app: ASGI application path for uvicorn (defaults to ``server.app:app``)
        host: Host interface to bind to (defaults to ``0.0.0.0``)
        reload: Whether to enable uvicorn's reload mode
        env_vars: Environment variables to pass through to the spawned process
        context_timeout_s: How long to wait for the environment to become ready

    Example:
        >>> provider = UVProvider(project_path="/path/to/env")
        >>> base_url = provider.start()
        >>> print(base_url)  # http://localhost:8000
        >>> # Use the environment via base_url
        >>> provider.stop()
    """

    def __init__(
        self,
        *,
        project_path: str,
        app: str = "server.app:app",
        host: str = "0.0.0.0",
        reload: bool = False,
        env_vars: Optional[Dict[str, str]] = None,
        context_timeout_s: float = 60.0,
    ):
        """Initialize the UVProvider and verify that ``uv`` is installed.

        Raises:
            RuntimeError: If the ``uv`` executable is not available on PATH.
        """
        self.project_path = os.path.abspath(project_path)
        self.app = app
        self.host = host
        self.reload = reload
        self.env_vars = env_vars
        self.context_timeout_s = context_timeout_s
        # Fail fast at construction time rather than on first start().
        _check_uv_installed()
        # Child process handle; None when not running.
        self._process: Optional[subprocess.Popen] = None
        # Client-facing URL; None until start() succeeds.
        self._base_url: Optional[str] = None

    def start(
        self,
        port: Optional[int] = None,
        env_vars: Optional[Dict[str, str]] = None,
        workers: int = 1,
        **_: Dict[str, str],
    ) -> str:
        """
        Start the environment via `uv run`.

        Args:
            port: The port to bind the environment to (a free port is chosen
                automatically when None)
            env_vars: Environment variables to pass to the environment
                (merged over the constructor's ``env_vars``)
            workers: The number of workers to use

        Returns:
            The base URL of the environment

        Raises:
            RuntimeError: If the environment is already running
        """
        if self._process is not None and self._process.poll() is None:
            raise RuntimeError("UVProvider is already running")

        bind_port = port or _find_free_port()

        command = _create_uv_command(
            host=self.host,
            port=bind_port,
            reload=self.reload,
            workers=workers,
            app=self.app,
            project_path=self.project_path,
        )

        # Inherit the parent environment, then layer constructor-level and
        # call-level overrides (call-level wins).
        env = os.environ.copy()

        if self.env_vars:
            env.update(self.env_vars)
        if env_vars:
            env.update(env_vars)

        try:
            self._process = subprocess.Popen(command, env=env)
        except OSError as exc:
            raise RuntimeError(f"Failed to launch `uv run`: {exc}") from exc

        # The server binds a wildcard address, but clients must dial loopback.
        client_host = "127.0.0.1" if self.host in {"0.0.0.0", "::"} else self.host
        self._base_url = f"http://{client_host}:{bind_port}"
        return self._base_url

    def wait_for_ready(self, timeout_s: float = 60.0) -> None:
        """
        Wait for the environment to become ready.

        Args:
            timeout_s: The timeout to wait for the environment to become ready

        Raises:
            RuntimeError: If the uv process has already exited
            TimeoutError: If the environment does not become ready within the timeout
        """
        # NOTE(review): if start() was never called, self._base_url is None and
        # the polled URL becomes "None/health" — confirm callers always start first.
        if self._process and self._process.poll() is not None:
            code = self._process.returncode
            raise RuntimeError(f"uv process exited prematurely with code {code}")

        _poll_health(f"{self._base_url}/health", timeout_s=timeout_s)

    def stop(self) -> None:
        """
        Stop the environment.

        Terminates the child gracefully, escalating to SIGKILL after 10s.
        No-op if the environment was never started.
        """
        if self._process is None:
            return

        if self._process.poll() is None:
            self._process.terminate()
            try:
                self._process.wait(timeout=10.0)
            except subprocess.TimeoutExpired:
                # Graceful shutdown failed; force-kill and reap.
                self._process.kill()
                self._process.wait(timeout=5.0)

        self._process = None
        self._base_url = None

    @property
    def base_url(self) -> str:
        """
        The base URL of the environment.

        Returns:
            The base URL of the environment

        Raises:
            RuntimeError: If the environment is not running
        """
        if self._base_url is None:
            raise RuntimeError("UVProvider has not been started")
        return self._base_url
src/core/openenv/core/containers/test_local_docker_provider.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ End-to-end test for LocalDockerProvider.
4
+
5
+ This script tests the complete flow:
6
+ 1. Start a container using LocalDockerProvider
7
+ 2. Wait for it to be ready
8
+ 3. Make HTTP requests to test the environment
9
+ 4. Clean up the container
10
+ """
11
+
12
+ import sys
13
+ from pathlib import Path
14
+
15
+ # Add src to path
16
+ sys.path.insert(0, str(Path(__file__).parent.parent.parent))
17
+
18
+ import requests
19
+
20
+ from openenv.core.containers.runtime import LocalDockerProvider
21
+
22
+
23
+ # TODO: Remove this test or make it a functional test sicne this will be tested in e2e test for echo env
24
def test_local_docker_provider():
    """Test LocalDockerProvider end-to-end.

    Requires a local Docker daemon and an ``echo-env:latest`` image.
    Returns True on success, False on failure (manual-test convention,
    not pytest style).
    """
    print("=" * 60)
    print("LocalDockerProvider End-to-End Test")
    print("=" * 60)
    print()

    provider = None

    try:
        # Step 1: Create provider
        print("Step 1: Creating LocalDockerProvider...")
        provider = LocalDockerProvider()
        print("✓ Provider created\n")

        # Step 2: Start container
        print("Step 2: Starting echo-env container...")
        base_url = provider.start_container("echo-env:latest")
        print(f"✓ Container started at: {base_url}")
        # _container_id/_container_name are provider internals, printed for debugging.
        if provider._container_id:
            print(f"  Container ID: {provider._container_id[:12]}...")
        if provider._container_name:
            print(f"  Container name: {provider._container_name}\n")

        # Step 3: Wait for ready
        print("Step 3: Waiting for container to be ready...")
        provider.wait_for_ready(base_url, timeout_s=30.0)
        print("✓ Container is ready!\n")

        # Step 4: Test health endpoint
        print("Step 4: Testing /health endpoint...")
        response = requests.get(f"{base_url}/health")
        print(f"  Status: {response.status_code}")
        print(f"  Response: {response.json()}")
        assert response.status_code == 200
        assert response.json()["status"] == "healthy"
        print("✓ Health check passed\n")

        # Step 5: Test reset endpoint
        print("Step 5: Testing /reset endpoint...")
        response = requests.post(
            f"{base_url}/reset",
            json={},
            headers={"Content-Type": "application/json"},
        )
        print(f"  Status: {response.status_code}")
        data = response.json()
        print(f"  Message: {data['observation']['echoed_message']}")
        print(f"  Reward: {data['reward']}")
        print(f"  Done: {data['done']}")
        assert response.status_code == 200
        assert data["observation"]["echoed_message"] == "Echo environment ready!"
        print("✓ Reset test passed\n")

        # Step 6: Test step endpoint
        print("Step 6: Testing /step endpoint...")
        response = requests.post(
            f"{base_url}/step",
            json={"action": {"message": "Hello from LocalDockerProvider!"}},
            headers={"Content-Type": "application/json"},
        )
        print(f"  Status: {response.status_code}")
        data = response.json()
        print(f"  Echoed: {data['observation']['echoed_message']}")
        print(f"  Length: {data['observation']['message_length']}")
        print(f"  Reward: {data['reward']}")
        assert response.status_code == 200
        assert (
            data["observation"]["echoed_message"] == "Hello from LocalDockerProvider!"
        )
        # len("Hello from LocalDockerProvider!") == 31
        assert data["observation"]["message_length"] == 31
        print("✓ Step test passed\n")

        # Step 7: Test state endpoint
        print("Step 7: Testing /state endpoint...")
        response = requests.get(f"{base_url}/state")
        print(f"  Status: {response.status_code}")
        data = response.json()
        print(f"  Episode ID: {data['episode_id']}")
        print(f"  Step count: {data['step_count']}")
        assert response.status_code == 200
        assert data["step_count"] == 1  # One step from above
        print("✓ State test passed\n")

        # Step 8: Multiple steps
        print("Step 8: Testing multiple steps...")
        for i in range(3):
            response = requests.post(
                f"{base_url}/step",
                json={"action": {"message": f"Message {i + 1}"}},
                headers={"Content-Type": "application/json"},
            )
            assert response.status_code == 200
            print(f"  Step {i + 1}: ✓")

        # Check state updated
        response = requests.get(f"{base_url}/state")
        data = response.json()
        assert data["step_count"] == 4  # 1 + 3 more steps
        print(f"  Final step count: {data['step_count']}")
        print("✓ Multiple steps test passed\n")

        print("=" * 60)
        print("✓ All tests passed!")
        print("=" * 60)
        print()

        return True

    except Exception as e:
        # Broad catch is deliberate: this is a manual script that reports
        # pass/fail rather than crashing.
        print(f"\n❌ Test failed: {e}")
        import traceback

        traceback.print_exc()
        return False

    finally:
        # Step 9: Cleanup — always attempt to remove the container.
        if provider is not None:
            print("\nStep 9: Cleaning up container...")
            try:
                provider.stop_container()
                print("✓ Container stopped and removed\n")
            except Exception as e:
                print(f"⚠️  Cleanup warning: {e}\n")
149
+
150
+
151
def test_provider_with_custom_port():
    """Test provider with custom port.

    Verifies that an explicitly requested host port (8123) appears in the
    returned base URL. Returns True/False (manual-test convention).
    """
    print("=" * 60)
    print("LocalDockerProvider with Custom Port Test")
    print("=" * 60)
    print()

    provider = None

    try:
        provider = LocalDockerProvider()

        print("Starting container on custom port 8123...")
        base_url = provider.start_container("echo-env:latest", port=8123)
        print(f"✓ Started at: {base_url}")
        assert ":8123" in base_url

        print("Waiting for ready...")
        provider.wait_for_ready(base_url)
        print("✓ Ready!")

        print("Testing health...")
        response = requests.get(f"{base_url}/health")
        assert response.status_code == 200
        print("✓ Health check passed")

        print("\n✓ Custom port test passed!\n")
        return True

    except Exception as e:
        # Manual script: report failure instead of propagating.
        print(f"\n❌ Test failed: {e}")
        return False

    finally:
        if provider is not None:
            provider.stop_container()
            print("✓ Cleaned up\n")
188
+
189
+
190
def test_provider_with_env_vars():
    """Test provider with environment variables.

    Only verifies the container starts and responds when env vars are passed;
    it does not assert the variables are visible inside the container.
    Returns True/False (manual-test convention).
    """
    print("=" * 60)
    print("LocalDockerProvider with Environment Variables Test")
    print("=" * 60)
    print()

    provider = None

    try:
        provider = LocalDockerProvider()

        print("Starting container with environment variables...")
        base_url = provider.start_container(
            "echo-env:latest", env_vars={"DEBUG": "true", "LOG_LEVEL": "info"}
        )
        print(f"✓ Started at: {base_url}")

        print("Waiting for ready...")
        provider.wait_for_ready(base_url)
        print("✓ Ready!")

        print("Testing health...")
        response = requests.get(f"{base_url}/health")
        assert response.status_code == 200
        print("✓ Health check passed")

        print("\n✓ Environment variables test passed!\n")
        return True

    except Exception as e:
        # Manual script: report failure instead of propagating.
        print(f"\n❌ Test failed: {e}")
        return False

    finally:
        if provider is not None:
            provider.stop_container()
            print("✓ Cleaned up\n")
228
+
229
+
230
if __name__ == "__main__":
    print()
    print("🐳 LocalDockerProvider Test Suite")
    print()

    # Each entry is (test name, passed?) — tests return booleans.
    results = []

    # Run basic test
    results.append(("Basic End-to-End", test_local_docker_provider()))

    # Run custom port test
    results.append(("Custom Port", test_provider_with_custom_port()))

    # Run environment variables test
    results.append(("Environment Variables", test_provider_with_env_vars()))

    # Summary
    print("=" * 60)
    print("Test Summary")
    print("=" * 60)
    for name, passed in results:
        status = "✓ PASSED" if passed else "✗ FAILED"
        print(f"{name:25} {status}")
    print("=" * 60)

    all_passed = all(result for _, result in results)
    # Use sys.exit instead of the interactive `exit()` builtin: `exit` is
    # injected by the site module and may be absent (e.g. `python -S`).
    if all_passed:
        print("\n🎉 All tests passed!")
        sys.exit(0)
    else:
        print("\n❌ Some tests failed")
        sys.exit(1)
src/core/openenv/core/env_client.py ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ Environment client for persistent sessions.
9
+
10
+ This module provides a WebSocket-based client that maintains a persistent connection
11
+ to an environment server, enabling efficient multi-step interactions without
12
+ the overhead of HTTP request/response cycles.
13
+
14
+ The client is async by default. For synchronous usage, use the `.sync()` method
15
+ to get a `SyncEnvClient` wrapper.
16
+
17
+ Example (async):
18
+ >>> async with GenericEnvClient(base_url="ws://localhost:8000") as env:
19
+ ... result = await env.reset()
20
+ ... result = await env.step({"code": "print('hello')"})
21
+
22
+ Example (sync wrapper):
23
+ >>> env = GenericEnvClient(base_url="ws://localhost:8000").sync()
24
+ >>> with env:
25
+ ... result = env.reset()
26
+ ... result = env.step({"code": "print('hello')"})
27
+ """
28
+
29
+ from __future__ import annotations
30
+
31
+ import asyncio
32
+ import json
33
+ import os
34
+ from abc import ABC, abstractmethod
35
+ from typing import Any, Dict, Generic, Optional, Type, TYPE_CHECKING, TypeVar
36
+
37
+ from .client_types import StepResult, StateT
38
+ from .containers.runtime import LocalDockerProvider, UVProvider
39
+ from .utils import convert_to_ws_url
40
+
41
+ if TYPE_CHECKING:
42
+ from .containers.runtime import ContainerProvider, RuntimeProvider
43
+ from .sync_client import SyncEnvClient
44
+ from websockets.asyncio.client import ClientConnection
45
+
46
+ from websockets.asyncio.client import connect as ws_connect
47
+
48
+ ActT = TypeVar("ActT")
49
+ ObsT = TypeVar("ObsT")
50
+ EnvClientT = TypeVar("EnvClientT", bound="EnvClient")
51
+
52
+
53
+ class EnvClient(ABC, Generic[ActT, ObsT, StateT]):
54
+ """
55
+ Async environment client for persistent sessions.
56
+
57
+ This client maintains a persistent WebSocket connection to an environment
58
+ server, enabling efficient multi-step interactions. Each client instance
59
+ corresponds to a dedicated environment session on the server.
60
+
61
+ The client is async by default. For synchronous usage, use the `.sync()`
62
+ method to get a `SyncEnvClient` wrapper.
63
+
64
+ Features:
65
+ - Lower latency for sequential interactions
66
+ - Session state is maintained server-side
67
+ - Better suited for long-running episodes
68
+ - Async by default for modern Python async/await patterns
69
+
70
+ Example (async):
71
+ >>> from envs.coding_env.client import CodingEnv
72
+ >>>
73
+ >>> # Connect to a server using async context manager
74
+ >>> async with CodingEnv(base_url="ws://localhost:8000") as env:
75
+ ... result = await env.reset(seed=42)
76
+ ... while not result.done:
77
+ ... action = agent.predict(result.observation)
78
+ ... result = await env.step(action)
79
+
80
+ Example (sync wrapper):
81
+ >>> env = CodingEnv(base_url="ws://localhost:8000").sync()
82
+ >>> with env:
83
+ ... result = env.reset(seed=42)
84
+ ... result = env.step(action)
85
+ """
86
+
87
+ def __init__(
88
+ self,
89
+ base_url: str,
90
+ connect_timeout_s: float = 10.0,
91
+ message_timeout_s: float = 60.0,
92
+ max_message_size_mb: float = 100.0,
93
+ provider: Optional["ContainerProvider | RuntimeProvider"] = None,
94
+ mode: Optional[str] = None,
95
+ ):
96
+ """
97
+ Initialize environment client.
98
+
99
+ Args:
100
+ base_url: Base URL of the environment server (http:// or ws://).
101
+ Will be converted to ws:// if http:// is provided.
102
+ connect_timeout_s: Timeout for establishing WebSocket connection
103
+ message_timeout_s: Timeout for receiving responses to messages
104
+ max_message_size_mb: Maximum WebSocket message size in megabytes.
105
+ Default 100MB to handle large observations (screenshots, DOM, etc.)
106
+ provider: Optional container/runtime provider for lifecycle management.
107
+ Can be a ContainerProvider (Docker) or RuntimeProvider (UV).
108
+ mode: Communication mode: 'simulation' for Gym-style API (default) or
109
+ 'production' for MCP JSON-RPC protocol. Can also be set via the
110
+ OPENENV_CLIENT_MODE environment variable. Constructor parameter
111
+ takes precedence over environment variable. Case-insensitive.
112
+ """
113
+ # Determine mode (constructor > env var > default)
114
+ if mode is None:
115
+ mode = os.environ.get("OPENENV_CLIENT_MODE", "simulation")
116
+
117
+ # Normalize and validate mode
118
+ mode = mode.lower()
119
+ if mode not in ("simulation", "production"):
120
+ raise ValueError(
121
+ f"Invalid mode: '{mode}'. Must be 'simulation' or 'production'. "
122
+ f"Set via constructor parameter or OPENENV_CLIENT_MODE environment variable."
123
+ )
124
+
125
+ # Store mode (use object.__setattr__ to bypass immutability)
126
+ object.__setattr__(self, "_mode", mode)
127
+
128
+ # Convert HTTP URL to WebSocket URL
129
+ ws_url = convert_to_ws_url(base_url)
130
+
131
+ self._ws_url = f"{ws_url}/ws"
132
+ self._connect_timeout = connect_timeout_s
133
+ self._message_timeout = message_timeout_s
134
+ self._max_message_size = int(
135
+ max_message_size_mb * 1024 * 1024
136
+ ) # Convert MB to bytes
137
+ self._provider = provider
138
+ self._ws: Optional[ClientConnection] = None
139
+
140
+ def __setattr__(self, name: str, value: Any) -> None:
141
+ """Prevent modification of _mode after initialization."""
142
+ if name == "_mode" and hasattr(self, "_mode"):
143
+ raise AttributeError("Cannot modify mode after initialization")
144
+ super().__setattr__(name, value)
145
+
146
    async def connect(self) -> "EnvClient":
        """
        Establish WebSocket connection to the server. Idempotent: a second
        call while connected returns immediately.

        Returns:
            self for method chaining

        Raises:
            ConnectionError: If connection cannot be established
        """
        if self._ws is not None:
            return self

        # Bypass proxy for localhost connections (detected by substring match
        # on the target URL).
        ws_url_lower = self._ws_url.lower()
        is_localhost = "localhost" in ws_url_lower or "127.0.0.1" in ws_url_lower

        old_no_proxy = os.environ.get("NO_PROXY")
        if is_localhost:
            # Set NO_PROXY to bypass proxy for localhost.
            # NOTE(review): this mutates process-global state for the duration
            # of the connect; concurrent connects in other tasks could observe
            # the temporary value — confirm this is acceptable.
            current_no_proxy = old_no_proxy or ""
            if "localhost" not in current_no_proxy.lower():
                os.environ["NO_PROXY"] = (
                    f"{current_no_proxy},localhost,127.0.0.1"
                    if current_no_proxy
                    else "localhost,127.0.0.1"
                )

        try:
            self._ws = await ws_connect(
                self._ws_url,
                open_timeout=self._connect_timeout,
                max_size=self._max_message_size,
            )
        except Exception as e:
            # Normalize any connection failure into ConnectionError.
            raise ConnectionError(f"Failed to connect to {self._ws_url}: {e}") from e
        finally:
            # Restore original NO_PROXY value, whether or not connect worked.
            if is_localhost:
                if old_no_proxy is None:
                    os.environ.pop("NO_PROXY", None)
                else:
                    os.environ["NO_PROXY"] = old_no_proxy

        return self
191
+
192
+ async def disconnect(self) -> None:
193
+ """Close the WebSocket connection."""
194
+ if self._ws is not None:
195
+ try:
196
+ # Send close message
197
+ await self._send({"type": "close"})
198
+ except Exception:
199
+ pass # Best effort
200
+ try:
201
+ await self._ws.close()
202
+ except Exception:
203
+ pass
204
+ self._ws = None
205
+
206
+ async def _ensure_connected(self) -> None:
207
+ """Ensure WebSocket connection is established."""
208
+ if self._ws is None:
209
+ await self.connect()
210
+
211
+ async def _send(self, message: Dict[str, Any]) -> None:
212
+ """Send a message over the WebSocket."""
213
+ await self._ensure_connected()
214
+ assert self._ws is not None
215
+ await self._ws.send(json.dumps(message))
216
+
217
+ async def _receive(self) -> Dict[str, Any]:
218
+ """Receive and parse a message from the WebSocket."""
219
+ assert self._ws is not None
220
+ raw = await asyncio.wait_for(self._ws.recv(), timeout=self._message_timeout)
221
+ return json.loads(raw)
222
+
223
+ async def _send_and_receive(self, message: Dict[str, Any]) -> Dict[str, Any]:
224
+ """Send a message and wait for response."""
225
+ await self._send(message)
226
+ response = await self._receive()
227
+
228
+ # Check for error response
229
+ if response.get("type") == "error":
230
+ error_data = response.get("data", {})
231
+ raise RuntimeError(
232
+ f"Server error: {error_data.get('message', 'Unknown error')} "
233
+ f"(code: {error_data.get('code', 'UNKNOWN')})"
234
+ )
235
+
236
+ return response
237
+
238
    @classmethod
    async def from_docker_image(
        cls: Type[EnvClientT],
        image: str,
        provider: Optional["ContainerProvider"] = None,
        **kwargs: Any,
    ) -> EnvClientT:
        """
        Create an environment client by spinning up a Docker container.

        Starts the container, blocks until its health endpoint responds, then
        connects a client that keeps a reference to the provider.

        Args:
            image: Docker image name to run (e.g., "coding-env:latest")
            provider: Container provider to use (defaults to LocalDockerProvider)
            **kwargs: Additional arguments to pass to provider.start_container()

        Returns:
            Connected client instance
        """
        if provider is None:
            provider = LocalDockerProvider()

        # Start container
        base_url = provider.start_container(image, **kwargs)

        # Wait for server to be ready
        provider.wait_for_ready(base_url)

        # Create and connect client (client retains the provider so it can
        # manage the container's lifecycle).
        client = cls(base_url=base_url, provider=provider)
        await client.connect()

        return client
270
+
271
    @classmethod
    async def from_env(
        cls: Type[EnvClientT],
        repo_id: str,
        *,
        use_docker: bool = True,
        provider: Optional["ContainerProvider | RuntimeProvider"] = None,
        **provider_kwargs: Any,
    ) -> EnvClientT:
        """
        Create a client from a Hugging Face Space.

        Args:
            repo_id: Hugging Face space identifier ``{org}/{space}``.
            use_docker: When ``True`` (default) pull from the HF registry and
                launch via :class:`LocalDockerProvider`. When ``False`` run the
                space locally with :class:`UVProvider`.
            provider: Optional provider instance to reuse. Must be a
                :class:`ContainerProvider` when ``use_docker=True`` and a
                :class:`RuntimeProvider` otherwise.
            provider_kwargs: Additional keyword arguments forwarded to
                either the container provider's ``start_container`` (docker)
                or to the ``UVProvider`` constructor/start (uv). When
                ``use_docker=False``, the ``project_path`` argument can be
                used to override the default git URL
                (``git+https://huggingface.co/spaces/{repo_id}``).

        Returns:
            Connected client instance

        Raises:
            ValueError: If ``provider_kwargs`` are given together with an
                explicit ``provider`` instance in UV mode.

        Examples:
            >>> # Pull and run from HF Docker registry
            >>> env = await MyEnv.from_env("openenv/echo-env")
            >>>
            >>> # Run locally with UV (clones the space)
            >>> env = await MyEnv.from_env("openenv/echo-env", use_docker=False)
            >>>
            >>> # Run from a local checkout
            >>> env = await MyEnv.from_env(
            ...     "openenv/echo-env",
            ...     use_docker=False,
            ...     project_path="/path/to/local/checkout"
            ... )
        """
        # Extract start args that apply to both providers; whatever remains in
        # provider_kwargs is provider-specific.
        start_args = {}
        for key in ("port", "env_vars", "workers"):
            if key in provider_kwargs:
                start_args[key] = provider_kwargs.pop(key)

        if use_docker:
            # Docker mode: pull from HF registry. HF publishes space images
            # under registry.hf.space with '/' replaced by '-'.
            docker_provider = provider or LocalDockerProvider()
            tag = provider_kwargs.pop("tag", "latest")
            image = f"registry.hf.space/{repo_id.replace('/', '-')}:{tag}"
            base_url = docker_provider.start_container(
                image, **start_args, **provider_kwargs
            )
            docker_provider.wait_for_ready(base_url)

            client = cls(base_url=base_url, provider=docker_provider)
            await client.connect()
            return client
        else:
            # UV mode: clone and run with uv.
            if provider is None:
                # Remaining kwargs configure the UVProvider constructor.
                uv_kwargs = dict(provider_kwargs)
                project_path = uv_kwargs.pop("project_path", None)
                if project_path is None:
                    project_path = f"git+https://huggingface.co/spaces/{repo_id}"

                provider = UVProvider(project_path=project_path, **uv_kwargs)
            else:
                # A pre-built provider is already configured; leftover kwargs
                # would be silently ignored, so reject them explicitly.
                if provider_kwargs:
                    raise ValueError(
                        "provider_kwargs cannot be used when supplying a provider instance"
                    )

            base_url = provider.start(**start_args)
            provider.wait_for_ready()

            client = cls(base_url=base_url, provider=provider)
            await client.connect()
            return client
355
+
356
    @abstractmethod
    def _step_payload(self, action: ActT) -> Dict[str, Any]:
        """Convert an Action object to the JSON data expected by the env server.

        Subclasses return the dict that is sent under the ``data`` key of the
        WebSocket ``step`` message.
        """
        raise NotImplementedError
360
+
361
    @abstractmethod
    def _parse_result(self, payload: Dict[str, Any]) -> StepResult[ObsT]:
        """Convert a JSON response from the env server to StepResult[ObsT].

        Subclasses deserialize the ``data`` payload of a reset/step response
        into a typed :class:`StepResult`.
        """
        raise NotImplementedError
365
+
366
    @abstractmethod
    def _parse_state(self, payload: Dict[str, Any]) -> StateT:
        """Convert a JSON response from the state endpoint to a State object.

        Subclasses deserialize the ``data`` payload of a ``state`` response
        into the environment-specific state type.
        """
        raise NotImplementedError
370
+
371
+ async def reset(self, **kwargs: Any) -> StepResult[ObsT]:
372
+ """
373
+ Reset the environment with optional parameters.
374
+
375
+ Args:
376
+ **kwargs: Optional parameters passed to the environment's reset method.
377
+ Common parameters include:
378
+ - seed: Random seed for reproducibility
379
+ - episode_id: Custom episode identifier
380
+
381
+ Returns:
382
+ StepResult containing initial observation
383
+ """
384
+ message = {
385
+ "type": "reset",
386
+ "data": kwargs,
387
+ }
388
+ response = await self._send_and_receive(message)
389
+ return self._parse_result(response.get("data", {}))
390
+
391
+ async def step(self, action: ActT, **kwargs: Any) -> StepResult[ObsT]:
392
+ """
393
+ Execute an action in the environment.
394
+
395
+ Args:
396
+ action: The action to execute
397
+ **kwargs: Optional parameters (currently ignored)
398
+
399
+ Returns:
400
+ StepResult containing observation, reward, and done status
401
+ """
402
+ message = {
403
+ "type": "step",
404
+ "data": self._step_payload(action),
405
+ }
406
+ response = await self._send_and_receive(message)
407
+ return self._parse_result(response.get("data", {}))
408
+
409
+ async def state(self) -> StateT:
410
+ """
411
+ Get the current environment state from the server.
412
+
413
+ Returns:
414
+ State object with environment state information
415
+ """
416
+ message = {"type": "state"}
417
+ response = await self._send_and_receive(message)
418
+ return self._parse_state(response.get("data", {}))
419
+
420
+ async def close(self) -> None:
421
+ """
422
+ Close the WebSocket connection and clean up resources.
423
+
424
+ If this client was created via from_docker_image() or from_env(),
425
+ this will also stop and remove the associated container/process.
426
+ """
427
+ await self.disconnect()
428
+
429
+ if self._provider is not None:
430
+ # Handle both ContainerProvider and RuntimeProvider
431
+ if hasattr(self._provider, "stop_container"):
432
+ self._provider.stop_container()
433
+ elif hasattr(self._provider, "stop"):
434
+ self._provider.stop()
435
+
436
    async def __aenter__(self) -> "EnvClient":
        """Enter async context manager, ensuring connection is established."""
        # connect() is expected to be idempotent if already connected.
        await self.connect()
        return self
440
+
441
    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        """Exit async context manager, closing connection (and any owned provider)."""
        await self.close()
444
+
445
    def __enter__(self) -> "EnvClient":
        """Sync context manager entry - raises error suggesting async usage.

        The client is async-first; a plain ``with`` would otherwise silently
        skip connection setup, so fail loudly with usage guidance instead.
        """
        raise TypeError(
            "EnvClient is async by default. Use 'async with' instead of 'with', "
            "or call .sync() to get a synchronous wrapper:\n"
            "  async with client:   # async usage\n"
            "  with client.sync():  # sync wrapper"
        )
453
+
454
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """Sync context manager exit - should not be reached (``__enter__`` always raises)."""
        pass  # pragma: no cover
457
+
458
+ def sync(self) -> "SyncEnvClient":
459
+ """
460
+ Return a synchronous wrapper around this async client.
461
+
462
+ Use this method when you need synchronous access to the environment
463
+ without async/await syntax. This is useful for:
464
+ - Integration with synchronous codebases
465
+ - Interactive/REPL usage
466
+ - Stopping async from "infecting" the call stack
467
+
468
+ Returns:
469
+ SyncEnvClient wrapper that provides synchronous methods
470
+
471
+ Example:
472
+ >>> # Create async client and get sync wrapper
473
+ >>> async_client = GenericEnvClient(base_url="http://localhost:8000")
474
+ >>> sync_client = async_client.sync()
475
+ >>>
476
+ >>> # Use synchronous API
477
+ >>> with sync_client:
478
+ ... result = sync_client.reset()
479
+ ... result = sync_client.step({"code": "print('hello')"})
480
+ """
481
+ from .sync_client import SyncEnvClient
482
+
483
+ return SyncEnvClient(self)
src/core/openenv/core/env_server/__init__.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Core environment interfaces and types."""
8
+
9
+ from .base_transforms import CompositeTransform, NullTransform
10
+ from .http_server import HTTPEnvServer, create_app, create_fastapi_app
11
+ from .interfaces import Environment, Message, ModelTokenizer, Transform
12
+ from .route_config import GetEndpointConfig
13
+ from .serialization import (
14
+ deserialize_action,
15
+ deserialize_action_with_preprocessing,
16
+ serialize_observation,
17
+ )
18
+ from .types import (
19
+ Action,
20
+ Observation,
21
+ State,
22
+ SchemaResponse,
23
+ HealthResponse,
24
+ HealthStatus,
25
+ ServerMode,
26
+ WSErrorCode,
27
+ BaseMessage,
28
+ WSIncomingMessage,
29
+ WSResetMessage,
30
+ WSStepMessage,
31
+ WSStateMessage,
32
+ WSCloseMessage,
33
+ WSObservationResponse,
34
+ WSStateResponse,
35
+ WSErrorResponse,
36
+ ConcurrencyConfig,
37
+ ServerCapacityStatus,
38
+ SessionInfo,
39
+ )
40
+ from .exceptions import (
41
+ OpenEnvError,
42
+ ConcurrencyConfigurationError,
43
+ SessionCapacityError,
44
+ SessionNotFoundError,
45
+ SessionCreationError,
46
+ EnvironmentFactoryError,
47
+ )
48
+ from .web_interface import create_web_interface_app, WebInterfaceManager
49
+ from .mcp_types import (
50
+ Tool,
51
+ ToolError,
52
+ ToolErrorType,
53
+ ListToolsAction,
54
+ CallToolAction,
55
+ ListToolsObservation,
56
+ CallToolObservation,
57
+ WSMCPMessage,
58
+ WSMCPResponse,
59
+ RESERVED_TOOL_NAMES,
60
+ # JSON-RPC types
61
+ JsonRpcErrorCode,
62
+ JsonRpcError,
63
+ JsonRpcRequest,
64
+ JsonRpcResponse,
65
+ McpMethod,
66
+ )
67
+ from .mcp_environment import MCPEnvironment
68
+
69
+ __all__ = [
70
+ # Core interfaces
71
+ "Environment",
72
+ "Transform",
73
+ "Message",
74
+ "ModelTokenizer",
75
+ # Types
76
+ "Action",
77
+ "Observation",
78
+ "State",
79
+ "SchemaResponse",
80
+ "HealthResponse",
81
+ # Enums
82
+ "HealthStatus",
83
+ "ServerMode",
84
+ "WSErrorCode",
85
+ # WebSocket message types
86
+ "BaseMessage",
87
+ "WSIncomingMessage",
88
+ "WSResetMessage",
89
+ "WSStepMessage",
90
+ "WSStateMessage",
91
+ "WSCloseMessage",
92
+ "WSObservationResponse",
93
+ "WSStateResponse",
94
+ "WSErrorResponse",
95
+ # Concurrency types
96
+ "ConcurrencyConfig",
97
+ "ServerCapacityStatus",
98
+ "SessionInfo",
99
+ # Exceptions
100
+ "OpenEnvError",
101
+ "ConcurrencyConfigurationError",
102
+ "SessionCapacityError",
103
+ "SessionNotFoundError",
104
+ "SessionCreationError",
105
+ "EnvironmentFactoryError",
106
+ # Base transforms
107
+ "CompositeTransform",
108
+ "NullTransform",
109
+ # HTTP Server
110
+ "HTTPEnvServer",
111
+ "create_app",
112
+ "create_fastapi_app",
113
+ # Web Interface
114
+ "create_web_interface_app",
115
+ "WebInterfaceManager",
116
+ # Serialization utilities
117
+ "deserialize_action",
118
+ "deserialize_action_with_preprocessing",
119
+ "serialize_observation",
120
+ # Route configuration
121
+ "GetEndpointConfig",
122
+ # MCP types
123
+ "Tool",
124
+ "ToolError",
125
+ "ToolErrorType",
126
+ "ListToolsAction",
127
+ "CallToolAction",
128
+ "ListToolsObservation",
129
+ "CallToolObservation",
130
+ "WSMCPMessage",
131
+ "WSMCPResponse",
132
+ "RESERVED_TOOL_NAMES",
133
+ "MCPEnvironment",
134
+ # JSON-RPC types
135
+ "JsonRpcErrorCode",
136
+ "JsonRpcError",
137
+ "JsonRpcRequest",
138
+ "JsonRpcResponse",
139
+ "McpMethod",
140
+ ]
src/core/openenv/core/env_server/base_transforms.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Base transform implementations for composing environment-specific transforms."""
8
+
9
+ from .interfaces import Transform
10
+ from .types import Observation
11
+
12
+
13
class CompositeTransform(Transform):
    """Chains several transforms, feeding each output into the next."""

    def __init__(self, transforms: list[Transform]):
        # Applied in list order on every call.
        self.transforms = transforms

    def __call__(self, observation: Observation) -> Observation:
        result = observation
        for transform in self.transforms:
            result = transform(result)
        return result
23
+
24
+
25
class NullTransform(Transform):
    """Identity transform: returns the observation untouched."""

    def __call__(self, observation: Observation) -> Observation:
        # Intentionally a no-op; used as the default when no transform is set.
        return observation
src/core/openenv/core/env_server/exceptions.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Custom exceptions for environment server operations."""
8
+
9
+ from typing import Optional
10
+
11
+
12
class OpenEnvError(Exception):
    """Root of the OpenEnv exception hierarchy; catch this to handle any OpenEnv error."""
16
+
17
+
18
class ConcurrencyConfigurationError(OpenEnvError):
    """
    Raised when an environment is misconfigured for concurrent sessions.

    Occurs at server startup if max_concurrent_envs > 1 is requested for an
    environment that does not declare SUPPORTS_CONCURRENT_SESSIONS.
    """

    def __init__(
        self,
        environment_name: str,
        max_concurrent_envs: int,
        message: Optional[str] = None,
    ):
        self.environment_name = environment_name
        self.max_concurrent_envs = max_concurrent_envs

        # Build the default explanation only when the caller gave none.
        default = (
            f"Environment '{environment_name}' is not marked as SUPPORTS_CONCURRENT_SESSIONS. "
            f"Cannot run with max_concurrent_envs={max_concurrent_envs}. "
            f"Either set max_concurrent_envs=1 or ensure the environment "
            f"properly isolates session state and set SUPPORTS_CONCURRENT_SESSIONS=True."
        )
        super().__init__(default if message is None else message)
44
+
45
+
46
class SessionCapacityError(OpenEnvError):
    """
    Raised when the server cannot accept new sessions due to capacity limits.

    Triggered by a new WebSocket connection attempt while the server already
    holds max_concurrent_envs active sessions.
    """

    def __init__(
        self,
        active_sessions: int,
        max_sessions: int,
        message: Optional[str] = None,
    ):
        self.active_sessions = active_sessions
        self.max_sessions = max_sessions

        # Default text reports current vs. maximum occupancy.
        default = (
            f"Server at capacity: {active_sessions}/{max_sessions} sessions active. "
            f"Cannot accept new connections."
        )
        super().__init__(default if message is None else message)
70
+
71
+
72
class SessionNotFoundError(OpenEnvError):
    """Raised when attempting to access a session that does not exist."""

    def __init__(self, session_id: str, message: Optional[str] = None):
        self.session_id = session_id
        final_message = (
            f"Session '{session_id}' not found." if message is None else message
        )
        super().__init__(final_message)
82
+
83
+
84
class SessionCreationError(OpenEnvError):
    """Raised when a session cannot be created."""

    def __init__(self, reason: str, message: Optional[str] = None):
        self.reason = reason
        final_message = (
            f"Failed to create session: {reason}" if message is None else message
        )
        super().__init__(final_message)
94
+
95
+
96
class EnvironmentFactoryError(OpenEnvError):
    """Raised when the environment factory fails to create an instance."""

    def __init__(self, factory_name: str, message: Optional[str] = None):
        self.factory_name = factory_name
        final_message = (
            f"Environment factory '{factory_name}' failed to create instance."
            if message is None
            else message
        )
        super().__init__(final_message)
src/core/openenv/core/env_server/gradio_theme.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Unified terminal-style theme for OpenEnv Gradio UI (light/dark)."""
8
+
9
+ from __future__ import annotations
10
+
11
+ import gradio as gr
12
+
13
# Monospace stack for code-like widgets; prefers developer fonts, falls back
# to the platform default monospace.
_MONO_FONTS = (
    "JetBrains Mono",
    "Fira Code",
    "Cascadia Code",
    "Consolas",
    "ui-monospace",
    "monospace",
)

# Proportional stack for body text.
_CORE_FONT = (
    "Lato",
    "Inter",
    "Arial",
    "Helvetica",
    "sans-serif",
)

# Square corners everywhere (terminal look): every Gradio radius token is 0px.
_ZERO_RADIUS = gr.themes.Size(
    xxs="0px",
    xs="0px",
    sm="0px",
    md="0px",
    lg="0px",
    xl="0px",
    xxl="0px",
)

# Green primary palette, lightest (c50) to darkest (c950).
# NOTE(review): hex values mirror GitHub's green scale — confirm if updating.
_GREEN_HUE = gr.themes.Color(
    c50="#e6f4ea",
    c100="#ceead6",
    c200="#a8dab5",
    c300="#6fcc8b",
    c400="#3fb950",
    c500="#238636",
    c600="#1a7f37",
    c700="#116329",
    c800="#0a4620",
    c900="#033a16",
    c950="#04200d",
)

# Neutral gray palette used for secondary and neutral roles.
_NEUTRAL_HUE = gr.themes.Color(
    c50="#f6f8fa",
    c100="#eaeef2",
    c200="#d0d7de",
    c300="#afb8c1",
    c400="#8c959f",
    c500="#6e7781",
    c600="#57606a",
    c700="#424a53",
    c800="#32383f",
    c900="#24292f",
    c950="#1b1f24",
)

# The shared OpenEnv theme. Light-mode values come first; the *_dark keys
# provide the dark-mode overrides.
OPENENV_GRADIO_THEME = gr.themes.Base(
    primary_hue=_GREEN_HUE,
    secondary_hue=_NEUTRAL_HUE,
    neutral_hue=_NEUTRAL_HUE,
    font=_CORE_FONT,
    font_mono=_MONO_FONTS,
    radius_size=_ZERO_RADIUS,
).set(
    body_background_fill="#ffffff",
    background_fill_primary="#ffffff",
    background_fill_secondary="#f6f8fa",
    block_background_fill="#ffffff",
    block_border_color="#ffffff",
    block_label_text_color="#57606a",
    block_title_text_color="#24292f",
    border_color_primary="#d0d7de",
    input_background_fill="#ffffff",
    input_border_color="#d0d7de",
    button_primary_background_fill="#1a7f37",
    button_primary_background_fill_hover="#116329",
    button_primary_text_color="#ffffff",
    button_secondary_background_fill="#f6f8fa",
    button_secondary_background_fill_hover="#eaeef2",
    button_secondary_text_color="#24292f",
    button_secondary_border_color="#d0d7de",
    body_background_fill_dark="#0d1117",
    background_fill_primary_dark="#0d1117",
    background_fill_secondary_dark="#0d1117",
    block_background_fill_dark="#0d1117",
    block_border_color_dark="#0d1117",
    block_label_text_color_dark="#8b949e",
    block_title_text_color_dark="#c9d1d9",
    border_color_primary_dark="#30363d",
    input_background_fill_dark="#0d1117",
    input_border_color_dark="#30363d",
    button_primary_background_fill_dark="#30363d",
    button_primary_background_fill_hover_dark="#484f58",
    button_primary_text_color_dark="#c9d1d9",
    button_secondary_background_fill_dark="#21262d",
    button_secondary_background_fill_hover_dark="#30363d",
    button_secondary_text_color_dark="#c9d1d9",
    button_secondary_border_color_dark="#30363d",
)

# Extra CSS layered on top of the theme: kills rounded corners the theme
# tokens can't reach, pads the two layout columns, and strips backgrounds
# from rendered-markdown containers.
OPENENV_GRADIO_CSS = """
* { border-radius: 0 !important; }
.col-left { padding: 16px !important; }
.col-right { padding: 16px !important; }
.prose, .markdown-text, .md,
.prose > *, .markdown-text > * {
    background: transparent !important;
    border: none !important;
    box-shadow: none !important;
}
.dark .col-left {
    border-left-color: rgba(139, 148, 158, 0.4) !important;
}
.dark .col-right {
    border-left-color: rgba(201, 209, 217, 0.3) !important;
}
"""
src/core/openenv/core/env_server/gradio_ui.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ Gradio-based web UI for OpenEnv environments.
9
+
10
+ Replaces the legacy HTML/JavaScript interface when ENABLE_WEB_INTERFACE is set.
11
+ Mount at /web via gr.mount_gradio_app() from create_web_interface_app().
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import json
17
+ import re
18
+ from typing import Any, Dict, List, Optional
19
+
20
+ import gradio as gr
21
+
22
+ from .types import EnvironmentMetadata
23
+
24
+
25
+ def _escape_md(text: str) -> str:
26
+ """Escape Markdown special characters in user-controlled content."""
27
+ return re.sub(r"([\\`*_\{\}\[\]()#+\-.!|~>])", r"\\\1", str(text))
28
+
29
+
30
def _format_observation(data: Dict[str, Any]) -> str:
    """Render a reset/step response dict as Markdown for the playground panel."""
    parts: List[str] = []
    obs = data.get("observation", {})
    if isinstance(obs, dict):
        # Optional prompt text, escaped so it can't break the Markdown layout.
        if obs.get("prompt"):
            parts.append(f"**Prompt:**\n\n{_escape_md(obs['prompt'])}\n")
        messages = obs.get("messages", [])
        if messages:
            parts.append("**Messages:**\n")
            for msg in messages:
                sender = _escape_md(str(msg.get("sender_id", "?")))
                body = _escape_md(str(msg.get("content", "")))
                category = _escape_md(str(msg.get("category", "")))
                parts.append(f"- `[{category}]` Player {sender}: {body}")
            parts.append("")
    # Reward/done are shown whenever present, even if falsy (0 / False).
    reward = data.get("reward")
    done = data.get("done")
    if reward is not None:
        parts.append(f"**Reward:** `{reward}`")
    if done is not None:
        parts.append(f"**Done:** `{done}`")
    if not parts:
        return "*No observation data*"
    return "\n".join(parts)
53
+
54
+
55
+ def _readme_section(metadata: Optional[EnvironmentMetadata]) -> str:
56
+ """README content for the left panel."""
57
+ if not metadata or not metadata.readme_content:
58
+ return "*No README available.*"
59
+ return metadata.readme_content
60
+
61
+
62
def get_gradio_display_title(
    metadata: Optional[EnvironmentMetadata],
    fallback: str = "OpenEnv Environment",
) -> str:
    """Compose the title shown in the browser tab and on the Blocks app."""
    if metadata:
        env_name = metadata.name
    else:
        env_name = fallback
    return f"OpenEnv Agentic Environment: {env_name}"
69
+
70
+
71
def build_gradio_app(
    web_manager: Any,
    action_fields: List[Dict[str, Any]],
    metadata: Optional[EnvironmentMetadata],
    is_chat_env: bool,
    title: str = "OpenEnv Environment",
    quick_start_md: Optional[str] = None,
) -> gr.Blocks:
    """
    Build a Gradio Blocks app for the OpenEnv web interface.

    Args:
        web_manager: WebInterfaceManager (reset/step_environment, get_state).
        action_fields: Field dicts from _extract_action_fields(action_cls).
        metadata: Environment metadata for README/name.
        is_chat_env: If True, single message textbox; else form from action_fields.
        title: App title (overridden by metadata.name when present; see get_gradio_display_title).
        quick_start_md: Optional Quick Start markdown (class names already replaced).

    Returns:
        gr.Blocks to mount with gr.mount_gradio_app(app, blocks, path="/web").
    """
    readme_content = _readme_section(metadata)
    display_title = get_gradio_display_title(metadata, fallback=title)

    async def reset_env():
        # Reset handler: returns (markdown, raw json, status) for the outputs.
        try:
            data = await web_manager.reset_environment()
            obs_md = _format_observation(data)
            return (
                obs_md,
                json.dumps(data, indent=2),
                "Environment reset successfully.",
            )
        except Exception as e:
            return ("", "", f"Error: {e}")

    def _step_with_action(action_data: Dict[str, Any]):
        # Returns an async runner bound to action_data; callers await it.
        async def _run():
            try:
                data = await web_manager.step_environment(action_data)
                obs_md = _format_observation(data)
                return (
                    obs_md,
                    json.dumps(data, indent=2),
                    "Step complete.",
                )
            except Exception as e:
                return ("", "", f"Error: {e}")

        return _run

    async def step_chat(message: str):
        # FIX: was `message or str(message).strip()`, which let whitespace-only
        # input through (sending an empty message) and turned None into the
        # literal string "None" (str(None).strip() is truthy). Require a
        # non-empty, non-whitespace message instead.
        if not (message and str(message).strip()):
            return ("", "", "Please enter an action message.")
        action = {"message": str(message).strip()}
        return await _step_with_action(action)()

    def get_state_sync():
        # State handler: returns raw JSON only (single output component).
        try:
            data = web_manager.get_state()
            return json.dumps(data, indent=2)
        except Exception as e:
            return f"Error: {e}"

    with gr.Blocks(title=display_title) as demo:
        with gr.Row():
            # Left column: docs (Quick Start + README).
            with gr.Column(scale=1, elem_classes="col-left"):
                if quick_start_md:
                    with gr.Accordion("Quick Start", open=True):
                        gr.Markdown(quick_start_md)
                with gr.Accordion("README", open=False):
                    gr.Markdown(readme_content)

            # Right column: the interactive playground.
            with gr.Column(scale=2, elem_classes="col-right"):
                obs_display = gr.Markdown(
                    value=("# Playground\n\nClick **Reset** to start a new episode."),
                )
                with gr.Group():
                    if is_chat_env:
                        # Chat-style env: a single free-text message box.
                        action_input = gr.Textbox(
                            label="Action message",
                            placeholder="e.g. Enter your message...",
                        )
                        step_inputs = [action_input]
                        step_fn = step_chat
                    else:
                        # Structured env: one widget per action field.
                        step_inputs = []
                        for field in action_fields:
                            name = field["name"]
                            field_type = field.get("type", "text")
                            label = name.replace("_", " ").title()
                            placeholder = field.get("placeholder", "")
                            if field_type == "checkbox":
                                inp = gr.Checkbox(label=label)
                            elif field_type == "number":
                                inp = gr.Number(label=label)
                            elif field_type == "select":
                                choices = field.get("choices") or []
                                inp = gr.Dropdown(
                                    choices=choices,
                                    label=label,
                                    allow_custom_value=False,
                                )
                            elif field_type in ("textarea", "tensor"):
                                inp = gr.Textbox(
                                    label=label,
                                    placeholder=placeholder,
                                    lines=3,
                                )
                            else:
                                inp = gr.Textbox(
                                    label=label,
                                    placeholder=placeholder,
                                )
                            step_inputs.append(inp)

                        async def step_form(*values):
                            # Map positional widget values back onto field names;
                            # checkboxes always send a bool, other widgets only
                            # when non-empty.
                            if not action_fields:
                                return await _step_with_action({})()
                            action_data = {}
                            for i, field in enumerate(action_fields):
                                if i >= len(values):
                                    break
                                name = field["name"]
                                val = values[i]
                                if field.get("type") == "checkbox":
                                    action_data[name] = bool(val)
                                elif val is not None and val != "":
                                    action_data[name] = val
                            return await _step_with_action(action_data)()

                        step_fn = step_form

                with gr.Row():
                    step_btn = gr.Button("Step", variant="primary")
                    reset_btn = gr.Button("Reset", variant="secondary")
                    state_btn = gr.Button("Get state", variant="secondary")
                with gr.Row():
                    status = gr.Textbox(
                        label="Status",
                        interactive=False,
                    )
                    raw_json = gr.Code(
                        label="Raw JSON response",
                        language="json",
                        interactive=False,
                    )

        # Event wiring: all three actions update the shared output components.
        reset_btn.click(
            fn=reset_env,
            outputs=[obs_display, raw_json, status],
        )
        step_btn.click(
            fn=step_fn,
            inputs=step_inputs,
            outputs=[obs_display, raw_json, status],
        )
        if is_chat_env:
            # Enter in the chat textbox behaves like pressing Step.
            action_input.submit(
                fn=step_fn,
                inputs=step_inputs,
                outputs=[obs_display, raw_json, status],
            )
        state_btn.click(
            fn=get_state_sync,
            outputs=[raw_json],
        )

    return demo
src/core/openenv/core/env_server/http_server.py ADDED
@@ -0,0 +1,1396 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ HTTP server wrapper for Environment instances.
9
+
10
+ This module provides utilities to wrap any Environment subclass and expose it
11
+ over HTTP and WebSocket endpoints that EnvClient can consume.
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import asyncio
17
+ import inspect
18
+ import json
19
+ import os
20
+ import time
21
+ import uuid
22
+ from concurrent.futures import ThreadPoolExecutor
23
+ from typing import Any, Callable, Dict, Optional, Type
24
+
25
+ from fastapi import (
26
+ Body,
27
+ FastAPI,
28
+ HTTPException,
29
+ Request,
30
+ WebSocket,
31
+ WebSocketDisconnect,
32
+ status,
33
+ )
34
+ from pydantic import ValidationError
35
+
36
+ from .interfaces import Environment
37
+ from .route_config import (
38
+ GetEndpointConfig,
39
+ register_get_endpoints,
40
+ )
41
+ from .serialization import deserialize_action, serialize_observation
42
+ from .types import (
43
+ Action,
44
+ Observation,
45
+ ResetRequest,
46
+ ResetResponse,
47
+ State,
48
+ StepRequest,
49
+ StepResponse,
50
+ EnvironmentMetadata,
51
+ SchemaResponse,
52
+ HealthResponse,
53
+ HealthStatus,
54
+ ServerMode,
55
+ WSErrorCode,
56
+ WSResetMessage,
57
+ WSStepMessage,
58
+ WSStateMessage,
59
+ WSCloseMessage,
60
+ WSObservationResponse,
61
+ WSStateResponse,
62
+ WSErrorResponse,
63
+ ConcurrencyConfig,
64
+ ServerCapacityStatus,
65
+ SessionInfo,
66
+ )
67
+ from .mcp_types import (
68
+ JsonRpcErrorCode,
69
+ JsonRpcRequest,
70
+ JsonRpcResponse,
71
+ McpMethod,
72
+ WSMCPMessage,
73
+ WSMCPResponse,
74
+ )
75
+
76
+
77
+ def _make_json_serializable(obj: Any) -> Any:
78
+ """
79
+ Convert an object to a JSON-serializable form.
80
+
81
+ Handles Pydantic models, dataclasses, and other common types.
82
+
83
+ Args:
84
+ obj: The object to convert
85
+
86
+ Returns:
87
+ A JSON-serializable representation of the object
88
+ """
89
+ if obj is None:
90
+ return None
91
+ if isinstance(obj, (str, int, float, bool)):
92
+ return obj
93
+ if isinstance(obj, (list, tuple)):
94
+ return [_make_json_serializable(item) for item in obj]
95
+ if isinstance(obj, dict):
96
+ return {k: _make_json_serializable(v) for k, v in obj.items()}
97
+ if hasattr(obj, "model_dump"):
98
+ # Pydantic model
99
+ return obj.model_dump()
100
+ if hasattr(obj, "__dict__"):
101
+ # Object with __dict__
102
+ return {k: _make_json_serializable(v) for k, v in obj.__dict__.items()}
103
+ # Fallback to string representation
104
+ return str(obj)
105
+
106
+
107
+ from .exceptions import (
108
+ ConcurrencyConfigurationError,
109
+ SessionCapacityError,
110
+ EnvironmentFactoryError,
111
+ )
112
+
113
+
114
+ class HTTPEnvServer:
115
+ """
116
+ HTTP server wrapper for Environment instances.
117
+
118
+ This class wraps an Environment and exposes its reset(), step(), and state
119
+ methods as HTTP and WebSocket endpoints compatible with EnvClient.
120
+
121
+ The server expects:
122
+ - Action deserialization: Converts JSON dict to Action subclass
123
+ - Observation serialization: Converts Observation subclass to JSON dict
124
+
125
+ Example:
126
+ >>> from core.env_server import HTTPEnvServer
127
+ >>> from envs.coding_env.server import CodeExecutionEnvironment
128
+ >>> from envs.coding_env.models import CodeAction, CodeObservation
129
+ >>>
130
+ >>> # Pass environment class (factory pattern)
131
+ >>> server = HTTPEnvServer(
132
+ ... env=CodeExecutionEnvironment,
133
+ ... action_cls=CodeAction,
134
+ ... observation_cls=CodeObservation,
135
+ ... max_concurrent_envs=4,
136
+ ... )
137
+ >>>
138
+ >>> # Register routes with FastAPI
139
+ >>> from fastapi import FastAPI
140
+ >>> app = FastAPI()
141
+ >>> server.register_routes(app)
142
+ """
143
+
144
+ def __init__(
145
+ self,
146
+ env: Callable[[], Environment],
147
+ action_cls: Type[Action],
148
+ observation_cls: Type[Observation],
149
+ max_concurrent_envs: Optional[int] = None,
150
+ concurrency_config: Optional[ConcurrencyConfig] = None,
151
+ ):
152
+ """
153
+ Initialize HTTP server wrapper.
154
+
155
+ Args:
156
+ env: Environment factory (callable) that creates new instances.
157
+ Will be called to create a new environment for each WebSocket session.
158
+ action_cls: The Action subclass this environment expects
159
+ observation_cls: The Observation subclass this environment returns
160
+ max_concurrent_envs: Maximum number of concurrent WebSocket sessions.
161
+ Mutually exclusive with concurrency_config.
162
+ concurrency_config: Optional ConcurrencyConfig for advanced concurrency settings.
163
+ Mutually exclusive with max_concurrent_envs.
164
+
165
+ Raises:
166
+ ValueError: If both max_concurrent_envs and concurrency_config are provided.
167
+ ConcurrencyConfigurationError: If max_concurrent_envs > 1 for an
168
+ environment that is not marked as SUPPORTS_CONCURRENT_SESSIONS.
169
+ """
170
+ # Validate that env is callable
171
+ if not callable(env):
172
+ raise TypeError(
173
+ f"env must be a callable (class or factory function), got {type(env)}. "
174
+ f"Pass the environment class (e.g., MyEnvironment) not an instance (e.g., MyEnvironment())."
175
+ )
176
+
177
+ self._env_factory: Callable[[], Environment] = env
178
+
179
+ # Handle concurrency configuration
180
+ if max_concurrent_envs is not None and concurrency_config is not None:
181
+ raise ValueError(
182
+ "Cannot specify both 'max_concurrent_envs' and 'concurrency_config'. "
183
+ "Please use only one method to configure concurrency."
184
+ )
185
+
186
+ if concurrency_config is not None:
187
+ self._concurrency_config = concurrency_config
188
+ elif max_concurrent_envs is not None:
189
+ self._concurrency_config = ConcurrencyConfig(
190
+ max_concurrent_envs=max_concurrent_envs,
191
+ session_timeout=None,
192
+ )
193
+ else:
194
+ # Default configuration
195
+ self._concurrency_config = ConcurrencyConfig(
196
+ max_concurrent_envs=1,
197
+ session_timeout=None,
198
+ )
199
+
200
+ self._max_concurrent_envs = self._concurrency_config.max_concurrent_envs
201
+
202
+ # Validate concurrency configuration
203
+ self._validate_concurrency_safety()
204
+
205
+ self.action_cls = action_cls
206
+ self.observation_cls = observation_cls
207
+
208
+ # Session management for WebSocket connections
209
+ self._sessions: Dict[str, Environment] = {}
210
+ self._session_executors: Dict[str, ThreadPoolExecutor] = {}
211
+ self._session_info: Dict[str, SessionInfo] = {}
212
+ self._session_lock = asyncio.Lock()
213
+
214
+ # Create thread pool for running sync code in async context
215
+ # This is needed for environments using sync libraries (e.g., Playwright)
216
+ self._executor = ThreadPoolExecutor(max_workers=32)
217
+
218
+ def _validate_concurrency_safety(self) -> None:
219
+ """
220
+ Validate that the environment supports the configured concurrency level.
221
+
222
+ Raises:
223
+ ConcurrencyConfigurationError: If max_concurrent_envs > 1 for an
224
+ environment that is not marked as SUPPORTS_CONCURRENT_SESSIONS.
225
+ """
226
+ if self._max_concurrent_envs <= 1:
227
+ return
228
+
229
+ if inspect.isclass(self._env_factory):
230
+ env_cls = self._env_factory
231
+ else:
232
+ _temp_env = self._env_factory()
233
+ env_cls = type(_temp_env)
234
+ _temp_env.close()
235
+ del _temp_env
236
+
237
+ if not getattr(env_cls, "SUPPORTS_CONCURRENT_SESSIONS", False):
238
+ raise ConcurrencyConfigurationError(
239
+ environment_name=env_cls.__name__,
240
+ max_concurrent_envs=self._max_concurrent_envs,
241
+ )
242
+
243
+ def get_capacity_status(self) -> ServerCapacityStatus:
244
+ """
245
+ Get the current capacity status of the server.
246
+
247
+ Returns:
248
+ ServerCapacityStatus with current session counts and availability.
249
+ """
250
+ return ServerCapacityStatus.from_counts(
251
+ active=len(self._sessions),
252
+ max_sessions=self._max_concurrent_envs,
253
+ )
254
+
255
+ async def _run_sync_in_thread_pool(
256
+ self, func: Callable[..., Observation], *args, **kwargs
257
+ ) -> Observation:
258
+ """Run a synchronous function in the thread pool executor."""
259
+ loop = asyncio.get_event_loop()
260
+ return await loop.run_in_executor(self._executor, lambda: func(*args, **kwargs))
261
+
262
+ def _get_valid_kwargs(
263
+ self,
264
+ sig: inspect.Signature,
265
+ kwargs: Dict[str, Any],
266
+ skip_params: Optional[set[str]] = None,
267
+ ) -> Dict[str, Any]:
268
+ """Filter kwargs to only include parameters accepted by the function signature."""
269
+ if skip_params is None:
270
+ skip_params = set()
271
+
272
+ valid_kwargs = {}
273
+
274
+ has_kwargs = any(
275
+ p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()
276
+ )
277
+
278
+ for k, v in kwargs.items():
279
+ if k in sig.parameters or has_kwargs:
280
+ if k not in skip_params:
281
+ valid_kwargs[k] = v
282
+
283
+ return valid_kwargs
284
+
285
    async def _create_session(self) -> tuple[str, Environment]:
        """
        Create a new WebSocket session with its own environment instance.

        The session slot (dedicated executor + placeholder entry) is reserved
        under the lock first, then the environment is constructed outside the
        lock so a slow factory cannot block other sessions. On factory failure
        the reserved slot is rolled back.

        Returns:
            Tuple of (session_id, environment)

        Raises:
            SessionCapacityError: If max concurrent sessions reached
            EnvironmentFactoryError: If the factory fails to create an environment
        """
        async with self._session_lock:
            # Capacity check and slot reservation are atomic w.r.t. other sessions.
            if len(self._sessions) >= self._max_concurrent_envs:
                raise SessionCapacityError(
                    active_sessions=len(self._sessions),
                    max_sessions=self._max_concurrent_envs,
                )

            session_id = str(uuid.uuid4())
            current_time = time.time()

            # Create executor and reserve slot so capacity is not exceeded while
            # we create the env outside the lock (avoids blocking other sessions)
            executor = ThreadPoolExecutor(max_workers=1)
            self._session_executors[session_id] = executor
            self._sessions[session_id] = None  # placeholder until env is ready

        try:
            # Create environment in the executor thread (outside lock). Using the
            # session's single-thread executor keeps thread-affine libraries
            # (e.g. Playwright/greenlet) pinned to one thread for their lifetime.
            loop = asyncio.get_event_loop()
            env = await loop.run_in_executor(executor, self._env_factory)
        except Exception as e:
            # Roll back the reserved slot so capacity is not leaked on failure.
            async with self._session_lock:
                executor.shutdown(wait=False)
                self._session_executors.pop(session_id, None)
                self._sessions.pop(session_id, None)
            factory_name = getattr(
                self._env_factory, "__name__", str(self._env_factory)
            )
            raise EnvironmentFactoryError(factory_name) from e

        async with self._session_lock:
            # Swap the placeholder for the live environment and record metadata.
            self._sessions[session_id] = env
            self._session_info[session_id] = SessionInfo(
                session_id=session_id,
                created_at=current_time,
                last_activity_at=current_time,
                step_count=0,
                environment_type=type(env).__name__,
            )

        return session_id, env
337
+
338
    async def _destroy_session(self, session_id: str) -> None:
        """
        Destroy a WebSocket session and cleanup resources.

        Cleanup is best-effort: every close failure is swallowed so that
        session teardown never propagates an exception to the caller.

        Args:
            session_id: The session ID to destroy
        """
        # Remove all bookkeeping under the lock; the actual (possibly slow)
        # cleanup work happens after the lock is released.
        async with self._session_lock:
            env = self._sessions.pop(session_id, None)
            executor = self._session_executors.pop(session_id, None)
            self._session_info.pop(session_id, None)

        # Run close() in the same executor where the env was created
        # This is required for thread-sensitive libraries like Playwright/greenlet
        if env is not None:
            if executor is not None:
                try:
                    loop = asyncio.get_event_loop()
                    await loop.run_in_executor(executor, env.close)
                except Exception:
                    # If executor close fails, try direct close as fallback
                    try:
                        env.close()
                    except Exception:
                        pass  # Best effort cleanup
            else:
                try:
                    env.close()
                except Exception:
                    pass  # Best effort cleanup

        # Shutdown executor after close is done (wait=False: let worker
        # threads wind down asynchronously).
        if executor is not None:
            executor.shutdown(wait=False)
372
+
373
+ def _update_session_activity(
374
+ self, session_id: str, increment_step: bool = False
375
+ ) -> None:
376
+ """
377
+ Update session activity timestamp and optionally increment step count.
378
+
379
+ Args:
380
+ session_id: The session ID to update
381
+ increment_step: If True, increment the step count
382
+ """
383
+ if session_id in self._session_info:
384
+ self._session_info[session_id].last_activity_at = time.time()
385
+ if increment_step:
386
+ self._session_info[session_id].step_count += 1
387
+
388
+ def get_session_info(self, session_id: str) -> Optional[SessionInfo]:
389
+ """
390
+ Get information about a specific session.
391
+
392
+ Args:
393
+ session_id: The session ID to query
394
+
395
+ Returns:
396
+ SessionInfo if the session exists, None otherwise
397
+ """
398
+ return self._session_info.get(session_id)
399
+
400
+ async def _run_in_session_executor(
401
+ self, session_id: str, func: Callable[..., Observation], *args, **kwargs
402
+ ) -> Observation:
403
+ """Run a synchronous function in the session's thread pool executor."""
404
+ executor = self._session_executors.get(session_id, self._executor)
405
+ loop = asyncio.get_event_loop()
406
+ return await loop.run_in_executor(executor, lambda: func(*args, **kwargs))
407
+
408
+ @property
409
+ def active_sessions(self) -> int:
410
+ """Return the number of active WebSocket sessions."""
411
+ return len(self._sessions)
412
+
413
+ @property
414
+ def max_concurrent_envs(self) -> int:
415
+ """Return the maximum number of concurrent environments."""
416
+ return self._max_concurrent_envs
417
+
418
+ @property
419
+ def is_concurrency_safe(self) -> bool:
420
+ """Return whether the environment is marked as concurrency safe."""
421
+ import inspect
422
+
423
+ if inspect.isclass(self._env_factory):
424
+ return getattr(self._env_factory, "SUPPORTS_CONCURRENT_SESSIONS", False)
425
+ else:
426
+ _temp_env = self._env_factory()
427
+ result = getattr(_temp_env, "SUPPORTS_CONCURRENT_SESSIONS", False)
428
+ _temp_env.close()
429
+ del _temp_env
430
+ return result
431
+
432
+ @property
433
+ def concurrency_config(self) -> ConcurrencyConfig:
434
+ """Return the concurrency configuration."""
435
+ return self._concurrency_config
436
+
437
+ def register_routes(
438
+ self, app: FastAPI, mode: ServerMode | str = ServerMode.SIMULATION
439
+ ) -> None:
440
+ """
441
+ Register HTTP routes on a FastAPI application.
442
+
443
+ Args:
444
+ app: FastAPI application instance
445
+ mode: Server mode - either SIMULATION or PRODUCTION (or string equivalents).
446
+ In production mode, simulation control endpoints (/reset, /step, /state)
447
+ are NOT registered. Only safe endpoints (/health, /schema, /metadata, /ws)
448
+ are available. Defaults to SIMULATION for backwards compatibility.
449
+
450
+ Raises:
451
+ ValueError: If mode is not a valid ServerMode or string equivalent.
452
+ """
453
+ # Convert string to ServerMode enum for backwards compatibility
454
+ if isinstance(mode, str):
455
+ try:
456
+ mode = ServerMode(mode.lower())
457
+ except ValueError:
458
+ valid_modes = [m.value for m in ServerMode]
459
+ raise ValueError(
460
+ f"Invalid mode: '{mode}'. Must be one of: {valid_modes}"
461
+ )
462
+
463
+ # Helper function to handle reset endpoint
464
+ async def reset_handler(
465
+ request: ResetRequest = Body(default_factory=ResetRequest),
466
+ ) -> ResetResponse:
467
+ """Reset endpoint - returns initial observation."""
468
+ _env = self._env_factory()
469
+
470
+ try:
471
+ kwargs = request.model_dump(exclude_unset=True)
472
+
473
+ is_async = _env.reset_async.__func__ is not Environment.reset_async
474
+
475
+ if is_async:
476
+ sig = inspect.signature(_env.reset_async)
477
+ else:
478
+ sig = inspect.signature(_env.reset)
479
+ valid_kwargs = self._get_valid_kwargs(sig, kwargs)
480
+
481
+ if is_async:
482
+ observation = await _env.reset_async(**valid_kwargs)
483
+ else:
484
+ observation = await self._run_sync_in_thread_pool(
485
+ _env.reset, **valid_kwargs
486
+ )
487
+ return ResetResponse(**serialize_observation(observation))
488
+ finally:
489
+ _env.close()
490
+
491
+ # Helper function to handle step endpoint
492
+ async def step_handler(request: StepRequest) -> StepResponse:
493
+ """Step endpoint - executes action and returns observation."""
494
+ action_data = request.action
495
+
496
+ try:
497
+ action = deserialize_action(action_data, self.action_cls)
498
+ except ValidationError as e:
499
+ raise HTTPException(
500
+ status_code=status.HTTP_422_UNPROCESSABLE_CONTENT, detail=e.errors()
501
+ )
502
+
503
+ _env = self._env_factory()
504
+
505
+ try:
506
+ kwargs = request.model_dump(exclude_unset=True, exclude={"action"})
507
+
508
+ is_async = _env.step_async.__func__ is not Environment.step_async
509
+
510
+ if is_async:
511
+ sig = inspect.signature(_env.step_async)
512
+ else:
513
+ sig = inspect.signature(_env.step)
514
+ valid_kwargs = self._get_valid_kwargs(
515
+ sig, kwargs, skip_params={"action"}
516
+ )
517
+
518
+ if is_async:
519
+ observation = await _env.step_async(action, **valid_kwargs)
520
+ else:
521
+ observation = await self._run_sync_in_thread_pool(
522
+ _env.step, action, **valid_kwargs
523
+ )
524
+
525
+ return StepResponse(**serialize_observation(observation))
526
+ finally:
527
+ _env.close()
528
+
529
+ # Helper function to handle MCP endpoint
530
+ async def mcp_handler(
531
+ request: JsonRpcRequest, session_env: Optional[Environment] = None
532
+ ) -> JsonRpcResponse:
533
+ """
534
+ Handle MCP JSON-RPC requests.
535
+
536
+ Supports tools/list and tools/call methods in JSON-RPC 2.0 format.
537
+ """
538
+ method = request.method
539
+ request_id = request.id
540
+
541
+ # Use provided session environment or create temporary one
542
+ if session_env is not None:
543
+ _env = session_env
544
+ should_close = False
545
+ else:
546
+ _env = self._env_factory()
547
+ should_close = True
548
+ try:
549
+ if method == McpMethod.TOOLS_LIST:
550
+ # Check if environment is MCP-enabled
551
+ if not hasattr(_env, "mcp_client"):
552
+ return JsonRpcResponse.error_response(
553
+ JsonRpcErrorCode.INTERNAL_ERROR,
554
+ "Environment does not support MCP",
555
+ request_id=request_id,
556
+ )
557
+
558
+ # Use async context manager for MCP client
559
+ async with _env.mcp_client:
560
+ tools = await _env.mcp_client.list_tools()
561
+
562
+ return JsonRpcResponse.success(
563
+ result={
564
+ "tools": [
565
+ t.model_dump() if hasattr(t, "model_dump") else dict(t)
566
+ for t in tools
567
+ ]
568
+ },
569
+ request_id=request_id,
570
+ )
571
+
572
+ elif method == McpMethod.TOOLS_CALL:
573
+ params = request.params
574
+ tool_name = params.get("name")
575
+ arguments = params.get("arguments", {})
576
+
577
+ if not hasattr(_env, "mcp_client"):
578
+ return JsonRpcResponse.error_response(
579
+ JsonRpcErrorCode.INTERNAL_ERROR,
580
+ "Environment does not support MCP",
581
+ request_id=request_id,
582
+ )
583
+
584
+ if not tool_name:
585
+ return JsonRpcResponse.error_response(
586
+ JsonRpcErrorCode.INVALID_REQUEST,
587
+ "Missing 'name' in params",
588
+ request_id=request_id,
589
+ )
590
+
591
+ # Use async context manager for MCP client
592
+ async with _env.mcp_client:
593
+ result = await _env.mcp_client.call_tool(
594
+ name=tool_name, arguments=arguments
595
+ )
596
+
597
+ # Ensure result is JSON serializable
598
+ serializable_result = _make_json_serializable(result)
599
+
600
+ return JsonRpcResponse.success(
601
+ result=serializable_result,
602
+ request_id=request_id,
603
+ )
604
+
605
+ else:
606
+ return JsonRpcResponse.error_response(
607
+ JsonRpcErrorCode.METHOD_NOT_FOUND,
608
+ f"Method not found: {method}",
609
+ request_id=request_id,
610
+ )
611
+
612
+ except Exception as e:
613
+ return JsonRpcResponse.error_response(
614
+ JsonRpcErrorCode.INTERNAL_ERROR,
615
+ str(e),
616
+ request_id=request_id,
617
+ )
618
+ finally:
619
+ if should_close:
620
+ _env.close()
621
+
622
+ # Register MCP WebSocket endpoint (available in both production and simulation modes)
623
+ @app.websocket("/mcp")
624
+ async def mcp_websocket_endpoint(websocket: WebSocket):
625
+ """
626
+ WebSocket endpoint for MCP JSON-RPC requests.
627
+
628
+ Each WebSocket connection gets its own environment instance for MCP operations.
629
+
630
+ Message Protocol:
631
+ - Client sends: JSON-RPC 2.0 request (tools/list, tools/call)
632
+ - Server responds: JSON-RPC 2.0 response (result or error)
633
+ """
634
+ await websocket.accept()
635
+
636
+ session_id = None
637
+ session_env = None
638
+
639
+ try:
640
+ # Create session with dedicated environment
641
+ session_id, session_env = await self._create_session()
642
+
643
+ while True:
644
+ # Receive message from client
645
+ raw_message = await websocket.receive_text()
646
+
647
+ try:
648
+ jsonrpc_dict = json.loads(raw_message)
649
+ jsonrpc_request = JsonRpcRequest(**jsonrpc_dict)
650
+ except json.JSONDecodeError as e:
651
+ error_resp = JsonRpcResponse.error_response(
652
+ JsonRpcErrorCode.PARSE_ERROR,
653
+ f"Parse error: {e}",
654
+ )
655
+ await websocket.send_text(error_resp.model_dump_json())
656
+ continue
657
+ except ValidationError as e:
658
+ error_resp = JsonRpcResponse.error_response(
659
+ JsonRpcErrorCode.INVALID_REQUEST,
660
+ f"Invalid request: {e}",
661
+ )
662
+ await websocket.send_text(error_resp.model_dump_json())
663
+ continue
664
+
665
+ try:
666
+ # Call mcp_handler with session environment
667
+ response = await mcp_handler(
668
+ jsonrpc_request, session_env=session_env
669
+ )
670
+ await websocket.send_text(response.model_dump_json())
671
+ except Exception as e:
672
+ error_resp = JsonRpcResponse.error_response(
673
+ JsonRpcErrorCode.INTERNAL_ERROR,
674
+ str(e),
675
+ request_id=jsonrpc_request.id,
676
+ )
677
+ await websocket.send_text(error_resp.model_dump_json())
678
+
679
+ except WebSocketDisconnect:
680
+ pass
681
+ except SessionCapacityError as e:
682
+ error_resp = JsonRpcResponse.error_response(
683
+ JsonRpcErrorCode.SERVER_ERROR,
684
+ str(e),
685
+ data={
686
+ "active_sessions": e.active_sessions,
687
+ "max_sessions": e.max_sessions,
688
+ },
689
+ )
690
+ await websocket.send_text(error_resp.model_dump_json())
691
+ except EnvironmentFactoryError as e:
692
+ error_resp = JsonRpcResponse.error_response(
693
+ JsonRpcErrorCode.SERVER_ERROR,
694
+ str(e),
695
+ data={"factory_name": e.factory_name},
696
+ )
697
+ await websocket.send_text(error_resp.model_dump_json())
698
+ except Exception as e:
699
+ error_resp = JsonRpcResponse.error_response(
700
+ JsonRpcErrorCode.SERVER_ERROR,
701
+ str(e),
702
+ )
703
+ await websocket.send_text(error_resp.model_dump_json())
704
+ finally:
705
+ if session_id:
706
+ await self._destroy_session(session_id)
707
+ try:
708
+ await websocket.close()
709
+ except RuntimeError:
710
+ pass
711
+
712
+ # Register simulation control routes only in simulation mode
713
+ if mode == ServerMode.SIMULATION:
714
+
715
+ @app.post(
716
+ "/reset",
717
+ response_model=ResetResponse,
718
+ tags=["Environment Control"],
719
+ summary="Reset the environment",
720
+ description="""
721
+ Reset the environment to its initial state and return the first observation.
722
+
723
+ You can optionally provide a seed for reproducibility and an episode_id for tracking.
724
+ """,
725
+ responses={
726
+ 200: {
727
+ "description": "Environment reset successfully",
728
+ "content": {
729
+ "application/json": {
730
+ "example": {
731
+ "observation": {"status": "ready", "data": {}},
732
+ "reward": None,
733
+ "done": False,
734
+ }
735
+ }
736
+ },
737
+ }
738
+ },
739
+ )
740
+ async def reset(
741
+ request: ResetRequest = Body(default_factory=ResetRequest),
742
+ ) -> ResetResponse:
743
+ return await reset_handler(request)
744
+
745
+ @app.post(
746
+ "/step",
747
+ response_model=StepResponse,
748
+ tags=["Environment Control"],
749
+ summary="Execute an action in the environment",
750
+ description="""
751
+ Execute an action in the environment and receive the resulting observation.
752
+
753
+ The action must conform to the environment's action schema, which can be
754
+ retrieved from the `/schema` endpoint. If the action is invalid,
755
+ the endpoint will return HTTP 422 with detailed validation errors.
756
+
757
+ The response includes:
758
+ - **observation**: The environment's response to the action
759
+ - **reward**: Optional reward signal (float or None)
760
+ - **done**: Boolean indicating if the episode has terminated
761
+ """,
762
+ responses={
763
+ 200: {
764
+ "description": "Action executed successfully",
765
+ "content": {
766
+ "application/json": {
767
+ "example": {
768
+ "observation": {"status": "success", "data": {}},
769
+ "reward": 1.0,
770
+ "done": False,
771
+ }
772
+ }
773
+ },
774
+ },
775
+ 422: {
776
+ "description": "Validation error - invalid action format or values",
777
+ "content": {
778
+ "application/json": {
779
+ "example": {
780
+ "detail": [
781
+ {
782
+ "type": "string_too_short",
783
+ "loc": ["body", "action", "message"],
784
+ "msg": "String should have at least 1 character",
785
+ "input": "",
786
+ }
787
+ ]
788
+ }
789
+ }
790
+ },
791
+ },
792
+ 500: {
793
+ "description": "Internal server error during action execution"
794
+ },
795
+ },
796
+ )
797
+ async def step(request: StepRequest) -> StepResponse:
798
+ return await step_handler(request)
799
+
800
+ def get_state_handler() -> State:
801
+ _env = self._env_factory()
802
+ try:
803
+ return _env.state
804
+ finally:
805
+ _env.close()
806
+
807
+ def get_metadata_handler() -> EnvironmentMetadata:
808
+ _env = self._env_factory()
809
+ try:
810
+ return _env.get_metadata()
811
+ finally:
812
+ _env.close()
813
+
814
+ # Build list of GET endpoints based on mode
815
+ get_endpoints = [
816
+ GetEndpointConfig(
817
+ path="/metadata",
818
+ handler=get_metadata_handler,
819
+ response_model=EnvironmentMetadata,
820
+ tag="Environment Info",
821
+ summary="Get environment metadata",
822
+ description="""
823
+ Get metadata about this environment.
824
+
825
+ Returns information about the environment including name, description,
826
+ version, author, and documentation links.
827
+ """,
828
+ ),
829
+ GetEndpointConfig(
830
+ path="/health",
831
+ handler=lambda: HealthResponse(status=HealthStatus.HEALTHY),
832
+ response_model=HealthResponse,
833
+ tag="Health",
834
+ summary="Health check",
835
+ description="Check if the environment server is running and healthy.",
836
+ ),
837
+ ]
838
+
839
+ # Only register /state endpoint in simulation mode
840
+ if mode == ServerMode.SIMULATION:
841
+ get_endpoints.insert(
842
+ 0,
843
+ GetEndpointConfig(
844
+ path="/state",
845
+ handler=get_state_handler,
846
+ response_model=State,
847
+ tag="State Management",
848
+ summary="Get current environment state",
849
+ description="""
850
+ Retrieve the current internal state of the environment.
851
+
852
+ The structure of the state object is defined by the environment's State model.
853
+ """,
854
+ ),
855
+ )
856
+
857
+ register_get_endpoints(app, get_endpoints)
858
+
859
+ # Register combined schema endpoint
860
+ @app.get(
861
+ "/schema",
862
+ response_model=SchemaResponse,
863
+ tags=["Schema"],
864
+ summary="Get all JSON schemas",
865
+ description="""
866
+ Get JSON schemas for actions, observations, and state in a single response.
867
+
868
+ Returns a combined schema object containing:
869
+ - **action**: JSON schema for actions accepted by this environment
870
+ - **observation**: JSON schema for observations returned by this environment
871
+ - **state**: JSON schema for environment state objects
872
+
873
+ This is more efficient than calling individual schema endpoints and provides
874
+ all schema information needed to interact with the environment.
875
+ """,
876
+ responses={
877
+ 200: {
878
+ "description": "Combined schemas retrieved successfully",
879
+ "content": {
880
+ "application/json": {
881
+ "example": {
882
+ "action": {
883
+ "type": "object",
884
+ "properties": {"message": {"type": "string"}},
885
+ },
886
+ "observation": {
887
+ "type": "object",
888
+ "properties": {"response": {"type": "string"}},
889
+ },
890
+ "state": {
891
+ "type": "object",
892
+ "properties": {"step_count": {"type": "integer"}},
893
+ },
894
+ }
895
+ }
896
+ },
897
+ }
898
+ },
899
+ )
900
+ async def get_schemas() -> SchemaResponse:
901
+ """Return all schemas in one response."""
902
+ return SchemaResponse(
903
+ action=self.action_cls.model_json_schema(),
904
+ observation=self.observation_cls.model_json_schema(),
905
+ state=State.model_json_schema(),
906
+ )
907
+
908
+ # Register MCP endpoint for production mode (direct MCP access)
909
+ @app.post("/mcp")
910
+ async def mcp_endpoint(request_raw: Request) -> Dict[str, Any]:
911
+ """
912
+ MCP JSON-RPC endpoint for production mode.
913
+
914
+ Bypasses step() overhead and provides direct access to MCP tools.
915
+ Supports tools/list and tools/call methods.
916
+ """
917
+ # Parse JSON manually to handle parse errors gracefully
918
+ try:
919
+ body = await request_raw.body()
920
+ request_dict = json.loads(body)
921
+ request = JsonRpcRequest(**request_dict)
922
+ except json.JSONDecodeError:
923
+ return JsonRpcResponse.error_response(
924
+ JsonRpcErrorCode.PARSE_ERROR
925
+ ).model_dump()
926
+ except ValidationError as e:
927
+ return JsonRpcResponse.error_response(
928
+ JsonRpcErrorCode.INVALID_REQUEST,
929
+ f"Invalid request: {e}",
930
+ ).model_dump()
931
+ except Exception:
932
+ return JsonRpcResponse.error_response(
933
+ JsonRpcErrorCode.PARSE_ERROR
934
+ ).model_dump()
935
+
936
+ method = request.method
937
+ params = request.params
938
+ request_id = request.id
939
+
940
+ # Create a temporary environment for MCP access
941
+ _env = self._env_factory()
942
+
943
+ try:
944
+ # Check if environment supports MCP
945
+ if not hasattr(_env, "mcp_client") and not hasattr(_env, "mcp_server"):
946
+ return JsonRpcResponse.error_response(
947
+ JsonRpcErrorCode.INTERNAL_ERROR,
948
+ "Environment does not support MCP",
949
+ request_id=request_id,
950
+ ).model_dump()
951
+
952
+ if method == McpMethod.TOOLS_LIST:
953
+ # List tools from MCP server
954
+ if hasattr(_env, "mcp_client") and _env.mcp_client:
955
+ async with _env.mcp_client:
956
+ tools = await _env.mcp_client.list_tools()
957
+ return JsonRpcResponse.success(
958
+ result={
959
+ "tools": [
960
+ t.model_dump()
961
+ if hasattr(t, "model_dump")
962
+ else dict(t)
963
+ for t in tools
964
+ ]
965
+ },
966
+ request_id=request_id,
967
+ ).model_dump()
968
+ elif hasattr(_env, "mcp_server") and _env.mcp_server:
969
+ # Use server directly
970
+ tools = []
971
+ if hasattr(_env.mcp_server, "_tool_manager"):
972
+ tool_manager = _env.mcp_server._tool_manager
973
+ if hasattr(tool_manager, "_tools"):
974
+ for tool_name, tool in tool_manager._tools.items():
975
+ tool_dict = {
976
+ "name": tool.name,
977
+ "description": tool.description or "",
978
+ "inputSchema": tool.parameters or {},
979
+ }
980
+ tools.append(tool_dict)
981
+ return JsonRpcResponse.success(
982
+ result={"tools": tools},
983
+ request_id=request_id,
984
+ ).model_dump()
985
+ else:
986
+ return JsonRpcResponse.error_response(
987
+ JsonRpcErrorCode.INTERNAL_ERROR,
988
+ "MCP server not available",
989
+ request_id=request_id,
990
+ ).model_dump()
991
+
992
+ elif method == McpMethod.TOOLS_CALL:
993
+ tool_name = params.get("name")
994
+ arguments = params.get("arguments", {})
995
+
996
+ if not tool_name:
997
+ return JsonRpcResponse.error_response(
998
+ JsonRpcErrorCode.INVALID_PARAMS,
999
+ "Invalid params - 'name' is required",
1000
+ request_id=request_id,
1001
+ ).model_dump()
1002
+
1003
+ # Call tool via MCP
1004
+ if hasattr(_env, "mcp_client") and _env.mcp_client:
1005
+ async with _env.mcp_client:
1006
+ result = await _env.mcp_client.call_tool(
1007
+ name=tool_name, arguments=arguments
1008
+ )
1009
+ elif hasattr(_env, "mcp_server") and hasattr(
1010
+ _env.mcp_server, "_tool_manager"
1011
+ ):
1012
+ # Call tool directly on FastMCP server
1013
+ tool_manager = _env.mcp_server._tool_manager
1014
+ if tool_name in tool_manager._tools:
1015
+ tool = tool_manager._tools[tool_name]
1016
+ result = tool.fn(**arguments)
1017
+ else:
1018
+ return JsonRpcResponse.error_response(
1019
+ JsonRpcErrorCode.INVALID_PARAMS,
1020
+ f"Tool not found: {tool_name}",
1021
+ request_id=request_id,
1022
+ ).model_dump()
1023
+ else:
1024
+ return JsonRpcResponse.error_response(
1025
+ JsonRpcErrorCode.INTERNAL_ERROR,
1026
+ "MCP server not available",
1027
+ request_id=request_id,
1028
+ ).model_dump()
1029
+
1030
+ # Make result JSON serializable
1031
+ serializable_result = _make_json_serializable(result)
1032
+
1033
+ return JsonRpcResponse.success(
1034
+ result=serializable_result,
1035
+ request_id=request_id,
1036
+ ).model_dump()
1037
+
1038
+ else:
1039
+ return JsonRpcResponse.error_response(
1040
+ JsonRpcErrorCode.METHOD_NOT_FOUND,
1041
+ f"Method not found: {method}",
1042
+ request_id=request_id,
1043
+ ).model_dump()
1044
+
1045
+ except Exception as e:
1046
+ return JsonRpcResponse.error_response(
1047
+ JsonRpcErrorCode.INTERNAL_ERROR,
1048
+ str(e),
1049
+ request_id=request_id,
1050
+ ).model_dump()
1051
+ finally:
1052
+ _env.close()
1053
+
1054
        # Register WebSocket endpoint for persistent sessions
        @app.websocket("/ws")
        async def websocket_endpoint(websocket: WebSocket):
            """
            WebSocket endpoint for persistent environment sessions.

            Each WebSocket connection gets its own environment instance,
            created via self._create_session() and destroyed when the
            connection ends (see the outer ``finally`` block).

            Message Protocol:
                - Client sends: WSResetMessage | WSStepMessage | WSStateMessage | WSCloseMessage
                - Server responds: WSObservationResponse | WSStateResponse | WSErrorResponse

            Per-message errors (bad JSON, validation failures, tool/env
            exceptions) are reported back over the socket and the loop
            continues; session-level errors (capacity, factory failures)
            are reported once and the connection is torn down.
            """
            await websocket.accept()

            session_id = None
            session_env = None

            try:
                # Create session with dedicated environment
                session_id, session_env = await self._create_session()

                while True:
                    # Receive message from client
                    raw_message = await websocket.receive_text()

                    try:
                        message_dict = json.loads(raw_message)
                    except json.JSONDecodeError as e:
                        error_resp = WSErrorResponse(
                            data={
                                "message": f"Invalid JSON: {e}",
                                "code": WSErrorCode.INVALID_JSON,
                            }
                        )
                        await websocket.send_text(error_resp.model_dump_json())
                        continue

                    msg_type = message_dict.get("type", "")

                    try:
                        match msg_type:
                            case "reset":
                                msg = WSResetMessage(**message_dict)

                                # Detect whether the subclass overrode
                                # reset_async; if so, await it directly,
                                # otherwise run the sync reset() on this
                                # session's dedicated executor thread.
                                is_async = (
                                    session_env.reset_async.__func__
                                    is not Environment.reset_async
                                )

                                if is_async:
                                    sig = inspect.signature(session_env.reset_async)
                                    # Only forward kwargs the signature accepts.
                                    valid_kwargs = self._get_valid_kwargs(sig, msg.data)
                                    observation = await session_env.reset_async(
                                        **valid_kwargs
                                    )
                                else:
                                    sig = inspect.signature(session_env.reset)
                                    valid_kwargs = self._get_valid_kwargs(sig, msg.data)
                                    observation = await self._run_in_session_executor(
                                        session_id, session_env.reset, **valid_kwargs
                                    )

                                self._update_session_activity(session_id)

                                response = WSObservationResponse(
                                    data=serialize_observation(observation),
                                )

                            case "step":
                                msg = WSStepMessage(**message_dict)
                                action = deserialize_action(msg.data, self.action_cls)

                                # Same async-override detection as "reset".
                                is_async = (
                                    session_env.step_async.__func__
                                    is not Environment.step_async
                                )

                                if is_async:
                                    observation = await session_env.step_async(action)
                                else:
                                    observation = await self._run_in_session_executor(
                                        session_id, session_env.step, action
                                    )

                                self._update_session_activity(
                                    session_id, increment_step=True
                                )

                                response = WSObservationResponse(
                                    data=serialize_observation(observation)
                                )

                            case "state":
                                # NOTE(review): msg is parsed for validation
                                # only; its fields are not used afterwards.
                                msg = WSStateMessage(**message_dict)
                                state = session_env.state
                                if hasattr(state, "model_dump"):
                                    state_data = state.model_dump()
                                else:
                                    # Non-pydantic states: fall back to a
                                    # plain dict (empty if state is falsy).
                                    state_data = dict(state) if state else {}

                                response = WSStateResponse(data=state_data)

                            case "close":
                                # Validate, then leave the receive loop; no
                                # response is sent for a close message.
                                msg = WSCloseMessage(**message_dict)
                                break

                            case "mcp":
                                msg = WSMCPMessage(**message_dict)
                                try:
                                    rpc_request = JsonRpcRequest(**msg.data)
                                # NOTE(review): Exception already subsumes
                                # ValidationError; the tuple is redundant.
                                except (ValidationError, Exception) as e:
                                    rpc_response = JsonRpcResponse.error_response(
                                        JsonRpcErrorCode.INVALID_REQUEST,
                                        f"Invalid request: {e}",
                                    )
                                else:
                                    # Delegate to the shared JSON-RPC handler,
                                    # bound to this session's environment.
                                    rpc_response = await mcp_handler(
                                        rpc_request,
                                        session_env=session_env,
                                    )
                                response = WSMCPResponse(data=rpc_response.model_dump())

                            case _:
                                response = WSErrorResponse(
                                    data={
                                        "message": f"Unknown message type: {msg_type}",
                                        "code": WSErrorCode.UNKNOWN_TYPE,
                                    }
                                )

                        await websocket.send_text(response.model_dump_json())

                    except ValidationError as e:
                        # Message failed pydantic validation; report field errors.
                        error_resp = WSErrorResponse(
                            data={
                                "message": "Invalid message",
                                "code": WSErrorCode.VALIDATION_ERROR,
                                "errors": e.errors(),
                            }
                        )
                        await websocket.send_text(error_resp.model_dump_json())
                    except Exception as e:
                        # Environment/handler raised during execution; the
                        # session stays alive and the loop continues.
                        error_resp = WSErrorResponse(
                            data={
                                "message": str(e),
                                "code": WSErrorCode.EXECUTION_ERROR,
                            }
                        )
                        await websocket.send_text(error_resp.model_dump_json())

            except WebSocketDisconnect:
                # Client went away; nothing to send, just clean up below.
                pass
            except SessionCapacityError as e:
                error_resp = WSErrorResponse(
                    data={
                        "message": str(e),
                        "code": WSErrorCode.CAPACITY_REACHED,
                        "active_sessions": e.active_sessions,
                        "max_sessions": e.max_sessions,
                    }
                )
                await websocket.send_text(error_resp.model_dump_json())
            except EnvironmentFactoryError as e:
                error_resp = WSErrorResponse(
                    data={
                        "message": str(e),
                        "code": WSErrorCode.FACTORY_ERROR,
                        "factory_name": e.factory_name,
                    }
                )
                await websocket.send_text(error_resp.model_dump_json())
            except Exception as e:
                error_resp = WSErrorResponse(
                    data={"message": str(e), "code": WSErrorCode.SESSION_ERROR}
                )
                await websocket.send_text(error_resp.model_dump_json())
            finally:
                # Always release the session's environment/executor, then
                # close the socket (RuntimeError = already closed).
                if session_id:
                    await self._destroy_session(session_id)
                try:
                    await websocket.close()
                except RuntimeError:
                    pass
1237
+
1238
+
1239
def create_app(
    env: Callable[[], Environment],
    action_cls: Type[Action],
    observation_cls: Type[Observation],
    env_name: Optional[str] = None,
    max_concurrent_envs: Optional[int] = None,
    concurrency_config: Optional[ConcurrencyConfig] = None,
    gradio_builder: Optional[Callable[..., Any]] = None,
) -> FastAPI:
    """
    Build the FastAPI application, optionally with the Gradio web interface.

    Whether the web interface is mounted is decided at call time by the
    ``ENABLE_WEB_INTERFACE`` environment variable ("true"/"1"/"yes" enable
    it). When disabled, a plain API-only app is returned.

    Args:
        env: Environment factory (callable) that creates new instances.
        action_cls: The Action subclass this environment expects.
        observation_cls: The Observation subclass this environment returns.
        env_name: Optional environment name for README loading.
        max_concurrent_envs: Maximum concurrent WebSocket sessions.
            Mutually exclusive with concurrency_config.
        concurrency_config: Optional ConcurrencyConfig for advanced
            concurrency settings. Mutually exclusive with max_concurrent_envs.
        gradio_builder: Optional callable to build a custom Gradio UI at /web.
            Signature: (web_manager, action_fields, metadata, is_chat_env,
            title, quick_start_md) -> gr.Blocks. When None, the default
            Gradio app is used. See docs/customizing-web-ui.md.

    Returns:
        A FastAPI application, with or without the web interface mounted.
    """
    # Feature flag read from the process environment (set via env var or
    # container build argument).
    web_enabled = os.getenv("ENABLE_WEB_INTERFACE", "false").lower() in {
        "true",
        "1",
        "yes",
    }

    if not web_enabled:
        # API-only app: no Gradio, no README integration.
        return create_fastapi_app(
            env, action_cls, observation_cls, max_concurrent_envs, concurrency_config
        )

    # Gradio-based web UI (gradio is a core dependency); imported lazily so
    # the API-only path never touches it.
    from .web_interface import create_web_interface_app

    return create_web_interface_app(
        env,
        action_cls,
        observation_cls,
        env_name,
        max_concurrent_envs,
        concurrency_config,
        gradio_builder=gradio_builder,
    )
1297
+
1298
+
1299
def create_fastapi_app(
    env: Callable[[], Environment],
    action_cls: Type[Action],
    observation_cls: Type[Observation],
    max_concurrent_envs: Optional[int] = None,
    concurrency_config: Optional[ConcurrencyConfig] = None,
) -> FastAPI:
    """
    Create a FastAPI application with comprehensive documentation.

    Args:
        env: Environment factory (callable) that creates new instances
        action_cls: The Action subclass this environment expects
        observation_cls: The Observation subclass this environment returns
        max_concurrent_envs: Maximum concurrent WebSocket sessions.
            Mutually exclusive with concurrency_config.
        concurrency_config: Optional ConcurrencyConfig for advanced concurrency settings.
            Mutually exclusive with max_concurrent_envs.

    Returns:
        FastAPI application instance

    Raises:
        ImportError: If FastAPI is not installed.
    """
    # Import inside the function so a missing fastapi fails with an
    # actionable install hint rather than a bare ModuleNotFoundError.
    try:
        from fastapi import FastAPI
    except ImportError:
        raise ImportError(
            "FastAPI is required. Install with: pip install fastapi uvicorn"
        )

    app = FastAPI(
        title="OpenEnv Environment HTTP API",
        version="1.0.0",
        description="""
# OpenEnv Environment HTTP API

HTTP API for interacting with OpenEnv environments through a standardized interface.

## Features

* **Environment Reset**: Initialize or restart episodes
* **Action Execution**: Send actions and receive observations
* **State Inspection**: Query current environment state
* **Schema Access**: Retrieve JSON schemas for actions and observations

## Workflow

1. Call `/reset` to start a new episode and get initial observation
2. Call `/step` repeatedly with actions to interact with environment
3. Episode ends when observation returns `done: true`
4. Call `/state` anytime to inspect current environment state

## Documentation

* **Swagger UI**: Available at `/docs`
* **ReDoc**: Available at `/redoc`
* **OpenAPI Schema**: Available at `/openapi.json`
""",
        openapi_tags=[
            {
                "name": "Environment Control",
                "description": "Core operations for environment interaction (reset, step)",
            },
            {
                "name": "State Management",
                "description": "Operations for inspecting environment state",
            },
            {
                "name": "Environment Info",
                "description": "Information about the environment",
            },
            {
                "name": "Schema",
                "description": "JSON Schema endpoints for actions, observations, and state",
            },
            {"name": "Health", "description": "Service health and status checks"},
        ],
        docs_url="/docs",
        redoc_url="/redoc",
        openapi_url="/openapi.json",
        contact={
            "name": "OpenEnv Team",
            "url": "https://github.com/meta-pytorch/OpenEnv",
        },
        license_info={
            "name": "BSD-3-Clause",
            "url": "https://github.com/meta-pytorch/OpenEnv/blob/main/LICENSE",
        },
    )

    # All actual routes (/reset, /step, /state, schemas, health, /ws) are
    # attached by HTTPEnvServer; this function only assembles app metadata.
    server = HTTPEnvServer(
        env,
        action_cls,
        observation_cls,
        max_concurrent_envs,
        concurrency_config=concurrency_config,
    )
    server.register_routes(app)
    return app
src/core/openenv/core/env_server/interfaces.py ADDED
@@ -0,0 +1,295 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import inspect
8
+ from abc import ABC, abstractmethod
9
+ from typing import Any, Generic, Optional, Protocol, TypedDict, TypeVar, TYPE_CHECKING
10
+
11
+ from .types import Action, Observation, State, EnvironmentMetadata
12
+
13
+ if TYPE_CHECKING:
14
+ from openenv.core.rubrics import Rubric
15
+
16
# Type variables binding an Environment subclass to its concrete action,
# observation, and state types (each bounded by the shared base class).
ActT = TypeVar("ActT", bound=Action)
ObsT = TypeVar("ObsT", bound=Observation)
StateT = TypeVar("StateT", bound=State)
19
+
20
+
21
class Message(TypedDict):
    """One turn of a conversation, keyed by ``role`` and ``content``.

    Matches the dict shape consumed by Huggingface chat templates.
    """

    role: str
    content: str
29
+
30
+
31
class ModelTokenizer(Protocol):
    """Structural interface for tokenizers that support chat templates.

    Any object providing these two methods satisfies the protocol — no
    explicit subclassing needed. Huggingface transformers tokenizers are
    the canonical implementation.
    """

    def apply_chat_template(
        self,
        conversation: list[Message],
        tokenize: bool = True,
        return_tensors: str | None = None,
        **kwargs: Any,
    ) -> Any:
        """Format *conversation* with the model's chat template.

        Args:
            conversation: Messages as dicts with 'role' and 'content'.
            tokenize: When True, return token IDs instead of a string.
            return_tensors: Tensor framework for the output ('pt' for PyTorch).
            **kwargs: Extra options forwarded to the implementation.

        Returns:
            The templated conversation, tokenized if requested.
        """
        ...

    def decode(
        self, token_ids: Any, skip_special_tokens: bool = False, **kwargs: Any
    ) -> str:
        """Convert *token_ids* back into a text string.

        Args:
            token_ids: The token IDs to decode.
            skip_special_tokens: Drop special tokens from the output when True.
            **kwargs: Extra options forwarded to the implementation.

        Returns:
            The decoded text.
        """
        ...
73
+
74
+
75
class Transform(ABC, Generic[ObsT]):
    """Callable hook that rewrites an observation before it is returned.

    Follows the TorchRL transform pattern: an implementation receives an
    observation and returns a (possibly modified) one, which makes
    transforms a natural place to attach rewards, metrics, or other
    observation augmentations.
    """

    @abstractmethod
    def __call__(self, observation: ObsT) -> ObsT:
        """Return the transformed version of *observation*.

        Args:
            observation: The input observation.

        Returns:
            The transformed observation.
        """
        ...
94
+
95
+
96
class Environment(ABC, Generic[ActT, ObsT, StateT]):
    """Base class for all environment servers following Gym/Gymnasium API.

    Args:
        transform: Optional transform to apply to observations
        rubric: Optional rubric for reward computation. When provided, the
            rubric's output can be used to set the observation's reward in step().

    Class Attributes:
        SUPPORTS_CONCURRENT_SESSIONS: Whether this environment supports concurrent sessions.
            When True, multiple WebSocket connections can each have their own
            environment instance (up to max_concurrent_envs). When False (default),
            the environment should only be used with a single session at a time.

            Set this to True in your Environment subclass if:
            - The environment uses proper session isolation (e.g., unique working dirs)
            - No shared mutable state exists between instances
            - External resources (databases, APIs) can handle concurrent access

    Attributes:
        rubric: Optional rubric for computing rewards. Environments can set this
            in __init__ and use it in step() to compute observation rewards.
            Training infrastructure can access it for introspection:
                for name, r in env.rubric.named_rubrics():
                    print(f"{name}: {r.last_score}")

    See RFC 004 for rubric design: rfcs/004-rubrics.md
    """

    # Class-level flag indicating whether this environment supports concurrent sessions
    SUPPORTS_CONCURRENT_SESSIONS: bool = False

    # Optional rubric for reward computation
    rubric: Optional["Rubric"]

    def __init__(
        self,
        transform: Optional[Transform[ObsT]] = None,
        rubric: Optional["Rubric"] = None,
    ):
        self.transform = transform
        self.rubric = rubric

    @abstractmethod
    def reset(
        self,
        seed: Optional[int] = None,
        episode_id: Optional[str] = None,
        **kwargs: Any,
    ) -> ObsT:
        """Reset the environment and return initial observation."""
        pass

    async def reset_async(
        self,
        seed: Optional[int] = None,
        episode_id: Optional[str] = None,
        **kwargs: Any,
    ) -> ObsT:
        """Async version of reset. Default implementation calls sync reset.

        Override to provide true async implementation.
        """
        return self.reset(seed=seed, episode_id=episode_id, **kwargs)

    @abstractmethod
    def step(
        self,
        action: ActT,
        timeout_s: Optional[float] = None,
        **kwargs: Any,
    ) -> ObsT:
        """Take a step in the environment."""
        pass

    async def step_async(
        self,
        action: ActT,
        timeout_s: Optional[float] = None,
        **kwargs: Any,
    ) -> ObsT:
        """Async version of step. Default implementation calls sync step.

        Override to provide true async implementation.
        """
        return self.step(action, timeout_s=timeout_s, **kwargs)

    @property
    @abstractmethod
    def state(self) -> StateT:
        """Get the current environment state."""
        pass

    def get_metadata(self) -> EnvironmentMetadata:
        """
        Get metadata about this environment.

        Override this method to provide custom metadata for the environment.
        Default implementation returns basic metadata derived from class name.

        Returns:
            EnvironmentMetadata with environment information
        """
        return EnvironmentMetadata(
            name=self.__class__.__name__,
            description=f"{self.__class__.__name__} environment",
            version="1.0.0",
        )

    def _apply_transform(self, observation: ObsT) -> ObsT:
        """Apply transform if one is provided."""
        if self.transform is not None:
            return self.transform(observation)
        return observation

    def _apply_rubric(self, action: ActT, observation: ObsT) -> float:
        """Apply rubric if one is provided.

        Args:
            action: The action taken by the agent.
            observation: The resulting observation.

        Returns:
            Reward value from the rubric, or 0.0 if no rubric is set.

        Usage in step():
            def step(self, action: MyAction, ...) -> MyObservation:
                # ... execute action and create observation ...
                observation.reward = self._apply_rubric(action, observation)
                return observation
        """
        if self.rubric is not None:
            return self.rubric(action, observation)
        return 0.0

    async def _apply_rubric_async(self, action: ActT, observation: ObsT) -> float:
        """Apply rubric asynchronously if one is provided.

        Args:
            action: The action taken by the agent.
            observation: The resulting observation.

        Returns:
            Reward value from the rubric, or 0.0 if no rubric is set.

        Usage in step_async():
            async def step_async(self, action: MyAction, ...) -> MyObservation:
                # ... execute action and create observation ...
                observation.reward = await self._apply_rubric_async(action, observation)
                return observation
        """
        if self.rubric is not None:
            result = self.rubric(action, observation)
            # Await anything awaitable: isawaitable() accepts coroutines but
            # also Tasks, Futures, and custom __await__ objects, so async
            # rubrics may return any awaitable (iscoroutine() would miss
            # everything but plain coroutines).
            if inspect.isawaitable(result):
                return await result
            return result
        return 0.0

    def _reset_rubric(self) -> None:
        """Reset the rubric state if one is provided.

        Call this in reset() to clear any trajectory state in the rubric.

        Usage in reset():
            def reset(self, ...) -> MyObservation:
                self._reset_rubric()
                # ... create initial observation ...
                return observation
        """
        if self.rubric is not None:
            self.rubric.reset()

    async def _reset_rubric_async(self) -> None:
        """Reset the rubric state asynchronously if one is provided.

        Call this in reset_async() to clear any trajectory state in the rubric.

        Usage in reset_async():
            async def reset_async(self, ...) -> MyObservation:
                await self._reset_rubric_async()
                # ... create initial observation ...
                return observation
        """
        if self.rubric is not None:
            # Prefer an async reset when the rubric provides one.
            if hasattr(self.rubric, "reset_async"):
                result = self.rubric.reset_async()
                # As above, accept any awaitable (not only coroutines).
                if inspect.isawaitable(result):
                    await result
            else:
                self.rubric.reset()

    def close(self) -> None:
        """Clean up resources used by the environment.

        Override this method to implement custom cleanup logic.
        Called when the environment is being destroyed or reset.
        """
        pass
src/core/openenv/core/env_server/mcp_environment.py ADDED
@@ -0,0 +1,610 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ MCP Environment base class for OpenEnv.
9
+
10
+ This module provides the MCPEnvironment base class that integrates FastMCP servers
11
+ with OpenEnv's Gym-style Environment interface. It handles MCP tool discovery
12
+ and invocation through the step() API, following RFC 003.
13
+
14
+ Key features:
15
+ - Automatic routing of ListToolsAction and CallToolAction to MCP server
16
+ - Reserved tool name validation (reset, step, state, close are protected)
17
+ - Timeout handling for tool calls
18
+ - Proper error categorization (tool not found, execution errors, timeouts)
19
+ - Mode-aware tool registration (production vs simulation)
20
+ - Code mode support via get_callables() and execute_code()
21
+
22
+ Usage:
23
+ from fastmcp import FastMCP
24
+ from openenv.core.env_server.mcp_environment import MCPEnvironment
25
+
26
+ class MyMCPEnv(MCPEnvironment):
27
+ def __init__(self):
28
+ mcp = FastMCP("my-server")
29
+
30
+ # Register mode-specific tools
31
+ @self.tool(mode="production")
32
+ def my_tool(arg: str) -> str:
33
+ return f"Production: {arg}"
34
+
35
+ @self.tool(mode="simulation")
36
+ def my_tool(arg: str) -> str:
37
+ return f"Simulation: {arg}"
38
+
39
+ super().__init__(mcp)
40
+
41
+ def reset(self, seed=None, episode_id=None, **kwargs):
42
+ # Reset logic here
43
+ ...
44
+
45
+ def _step_impl(self, action):
46
+ # Handle non-MCP actions
47
+ ...
48
+
49
+ @property
50
+ def state(self):
51
+ # Return current state
52
+ ...
53
+ """
54
+
55
+ import asyncio
56
+ import inspect
57
+ from abc import abstractmethod
58
+ from collections import defaultdict
59
+ from typing import Any, Callable, Dict, Optional
60
+
61
+ from fastmcp import Client
62
+ from fastmcp.client.client import CallToolResult
63
+ from mcp.types import TextContent
64
+
65
+ from ..utils import run_async_safely
66
+ from .interfaces import Environment
67
+ from .mcp_types import (
68
+ CallToolAction,
69
+ CallToolObservation,
70
+ ListToolsAction,
71
+ ListToolsObservation,
72
+ RESERVED_TOOL_NAMES,
73
+ Tool,
74
+ ToolError,
75
+ ToolErrorType,
76
+ )
77
+ from .types import Action, Observation
78
+
79
+
80
# Default timeout for MCP tool calls in seconds
MCP_TOOL_CALL_TIMEOUT = 30.0

# Valid modes for tool registration; tools registered with mode=None are
# available in every mode.
VALID_MODES = {"production", "simulation"}
85
+
86
+
87
+ class MCPEnvironment(Environment):
88
+ """
89
+ Base class for environments that expose tools via MCP (Model Context Protocol).
90
+
91
+ MCPEnvironment bridges FastMCP servers with OpenEnv's Gym-style API, allowing
92
+ agents to discover and invoke MCP tools through the standard step() interface.
93
+
94
+ The class automatically handles:
95
+ - ListToolsAction: Returns available tools from the MCP server
96
+ - CallToolAction: Invokes a specific tool with arguments
97
+
98
+ All other actions are delegated to the abstract _step_impl() method,
99
+ which subclasses must implement.
100
+
101
+ Args:
102
+ mcp_server: A FastMCP server instance containing tool definitions.
103
+ The server's tools will be validated against reserved names.
104
+ transform: Optional transform to apply to observations (inherited from Environment).
105
+
106
+ Raises:
107
+ ValueError: If any tool in the MCP server uses a reserved name
108
+ (reset, step, state, close).
109
+
110
+ Example:
111
+ >>> from fastmcp import FastMCP
112
+ >>> mcp = FastMCP("calculator")
113
+ >>> @mcp.tool()
114
+ ... def add(a: int, b: int) -> int:
115
+ ... return a + b
116
+ >>> env = MyMCPEnvironment(mcp)
117
+ >>> obs = env.step(ListToolsAction())
118
+ >>> obs.tools[0].name
119
+ 'add'
120
+ """
121
+
122
+ def __init__(self, mcp_server: Any, transform: Optional[Any] = None) -> None:
123
+ """
124
+ Initialize the MCP environment.
125
+
126
+ Args:
127
+ mcp_server: A FastMCP server instance with tool definitions.
128
+ transform: Optional transform to apply to observations.
129
+
130
+ Raises:
131
+ ValueError: If any tool uses a reserved name (reset, step, state, close).
132
+ """
133
+ super().__init__(transform=transform)
134
+
135
+ # Validate tool names before storing
136
+ self._validate_tool_names(mcp_server)
137
+
138
+ self.mcp_server = mcp_server
139
+ self.mcp_client = Client(mcp_server)
140
+
141
+ # Track mode-specific tools: {tool_name: {mode: func}}
142
+ # mode can be "production", "simulation", or None (available in all modes)
143
+ self._mode_tools = defaultdict(dict)
144
+
145
+ # Track tool schemas for list_tools: {tool_name: {mode: schema}}
146
+ self._mode_tool_schemas = defaultdict(dict)
147
+
148
+ @property
149
+ def supports_code_mode(self) -> bool:
150
+ """Check if this environment supports code mode (execute_code)."""
151
+ return True
152
+
153
+ def get_callables(self) -> Dict[str, Callable]:
154
+ """
155
+ Get callable functions for code mode.
156
+
157
+ Returns tool functions as direct Python callables, enabling code mode
158
+ where agents write Python code that calls tools directly (no JSON-RPC
159
+ overhead). Mode-specific tools are filtered by the current mode.
160
+
161
+ Returns:
162
+ Dictionary mapping tool names to callables.
163
+ """
164
+ callables: Dict[str, Callable] = {}
165
+ current_mode = getattr(self, "_mode", None)
166
+
167
+ # Extract callables from FastMCP server's tool manager
168
+ if (
169
+ hasattr(self.mcp_server, "_tool_manager")
170
+ and hasattr(self.mcp_server._tool_manager, "_tools")
171
+ and isinstance(getattr(self.mcp_server._tool_manager, "_tools", None), dict)
172
+ ):
173
+ for tool_name, tool in self.mcp_server._tool_manager._tools.items():
174
+ if hasattr(tool, "fn") and callable(tool.fn):
175
+ callables[tool_name] = tool.fn
176
+
177
+ # Add mode-specific tools available in current mode
178
+ for tool_name, mode_funcs in self._mode_tools.items():
179
+ if None in mode_funcs:
180
+ # Tool available in all modes (already in FastMCP if registered there)
181
+ if tool_name not in callables:
182
+ callables[tool_name] = mode_funcs[None]
183
+ elif current_mode in mode_funcs:
184
+ # Tool available in current mode only
185
+ callables[tool_name] = mode_funcs[current_mode]
186
+
187
+ return callables
188
+
189
+ def execute_code(self, code: str) -> Observation:
190
+ """
191
+ Execute Python code with tools available as callables.
192
+
193
+ This enables the CodeAct pattern where agents write Python code
194
+ that calls tools directly as functions, avoiding JSON-RPC overhead.
195
+
196
+ Args:
197
+ code: Python code to execute. Tools are available as functions
198
+ in the execution namespace. Set a variable named 'result'
199
+ to capture the return value.
200
+
201
+ Returns:
202
+ Observation with result in metadata["result"] or error in
203
+ metadata["error"].
204
+ """
205
+ namespace = self.get_callables()
206
+
207
+ result_dict: Dict[str, Any] = {}
208
+ try:
209
+ exec(code, namespace, result_dict)
210
+ result = result_dict.get("result")
211
+ return Observation(done=False, reward=0.0, metadata={"result": result})
212
+ except SyntaxError as e:
213
+ return Observation(
214
+ done=False, reward=0.0, metadata={"error": f"Syntax error: {str(e)}"}
215
+ )
216
+ except Exception as e:
217
+ return Observation(done=False, reward=0.0, metadata={"error": str(e)})
218
+
219
+ def _validate_tool_names(self, mcp_server: Any) -> None:
220
+ """
221
+ Validate that no tools use reserved names.
222
+
223
+ Reserved names (reset, step, state, close) are protected to maintain
224
+ the dual API boundary between infrastructure and agent APIs.
225
+
226
+ Args:
227
+ mcp_server: The FastMCP server to validate.
228
+
229
+ Raises:
230
+ ValueError: If any tool uses a reserved name.
231
+ """
232
+ # FastMCP stores tools in _tool_manager._tools dict
233
+ if hasattr(mcp_server, "_tool_manager"):
234
+ tool_manager = mcp_server._tool_manager
235
+ # Check both possible attribute names for tools storage
236
+ tools_dict = None
237
+ if hasattr(tool_manager, "_tools"):
238
+ tools_dict = tool_manager._tools
239
+ elif hasattr(tool_manager, "tools"):
240
+ tools_dict = tool_manager.tools
241
+
242
+ if tools_dict:
243
+ tool_names = set(tools_dict.keys())
244
+ conflicts = tool_names & RESERVED_TOOL_NAMES
245
+ if conflicts:
246
+ raise ValueError(
247
+ f"MCP tools cannot use reserved names: {sorted(conflicts)}. "
248
+ f"Reserved names are: {sorted(RESERVED_TOOL_NAMES)}"
249
+ )
250
+
251
+ def tool(self, mode: Optional[str] = None) -> Callable:
252
+ """
253
+ Decorator for registering mode-aware tools.
254
+
255
+ Args:
256
+ mode: Optional mode for the tool ("production" or "simulation").
257
+ If None, tool is available in all modes.
258
+
259
+ Returns:
260
+ A decorator function for registering tools.
261
+
262
+ Raises:
263
+ ValueError: If mode is not None, "production", or "simulation".
264
+ """
265
+ if mode is not None and mode not in VALID_MODES:
266
+ raise ValueError(
267
+ f"Invalid mode '{mode}'. Mode must be 'production', 'simulation', or None."
268
+ )
269
+
270
+ def decorator(func: Callable) -> Callable:
271
+ tool_name = func.__name__
272
+ # Validate tool name is not reserved
273
+ if tool_name in RESERVED_TOOL_NAMES:
274
+ raise ValueError(
275
+ f"Tool name '{tool_name}' is reserved and cannot be used. "
276
+ f"Reserved names are: {sorted(RESERVED_TOOL_NAMES)}"
277
+ )
278
+
279
+ # If mode is None, register with FastMCP as usual
280
+ if mode is None:
281
+ decorated_func = self.mcp_server.tool()(func)
282
+ self._mode_tools[tool_name][None] = func
283
+ return decorated_func
284
+
285
+ # For mode-specific tools, don't register with FastMCP
286
+ # Instead, track them ourselves
287
+ self._mode_tools[tool_name][mode] = func
288
+
289
+ # Extract schema information from function signature
290
+ sig = inspect.signature(func)
291
+ schema = {
292
+ "type": "object",
293
+ "properties": {},
294
+ "required": [],
295
+ }
296
+
297
+ for param_name, param in sig.parameters.items():
298
+ # Get type annotation
299
+ param_type = param.annotation
300
+ json_type = "string" # default
301
+ if param_type in (int, "int"):
302
+ json_type = "integer"
303
+ elif param_type in (float, "float"):
304
+ json_type = "number"
305
+ elif param_type in (bool, "bool"):
306
+ json_type = "boolean"
307
+
308
+ schema["properties"][param_name] = {"type": json_type}
309
+
310
+ # If no default value, it's required
311
+ if param.default == inspect.Parameter.empty:
312
+ schema["required"].append(param_name)
313
+
314
+ # Store the schema for this mode-specific tool
315
+ self._mode_tool_schemas[tool_name][mode] = {
316
+ "name": tool_name,
317
+ "description": func.__doc__ or "",
318
+ "input_schema": schema,
319
+ }
320
+
321
+ return func
322
+
323
+ return decorator
324
+
325
+ def step(
326
+ self,
327
+ action: Action,
328
+ timeout_s: Optional[float] = None,
329
+ **kwargs: Any,
330
+ ) -> Observation:
331
+ """
332
+ Execute an action in the environment.
333
+
334
+ This method routes MCP-specific actions (ListToolsAction, CallToolAction)
335
+ to the appropriate handlers, while delegating all other actions to
336
+ the subclass's _step_impl() method.
337
+
338
+ Args:
339
+ action: The action to execute. Can be:
340
+ - ListToolsAction: Returns available MCP tools
341
+ - CallToolAction: Invokes a specific MCP tool
342
+ - Any other Action: Delegated to _step_impl()
343
+ timeout_s: Optional timeout in seconds for the action.
344
+ Defaults to MCP_TOOL_CALL_TIMEOUT (30s) for MCP actions.
345
+ **kwargs: Additional arguments passed to handlers.
346
+
347
+ Returns:
348
+ Observation appropriate to the action type:
349
+ - ListToolsObservation for ListToolsAction
350
+ - CallToolObservation for CallToolAction
351
+ - Subclass-defined Observation for other actions
352
+ """
353
+ if isinstance(action, ListToolsAction):
354
+ return self._handle_list_tools()
355
+ elif isinstance(action, CallToolAction):
356
+ return self._handle_call_tool(action, timeout_s=timeout_s)
357
+ else:
358
+ return self._step_impl(action, timeout_s=timeout_s, **kwargs)
359
+
360
+ def _handle_list_tools(self) -> ListToolsObservation:
361
+ """
362
+ Handle a ListToolsAction by querying the MCP server.
363
+
364
+ Returns:
365
+ ListToolsObservation containing all available tools with their
366
+ names, descriptions, and input schemas, filtered by current mode.
367
+ """
368
+ try:
369
+ # Get current mode
370
+ current_mode = getattr(self, "_mode", None)
371
+
372
+ # Start with tools from FastMCP server (mode=None tools)
373
+ tools_result = run_async_safely(self._async_list_tools())
374
+
375
+ # Build list of Tool objects
376
+ tools = []
377
+
378
+ # Add FastMCP tools that are not mode-specific
379
+ for tool in tools_result:
380
+ if tool.name not in self._mode_tool_schemas:
381
+ tools.append(
382
+ Tool(
383
+ name=tool.name,
384
+ description=tool.description or "",
385
+ input_schema=tool.inputSchema
386
+ if hasattr(tool, "inputSchema")
387
+ else {},
388
+ )
389
+ )
390
+
391
+ # Add mode-specific tools available in current mode
392
+ for tool_name, mode_schemas in self._mode_tool_schemas.items():
393
+ if None in mode_schemas:
394
+ # Tool available in all modes
395
+ schema = mode_schemas[None]
396
+ tools.append(
397
+ Tool(
398
+ name=schema["name"],
399
+ description=schema["description"],
400
+ input_schema=schema["input_schema"],
401
+ )
402
+ )
403
+ elif current_mode in mode_schemas:
404
+ # Tool available in current mode
405
+ schema = mode_schemas[current_mode]
406
+ tools.append(
407
+ Tool(
408
+ name=schema["name"],
409
+ description=schema["description"],
410
+ input_schema=schema["input_schema"],
411
+ )
412
+ )
413
+
414
+ return ListToolsObservation(tools=tools)
415
+
416
+ except Exception as e:
417
+ # Return an observation with error in metadata
418
+ return ListToolsObservation(
419
+ tools=[],
420
+ metadata={
421
+ "error": str(e),
422
+ "error_type": "list_tools_failed",
423
+ },
424
+ )
425
+
426
+ async def _async_list_tools(self) -> list:
427
+ """
428
+ Async helper to list tools from the MCP client.
429
+
430
+ Returns:
431
+ List of tool objects from the MCP server.
432
+ """
433
+ async with self.mcp_client:
434
+ return await self.mcp_client.list_tools()
435
+
436
+ def _handle_call_tool(
437
+ self,
438
+ action: CallToolAction,
439
+ timeout_s: Optional[float] = None,
440
+ ) -> CallToolObservation:
441
+ """
442
+ Handle a CallToolAction by invoking the specified tool.
443
+
444
+ Args:
445
+ action: The CallToolAction containing tool_name and arguments.
446
+ timeout_s: Timeout in seconds. Defaults to MCP_TOOL_CALL_TIMEOUT (30s).
447
+
448
+ Returns:
449
+ CallToolObservation with the tool's result or an error.
450
+ """
451
+ timeout = timeout_s if timeout_s is not None else MCP_TOOL_CALL_TIMEOUT
452
+
453
+ # Check if this is a mode-specific tool
454
+ tool_name = action.tool_name
455
+ current_mode = getattr(self, "_mode", None)
456
+
457
+ if tool_name in self._mode_tools:
458
+ mode_info = self._mode_tools[tool_name]
459
+
460
+ # Check if tool is available in current mode
461
+ # Tool is available if:
462
+ # 1. It has a None mode (available in all modes), OR
463
+ # 2. It has an implementation for the current mode
464
+ if None in mode_info:
465
+ # Use the mode-agnostic version
466
+ func = mode_info[None]
467
+ elif current_mode in mode_info:
468
+ # Use the mode-specific version
469
+ func = mode_info[current_mode]
470
+ else:
471
+ # Tool not available in current mode
472
+ return CallToolObservation(
473
+ tool_name=tool_name,
474
+ result=None,
475
+ error=ToolError(
476
+ error_type=ToolErrorType.TOOL_NOT_FOUND,
477
+ message=f"Tool '{tool_name}' not available in {current_mode} mode",
478
+ ),
479
+ )
480
+
481
+ # Call the mode-specific function directly
482
+ try:
483
+ # Check if function is async and await if necessary
484
+ if inspect.iscoroutinefunction(func):
485
+ result = run_async_safely(func(**action.arguments))
486
+ else:
487
+ result = func(**action.arguments)
488
+
489
+ # Wrap result in CallToolResult format to match FastMCP behavior
490
+ return CallToolObservation(
491
+ tool_name=tool_name,
492
+ result=CallToolResult(
493
+ content=[TextContent(type="text", text=str(result))],
494
+ structured_content={"result": result},
495
+ meta=None,
496
+ data=result,
497
+ is_error=False,
498
+ ),
499
+ )
500
+ except Exception as e:
501
+ return CallToolObservation(
502
+ tool_name=tool_name,
503
+ result=None,
504
+ error=ToolError(
505
+ error_type=ToolErrorType.EXECUTION_ERROR,
506
+ message=str(e),
507
+ ),
508
+ )
509
+
510
+ # Not a mode-specific tool, use FastMCP
511
+ try:
512
+ # Run the async call_tool with timeout
513
+ # Use run_async_safely to handle both sync and async contexts
514
+ result = run_async_safely(
515
+ asyncio.wait_for(
516
+ self._async_call_tool(action.tool_name, action.arguments),
517
+ timeout=timeout,
518
+ )
519
+ )
520
+
521
+ return CallToolObservation(
522
+ tool_name=action.tool_name,
523
+ result=result,
524
+ )
525
+
526
+ except asyncio.TimeoutError:
527
+ return CallToolObservation(
528
+ tool_name=action.tool_name,
529
+ result=None,
530
+ error=ToolError(
531
+ error_type=ToolErrorType.TIMEOUT,
532
+ message=f"Tool '{action.tool_name}' timed out after {timeout} seconds",
533
+ ),
534
+ )
535
+
536
+ except Exception as e:
537
+ error_message = str(e)
538
+
539
+ # Determine error type based on the exception
540
+ if (
541
+ "not found" in error_message.lower()
542
+ or "unknown tool" in error_message.lower()
543
+ ):
544
+ error_type = ToolErrorType.TOOL_NOT_FOUND
545
+ elif (
546
+ "invalid" in error_message.lower()
547
+ or "argument" in error_message.lower()
548
+ ):
549
+ error_type = ToolErrorType.INVALID_ARGS
550
+ else:
551
+ error_type = ToolErrorType.EXECUTION_ERROR
552
+
553
+ return CallToolObservation(
554
+ tool_name=action.tool_name,
555
+ result=None,
556
+ error=ToolError(
557
+ error_type=error_type,
558
+ message=error_message,
559
+ ),
560
+ )
561
+
562
+ async def _async_call_tool(self, tool_name: str, arguments: dict) -> Any:
563
+ """
564
+ Async helper to call a tool on the MCP server.
565
+
566
+ Args:
567
+ tool_name: Name of the tool to invoke.
568
+ arguments: Dictionary of arguments to pass to the tool.
569
+
570
+ Returns:
571
+ The result from the tool execution.
572
+ """
573
+ async with self.mcp_client:
574
+ return await self.mcp_client.call_tool(tool_name, arguments)
575
+
576
    @abstractmethod
    def _step_impl(
        self,
        action: Action,
        timeout_s: Optional[float] = None,
        **kwargs: Any,
    ) -> Observation:
        """
        Handle non-MCP actions in the environment.

        Subclasses must implement this method to handle any actions that are
        not ListToolsAction or CallToolAction (those are intercepted by
        ``step()`` before this method is reached). This is where
        environment-specific action processing should occur.

        Args:
            action: The action to execute (guaranteed not to be an MCP action).
            timeout_s: Optional timeout in seconds, forwarded from ``step()``.
            **kwargs: Additional arguments forwarded from ``step()``.

        Returns:
            An Observation appropriate for the action.
        """
        pass
599
+
600
+ def close(self) -> None:
601
+ """
602
+ Clean up resources used by the environment.
603
+
604
+ This method cleans up the MCP client and any other resources.
605
+ Subclasses should call super().close() if they override this method.
606
+ """
607
+ # The MCP client uses async context manager, so cleanup happens
608
+ # automatically when the context exits. We just clear references.
609
+ self.mcp_client = None
610
+ self.mcp_server = None
src/core/openenv/core/env_server/mcp_types.py ADDED
@@ -0,0 +1,321 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ MCP (Model Context Protocol) type definitions for OpenEnv.
9
+
10
+ This module defines strongly typed models for MCP tool discovery and invocation,
11
+ following RFC 003. These types map MCP's REST-like API (tools/list, tools/call)
12
+ to Gym-style action types.
13
+
14
+ Key design decisions:
15
+ - Tool discovery (list_tools) does NOT require reset() first
16
+ - Reserved tool names (reset, step, state, close) are prohibited
17
+ - Both step() and WebSocket /mcp paths are supported
18
+ """
19
+
20
+ from enum import Enum
21
+ from typing import Any, Dict, List, Literal, Optional, Union
22
+
23
+ from pydantic import BaseModel, ConfigDict, Field
24
+
25
+ from .types import Action, Observation, BaseMessage
26
+
27
+
28
+ # =============================================================================
29
+ # JSON-RPC 2.0 Types
30
+ # =============================================================================
31
+
32
+
33
class JsonRpcErrorCode(int, Enum):
    """
    Standard JSON-RPC 2.0 error codes.

    Codes in -32768..-32000 are reserved by the JSON-RPC specification;
    -32000..-32099 are set aside for implementation-defined server errors.

    See: https://www.jsonrpc.org/specification#error_object
    """

    # Standard JSON-RPC errors
    PARSE_ERROR = -32700  # Invalid JSON was received by the server
    INVALID_REQUEST = -32600  # The JSON sent is not a valid Request object
    METHOD_NOT_FOUND = -32601  # The method does not exist / is not available
    INVALID_PARAMS = -32602  # Invalid method parameter(s)
    INTERNAL_ERROR = -32603  # Internal JSON-RPC error

    # Server errors (reserved range for implementation-defined errors)
    SERVER_ERROR = -32000  # Generic server error
49
+
50
+
51
class McpMethod(str, Enum):
    """Supported MCP method names (valid JSON-RPC ``method`` values)."""

    TOOLS_LIST = "tools/list"  # tool discovery
    TOOLS_CALL = "tools/call"  # tool invocation
56
+
57
+
58
class JsonRpcError(BaseModel):
    """
    JSON-RPC 2.0 error object.

    See: https://www.jsonrpc.org/specification#error_object
    """

    model_config = ConfigDict(extra="forbid")

    code: int = Field(description="Error code indicating the error type")
    message: str = Field(description="Short description of the error")
    data: Optional[Any] = Field(
        default=None, description="Additional error information"
    )

    @classmethod
    def from_code(
        cls, code: JsonRpcErrorCode, message: Optional[str] = None, data: Any = None
    ) -> "JsonRpcError":
        """Build an error from a standard code, filling in the spec's default message."""
        spec_messages = {
            JsonRpcErrorCode.PARSE_ERROR: "Parse error",
            JsonRpcErrorCode.INVALID_REQUEST: "Invalid Request",
            JsonRpcErrorCode.METHOD_NOT_FOUND: "Method not found",
            JsonRpcErrorCode.INVALID_PARAMS: "Invalid params",
            JsonRpcErrorCode.INTERNAL_ERROR: "Internal error",
            JsonRpcErrorCode.SERVER_ERROR: "Server error",
        }
        # An explicit (truthy) message wins; otherwise fall back to the
        # spec-defined text for the code.
        resolved = message or spec_messages.get(code, "Unknown error")
        return cls(code=code.value, message=resolved, data=data)
91
+
92
+
93
class JsonRpcRequest(BaseModel):
    """
    JSON-RPC 2.0 request object.

    See: https://www.jsonrpc.org/specification#request_object
    """

    model_config = ConfigDict(extra="forbid")

    jsonrpc: Literal["2.0"] = Field(description="JSON-RPC version, must be '2.0'")
    method: str = Field(description="Name of the method to be invoked")
    params: Dict[str, Any] = Field(
        default_factory=dict, description="Parameter values for the method"
    )
    # Per the spec, a request without an id is a notification (no response
    # is expected by the client).
    id: Optional[Union[str, int]] = Field(
        default=None, description="Request identifier established by the client"
    )
110
+
111
+
112
class JsonRpcResponse(BaseModel):
    """
    JSON-RPC 2.0 response object.

    Per JSON-RPC 2.0 spec, a response has either 'result' or 'error', not both.
    This model excludes None values during serialization to comply with the spec.

    See: https://www.jsonrpc.org/specification#response_object
    """

    model_config = ConfigDict(extra="forbid")

    jsonrpc: Literal["2.0"] = Field(default="2.0", description="JSON-RPC version")
    result: Optional[Any] = Field(
        default=None, description="Result of the method invocation"
    )
    error: Optional[JsonRpcError] = Field(
        default=None, description="Error object if method invocation failed"
    )
    id: Optional[Union[str, int]] = Field(
        default=None, description="Request identifier from the request"
    )

    def model_dump(self, **kwargs) -> Dict[str, Any]:
        """Serialize to dict, excluding result or error when None (JSON-RPC compliance).

        NOTE(review): ``**kwargs`` (pydantic options such as ``exclude`` or
        ``mode``) are accepted for signature compatibility but ignored —
        confirm no caller relies on them.
        """
        # Always include jsonrpc and id, but only include result OR error
        data: Dict[str, Any] = {"jsonrpc": self.jsonrpc, "id": self.id}
        if self.error is not None:
            # Defensive hasattr guard: error is normally a JsonRpcError
            # model, but tolerate a plain value assigned at runtime.
            data["error"] = (
                self.error.model_dump()
                if hasattr(self.error, "model_dump")
                else self.error
            )
        else:
            # Only include result if there's no error
            data["result"] = self.result
        return data

    def model_dump_json(self, **kwargs) -> str:
        """Serialize to JSON string, excluding result or error when None (JSON-RPC compliance).

        NOTE(review): ``**kwargs`` are ignored here as well (see model_dump).
        """
        import json

        return json.dumps(self.model_dump())

    @classmethod
    def success(
        cls, result: Any, request_id: Optional[Union[str, int]] = None
    ) -> "JsonRpcResponse":
        """Create a success response carrying ``result``."""
        return cls(result=result, id=request_id)

    @classmethod
    def error_response(
        cls,
        code: JsonRpcErrorCode,
        message: Optional[str] = None,
        data: Any = None,
        request_id: Optional[Union[str, int]] = None,
    ) -> "JsonRpcResponse":
        """Create an error response from a standard error code."""
        return cls(
            error=JsonRpcError.from_code(code, message, data),
            id=request_id,
        )
176
+
177
+
178
+ # =============================================================================
179
+ # MCP Tool Types
180
+ # =============================================================================
181
+
182
+
183
class Tool(BaseModel):
    """
    Strongly typed MCP tool specification.

    Follows the MCP ToolSpec format for tool discovery.
    See: https://modelcontextprotocol.io/specification/2025-06-18/server/tools

    NOTE(review): the MCP wire format names the schema field ``inputSchema``
    (camelCase) while this model uses ``input_schema`` — verify that any
    serialization to the wire applies the rename.
    """

    model_config = ConfigDict(extra="forbid")

    name: str = Field(description="Unique identifier for the tool")
    description: str = Field(
        description="Human-readable description of what the tool does"
    )
    input_schema: Dict[str, Any] = Field(
        description="JSON Schema for the tool's input parameters"
    )
200
+
201
+
202
class ToolErrorType(str, Enum):
    """Categories of transport/framework errors during tool execution."""

    EXECUTION_ERROR = "execution_error"  # Tool ran but raised/failed
    INVALID_ARGS = "invalid_args"  # Invalid arguments provided
    TRANSPORT_ERROR = "transport_error"  # Communication failure
    TOOL_NOT_FOUND = "tool_not_found"  # Tool doesn't exist (or not in mode)
    TIMEOUT = "timeout"  # Operation exceeded its deadline
210
+
211
+
212
class ToolError(BaseModel):
    """
    Structured error for tool execution failures.

    This is used for transport/framework errors, NOT for errors returned
    by the tool itself (those go in the observation's result field).
    """

    model_config = ConfigDict(extra="forbid")

    error_type: ToolErrorType = Field(description="Category of the error")
    message: str = Field(description="Human-readable error message")
224
+
225
+
226
+ # --- MCP Actions ---
227
+
228
+
229
class ListToolsAction(Action):
    """
    Request list of available tools from the environment.

    This action triggers MCP's tools/list operation and returns
    all available tools with their schemas.

    Note: Does NOT require reset() to be called first.
    """

    # Discriminator used to route this action in step().
    type: Literal["list_tools"] = Field(
        default="list_tools", description="Action type discriminator"
    )
242
+
243
+
244
class CallToolAction(Action):
    """
    Call a specific tool via MCP.

    This action triggers MCP's tools/call operation with the
    specified tool name and arguments.
    """

    # Discriminator used to route this action in step().
    type: Literal["call_tool"] = Field(
        default="call_tool", description="Action type discriminator"
    )
    tool_name: str = Field(description="Name of the tool to call")
    arguments: Dict[str, Any] = Field(
        default_factory=dict, description="Arguments to pass to the tool"
    )
259
+
260
+
261
+ # --- MCP Observations ---
262
+
263
+
264
class ListToolsObservation(Observation):
    """
    Response containing available tools.

    Returned when processing a ListToolsAction.  On failure the tool list is
    empty and error details are placed in the observation's metadata.
    """

    tools: List[Tool] = Field(description="List of available tools with their schemas")
272
+
273
+
274
class CallToolObservation(Observation):
    """
    Response from tool execution.

    Contains the tool's result or an error if the call failed.
    Tool-specific errors (from the tool itself) are included in the result.
    Transport/framework errors use the error field.
    """

    tool_name: str = Field(description="Name of the tool that was called")
    result: Any = Field(
        default=None, description="Tool-specific result (may include tool errors)"
    )
    error: Optional[ToolError] = Field(
        default=None, description="Transport/framework error if call failed"
    )
289
+ )
290
+
291
+
292
+ # --- WebSocket Message Types for MCP ---
293
+
294
+
295
class WSMCPMessage(BaseMessage):
    """
    WebSocket message for MCP JSON-RPC requests.

    Allows direct MCP access via WebSocket for production inference,
    bypassing the step() API.
    """

    type: Literal["mcp"] = Field(default="mcp", description="Message type")
    # data carries a raw JSON-RPC request: method, params, id.
    data: Dict[str, Any] = Field(description="JSON-RPC payload (method, params, id)")
305
+
306
+
307
class WSMCPResponse(BaseModel):
    """
    WebSocket response for MCP JSON-RPC.

    Contains the JSON-RPC response from the MCP server.
    """

    model_config = ConfigDict(extra="forbid")

    type: str = Field(default="mcp", description="Response type")
    # data carries the raw JSON-RPC response (result or error, plus id).
    data: Dict[str, Any] = Field(description="JSON-RPC response payload")
318
+
319
+
320
# Reserved tool names that cannot be used as MCP tool identifiers; they would
# collide with the environment's core API surface (protects the dual
# step()/MCP boundary).
RESERVED_TOOL_NAMES = frozenset(["reset", "step", "state", "close"])