File size: 6,009 Bytes
5fed0fc |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 |
"""
Configuration loading for problem runtime settings.
Loads and parses problem config.yaml files, including runtime resources
and docker configuration.
"""
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional
import yaml
DEFAULT_DOCKER_IMAGE = "python:3.11-slim-trixie"
@dataclass
class ResourcesConfig:
    """SkyPilot-compatible resources configuration."""
    accelerators: Optional[str] = None  # e.g., "L4:1", "A100:4"
    instance_type: Optional[str] = None  # e.g., "n1-standard-8"
    cpus: Optional[str] = None  # e.g., "8", "8+"
    memory: Optional[str] = None  # e.g., "32", "32+"
    disk_size: Optional[int] = None  # GB
    disk_tier: Optional[str] = None  # "high", "medium", "low"
    cloud: Optional[str] = None  # "gcp", "aws", "azure"
    region: Optional[str] = None
    image_id: Optional[str] = None  # VM image for SkyPilot
    def to_dict(self) -> Dict[str, Any]:
        """Convert to dict, excluding None values."""
        populated: Dict[str, Any] = {}
        for field_name, field_value in vars(self).items():
            if field_value is not None:
                populated[field_name] = field_value
        return populated
    @property
    def has_gpu(self) -> bool:
        """Whether any accelerator has been requested."""
        return self.accelerators is not None
    @property
    def gpu_type(self) -> Optional[str]:
        """Extract GPU type from accelerators (e.g., 'L4:1' -> 'L4')."""
        if self.accelerators:
            gpu_name, _, _ = self.accelerators.partition(":")
            return gpu_name
        return None
@dataclass
class DockerConfig:
    """Docker configuration for running evaluations."""
    image: str = DEFAULT_DOCKER_IMAGE  # container image used for the run
    gpu: bool = False  # Whether to pass --gpus all
    dind: bool = False  # Docker-in-Docker (mount docker socket)
    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "DockerConfig":
        """Create DockerConfig from a dictionary."""
        image = data.get("image", DEFAULT_DOCKER_IMAGE)
        gpu_enabled = bool(data.get("gpu", False))
        dind_enabled = bool(data.get("dind", False))
        return cls(image=image, gpu=gpu_enabled, dind=dind_enabled)
@dataclass
class RuntimeConfig:
    """Complete runtime configuration from config.yaml."""
    timeout_seconds: Optional[int] = None  # wall-clock limit for a run; None = unset
    requires_gpu: Optional[bool] = None  # legacy GPU flag (see load_problem_config fallback)
    resources: ResourcesConfig = field(default_factory=ResourcesConfig)  # SkyPilot resources section
    docker: DockerConfig = field(default_factory=DockerConfig)  # docker section
    environment: Optional[str] = None  # For LLM prompts
@dataclass
class ProblemConfig:
    """Full problem configuration from config.yaml."""
    tag: Optional[str] = None  # Problem category: os, hpc, ai, db, pl, security
    runtime: RuntimeConfig = field(default_factory=RuntimeConfig)  # parsed "runtime:" section
    dependencies: Dict[str, Any] = field(default_factory=dict)  # raw "dependencies:" mapping, passed through unparsed
    datasets: list = field(default_factory=list)  # raw "datasets:" list, passed through unparsed
def load_problem_config(problem_path: Path) -> ProblemConfig:
    """
    Load full problem configuration from config.yaml.

    Returns a default ProblemConfig when the file is missing, unreadable,
    malformed, or does not contain a YAML mapping at the top level.

    Example config.yaml:
    ```yaml
    tag: hpc
    runtime:
      timeout_seconds: 1800
      docker:
        image: andylizf/triton-tlx:tlx-nv-cu122
        gpu: true
      resources:
        accelerators: "L4:1"
        cpus: "8+"
    ```
    """
    config_file = problem_path / "config.yaml"
    problem_config = ProblemConfig()
    if not config_file.exists():
        return problem_config
    try:
        with open(config_file, "r", encoding="utf-8") as f:
            config = yaml.safe_load(f) or {}
    except (OSError, yaml.YAMLError):
        # Best-effort load: an unreadable or malformed file yields defaults.
        return problem_config
    if not isinstance(config, dict):
        # A bare scalar or list at the top level has no usable sections.
        return problem_config
    # Parse tag
    if config.get("tag"):
        problem_config.tag = str(config["tag"])
    # Parse dependencies and datasets. `or {}` / `or []` also covers keys
    # that are present but explicitly null (e.g. a bare "dependencies:" line),
    # which plain .get(key, default) would leave as None.
    problem_config.dependencies = config.get("dependencies") or {}
    problem_config.datasets = config.get("datasets") or []
    # Parse runtime section (guard against an explicit "runtime: null").
    runtime = config.get("runtime") or {}
    rt = problem_config.runtime
    if runtime.get("timeout_seconds"):
        rt.timeout_seconds = int(runtime["timeout_seconds"])
    if runtime.get("requires_gpu") is not None:
        rt.requires_gpu = bool(runtime["requires_gpu"])
    if runtime.get("environment"):
        rt.environment = str(runtime["environment"])
    # Parse docker section
    docker = runtime.get("docker") or {}
    if docker:
        rt.docker = DockerConfig.from_dict(docker)
    elif runtime.get("requires_gpu"):
        # Legacy: if requires_gpu is set but no docker config, assume GPU needed
        rt.docker.gpu = True
    # Parse resources section
    resources = runtime.get("resources") or {}
    if resources:
        res = rt.resources
        # String-valued fields are coerced with str(); disk_size is an int (GB).
        for key in ["accelerators", "instance_type", "cpus", "memory", "disk_tier", "cloud", "region", "image_id"]:
            if resources.get(key):
                setattr(res, key, str(resources[key]))
        if resources.get("disk_size"):
            res.disk_size = int(resources["disk_size"])
    return problem_config
def load_runtime_config(problem_path: Path) -> RuntimeConfig:
    """Load runtime configuration from problem's config.yaml."""
    problem_config = load_problem_config(problem_path)
    return problem_config.runtime
def load_docker_config_from_yaml(problem_path: Path) -> DockerConfig:
    """Load docker configuration from problem's config.yaml."""
    problem_config = load_problem_config(problem_path)
    return problem_config.runtime.docker
def get_effective_gpu_type(runtime_config: RuntimeConfig) -> Optional[str]:
    """
    Get effective GPU type from runtime config.
    Priority:
    1. resources.accelerators (extract type, e.g., "L4:1" -> "L4")
    2. docker.gpu flag (returns default "L4" if True)
    3. requires_gpu flag (returns default "L4" if True)
    4. None (CPU only)
    """
    resources = runtime_config.resources
    # Explicit accelerators spec wins: derive the GPU type from it.
    if resources.accelerators:
        return resources.gpu_type
    # Either GPU flag (current docker.gpu or legacy requires_gpu) falls
    # back to the default GPU type; otherwise the problem is CPU-only.
    wants_gpu = runtime_config.docker.gpu or runtime_config.requires_gpu
    return "L4" if wants_gpu else None
|