#!/usr/bin/env python3
"""
Gym wrapper for the Energy Optimization Environment.
"""
import sys
import os
import gymnasium as gym
import numpy as np
# Make this script's directory importable so the local `models` and
# `server` packages resolve regardless of the current working directory.
sys.path.insert(0, os.path.dirname(__file__))
# Mock the he_demo package: downstream code imports its types from
# `he_demo` / `he_demo.models`, so register a stand-in module object that
# re-exports the local model classes under those names.
import types
he_demo = types.ModuleType('he_demo')
from models import EnergyOptimizationAction, EnergyOptimizationObservation, Task, TaskSummary
he_demo.EnergyOptimizationAction = EnergyOptimizationAction
he_demo.EnergyOptimizationObservation = EnergyOptimizationObservation
he_demo.Task = Task
he_demo.TaskSummary = TaskSummary
sys.modules['he_demo'] = he_demo
# 'he_demo.models' points at the same module object, so
# `from he_demo.models import X` resolves through the same attributes.
sys.modules['he_demo.models'] = he_demo
from server.he_demo_environment import EnergyOptimizationEnvironment
class EnergyOptimizationGymEnv(gym.Env):
    """Gymnasium wrapper for the Energy Optimization Environment.

    Exposes the project's EnergyOptimizationEnvironment through the standard
    gymnasium API: a 2-component continuous action (discrete action-type index
    encoded as a float, plus an intensity) and a 6-component observation vector.
    """

    # Discrete action types selected by the first (index) action component.
    ACTION_TYPES = ("reduce_ram", "optimize_energy", "balance_resources", "monitor_system")

    def __init__(self):
        super().__init__()
        # Underlying (non-gym) environment implementing the actual dynamics.
        self.env = EnergyOptimizationEnvironment()
        # Actions: [action_type_index, intensity]
        # action_type_index: 0=reduce_ram, 1=optimize_energy,
        #                    2=balance_resources, 3=monitor_system
        self.action_space = gym.spaces.Box(
            low=np.array([0, 0.0]),
            high=np.array([3, 1.0]),
            dtype=np.float32
        )
        # Observations: [ram_usage, energy_consumption, system_load,
        #                task_progress, efficiency_score, steps_taken]
        self.observation_space = gym.spaces.Box(
            low=np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0]),
            high=np.array([100.0, 10.0, 1.0, 1.0, 1.0, 100]),
            dtype=np.float32
        )

    def reset(self, *, seed=None, options=None, **kwargs):
        """Reset the environment.

        Args:
            seed: optional RNG seed, forwarded to gym.Env for reproducible
                action/observation space sampling.
            options: accepted for gymnasium API compatibility (unused here).

        Returns:
            (observation array, info dict) per the gymnasium reset contract.
        """
        # gymnasium requires super().reset(seed=...) to seed self.np_random;
        # the old implementation silently dropped the seed kwarg.
        super().reset(seed=seed)
        obs = self.env.reset()
        return self._obs_to_array(obs), {}

    def step(self, action):
        """Execute one action in the environment.

        Args:
            action: array-like [action_type_index, intensity].

        Returns:
            (observation, reward, terminated, truncated, info) 5-tuple.
        """
        # Clamp both components into the declared action space so a policy
        # output marginally outside the Box cannot raise IndexError or feed
        # an out-of-range intensity to the underlying environment.
        action_type_index = min(max(int(action[0]), 0), len(self.ACTION_TYPES) - 1)
        intensity = min(max(float(action[1]), 0.0), 1.0)
        action_obj = EnergyOptimizationAction(
            action_type=self.ACTION_TYPES[action_type_index],
            intensity=intensity,
        )
        obs = self.env.step(action_obj)
        # This wrapper never truncates; termination comes from the env only.
        # Coerce to plain float/bool so callers get canonical gymnasium types.
        return self._obs_to_array(obs), float(obs.reward), bool(obs.done), False, {}

    def _obs_to_array(self, obs):
        """Convert an EnergyOptimizationObservation to a float32 numpy vector."""
        return np.array([
            obs.ram_usage,
            obs.energy_consumption,
            obs.system_load,
            obs.task_progress,
            obs.efficiency_score,
            obs.steps_taken,
        ], dtype=np.float32)

    def render(self, mode="human"):
        """Print a one-line human-readable snapshot of the current state."""
        obs = self.env._get_current_observation()
        if obs:
            print(f"RAM: {obs.ram_usage:.1f}%, Energy: {obs.energy_consumption:.1f}kWh, "
                  f"Task: {obs.current_task.name if obs.current_task else 'None'}, "
                  f"Progress: {obs.task_progress:.2f}")

    def close(self):
        """Close the environment (nothing to release)."""
        pass