Upload ai/environments/gym_env.py with huggingface_hub

ai/environments/gym_env.py (ADDED, +404 lines)
import os
import time

import gymnasium as gym
import numpy as np
from ai.vector_env import VectorGameState
from gymnasium import spaces

# from sb3_contrib import MaskablePPO  # Imported lazily in _load_opponent to avoid worker OOM
from engine.game.game_state import initialize_game


class LoveLiveCardGameEnv(gym.Env):
    """
    Love Live Card Game Gymnasium wrapper.

    By default the agent plays as Player 0 against a random or self-play
    opponent (Player 1).
    """

    metadata = {"render_modes": ["human"]}

    def __init__(self, target_cpu_usage=1.0, deck_type="normal", opponent_type="random"):
        super().__init__()

        # Init game
        pid = os.getpid()
        self.deck_type = deck_type
        self.opponent_type = opponent_type
        self.game = initialize_game(deck_type=deck_type)
        self.game.suppress_logs = True  # Speedup: disable rule logging
        self.game.enable_loop_detection = False  # Speedup: disable state hashing
        self.game.fast_mode = True  # Use JIT bytecode for abilities
        self.agent_player_id = 0  # Agent controls player 0

        # Init opponent
        self.opponent_model = None
        self.opponent_model_path = os.path.join(os.getcwd(), "checkpoints", "self_play_opponent.zip")
        self.last_load_time = 0

        if self.opponent_type == "self_play":
            # Optimization: restrict torch threads in the worker process
            import torch

            torch.set_num_threads(1)
            self._load_opponent()

        # Action space: 1000 discrete actions
        ACTION_SIZE = 1000
        self.action_space = spaces.Discrete(ACTION_SIZE)

        # Observation space: standard flat vector (2304 features)
        OBS_SIZE = 2304
        self.observation_space = spaces.Box(low=0, high=1, shape=(OBS_SIZE,), dtype=np.float32)

        # Helper vector state for encoding (reuses the robust logic from VectorEnv)
        self.v_state = VectorGameState(1)

        # CPU throttling
        self.target_cpu_usage = target_cpu_usage
        self.last_step_time = time.time()

        # Stats tracking
        self.win_count = 0
        self.game_count = 0
        self.last_win_rate = 0.0
        self.total_steps = 0
        self.episode_reward = 0.0
        self.last_score = 0
        self.last_turn = 1
        self.pid = pid

    def reset(self, seed=None, options=None):
        super().reset(seed=seed)

        # Track stats from the finished game before resetting
        if hasattr(self, "game") and self.game.game_over:
            self.game_count += 1
            if self.game.winner == self.agent_player_id:
                self.win_count += 1
            self.last_win_rate = (self.win_count / self.game_count) * 100

        # Reset game
        self.game = initialize_game(deck_type=self.deck_type)
        self.game.suppress_logs = True
        self.game.enable_loop_detection = False
        self.game.fast_mode = True

        self.total_steps = 0
        self.episode_reward = 0.0
        self.last_score = 0
        self.last_turn = 1

        # Gym's reset() must return (obs, info), so it cannot return a
        # "needs_opponent" signal the way step() does. If the game does not
        # start on the agent's turn, we instead flag it in `info` so the
        # BatchedEnv can run the opponent's move(s) BEFORE the first agent step.
        observation = self._get_fast_observation()
        info = {"win_rate": self.last_win_rate}

        if not self.game.is_terminal() and self.game.current_player != self.agent_player_id:
            info["needs_opponent"] = True
            info["opp_obs"] = self._get_fast_observation(self.game.current_player)
            info["opp_masks"] = self.game.get_legal_actions().astype(bool)

        return observation, info

    def step(self, action):
        """
        Execute an action for the agent.

        If it is no longer the agent's turn afterwards, return a
        'needs_opponent' signal so the driver can run batched opponent inference.
        """
        start_time = time.time()
        start_engine = time.perf_counter()
        # 1. Agent's move
        self.game = self.game.step(action, check_legality=False, in_place=True)
        engine_time = time.perf_counter() - start_engine

        # 2. Check whose turn it is
        if not self.game.is_terminal() and self.game.current_player != self.agent_player_id:
            # Opponent needs to move
            obs, reward, terminated, truncated, info = self._signal_opponent_move(start_time)
            info["time_engine"] = engine_time
            # `time_obs` is injected inside _signal_opponent_move / _finalize_step
            return obs, reward, terminated, truncated, info

        # 3. Finalize (rewards, terminal check)
        return self._finalize_step(start_time, engine_time_=engine_time)

    def step_opponent(self, action):
        """Execute a move decided by the central batched inference."""
        start_time = time.time()
        self.game = self.game.step(action, check_legality=False, in_place=True)

        # After one opponent move, it might still be the opponent's turn
        if not self.game.is_terminal() and self.game.current_player != self.agent_player_id:
            return self._signal_opponent_move(start_time)

        res = self._finalize_step(start_time)

        # CRITICAL: if the game ended on the opponent's move, we MUST auto-reset
        # here so the next agent step() is not called on a terminal state.
        if res[2]:  # terminated
            obs, info = self.reset()
            # Preserve the terminal observation for the agent to see
            res[4]["terminal_observation"] = res[0]
            # Replace the observation with the fresh reset observation
            res = (obs, res[1], res[2], res[3], res[4])

        return res

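The step()/step_opponent() pair implements a handshake with the vectorized driver: whenever control passes to the opponent, the env returns "needs_opponent" plus the opponent's observation and mask instead of advancing on its own. A minimal single-env driver loop might look like the sketch below (opponent_policy is a hypothetical stand-in for the batched inference, and this glosses over accumulating rewards across the opponent's intermediate moves, which the real driver must handle):

def drive_one_step(env, agent_action, opponent_policy):
    # One agent action, then service opponent turns until control returns.
    obs, reward, terminated, truncated, info = env.step(agent_action)
    while info.get("needs_opponent"):
        opp_action = opponent_policy(info["opp_obs"], info["opp_masks"])
        obs, reward, terminated, truncated, info = env.step_opponent(opp_action)
    return obs, reward, terminated, truncated, info

# e.g. with a random legal opponent:
#   drive_one_step(env, action, lambda o, m: int(np.random.choice(np.flatnonzero(m))))
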
    def _shape_reward(self, reward: float) -> float:
        """Apply Gym-level reward shaping (turn penalties, live bonuses)."""
        # 1. Base reward: ignore win/loss, penalize illegal moves heavily.
        # We focus purely on "how many lives did I get?" and "how fast?".
        if self.game.winner == -2:
            # Illegal move / technical loss
            reward = -100.0
        else:
            # Neutralize win/loss and heuristic rewards
            reward = 0.0

        # 2. Shaping: turn penalty of -3.0 per turn (major, to force speed)
        current_turn = self.game.turn_number
        if current_turn > self.last_turn:
            reward -= 3.0
            self.last_turn = current_turn

        # 3. Shaping: live capture bonus (primary objective), +50.0 per live.
        # A win (3 lives) is worth 150 points; a loss (0 lives) is worth 0.
        current_score = len(self.game.players[self.agent_player_id].success_lives)
        delta = current_score - self.last_score
        if delta > 0:
            reward += delta * 50.0
        self.last_score = current_score
        return reward

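For intuition about the magnitudes: the shaping is dominated by live captures, with the turn penalty acting as a tie-breaker for speed. A quick arithmetic check of the terms above (no engine required):

# A win with 3 lives ending on turn 10 incurs 9 turn increments (turn 1 -> 10):
lives, end_turn = 3, 10
shaped_total = lives * 50.0 - (end_turn - 1) * 3.0
assert shaped_total == 123.0  # the capture bonus dominates the speed penalty
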
    def _signal_opponent_move(self, start_time):
        """Return the signal tuple consumed by BatchedSubprocVecEnv."""
        start_obs = time.perf_counter()
        observation = self._get_fast_observation()
        obs_time = time.perf_counter() - start_obs

        reward = self.game.get_reward(self.agent_player_id)
        reward = self._shape_reward(reward)

        # Data the driver needs to pick the opponent's move
        opp_obs = self._get_fast_observation(self.game.current_player)
        opp_masks = self.game.get_legal_actions().astype(bool)

        info = {
            "needs_opponent": True,
            "opp_obs": opp_obs,
            "opp_masks": opp_masks,
            "time_obs": obs_time,  # Inject obs time here too
        }
        return observation, reward, False, False, info

    def _finalize_step(self, start_time, engine_time_=0.0):
        """Standard cleanup and reward calculation."""
        start_obs = time.perf_counter()
        observation = self._get_fast_observation()
        obs_time = time.perf_counter() - start_obs

        reward = self.game.get_reward(self.agent_player_id)
        reward = self._shape_reward(reward)
        terminated = self.game.is_terminal()
        truncated = False

        # Numerical stability guards
        if not np.isfinite(observation).all():
            observation = np.nan_to_num(observation, nan=0.0)
        if not np.isfinite(reward):
            reward = 0.0

        self.total_steps += 1
        self.episode_reward += reward

        info = {"time_obs": obs_time}
        if terminated:
            info["episode"] = {
                "r": self.episode_reward,
                "l": self.total_steps,
                "win": self.game.winner == self.agent_player_id,
                "phase": self.game.phase.name if hasattr(self.game.phase, "name") else str(self.game.phase),
                "turn": self.game.turn_number,
                "t": round(time.time() - start_time, 6),
            }
        return observation, reward, terminated, truncated, info

    def _load_opponent(self):
        """Legacy path, unused in batched mode.

        Only loads the model if actually requested (e.g. legacy/direct testing).
        """
        if self.opponent_type == "self_play" and self.opponent_model is None:
            from sb3_contrib import MaskablePPO

            if os.path.exists(self.opponent_model_path):
                self.opponent_model = MaskablePPO.load(self.opponent_model_path, device="cpu")

    def get_current_info(self):
        """Helper for BatchedSubprocVecEnv to pull info after a reset."""
        if not self.game.is_terminal() and self.game.current_player != self.agent_player_id:
            return self._signal_opponent_move(time.time())[4]
        # Terminal state or agent's turn: no opponent signal needed
        return {}

    def action_masks(self):
        """Return the boolean legal-action mask consumed by MaskablePPO."""
        masks = self.game.get_legal_actions()
        return masks.astype(bool)

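Because the env exposes an action_masks() method, sb3-contrib's mask lookup (get_action_masks) can discover it directly on both raw envs and VecEnvs. A minimal sketch of mask-aware prediction follows; note that real training in this project goes through the batched VecEnv that services "needs_opponent", so this stand-alone use is illustrative only, and the import path assumes the repository root is on PYTHONPATH:

from sb3_contrib import MaskablePPO
from sb3_contrib.common.maskable.utils import get_action_masks

from ai.environments.gym_env import LoveLiveCardGameEnv

env = LoveLiveCardGameEnv(opponent_type="random")
model = MaskablePPO("MlpPolicy", env, verbose=0)

obs, info = env.reset()
masks = get_action_masks(env)  # resolves to env.action_masks()
action, _ = model.predict(obs, action_masks=masks, deterministic=True)
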
    def render(self, mode="human"):
        if mode == "human":
            print(f"Turn: {self.game.turn_number}, Phase: {self.game.phase}, Player: {self.game.current_player}")

    def _get_fast_observation(self, player_idx: int = None) -> np.ndarray:
        """
        Encode the current state with the JIT-compiled vectorized encoder via
        the VectorGameState helper, reflected into 1-element batches.
        """
        if player_idx is None:
            player_idx = self.agent_player_id

        p = self.game.players[player_idx]
        opp_idx = 1 - player_idx
        opp = self.game.players[opp_idx]

        # Populate v_state buffers (batch size = 1)
        # 1. Hand (up to 60 cards)
        self.v_state.batch_hand.fill(0)
        for j, c in enumerate(p.hand):
            if j < 60:
                if hasattr(c, "card_id"):
                    self.v_state.batch_hand[0, j] = c.card_id
                elif isinstance(c, (int, np.integer)):
                    self.v_state.batch_hand[0, j] = int(c)

        # 2. Own stage (3 slots)
        self.v_state.batch_stage.fill(-1)
        self.v_state.batch_tapped.fill(0)
        self.v_state.batch_energy_count.fill(0)
        for s in range(3):
            self.v_state.batch_stage[0, s] = p.stage[s] if p.stage[s] >= 0 else -1
            self.v_state.batch_tapped[0, s] = 1 if p.tapped_members[s] else 0
            self.v_state.batch_energy_count[0, s] = p.stage_energy_count[s]

        # 3. Opponent stage
        self.v_state.opp_stage.fill(-1)
        self.v_state.opp_tapped.fill(0)
        for s in range(3):
            self.v_state.opp_stage[0, s] = opp.stage[s] if opp.stage[s] >= 0 else -1
            self.v_state.opp_tapped[0, s] = 1 if opp.tapped_members[s] else 0

        # 4. Scores / lives
        self.v_state.batch_scores[0] = len(p.success_lives)
        self.v_state.opp_scores[0] = len(opp.success_lives)

        # 5. Live zone (sync from game state, up to 50 cards)
        self.v_state.batch_live.fill(0)
        lz = getattr(self.game, "live_zone", [])
        for k, l_card in enumerate(lz):
            if k < 50:
                if hasattr(l_card, "card_id"):
                    self.v_state.batch_live[0, k] = l_card.card_id
                elif isinstance(l_card, (int, np.integer)):
                    self.v_state.batch_live[0, k] = int(l_card)

        # 6. Global context (phase, turn, deck counts)
        self.v_state.turn = self.game.turn_number
        self.v_state.batch_global_ctx.fill(0)
        # Phase enum: START=0, DRAW=1, MAIN=2, PERFORMANCE=3, CLEAR_CHECK=4, TURN_END=5.
        # game.phase may be an Enum or an int; take .value if it is an Enum.
        p_val = self.game.phase.value if hasattr(self.game.phase, "value") else int(self.game.phase)
        self.v_state.batch_global_ctx[0, 8] = p_val  # Phase moved to index 8
        self.v_state.batch_global_ctx[0, 6] = len(p.main_deck)
        self.v_state.batch_global_ctx[0, 7] = len(opp.main_deck)

        # 6.5 Deck density (hearts/blades)
        d_hearts = 0
        d_blades = 0
        m_db = getattr(self.game, "member_db", {})
        for c_obj in p.main_deck:
            cid = c_obj.card_id if hasattr(c_obj, "card_id") else c_obj
            if cid in m_db:
                card = m_db[cid]
                d_blades += card.blades
                d_hearts += sum(card.hearts)
        # FIXME: index 8 here overwrites the phase value written above;
        # one of the two writes needs a different slot in the encoder layout.
        self.v_state.batch_global_ctx[0, 8] = d_blades
        self.v_state.batch_global_ctx[0, 9] = d_hearts

        # 7. Opponent history (trash / discard pile): top 12, most recent first.
        # Assumes `opp.discard_pile` is a list of Card objects or int ids.
        self.v_state.batch_opp_history.fill(0)
        if hasattr(opp, "discard_pile"):
            d_pile = opp.discard_pile
            limit = min(len(d_pile), 12)
            for k in range(limit):
                # LIFO: k=0 is the top of the pile (index -1), k=1 is -2, ...
                c = d_pile[-(k + 1)]
                val = 0
                if hasattr(c, "card_id"):
                    val = c.card_id
                elif isinstance(c, (int, np.integer)):
                    val = int(c)

                if val > 0:
                    self.v_state.batch_opp_history[0, k] = val

        # Encode and return the single-row observation
        batch_obs = self.v_state.get_observations()
        return batch_obs[0]


if __name__ == "__main__":
    # Smoke test: play up to 20 random legal agent moves
    try:
        env = LoveLiveCardGameEnv()
        obs, info = env.reset()
        print("Env created. Obs shape:", obs.shape)

        terminated = False
        steps = 0
        while not terminated and steps < 20:
            masks = env.action_masks()
            # Pick a random legal action
            legal_indices = np.where(masks)[0]
            if len(legal_indices) == 0:
                print("No legal actions (game over?)")
                break

            action = np.random.choice(legal_indices)
            print(f"Agent Action: {action}")
            obs, reward, terminated, truncated, info = env.step(action)
            env.render()
            print(f"Step {steps}: Reward {reward}, Terminated {terminated}")
            steps += 1

        print("Test Complete.")
    except ImportError:
        print("Please install requirements: pip install -r requirements_rl.txt")
    except Exception as e:
        print(f"Test Failed: {e}")
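Assuming the repository root is on PYTHONPATH, the smoke test above can be exercised with: python -m ai.environments.gym_env. Running the file directly by path would break the absolute ai.* and engine.* imports, since only the script's own directory would be on sys.path.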