# DynaTraj / dataset.py -- commit 1f86ebf ("hang"): longer episode without termination
import numpy as np
from collections import defaultdict
class TrajectoryBuffer:
    """Accumulate batched per-step environment data into fixed-length trajectory segments.

    Steps are appended one batch at a time via :meth:`append_step`.  Every
    ``traj_steps`` appends, the accumulated step lists are stacked into a
    ``[B, traj_steps, ...]`` segment and moved to ``traj_pool``.
    :meth:`finalize` pads any partial segment (repeating the last step with
    zero reward and ``done=True``) and returns every key stacked along a
    leading segment axis: ``[num_segments, B, traj_steps, ...]``.
    """

    def __init__(self, traj_steps):
        """
        Args:
            traj_steps: number of environment steps per trajectory segment.
        """
        self.traj_steps = traj_steps
        self.step_idx = 0                    # total steps appended so far
        self.buffers = defaultdict(list)     # in-progress segment: key -> list of [B, ...] arrays
        self.traj_pool = defaultdict(list)   # completed segments: key -> list of [B, T, ...] arrays
        self.batch_size = None               # inferred from the first appended obs

    def _flush(self):
        """Stack the in-progress step lists into one [B, T, ...] segment per key
        and move it to the pool.  Assumes the buffers hold exactly ``traj_steps`` entries."""
        for key, steps in self.buffers.items():
            self.traj_pool[key].append(np.stack(steps, axis=1))
            steps.clear()

    def append_step(self, obs, ext_obs, action, reward, done):
        """Append one batched environment step.

        Args:
            obs     : [B, ...] observation batch
            ext_obs : [B, ...] extra/extended observation batch
            action  : [B, ...] action batch
            reward  : [B] reward batch
            done    : [B] episode-termination flags
        """
        if self.batch_size is None:
            self.batch_size = obs.shape[0]
        # Copy so later in-place mutation by the caller cannot corrupt the buffer.
        self.buffers["obs"].append(obs.copy())
        self.buffers["action"].append(action.copy())
        self.buffers["reward"].append(reward.copy())
        self.buffers["done"].append(done.copy())
        self.buffers["ext_obs"].append(ext_obs.copy())
        self.step_idx += 1
        if self.step_idx % self.traj_steps == 0:
            self._flush()

    def force_complete_trajectory(self):
        """Pad a partial segment up to ``traj_steps`` and flush it.

        Padding repeats the last observation/action, uses zero reward, and
        marks every padded step as done.  No-op when the buffers are empty
        (i.e. the last append landed exactly on a segment boundary).
        """
        if not self.buffers["obs"]:
            return
        last_obs = self.buffers["obs"][-1].copy()
        last_ext_obs = self.buffers["ext_obs"][-1].copy()
        last_action = self.buffers["action"][-1].copy()
        last_reward = np.zeros_like(self.buffers["reward"][-1])  # zero reward for padding
        last_done = np.ones_like(self.buffers["done"][-1], dtype=np.bool_)  # padding marked done
        while self.step_idx % self.traj_steps != 0:
            self.buffers["obs"].append(last_obs.copy())
            self.buffers["ext_obs"].append(last_ext_obs.copy())
            self.buffers["action"].append(last_action.copy())
            self.buffers["reward"].append(last_reward.copy())
            self.buffers["done"].append(last_done.copy())
            self.step_idx += 1
        self._flush()

    def finalize(self):
        """Pad/flush any partial segment and return all pooled data.

        Returns:
            dict mapping key -> array of shape [num_segments, B, traj_steps, ...].
        """
        self.force_complete_trajectory()
        return {k: np.stack(v, axis=0) for k, v in self.traj_pool.items()}

    def save(self, path):
        """Finalize and write the pooled trajectories to a compressed ``.npz`` file."""
        np.savez_compressed(path, **self.finalize())

    def __len__(self):
        """Number of completed trajectories (flushed segments x batch size)."""
        if not self.traj_pool or self.batch_size is None:
            return 0
        flushes = len(next(iter(self.traj_pool.values())))
        return flushes * self.batch_size