code stringlengths 17 6.64M |
|---|
def main(args, config):
utils.init_distributed_mode(args)
device = torch.device(args.device)
seed = (args.seed + utils.get_rank())
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.benchmark = True
print('Creating captioning dataset')
(val_dataset, test_dataset) ... |
def train(model, data_loader, optimizer, epoch, device, config):
model.train()
metric_logger = utils.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
metric_logger.add_meter('loss_ita', utils.SmoothedValue(window_size=50, fmt='{value:.4... |
def main(args, config):
utils.init_distributed_mode(args)
device = torch.device(args.gpu)
seed = (args.seed + utils.get_rank())
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.benchmark = True
cudnn.deterministic = True
print('Creating dataset')
datasets = ... |
def train(model, data_loader, optimizer, epoch, device, config):
model.train()
metric_logger = utils.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
metric_logger.add_meter('loss_ita', utils.SmoothedValue(window_size=50, fmt='{value:.4... |
def main(args, config):
utils.init_distributed_mode(args)
device = torch.device(args.gpu)
seed = (args.seed + utils.get_rank())
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.benchmark = True
cudnn.deterministic = True
print('Creating dataset')
datasets = ... |
def train(model, data_loader, optimizer, epoch, device, config):
model.train()
metric_logger = utils.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
metric_logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.4f}')... |
@torch.no_grad()
def evaluate(model, data_loader, device, config):
model.eval()
metric_logger = utils.MetricLogger(delimiter=' ')
header = 'Evaluation:'
print_freq = 200
for (image0, image1, text, targets) in metric_logger.log_every(data_loader, print_freq, header):
images = torch.cat([im... |
def main(args, config):
utils.init_distributed_mode(args)
device = torch.device(args.device)
seed = (args.seed + utils.get_rank())
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.benchmark = True
print('Creating dataset')
datasets = create_dataset('nlvr', confi... |
def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
    """Cosine-anneal the learning rate from init_lr down to min_lr.

    At epoch 0 the rate is init_lr; at max_epoch it reaches min_lr.
    The new rate is written into every parameter group of the optimizer.
    """
    progress = math.pi * epoch / max_epoch
    lr = (init_lr - min_lr) * 0.5 * (1.0 + math.cos(progress)) + min_lr
    for group in optimizer.param_groups:
        group['lr'] = lr
|
def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr):
    """Linearly ramp the learning rate from init_lr toward max_lr.

    The ramp completes after max_step steps; beyond that the rate is
    clamped at max_lr. Applied to every parameter group of the optimizer.
    """
    ramp = (max_lr - init_lr) * step / max_step
    lr = min(max_lr, init_lr + ramp)
    for group in optimizer.param_groups:
        group['lr'] = lr
|
def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate):
    """Exponentially decay the learning rate, floored at min_lr.

    The rate is init_lr * decay_rate**epoch, never dropping below min_lr.
    Written into every parameter group of the optimizer.
    """
    decayed = init_lr * decay_rate ** epoch
    lr = max(min_lr, decayed)
    for group in optimizer.param_groups:
        group['lr'] = lr
|
class SmoothedValue(object):
'Track a series of values and provide access to smoothed values over a\n window or the global series average.\n '
def __init__(self, window_size=20, fmt=None):
if (fmt is None):
fmt = '{median:.4f} ({global_avg:.4f})'
self.deque = deque(maxlen=wi... |
class MetricLogger(object):
def __init__(self, delimiter='\t'):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for (k, v) in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert is... |
class AttrDict(dict):
    """A dict whose entries are also reachable as attributes.

    After construction, ``d.key`` and ``d['key']`` refer to the same
    storage, in both directions (attribute writes show up as items).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Alias the instance __dict__ to the mapping itself so attribute
        # access and item access share a single namespace.
        self.__dict__ = self
|
def compute_acc(logits, label, reduction='mean'):
    """Compute classification accuracy from raw logits.

    Args:
        logits: float tensor of shape (N, num_classes); class = argmax over dim 1.
        label: integer tensor of shape (N,) with ground-truth class indices.
        reduction: 'mean' for a scalar accuracy, 'none' for a per-sample
            0/1 float tensor.

    Returns:
        Python float when reduction == 'mean'; detached float tensor when 'none'.

    Raises:
        ValueError: for an unrecognized reduction. (Previously the function
            fell through and silently returned None, hiding caller typos.)
    """
    ret = (torch.argmax(logits, dim=1) == label).float()
    if reduction == 'none':
        return ret.detach()
    if reduction == 'mean':
        return ret.mean().item()
    raise ValueError(f"unsupported reduction: {reduction!r}")
|
def compute_n_params(model, return_str=True):
tot = 0
for p in model.parameters():
w = 1
for x in p.shape:
w *= x
tot += w
if return_str:
if (tot >= 1000000.0):
return '{:.1f}M'.format((tot / 1000000.0))
else:
return '{:.1f}K'.for... |
def setup_for_distributed(is_master):
'\n This function disables printing when not in master process\n '
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if (is_master or force):
builtin_p... |
def is_dist_avail_and_initialized():
    """Return True iff torch.distributed is available AND the default
    process group has been initialized."""
    return dist.is_available() and dist.is_initialized()
|
def get_world_size():
    """Number of processes in the distributed group; 1 when not distributed."""
    if is_dist_avail_and_initialized():
        return dist.get_world_size()
    return 1
|
def get_rank():
    """Rank of the current process in the distributed group; 0 when not distributed."""
    if is_dist_avail_and_initialized():
        return dist.get_rank()
    return 0
|
def is_main_process():
    """True only in the rank-0 (master) process."""
    rank = get_rank()
    return rank == 0
|
def save_on_master(*args, **kwargs):
    """torch.save, executed only on the master process so that multi-process
    runs do not write the same checkpoint several times."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
|
def init_distributed_mode(args):
'\n initialize the normal job\n '
if (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)):
args.rank = int(os.environ['RANK'])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ.get('LOCAL_RANK', 0))
print('args... |
def init_distributed_ddpjob(args=None):
'\n initialize the ddp job\n '
if (dist.is_available() and dist.is_initialized()):
return (dist.get_world_size(), dist.get_rank())
try:
os.environ['MASTER_PORT'] = '40101'
torch.distributed.init_process_group(backend='nccl')
except ... |
def generate_object_bbox(objects):
    """Collect integer bounding boxes for at most the first 10 objects.

    Args:
        objects: iterable of dicts, each with a 'rect' entry holding four
            numeric coordinates (floats are truncated by int()).

    Returns:
        List of up to 10 lists of int coordinates.

    Fixes over the original: the loop variable no longer shadows the
    builtin ``object``, and iteration stops after the first 10 entries
    instead of scanning the whole input while appending nothing.
    """
    from itertools import islice
    return [[int(coord) for coord in obj['rect']] for obj in islice(objects, 10)]
|
def _loss_names(d):
ret = {'itm': 0, 'mlm': 0, 'mpp': 0, 'vqa': 0, 'nlvr2': 0, 'irtr': 0}
ret.update(d)
return ret
|
@ex.config
def config():
exp_name = 'vilt'
seed = 0
datasets = ['coco', 'vg', 'sbu', 'gcc']
loss_names = _loss_names({'itm': 1, 'mlm': 1})
batch_size = 4096
train_transform_keys = ['pixelbert']
val_transform_keys = ['pixelbert']
image_size = 384
max_image_len = (- 1)
patch_size... |
@ex.named_config
def env_dandelin():
    # Environment preset with machine-specific paths and hardware counts.
    # The local assignments below are captured as config entries by the
    # experiment framework (ex.named_config — presumably Sacred; confirm).
    data_root = '/data2/dsets/dataset'
    log_dir = '/data2/vilt/result'
    num_gpus = 8
    num_nodes = 1
|
@ex.named_config
def task_mlm_itm():
    # Pretraining preset: image-text matching (itm) + masked language
    # modeling (mlm) on the cc3m dataset.
    exp_name = 'mlm_itm'
    datasets = ['cc3m']
    loss_names = _loss_names({'itm': 1, 'mlm': 1})  # enable only itm and mlm
    batch_size = 4096  # presumably the effective/global batch size — confirm
    max_epoch = 10
    max_image_len = 200
|
@ex.named_config
def task_mlm_itm_randaug():
    # Same itm+mlm pretraining as task_mlm_itm, but on four datasets and
    # with the RandAugment train transform variant.
    exp_name = 'mlm_itm_randaug'
    datasets = ['coco', 'vg', 'sbu', 'gcc']
    train_transform_keys = ['pixelbert_randaug']
    loss_names = _loss_names({'itm': 1, 'mlm': 1})
    batch_size = 4096
    max_epoch = 10
    max_image_len = 200
|
@ex.named_config
def task_mlm_itm_mpp():
    # Pretraining preset adding a third objective ('mpp') on top of itm+mlm.
    exp_name = 'mlm_itm_mpp'
    datasets = ['coco', 'vg', 'sbu', 'gcc']
    loss_names = _loss_names({'itm': 1, 'mlm': 1, 'mpp': 1})
    batch_size = 4096
    max_epoch = 10
    max_image_len = 200
|
@ex.named_config
def task_finetune_nlvr2():
    # Finetuning preset for the NLVR2 task (single 'nlvr2' loss).
    exp_name = 'finetune_nlvr2'
    datasets = ['nlvr2']
    loss_names = _loss_names({'nlvr2': 1})
    batch_size = 128
    max_epoch = 10
    max_steps = None  # None: epoch limit (max_epoch) governs run length
    warmup_steps = 0.1  # fractional — presumably a fraction of total steps; confirm
    draw_false_image = 0  # no negative-image sampling during finetuning
    learning_rate = 0.0001
|
@ex.named_config
def task_finetune_nlvr2_randaug():
exp_name = 'finetune_nlvr2_randaug'
datasets = ['nlvr2']
train_transform_keys = ['pixelbert_randaug']
loss_names = _loss_names({'nlvr2': 1})
batch_size = 128
max_epoch = 10
max_steps = None
warmup_steps = 0.1
draw_false_image = 0
... |
@ex.named_config
def task_finetune_vqa():
    # Finetuning preset for the VQA task (single 'vqa' loss).
    exp_name = 'finetune_vqa'
    datasets = ['vqa']
    loss_names = _loss_names({'vqa': 1})
    batch_size = 256
    max_epoch = 10
    max_steps = None  # None: epoch limit (max_epoch) governs run length
    warmup_steps = 0.1  # fractional — presumably a fraction of total steps; confirm
    draw_false_image = 0
    learning_rate = 0.0001
    val_check_interval = 0.1  # validate 10 times per epoch (fractional interval)
    lr_mult = 10  # LR multiplier — presumably for the task head; confirm
|
@ex.named_config
def task_finetune_vqa_randaug():
exp_name = 'finetune_vqa_randaug'
datasets = ['vqa']
train_transform_keys = ['pixelbert_randaug']
loss_names = _loss_names({'vqa': 1})
batch_size = 256
max_epoch = 10
max_steps = None
warmup_steps = 0.1
draw_false_image = 0
lear... |
@ex.named_config
def task_finetune_irtr_coco():
exp_name = 'finetune_irtr_coco'
datasets = ['coco']
loss_names = _loss_names({'itm': 0.5, 'irtr': 1})
batch_size = 256
max_epoch = 10
max_steps = None
warmup_steps = 0.1
get_recall_metric = True
draw_false_text = 15
learning_rate ... |
@ex.named_config
def task_finetune_irtr_coco_randaug():
exp_name = 'finetune_irtr_coco_randaug'
datasets = ['coco']
train_transform_keys = ['pixelbert_randaug']
loss_names = _loss_names({'itm': 0.5, 'irtr': 1})
batch_size = 256
max_epoch = 10
max_steps = None
warmup_steps = 0.1
get... |
@ex.named_config
def task_finetune_irtr_f30k():
exp_name = 'finetune_irtr_f30k'
datasets = ['f30k']
loss_names = _loss_names({'itm': 0.5, 'irtr': 1})
batch_size = 256
max_epoch = 10
max_steps = None
warmup_steps = 0.1
get_recall_metric = True
draw_false_text = 15
learning_rate ... |
@ex.named_config
def task_finetune_irtr_f30k_randaug():
exp_name = 'finetune_irtr_f30k_randaug'
datasets = ['f30k']
train_transform_keys = ['pixelbert_randaug']
loss_names = _loss_names({'itm': 0.5, 'irtr': 1})
batch_size = 256
max_epoch = 10
max_steps = None
warmup_steps = 0.1
get... |
@ex.named_config
def step25k():
    # Step-budget preset: stop at 25k optimizer steps. max_epoch is set
    # high so the step limit, not the epoch limit, terminates training.
    max_epoch = 100
    max_steps = 25000
|
@ex.named_config
def step50k():
    # Step-budget preset: stop at 50k optimizer steps (epoch limit is
    # deliberately loose so the step limit governs).
    max_epoch = 100
    max_steps = 50000
|
@ex.named_config
def step100k():
    # Step-budget preset: stop at 100k optimizer steps (epoch limit is
    # deliberately loose so the step limit governs).
    max_epoch = 100
    max_steps = 100000
|
@ex.named_config
def step200k():
    # Step-budget preset: stop at 200k optimizer steps (epoch limit is
    # deliberately loose so the step limit governs).
    max_epoch = 200
    max_steps = 200000
|
@ex.named_config
def vit32_base():
    # Backbone preset: ViT-Base with 32x32 patches at 384px input
    # (per the timm-style model name below).
    vit = 'vit_base_patch32_384'
    patch_size = 32
    hidden_size = 768
    num_heads = 12
    num_layers = 12
|
class NoopResetEnv(gym.Wrapper):
'Sample initial states by taking random number of no-ops on reset.\n\n No-op is assumed to be action 0.\n\n :param gym.Env env: the environment to wrap.\n :param int noop_max: the maximum value of no-ops to run.\n '
def __init__(self, env, noop_max=30):
super().__... |
class MaxAndSkipEnv(gym.Wrapper):
'Return only every `skip`-th frame (frameskipping) using most recent raw\n observations (for max pooling across time steps)\n\n :param gym.Env env: the environment to wrap.\n :param int skip: number of `skip`-th frame.\n '
def __init__(self, env, skip=4):
super()... |
class EpisodicLifeEnv(gym.Wrapper):
'Make end-of-life == end-of-episode, but only reset on true game over.\n\n It helps the value estimation.\n\n :param gym.Env env: the environment to wrap.\n '
def __init__(self, env):
super().__init__(env)
self.lives = 0
self.was_real_done = True... |
class FireResetEnv(gym.Wrapper):
'Take action on reset for environments that are fixed until firing.\n\n Related discussion: https://github.com/openai/baselines/issues/240\n\n :param gym.Env env: the environment to wrap.\n '
def __init__(self, env):
super().__init__(env)
assert (env.unwrap... |
class WarpFrame(gym.ObservationWrapper):
'Warp frames to 84x84 as done in the Nature paper and later work.\n\n :param gym.Env env: the environment to wrap.\n '
def __init__(self, env):
super().__init__(env)
self.size = 84
self.observation_space = gym.spaces.Box(low=np.min(env.observ... |
class ScaledFloatFrame(gym.ObservationWrapper):
'Normalize observations to 0~1.\n\n :param gym.Env env: the environment to wrap.\n '
def __init__(self, env):
super().__init__(env)
low = np.min(env.observation_space.low)
high = np.max(env.observation_space.high)
self.bias = l... |
class ClipRewardEnv(gym.RewardWrapper):
'clips the reward to {+1, 0, -1} by its sign.\n\n :param gym.Env env: the environment to wrap.\n '
def __init__(self, env):
super().__init__(env)
self.reward_range = ((- 1), 1)
def reward(self, reward):
'Bin reward to {+1, 0, -1} by its s... |
class FrameStack(gym.Wrapper):
'Stack n_frames last frames.\n\n :param gym.Env env: the environment to wrap.\n :param int n_frames: the number of frames to stack.\n '
def __init__(self, env, n_frames):
super().__init__(env)
self.n_frames = n_frames
self.frames = deque([], maxlen=n_... |
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=4, scale=False, warp_frame=True):
'Configure environment for DeepMind-style Atari.\n\n The observation is channel-first: (c, h, w) instead of (h, w, c).\n\n :param str env: the raw atari environment.\n :param bool episode_life: wrap the e... |
def reset_data() -> None:
    """Rebind the module-level ``data`` dict to fresh, empty result columns."""
    global data
    data = {column: [] for column in ('Num. Workers', 'FPS', 'Env', 'System', 'Method')}
|
def parse_table(env: str, system: str, suffix: str) -> None:
private_copy = {'Num. Workers': [], 'FPS': [], 'Env': [], 'System': [], 'Method': []}
sep = f'<!-- {env} - {system} -->'
raw = open('README.md').read().split(sep)[1].strip().splitlines()
worker_num = list(map(int, raw[0].split('|')[2:(- 1)])... |
def benchmark(suffix: str) -> None:
global data
reset_data()
for env in ['Atari', 'Mujoco']:
for system in ['Laptop', 'Workstation', 'TPU-VM', 'DGX-A100']:
parse_table(env, system, suffix)
data = pd.DataFrame(data)
print(data.groupby(['Env', 'Method', 'System']).max())
def... |
def run_dmc(env, action, frame_skip, total_step):
ts = env.reset()
t = time.time()
for i in tqdm.trange(total_step):
if (ts.discount == 0):
ts = env.reset()
else:
ts = env.step(action[i])
fps = ((frame_skip * total_step) / (time.time() - t))
print(f'FPS(dmc)... |
def run_envpool(env, action, frame_skip, total_step):
ts = env.reset()
t = time.time()
for i in tqdm.trange(total_step):
if (ts.discount[0] == 0):
ts = env.reset()
else:
ts = env.step(action[i:(i + 1)])
fps = ((frame_skip * total_step) / (time.time() - t))
p... |
def run(env, num_envs, total_step, async_):
if (env == 'atari'):
task_id = 'PongNoFrameskip-v4'
frame_skip = 4
if (num_envs == 1):
env = wrap_deepmind(gym.make(task_id), episode_life=False, clip_rewards=False, frame_stack=4)
else:
env = gym.vector.make(task_... |
def get_version() -> str:
    """Extract the version string from ../envpool/__init__.py.

    Tokenizes the file on whitespace and takes the token two past
    '__version__' (skipping the '='), stripping its surrounding quotes.
    Assumes the assignment is written as ``__version__ = "x.y.z"`` with
    spaces around '=' — confirm against the package source.
    """
    init_path = os.path.join('..', 'envpool', '__init__.py')
    with open(init_path, 'r') as f:
        tokens = f.read().split()
    version_literal = tokens[tokens.index('__version__') + 2]
    return version_literal[1:-1]
|
def setup(app):
    # Documentation-build extension hook (app exposes add_js_file /
    # add_css_file — presumably a Sphinx application; confirm).
    # Registers the extra static assets used by the rendered docs.
    app.add_js_file('js/copybutton.js')
    app.add_css_file('css/style.css')
|
class _SpecTest(absltest.TestCase):
@no_type_check
def test_spec(self) -> None:
action_nums = {'pong': 6, 'breakout': 4}
for task in ['pong', 'breakout']:
action_num = action_nums[task]
spec = make_spec((task.capitalize() + '-v5'))
logging.info(spec)
... |
class _DMSyncTest(absltest.TestCase):
@no_type_check
def test_spec(self) -> None:
action_nums = {'pong': 6, 'breakout': 4}
for task in ['pong', 'breakout']:
action_num = action_nums[task]
env = make_dm((task.capitalize() + '-v5'))
self.assertIsInstance(env,... |
class _GymSyncTest(absltest.TestCase):
@no_type_check
def test_spec(self) -> None:
action_nums = {'pong': 6, 'breakout': 4}
for task in ['pong', 'breakout']:
action_num = action_nums[task]
env = make_gym((task.capitalize() + '-v5'))
self.assertIsInstance(en... |
class DQN(nn.Module):
'Reference: Human-level control through deep reinforcement learning.'
def __init__(self, c: int, h: int, w: int, action_shape: Sequence[int], device: Union[(str, int, torch.device)]='cpu', features_only: bool=False) -> None:
'Constructor of DQN.'
super().__init__()
... |
class C51(DQN):
'Reference: A distributional perspective on reinforcement learning.'
def __init__(self, c: int, h: int, w: int, action_shape: Sequence[int], num_atoms: int=51, device: Union[(str, int, torch.device)]='cpu') -> None:
'Constructor of C51.'
self.action_num = np.prod(action_shape)... |
class QRDQN(DQN):
'Reference: Distributional Reinforcement Learning with Quantile Regression.'
def __init__(self, c: int, h: int, w: int, action_shape: Sequence[int], num_quantiles: int=200, device: Union[(str, int, torch.device)]='cpu') -> None:
'Constructor of QRDQN.'
self.action_num = np... |
class _AtariPretrainTest(absltest.TestCase):
def eval_qrdqn(self, task: str, resume_path: str, num_envs: int=10, seed: int=0, target_reward: float=0.0) -> None:
env = make_gym((task.capitalize() + '-v5'), num_envs=num_envs, seed=seed)
state_shape = env.observation_space.shape
action_shape... |
class _Box2dEnvPoolCorrectnessTest(absltest.TestCase):
def run_space_check(self, env0: gym.Env, env1: Any) -> None:
'Check observation_space and action space.'
(obs0, obs1) = (env0.observation_space, env1.observation_space)
np.testing.assert_allclose(obs0.shape, obs1.shape)
(act0,... |
class _Box2dEnvPoolDeterministicTest(absltest.TestCase):
def run_deterministic_check(self, task_id: str, num_envs: int=4, **kwargs: Any) -> None:
env0 = make_gym(task_id, num_envs=num_envs, seed=0, **kwargs)
env1 = make_gym(task_id, num_envs=num_envs, seed=0, **kwargs)
env2 = make_gym(tas... |
class _ClassicControlEnvPoolTest(absltest.TestCase):
@no_type_check
def run_space_check(self, env0: gym.Env, env1: Any) -> None:
'Check if envpool.observation_space == gym.make().observation_space.'
(obs0, obs1) = (env0.observation_space, env1.observation_space)
np.testing.assert_allc... |
class _DummyEnvPoolTest(absltest.TestCase):
def test_config(self) -> None:
ref_config_keys = ['num_envs', 'batch_size', 'num_threads', 'max_num_players', 'thread_affinity_offset', 'base_path', 'seed', 'gym_reset_return_info', 'state_num', 'action_num', 'max_episode_steps']
default_conf = _DummyEn... |
class _MakeTest(absltest.TestCase):
def test_version(self) -> None:
print(envpool.__version__)
def test_list_all_envs(self) -> None:
pprint.pprint(envpool.list_all_envs())
def test_make_atari(self) -> None:
self.assertRaises(TypeError, envpool.make, 'Pong-v5')
spec = env... |
class _MiniGridEnvPoolAlignTest(absltest.TestCase):
def check_spec(self, spec0: gym.spaces.Space, spec1: gym.spaces.Space) -> None:
self.assertEqual(spec0.dtype, spec1.dtype)
if isinstance(spec0, gym.spaces.Discrete):
self.assertEqual(spec0.n, spec1.n)
elif isinstance(spec0, g... |
class _MiniGridEnvPoolDeterministicTest(absltest.TestCase):
def run_deterministic_check(self, task_id: str, num_envs: int=4, total: int=5000, seed: int=1, **kwargs: Any) -> None:
env0 = make_gym(task_id, num_envs=num_envs, seed=0, **kwargs)
env1 = make_gym(task_id, num_envs=num_envs, seed=0, **kw... |
class _MujocoDmcAlignTest(absltest.TestCase):
def run_space_check(self, env0: dm_env.Environment, env1: Any) -> None:
'Check observation_spec() and action_spec().'
(obs0, obs1) = (env0.observation_spec(), env1.observation_spec())
for k in obs0:
self.assertTrue(hasattr(obs1, k)... |
class _MujocoDmcDeterministicTest(absltest.TestCase):
def check(self, domain: str, task: str, obs_keys: List[str], blacklist: Optional[List[str]]=None, num_envs: int=4) -> None:
domain_name = ''.join([(g[:1].upper() + g[1:]) for g in domain.split('_')])
task_name = ''.join([(g[:1].upper() + g[1:]... |
class _MujocoDmcSuiteExtAlignTest(absltest.TestCase):
def run_space_check(self, env0: dm_env.Environment, env1: Any) -> None:
'Check observation_spec() and action_spec().'
(obs0, obs1) = (env0.observation_spec(), env1.observation_spec())
for k in obs0:
self.assertTrue(hasattr(... |
class _MujocoDmcSuiteExtDeterministicTest(absltest.TestCase):
def check(self, domain: str, task: str, obs_keys: List[str], blacklist: Optional[List[str]]=None, num_envs: int=4) -> None:
domain_name = ''.join([(g[:1].upper() + g[1:]) for g in domain.split('_')])
task_name = ''.join([(g[:1].upper()... |
class _MujocoGymAlignTest(absltest.TestCase):
@no_type_check
def run_space_check(self, env0: gym.Env, env1: Any) -> None:
'Check observation_space and action space.'
'Check observation_space and action space.'
(obs0, obs1) = (env0.observation_space, env1.observation_space)
np.... |
class _MujocoGymDeterministicTest(absltest.TestCase):
def check(self, task_id: str, num_envs: int=4) -> None:
env0 = make_gym(task_id, num_envs=num_envs, seed=0)
env1 = make_gym(task_id, num_envs=num_envs, seed=0)
env2 = make_gym(task_id, num_envs=num_envs, seed=1)
act_space = env... |
class _ProcgenEnvPoolTest(absltest.TestCase):
def deterministic_check(self, task_id: str, num_envs: int=4, total: int=200) -> None:
logging.info(f'deterministic check for {task_id}')
env0 = make_gym(task_id, num_envs=num_envs, seed=0)
env1 = make_gym(task_id, num_envs=num_envs, seed=0)
... |
def py_env(envspec: Type[EnvSpec], envpool: Type[EnvPool]) -> Tuple[(Type[EnvSpec], Type[EnvPool], Type[EnvPool], Type[EnvPool])]:
'Initialize EnvPool for users.'
spec_name = envspec.__name__[1:]
pool_name = envpool.__name__[1:]
return (EnvSpecMeta(spec_name, (envspec,), {}), DMEnvPoolMeta(pool_name.r... |
def to_nested_dict(flatten_dict: Dict[(str, Any)], generator: Type=dict) -> Dict[(str, Any)]:
'Convert a flat dict to a hierarchical dict.\n\n The input dict\'s hierarchy is denoted by ``.``.\n\n Example:\n ::\n\n >>> to_nested_dict({"a.b": 2333, "a.c": 666})\n {"a": {"b": 2333, "c": 666}}\n\n Arg... |
def to_namedtuple(name: str, hdict: Dict) -> Tuple:
    """Recursively convert a hierarchical dict into nested namedtuples.

    Each dict becomes a namedtuple type named after its key (the root
    uses ``name``); non-dict values are carried through unchanged.
    """
    nt_cls = namedtuple(name, hdict.keys())
    values = []
    for key, value in hdict.items():
        if isinstance(value, Dict):
            value = to_namedtuple(key, value)
        values.append(value)
    return nt_cls(*values)
|
def dm_spec_transform(name: str, spec: ArraySpec, spec_type: str) -> dm_env.specs.Array:
'Transform ArraySpec to dm_env compatible specs.'
if ((np.prod(np.abs(spec.shape)) == 1) and np.isclose(spec.minimum, 0) and (spec.maximum < ACTION_THRESHOLD)):
return dm_env.specs.DiscreteArray(name=name, dtype=s... |
def gym_spec_transform(name: str, spec: ArraySpec, spec_type: str) -> gym.Space:
'Transform ArraySpec to gym.Env compatible spaces.'
if ((np.prod(np.abs(spec.shape)) == 1) and np.isclose(spec.minimum, 0) and (spec.maximum < ACTION_THRESHOLD)):
discrete_range = int(((spec.maximum - spec.minimum) + 1))
... |
def gymnasium_spec_transform(name: str, spec: ArraySpec, spec_type: str) -> gymnasium.Space:
'Transform ArraySpec to gymnasium.Env compatible spaces.'
if ((np.prod(np.abs(spec.shape)) == 1) and np.isclose(spec.minimum, 0) and (spec.maximum < ACTION_THRESHOLD)):
discrete_range = int(((spec.maximum - sp... |
def dm_structure(root_name: str, keys: List[str]) -> Tuple[(List[Tuple[(int, ...)]], List[int], PyTreeSpec)]:
'Convert flat keys into tree structure for namedtuple construction.'
new_keys = []
for key in keys:
if (key in ['obs', 'info']):
key = f'obs:{key}'
key = key.replace('i... |
def gym_structure(keys: List[str]) -> Tuple[(List[Tuple[(str, ...)]], List[int], PyTreeSpec)]:
'Convert flat keys into tree structure for dict construction.'
keys = [k.replace(':', '.') for k in keys]
dict_tree = to_nested_dict(dict(zip(keys, list(range(len(keys))))))
(paths, indices, treespec) = optr... |
class DMEnvPoolMixin(ABC):
'Special treatment for dm_env API.'
def observation_spec(self: Any) -> Tuple:
'Observation spec from EnvSpec.'
if (not hasattr(self, '_dm_observation_spec')):
self._dm_observation_spec = self.spec.observation_spec()
return self._dm_observation_sp... |
class DMEnvPoolMeta(ABCMeta):
'Additional wrapper for EnvPool dm_env API.'
def __new__(cls: Any, name: str, parents: Tuple, attrs: Dict) -> Any:
'Check internal config and initialize data format convertion.'
base = parents[0]
try:
from .lax import XlaMixin
pare... |
class EnvSpecMixin(ABC):
'Mixin class for EnvSpec, exposed to EnvSpecMeta.'
gen_config: Type
@property
def config(self: EnvSpec) -> NamedTuple:
'Configuration used to create the current EnvSpec.'
return self.gen_config(*self._config_values)
@property
def reward_threshold(self... |
class EnvSpecMeta(ABCMeta):
'Additional checker and wrapper for EnvSpec.'
def __new__(cls: Any, name: str, parents: Tuple, attrs: Dict) -> Any:
'Check keys and initialize namedtuple config.'
base = parents[0]
parents = (base, EnvSpecMixin)
config_keys = base._config_keys
... |
class EnvPoolMixin(ABC):
'Mixin class for EnvPool, exposed to EnvPoolMeta.'
_spec: EnvSpec
def _check_action(self: EnvPool, actions: List[np.ndarray]) -> None:
if hasattr(self, '_check_action_finished'):
return
self._check_action_finished = True
for (a, (k, v)) in zip(... |
class GymEnvPoolMixin(ABC):
'Special treatment for gym API.'
@property
def observation_space(self: Any) -> Union[(gym.Space, Dict[(str, Any)])]:
'Observation space from EnvSpec.'
if (not hasattr(self, '_gym_observation_space')):
self._gym_observation_space = self.spec.observat... |
class GymEnvPoolMeta(ABCMeta, gym.Env.__class__):
'Additional wrapper for EnvPool gym.Env API.'
def __new__(cls: Any, name: str, parents: Tuple, attrs: Dict) -> Any:
'Check internal config and initialize data format convertion.'
base = parents[0]
try:
from .lax import XlaM... |
class GymnasiumEnvPoolMixin(ABC):
'Special treatment for gymnasim API.'
@property
def observation_space(self: Any) -> Union[(gymnasium.Space, Dict[(str, Any)])]:
'Observation space from EnvSpec.'
if (not hasattr(self, '_gym_observation_space')):
self._gym_observation_space = s... |
class GymnasiumEnvPoolMeta(ABCMeta, gymnasium.Env.__class__):
'Additional wrapper for EnvPool gymnasium.Env API.'
def __new__(cls: Any, name: str, parents: Tuple, attrs: Dict) -> Any:
'Check internal config and initialize data format convertion.'
base = parents[0]
try:
fro... |
class XlaMixin(ABC):
'Mixin to provide XLA for envpool class.'
def xla(self: Any) -> Tuple[(Any, Callable, Callable, Callable)]:
'Return the XLA version of send/recv/step functions.'
(_handle, _recv, _send) = make_xla(self)
def recv(handle: jnp.ndarray) -> Union[(TimeStep, Tuple)]:
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.