# NOTE(review): removed dataset-extraction residue ("code stringlengths ... / |---|")
# that preceded the sacred experiment configuration definitions below.
@ex.named_config
def gsn_transfer_residual_prenorm():
    # Named config: GenericSidetuneNetwork with a residual TransferConv3 transfer
    # head (8 channels) and normalization applied before the transfer.
    # NOTE(review): an identical named config with this same name appears again
    # later in the file — confirm only one registration is intended.
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'transfer_class': 'TransferConv3', 'transfer_weights_path': None, 'transfer_kwargs': {'n_channels': 8, 'residual': True}, 'normalize_pre_transfer': True}}}
|
@ex.config
def cfg_base():
    # Base (always-applied) sacred config. Named configs below override subsets
    # of these keys. Local variables defined here become config entries.
    uuid = 'no_uuid'
    cfg = {}
    # Learner: model selection plus optimizer / LR-schedule hyperparameters.
    cfg['learner'] = {'model': 'atari_residual', 'model_kwargs': {}, 'eps': 1e-05, 'lr': 0.001, 'optimizer_class': 'optim.Adam', 'optimizer_kwargs': {'weight_decay': 0.0001}, 'lr_scheduler_method': None, 'lr_scheduler_method_kwargs': {}, 'max_grad_norm': 1.0, 'scheduler': 'plateau'}
    # Training: data sources/targets, loss, dataloader and run-control options.
    # Note the seed is randomized at config-construction time.
    cfg['training'] = {'algo': 'student', 'post_aggregation_transform_fn': None, 'post_aggregation_transform_fn_kwargs': {}, 'batch_size': 32, 'batch_size_val': 32, 'cuda': True, 'loss_fn': 'L1', 'loss_kwargs': {}, 'loss_list': ['total'], 'regularizer_fn': None, 'regularizer_kwargs': {}, 'epochs': 100, 'num_epochs': 100, 'resume_from_checkpoint_path': None, 'resume_training': False, 'resume_w_no_add_epochs': False, 'seed': random.randint(0, 1000), 'suppress_target_and_use_annotator': False, 'sources': ['rgb'], 'sources_as_dict': False, 'targets': ['normal_decoding'], 'train': True, 'test': False, 'use_masks': True, 'dataloader_fn': None, 'dataloader_fn_kwargs': {}, 'data_dir': '/mnt/hdd2/taskonomy_reps', 'load_to_mem': False, 'num_workers': 8, 'pin_memory': True, 'split_to_use': 'splits.taskonomy_no_midlevel["debug"]'}
    # Saving/logging: checkpoint cadence, tensorboard and visdom endpoints.
    cfg['saving'] = {'obliterate_logs': False, 'log_dir': LOG_DIR, 'log_interval': 0.25, 'ticks_per_epoch': 100, 'logging_type': 'tensorboard', 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'save_interval': 1, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'visdom_server': 'localhost', 'visdom_port': '8097', 'in_background': False}
|
@ex.named_config
def taskonomy_data():
    # Named config: load data via the Taskonomy dataloader ('debug' folders).
    cfg = {'training': {'dataloader_fn': 'taskonomy_dataset.get_dataloaders', 'dataloader_fn_kwargs': {'data_path': '/mnt/data/', 'train_folders': 'debug', 'val_folders': 'debug', 'test_folders': 'debug', 'load_to_mem': False, 'num_workers': 8, 'pin_memory': True}}}
|
@ex.named_config
def imagenet_data():
    # Named config: ImageNet classification data; cross-entropy loss, no masks.
    cfg = {'training': {'split_to_use': 'splits.taskonomy_no_midlevel["debug"]', 'dataloader_fn': 'imagenet_dataset.get_dataloaders', 'dataloader_fn_kwargs': {'data_path': '/mnt/data/ILSVRC2012', 'load_to_mem': False, 'num_workers': 8, 'pin_memory': False}, 'loss_fn': 'softmax_cross_entropy', 'loss_kwargs': {}, 'use_masks': False}}
|
@ex.named_config
def rotating_data():
    # Named config: rgb+rotation sources with a batch-rotation transform and a
    # dummy lifelong Taskonomy network (MLP decoder, 1000 outputs).
    cfg = {'training': {'suppress_target_and_use_annotator': False, 'sources': ['rgb', 'rotation'], 'post_aggregation_transform_fn': 'imagenet_dataset.RotateBatch()', 'post_aggregation_transform_fn_kwargs': {}}, 'learner': {'model': 'DummyLifelongTaskonomyNetwork', 'model_kwargs': {'out_channels': 1000, 'trainable': True, 'is_decoder_mlp': True}}}
|
@ex.named_config
def icifar0_data():
    # Named config: incremental-CIFAR dataloader for the first task (classes 0-9).
    cfg = {'training': {'dataloader_fn': 'icifar_dataset.get_cifar_dataloaders', 'dataloader_fn_kwargs': {'data_path': '/mnt/data/', 'load_to_mem': False, 'num_workers': 8, 'pin_memory': True, 'epochlength': 2000, 'sources': ['rgb'], 'targets': [['cifar0-9']], 'masks': None}, 'use_masks': False, 'targets': ['class_object']}}
|
@ex.named_config
def data_size_few100():
    # Named config: tiny 'few100' split — many epochs, sparse logging/saving.
    # The split is stored as a string and resolved later by the trainer.
    cfg = {'training': {'split_to_use': "splits.taskonomy_no_midlevel['few100']", 'num_epochs': 10000}, 'saving': {'ticks_per_epoch': 1, 'log_interval': 250, 'save_interval': 1000}}
|
@ex.named_config
def data_size_fullplus():
    # Named config: large 'fullplus' split — few epochs, frequent logging/saving.
    # Fix: 'split_to_use' must be the *string* form of the split expression
    # (resolved later by the trainer), matching cfg_base and data_size_few100;
    # the original evaluated splits.taskonomy_no_midlevel['fullplus'] eagerly.
    cfg = {'training': {'split_to_use': "splits.taskonomy_no_midlevel['fullplus']", 'num_epochs': 10}, 'saving': {'ticks_per_epoch': 0.25, 'log_interval': 1, 'save_interval': 1}}
|
@ex.named_config
def model_resnet_cifar():
    # Named config: FCN5SkipCifar model (2 groups, no residual, unnormalized).
    cfg = {'learner': {'model': 'FCN5SkipCifar', 'model_kwargs': {'num_groups': 2, 'use_residual': False, 'normalize_outputs': False}}}
|
@ex.named_config
def model_taskonomy():
    # Named config: full TaskonomyNetwork with a 3-channel output, trainable.
    # NOTE(review): identical to model_taskonomy_net below — confirm whether
    # both aliases are needed.
    cfg = {'learner': {'model': 'TaskonomyNetwork', 'model_kwargs': {'out_channels': 3, 'eval_only': False}}}
|
@ex.named_config
def model_taskonomy_class():
    # Named config: TaskonomyNetwork for 1000-way classification (MLP decoder).
    cfg = {'learner': {'model': 'TaskonomyNetwork', 'model_kwargs': {'out_channels': 1000, 'trainable': True, 'is_decoder_mlp': True}}, 'training': {'sources': ['rgb'], 'targets': ['class_object']}}
|
@ex.named_config
def model_fcn8():
    # Named config: FCN8 model with unnormalized outputs.
    cfg = {'learner': {'model': 'FCN8', 'model_kwargs': {'normalize_outputs': False}}}
|
@ex.named_config
def model_fcn5():
    # Named config: FCN5Residual model with residual connections disabled.
    cfg = {'learner': {'model': 'FCN5Residual', 'model_kwargs': {'num_groups': 2, 'use_residual': False, 'normalize_outputs': False}}}
|
@ex.named_config
def model_fcn5_residual():
    # Named config: FCN5Residual model with residual connections enabled.
    cfg = {'learner': {'model': 'FCN5Residual', 'model_kwargs': {'num_groups': 2, 'use_residual': True, 'normalize_outputs': False}}}
|
@ex.named_config
def model_fcn5_skip():
    # Named config: FCN5 (skip variant) without residual connections.
    cfg = {'learner': {'model': 'FCN5', 'model_kwargs': {'num_groups': 2, 'use_residual': False, 'normalize_outputs': False}}}
|
@ex.named_config
def model_fcn5_skip_residual():
    # Named config: FCN5 (skip variant) with residual connections.
    cfg = {'learner': {'model': 'FCN5', 'model_kwargs': {'num_groups': 2, 'use_residual': True, 'normalize_outputs': False}}}
|
@ex.named_config
def model_fcn3():
    # Named config: small FCN3 model.
    cfg = {'learner': {'model': 'FCN3', 'model_kwargs': {'num_groups': 2, 'normalize_outputs': False}}}
|
@ex.named_config
def model_taskonomy_net():
    # Named config: full TaskonomyNetwork, 3 output channels, trainable.
    # NOTE(review): identical to model_taskonomy above.
    cfg = {'learner': {'model': 'TaskonomyNetwork', 'model_kwargs': {'out_channels': 3, 'eval_only': False}}}
|
@ex.named_config
def model_sidetune_encoding():
    # Named config: sidetuning — frozen TaskonomyEncoder base (baked encodings)
    # plus a trainable FCN5 side network, decoded by a trainable TaskonomyDecoder.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'eval_only': True, 'normalize_outputs': True}, 'use_baked_encoding': True, 'side_class': 'FCN5', 'side_kwargs': {'normalize_outputs': True}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': False}}}}
    # Remove the helper local so sacred does not capture it as a config entry.
    del n_channels_out
|
@ex.named_config
def model_nosidetune_encoding():
    # Named config: frozen TaskonomyEncoder base with NO side network
    # (side_class None); decoder is trainable.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'eval_only': True, 'normalize_outputs': True}, 'use_baked_encoding': True, 'side_class': None, 'side_kwargs': {}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': False}}}}
    # Remove the helper local so sacred does not capture it as a config entry.
    del n_channels_out
|
@ex.named_config
def model_pix_only_side():
    # Named config: pixels-only — no base network, trainable FCN5 side network
    # and trainable decoder.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': None, 'base_weights_path': None, 'base_kwargs': {}, 'use_baked_encoding': False, 'side_class': 'FCN5', 'side_kwargs': {'normalize_outputs': True}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': False}}}}
    # Remove the helper local so sacred does not capture it as a config entry.
    del n_channels_out
|
@ex.named_config
def model_pix_only_encoder():
    # Named config: pixels through a *trainable* TaskonomyEncoder base, no side
    # network; trainable decoder.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'eval_only': False, 'normalize_outputs': True}, 'use_baked_encoding': False, 'side_class': None, 'side_kwargs': {}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': False}}}}
    # Remove the helper local so sacred does not capture it as a config entry.
    del n_channels_out
|
@ex.named_config
def model_transfer_sidetune_encoding():
    # Named config: sidetuning with a TransferConv3 transfer head; frozen base
    # and frozen decoder, trainable FCN5 side network.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'eval_only': True, 'normalize_outputs': True}, 'use_baked_encoding': True, 'side_class': 'FCN5', 'side_kwargs': {'normalize_outputs': True}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': True}, 'transfer_class': 'TransferConv3', 'transfer_weights_path': None, 'transfer_kwargs': {'n_channels': 8}}}}
    # Remove the helper local so sacred does not capture it as a config entry.
    del n_channels_out
|
@ex.named_config
def model_transfer_nosidetune_encoding():
    # Named config: transfer head without a side network; frozen base/decoder.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'eval_only': True, 'normalize_outputs': True}, 'use_baked_encoding': True, 'side_class': None, 'side_kwargs': {}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': True}, 'transfer_class': 'TransferConv3', 'transfer_weights_path': None, 'transfer_kwargs': {'n_channels': 8}}}}
    # Remove the helper local so sacred does not capture it as a config entry.
    del n_channels_out
|
@ex.named_config
def model_transfer_pix_only_encoder():
    # Named config: trainable encoder, no side network, transfer head; frozen
    # decoder.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'eval_only': False, 'normalize_outputs': True}, 'use_baked_encoding': False, 'side_class': None, 'side_kwargs': {}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': True}, 'transfer_class': 'TransferConv3', 'transfer_weights_path': None, 'transfer_kwargs': {'n_channels': 8}}}}
    # Remove the helper local so sacred does not capture it as a config entry.
    del n_channels_out
|
@ex.named_config
def model_transfer_pix_only_side():
    # Named config: no base network, trainable FCN5 side network, transfer head;
    # frozen decoder. Note base_kwargs is None here (other configs use {}).
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': None, 'base_weights_path': None, 'base_kwargs': None, 'use_baked_encoding': False, 'side_class': 'FCN5', 'side_kwargs': {'normalize_outputs': True}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': True}, 'transfer_class': 'TransferConv3', 'transfer_weights_path': None, 'transfer_kwargs': {'n_channels': 8}}}}
    # Remove the helper local so sacred does not capture it as a config entry.
    del n_channels_out
|
@ex.named_config
def model_taskonomy_decoder():
    # Named config: standalone trainable TaskonomyDecoder (3 output channels).
    cfg = {'learner': {'model': 'TaskonomyDecoder', 'model_kwargs': {'out_channels': 3, 'eval_only': False}}}
|
@ex.named_config
def model_blind():
    # Named config: blind baseline — always outputs a fixed (median) image.
    cfg = {'learner': {'model': 'ConstantModel', 'model_kwargs': {'data': '/mnt/data/normal/median_tiny.png'}}, 'training': {'sources': ['rgb']}}
|
@ex.named_config
def model_unet():
    # Named config: plain UNet with 6 downsampling stages.
    # Fix: the kwarg was misspelled 'dcwnsample'; the heteroscedastic UNet
    # configs in this file use 'downsample', and a misspelled kwarg would be
    # rejected (or silently ignored) by the model constructor.
    cfg = {'learner': {'model': 'UNet', 'model_kwargs': {'downsample': 6}}}
|
@ex.named_config
def model_unet_heteroscedastic():
    # Named config: heteroscedastic UNet (predicts per-pixel uncertainty).
    cfg = {'learner': {'model': 'UNetHeteroscedastic', 'model_kwargs': {'downsample': 6}}}
|
@ex.named_config
def model_unet_hetero_pooled():
    # Named config: heteroscedastic UNet with pooled uncertainty.
    cfg = {'learner': {'model': 'UNetHeteroscedasticPooled', 'model_kwargs': {'downsample': 6}}}
|
@ex.named_config
def gsn_base_resnet50():
    # Named config: GSN base = frozen TaskonomyEncoder (ResNet-50), baked encodings.
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'eval_only': True, 'normalize_outputs': False}, 'use_baked_encoding': True}}}
|
@ex.named_config
def gsn_base_fcn5s():
    # Named config: GSN base = frozen FCN5, baked encodings.
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'base_class': 'FCN5', 'base_weights_path': None, 'base_kwargs': {'img_channels': 3, 'eval_only': True, 'normalize_outputs': False}, 'use_baked_encoding': True}}}
|
@ex.named_config
def gsn_base_learned():
    # Named config: make the GSN base trainable (no baked encodings).
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'base_kwargs': {'eval_only': False}, 'use_baked_encoding': False}}}
|
@ex.named_config
def gsn_side_resnet50():
    # Named config: GSN side network = trainable TaskonomyEncoder (ResNet-50).
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'side_class': 'TaskonomyEncoder', 'side_weights_path': None, 'side_kwargs': {'eval_only': False, 'normalize_outputs': False}}}}
|
@ex.named_config
def gsn_side_fcn5s():
    # Named config: GSN side network = trainable FCN5.
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'side_class': 'FCN5', 'side_weights_path': None, 'side_kwargs': {'img_channels': 3, 'eval_only': False, 'normalize_outputs': False}}}}
|
@ex.named_config
def gsn_side_frozen():
    # Named config: freeze the GSN side network.
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'side_kwargs': {'eval_only': True}}}}
|
@ex.named_config
def gsn_transfer_residual_prenorm():
    # Named config: residual TransferConv3 transfer head with pre-transfer
    # normalization.
    # NOTE(review): duplicate of the identically-named config earlier in the
    # file — confirm only one registration is intended.
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'transfer_class': 'TransferConv3', 'transfer_weights_path': None, 'transfer_kwargs': {'n_channels': 8, 'residual': True}, 'normalize_pre_transfer': True}}}
|
@ex.named_config
def gsn_merge_concat():
    # Named config: merge base and side outputs by concatenation (not alpha
    # blending); transfer input doubles to 2*8 channels accordingly.
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'transfer_class': 'TransferConv3', 'transfer_weights_path': None, 'transfer_kwargs': {'n_channels': 8, 'n_channels_in': (2 * 8), 'residual': True}, 'normalize_pre_transfer': True, 'alpha_blend': False, 'concat': True}}}
|
@ex.named_config
def loss_distill_cross_entropy():
    # Named config: distillation — dense cross-entropy against labels produced
    # by a frozen ResNet-44 CIFAR annotator instead of the ground-truth target.
    cfg = {}
    cfg['learner'] = {'lr': 0.001, 'optimizer_kwargs': {'weight_decay': 1e-06}}
    cfg['training'] = {'loss_fn': 'dense_softmax_cross_entropy', 'targets': ['class_object'], 'loss_kwargs': {}, 'suppress_target_and_use_annotator': True, 'annotator_class': 'ResnetiCifar44', 'annotator_weights_path': '/mnt/models/resnet44-cifar.pth', 'annotator_kwargs': {}}
|
@ex.named_config
def loss_perceptual():
    # Named config: perceptual L1 loss in normal-encoding space, decoded via a
    # pretrained normal decoder (decodings baked).
    cfg = {}
    cfg['training'] = {'loss_fn': 'perceptual_l1', 'targets': ['normal_encoding'], 'loss_kwargs': {'decoder_path': '/mnt/models/normal_decoder.dat', 'bake_decodings': True}, 'suppress_target_and_use_annotator': False, 'annotator_class': 'TaskonomyEncoder', 'annotator_weights_path': '/mnt/models/normal_encoder.dat', 'annotator_kwargs': {'train': False, 'eval_only': True, 'normalize_outputs': False}}
|
@ex.named_config
def loss_perceptual_l2():
    # Named config: perceptual L2 variant of loss_perceptual.
    cfg = {}
    cfg['training'] = {'loss_fn': 'perceptual_l2', 'targets': ['normal_encoding'], 'loss_kwargs': {'decoder_path': '/mnt/models/normal_decoder.dat', 'bake_decodings': True}, 'suppress_target_and_use_annotator': False, 'annotator_class': 'TaskonomyEncoder', 'annotator_weights_path': '/mnt/models/normal_encoder.dat', 'annotator_kwargs': {'eval_only': True, 'normalize_outputs': False}}
|
@ex.named_config
def loss_perceptual_cross_entropy():
    # Named config: perceptual cross-entropy on class-object encodings via a
    # pretrained class-scene decoder.
    cfg = {}
    cfg['training'] = {'loss_fn': 'perceptual_cross_entropy', 'targets': ['class_object_encoding'], 'loss_kwargs': {'decoder_path': '/mnt/models/class_scene_decoder.dat', 'bake_decodings': True}, 'suppress_target_and_use_annotator': False, 'annotator_class': 'TaskonomyEncoder', 'annotator_weights_path': '/mnt/models/class_scene_encoder.dat', 'annotator_kwargs': {'eval_only': True, 'normalize_outputs': False}}
|
@ex.named_config
def loss_softmax_cross_entropy():
    # Named config: plain softmax cross-entropy on class labels with a very
    # small learning rate.
    cfg = {}
    cfg['learner'] = {'lr': 1e-06, 'optimizer_kwargs': {'weight_decay': 1e-05}}
    cfg['training'] = {'suppress_target_and_use_annotator': False, 'loss_fn': 'softmax_cross_entropy', 'targets': ['class_object'], 'loss_kwargs': {}}
|
@ex.named_config
def taskonomy_hp():
    # Named config: Taskonomy hyperparameters (lr / weight decay overrides).
    uuid = 'no_uuid'
    cfg = {}
    cfg['learner'] = {'lr': 0.0001, 'optimizer_kwargs': {'weight_decay': 2e-06}}
|
@ex.named_config
def scheduler_reduce_on_plateau():
    # Named config: ReduceLROnPlateau LR schedule (factor 0.1, patience 5).
    cfg = {'learner': {'lr_scheduler_method': 'lr_scheduler.ReduceLROnPlateau', 'lr_scheduler_method_kwargs': {'factor': 0.1, 'patience': 5}}}
|
@ex.named_config
def scheduler_step_lr():
    # Named config: StepLR schedule (decay every 30 epochs by 0.1).
    cfg = {'learner': {'lr_scheduler_method': 'lr_scheduler.StepLR', 'lr_scheduler_method_kwargs': {'lr_decay_epochs': 30, 'gamma': 0.1}}}
|
@ex.named_config
def test():
    # Named config: evaluation mode — skip training, run the test split.
    cfg = {'training': {'train': False, 'test': True}}
|
@ex.named_config
def treg():
    # Named config: transfer (weight-tying) regularizer with L1 penalty.
    cfg = {}
    cfg['training'] = {'regularizer_fn': 'transfer_regularizer', 'regularizer_kwargs': {'coef': 0.001, 'reg_loss_fn': 'F.l1_loss'}, 'loss_list': ['standard', 'final', 'weight_tying']}
|
@ex.named_config
def dreg_t():
    # Named config: perceptual (decoder-based) regularizer with MSE penalty,
    # using the transfer path.
    cfg = {}
    cfg['training'] = {'regularizer_fn': 'perceptual_regularizer', 'regularizer_kwargs': {'coef': 0.001, 'decoder_path': '/mnt/models/curvature_decoder.dat', 'reg_loss_fn': 'F.mse_loss'}, 'loss_list': ['standard', 'final', 'weight_tying']}
|
@ex.named_config
def dreg():
    # Named config: perceptual regularizer without the transfer path
    # (use_transfer False); otherwise identical to dreg_t.
    cfg = {}
    cfg['training'] = {'regularizer_fn': 'perceptual_regularizer', 'regularizer_kwargs': {'coef': 0.001, 'decoder_path': '/mnt/models/curvature_decoder.dat', 'use_transfer': False, 'reg_loss_fn': 'F.mse_loss'}, 'loss_list': ['standard', 'final', 'weight_tying']}
|
class BaseEmbodiedEnv(gym.Env):
    ' Abstract class for all embodied environments. '
    # Marker flag: code elsewhere in this file checks `is_embodied` (via
    # hasattr on env / env.unwrapped) to detect embodied environments.
    is_embodied = True
|
class DistributedEnv(object):
    """Factory for distributing a collection of environments.

    Environments can either be vectorized (run in subprocesses behind a
    single vectorized interface) or instantiated independently.
    """
    distribution_schemes = _distribution_schemes

    @classmethod
    def new(cls, envs, gae_gamma=None, distribution_method=_distribution_schemes.vectorize):
        """Create a distributed environment from an iterable of env thunks.

        Fix: the original default was `_distribution_schemes` (the scheme
        container itself), which matched neither branch below and raised
        NotImplementedError whenever the default was used. Default to the
        vectorize scheme, matching the only call site in this file.
        """
        if distribution_method == cls.distribution_schemes.vectorize:
            return cls.vectorized(envs, gae_gamma)
        elif distribution_method == cls.distribution_schemes.independent:
            return cls.independent(envs, gae_gamma)
        else:
            raise NotImplementedError

    @staticmethod
    def vectorized(envs, gae_gamma=None):
        """Vectorize an iterable of environments.

        Params:
            envs: an iterable of environments
            gae_gamma: if not None and the observation space is
                one-dimensional, apply the gamma parameter from GAE
                (via VecNormalize).
        """
        envs = SubprocVecEmbodiedEnv(envs)
        if gae_gamma is not None:
            # Only normalize flat (1-D) observation spaces — either a single
            # 1-D space inside a dict-like space, or a bare 1-D space.
            if hasattr(envs.observation_space, 'spaces') and (len(envs.observation_space.spaces) == 1) and (len(list(envs.observation_space.spaces.values())[0].shape) == 1):
                envs = VecNormalize(envs, gamma=gae_gamma)
            elif (not hasattr(envs.observation_space, 'spaces')) and (len(envs.observation_space.shape) == 1):
                envs = VecNormalize(envs, gamma=gae_gamma)
        return envs

    @staticmethod
    def independent(envs, gae_gamma=None):
        """Instantiate each env thunk independently (no vectorization)."""
        if gae_gamma is not None:
            raise NotImplementedError('gae_gamma not supported for "independent" distributed environments')
        return [e() for e in envs]
|
class EnvFactory(object):
    """Builds single or vectorized simulation environments from an env-id string."""

    @staticmethod
    def vectorized(env_id, seed, num_processes, log_dir, add_timestep, sensors=None, addl_repeat_count=0, preprocessing_fn=None, env_specific_kwargs=None, vis_interval=20, visdom_name='main', visdom_log_file=None, visdom_server='localhost', visdom_port='8097', num_val_processes=0, gae_gamma=None):
        """Returns vectorized environment. Either the simulator implements this
        (habitat) or 'vectorized' uses the call_to_run helper.

        Fix: `sensors` and `env_specific_kwargs` previously used mutable
        default arguments; `env_specific_kwargs` is mutated inside
        call_to_run's thunk (Doom branch), so the shared default dict leaked
        state across calls. Defaults are now None and filled in per call.
        """
        if sensors is None:
            sensors = {DEFAULT_SENSOR_NAME: None}
        if env_specific_kwargs is None:
            env_specific_kwargs = {}
        (simulator, scenario) = env_id.split('_')
        if simulator.lower() in ['habitat']:
            # Habitat implements its own vectorization.
            env = make_habitat_vector_env(scenario=scenario, num_processes=num_processes, preprocessing_fn=preprocessing_fn, log_dir=log_dir, num_val_processes=num_val_processes, vis_interval=vis_interval, visdom_name=visdom_name, visdom_log_file=visdom_log_file, visdom_server=visdom_server, visdom_port=visdom_port, seed=seed, **env_specific_kwargs)
        else:
            # Build one env thunk per process, then wrap them.
            envs = [EnvFactory.call_to_run(env_id, seed, rank, log_dir, add_timestep, sensors=sensors, addl_repeat_count=addl_repeat_count, preprocessing_fn=preprocessing_fn, env_specific_kwargs=env_specific_kwargs, vis_interval=vis_interval, visdom_name=visdom_name, visdom_log_file=visdom_log_file, visdom_server=visdom_server, visdom_port=visdom_port, num_val_processes=num_val_processes, num_processes=num_processes) for rank in range(num_processes)]
            if num_processes == 1:
                env = DummyVecEnv(envs)
            else:
                env = DistributedEnv.new(envs, gae_gamma=gae_gamma, distribution_method=DistributedEnv.distribution_schemes.vectorize)
        return env

    @staticmethod
    def call_to_run(env_id, seed, rank, log_dir, add_timestep, sensors=None, addl_repeat_count=0, preprocessing_fn=None, gibson_config=None, blank_sensor=False, start_locations_file=None, target_dim=16, blind=False, env_specific_kwargs=None, vis_interval=20, visdom_name='main', visdom_log_file=None, visdom_server='localhost', visdom_port='8097', num_val_processes=0, num_processes=1):
        """Returns a function which can be called to instantiate a new environment.

        Args:
            env_id: Name of the ID to make
            seed: random seed for environment
            rank: environment number (i of k)
            log_dir: directory to log to
            add_timestep: append elapsed steps to 1-D observations (not
                supported with SensorDict)
            sensors: A configuration of sensor names -> specs (for now, just none)
            preprocessing_fn(env): function which returns (transform, obs_shape)
                transform(obs): a function that is run on every obs
                obs_shape: the final shape of transform(obs)
            gibson_config: If using gibson, which config to use
            visdom_name: If using visdom, what to name the visdom environment
            visdom_log_file: Where to store visdom logging entries. This allows
                replaying training back to visdom. If None, visdom logging is
                disabled.
            visdom_server: visdom server ip (http:// is automatically appended)
            visdom_port: Which port the visdom server is listening on

        Returns:
            A callable function (no parameters) which instantiates an environment.
        """
        # Avoid shared mutable defaults (the thunk below writes into
        # env_specific_kwargs for Doom envs).
        if sensors is None:
            sensors = {DEFAULT_SENSOR_NAME: None}
        (simulator, scenario) = env_id.split('_')
        if env_specific_kwargs is None:
            env_specific_kwargs = {}

        def _thunk():
            # Some simulators already handle preprocessing/logging/distribution
            # internally; track that so we do not double-wrap below.
            preprocessing_fn_implemented_inside_env = False
            logging_implemented_inside_env = False
            already_distributed = False
            if env_id.startswith('dm'):
                (_, domain, task) = env_id.split('.')
                env = dm_control2gym.make(domain_name=domain, task_name=task)
            elif env_id.startswith('Gibson'):
                env = GibsonEnv(env_id=env_id, gibson_config=gibson_config, blind=blind, blank_sensor=blank_sensor, start_locations_file=start_locations_file, target_dim=target_dim, **env_specific_kwargs)
            elif env_id.startswith('DummyGibson'):
                env = DummyGibsonEnv(env_id=env_id, gibson_config=gibson_config, blind=blind, blank_sensor=blank_sensor, start_locations_file=start_locations_file, target_dim=target_dim, **env_specific_kwargs)
            elif env_id.startswith('Doom'):
                # Doom handles action repeat itself; train processes get one
                # texture set, val processes another.
                env_specific_kwargs['repeat_count'] = (addl_repeat_count + 1)
                num_train_processes = (num_processes - num_val_processes)
                env_specific_kwargs['randomize_textures'] = (1 if (rank < num_train_processes) else 2)
                vizdoom_class = eval(scenario.split('.')[0])
                env = vizdoom_class(**env_specific_kwargs)
            elif env_id.startswith('Habitat'):
                env = make_habitat_vector_env(num_processes=rank, target_dim=target_dim, preprocessing_fn=preprocessing_fn, log_dir=log_dir, num_val_processes=num_val_processes, visdom_name=visdom_name, visdom_log_file=visdom_log_file, visdom_server=visdom_server, visdom_port=visdom_port, seed=seed, **env_specific_kwargs)
                already_distributed = True
                preprocessing_fn_implemented_inside_env = True
                logging_implemented_inside_env = True
            else:
                env = gym.make(env_id)
            if already_distributed:
                return env
            is_atari = (hasattr(gym.envs, 'atari') and isinstance(env.unwrapped, gym.envs.atari.atari_env.AtariEnv))
            if is_atari:
                env = make_atari(env_id)
                if add_timestep:
                    raise NotImplementedError('AddTimestep not implemented for SensorDict')
            obs_shape = env.observation_space.shape
            if (add_timestep and (len(obs_shape) == 1) and (str(env).find('TimeLimit') > (- 1))):
                env = AddTimestep(env)
            if not (logging_implemented_inside_env or (log_dir is None)):
                os.makedirs(os.path.join(log_dir, visdom_name), exist_ok=True)
                print('Visdom log file', visdom_log_file)
                # Record video only from the first train and first val process.
                first_val_process = (num_processes - num_val_processes)
                if (((rank == 0) or (rank == first_val_process)) and (visdom_log_file is not None)):
                    env = VisdomMonitor(env, directory=os.path.join(log_dir, visdom_name), video_callable=(lambda x: ((x % vis_interval) == 0)), uid=str(rank), server=visdom_server, port=visdom_port, visdom_log_file=visdom_log_file, visdom_env=visdom_name)
                else:
                    print('Not using visdom')
                    env = wrappers.Monitor(env, directory=os.path.join(log_dir, visdom_name), uid=str(rank))
            if is_atari:
                env = wrap_deepmind(env)
            if addl_repeat_count > 0:
                if (not hasattr(env, 'repeat_count')) and (not hasattr(env.unwrapped, 'repeat_count')):
                    # Fix: the original referenced an undefined name
                    # `repeat_count` (NameError when this branch ran). Use
                    # addl_repeat_count + 1, mirroring the Doom convention
                    # above where repeat_count = addl_repeat_count + 1.
                    # TODO(review): confirm SkipWrapper expects the total
                    # repeat count rather than the additional count.
                    env = SkipWrapper(addl_repeat_count + 1)(env)
            if sensors is not None:
                if hasattr(env, 'is_embodied') or hasattr(env.unwrapped, 'is_embodied'):
                    # Embodied envs already produce sensor dicts.
                    pass
                else:
                    assert (len(sensors) == 1), 'Can only handle one sensor'
                    sensor_name = list(sensors.keys())[0]
                    env = SensorEnvWrapper(env, name=sensor_name)
            if not (preprocessing_fn_implemented_inside_env or (preprocessing_fn is None)):
                (transform, space) = preprocessing_fn(env.observation_space)
                env = ProcessObservationWrapper(env, transform, space)
            env.seed((seed + rank))
            return env
        return _thunk
|
class AddTimestep(gym.ObservationWrapper):
    """Appends the wrapped env's elapsed step count to each 1-D observation."""

    def __init__(self, env=None):
        super(AddTimestep, self).__init__(env)
        space = self.observation_space
        # Widen the (1-D) observation space by one slot for the timestep.
        widened_shape = [space.shape[0] + 1]
        self.observation_space = Box(space.low[0], space.high[0], widened_shape, dtype=space.dtype)

    def observation(self, observation):
        # Tack the elapsed-step counter onto the end of the observation vector.
        return np.concatenate((observation, [self.env._elapsed_steps]))
|
class WrapPyTorch(gym.ObservationWrapper):
    """Reorders image observations from HWC to CHW (PyTorch layout)."""

    def __init__(self, env=None):
        super(WrapPyTorch, self).__init__(env)
        height, width, channels = self.observation_space.shape
        # Same bounds, channel-first shape.
        self.observation_space = Box(self.observation_space.low[(0, 0, 0)], self.observation_space.high[(0, 0, 0)], [channels, height, width], dtype=self.observation_space.dtype)

    def observation(self, observation):
        # HWC -> CHW
        return observation.transpose(2, 0, 1)
|
def cfg(config_file: Optional[str]=None, config_dir: str=DEFAULT_CONFIG_DIR) -> CN:
    """Clone the default config node, optionally merging a config file on top.

    Args:
        config_file: file name under config_dir to merge; falsy (None or '')
            means return the bare clone.
        config_dir: directory containing config files.
    """
    config = _C.clone()
    if not config_file:
        return config
    config.merge_from_file(os.path.join(config_dir, config_file))
    return config
|
def make_habitat_vector_env(scenario='PointNav', num_processes=2, target_dim=7, preprocessing_fn=None, log_dir=None, visdom_name='main', visdom_log_file=None, visdom_server='localhost', visdom_port='8097', vis_interval=200, train_scenes=None, val_scenes=None, num_val_processes=0, swap_building_k_episodes=10, gpu_devices=[0], map_kwargs={}, reward_kwargs={}, seed=42, test_mode=False, debug_mode=False, scenario_kwargs={}):
    # Build a (preprocessing-wrapped) vectorized Habitat environment:
    # train processes are partitioned by building/scene, val processes by
    # episode. Returns a vector env exposing a single observation/action space.
    #
    # NOTE(review): gpu_devices/map_kwargs/reward_kwargs/scenario_kwargs are
    # mutable default arguments, and reward_kwargs is mutated below — the
    # shared default dict persists across calls (benign here since only the
    # default values are written back, but fragile). Also, with the default
    # map_kwargs={} the assert below raises KeyError rather than the intended
    # assertion message — confirm callers always pass map_building_size.
    assert (map_kwargs['map_building_size'] > 0), 'Map building size must be positive!'
    # Fill in any reward settings the caller did not specify.
    default_reward_kwargs = {'slack_reward': (- 0.01), 'success_reward': 10, 'use_visit_penalty': False, 'visit_penalty_coef': 0, 'penalty_eps': 999, 'sparse': False, 'dist_coef': 1.0}
    for (k, v) in default_reward_kwargs.items():
        if (k not in reward_kwargs):
            reward_kwargs[k] = v
    habitat_path = os.path.dirname(os.path.dirname(habitat.__file__))
    if ((scenario == 'PointNav') or (scenario == 'Exploration')):
        task_config = os.path.join(habitat_path, 'configs/tasks/pointnav_gibson_train.yaml')
    else:
        assert False, f'Do not recognize scenario {scenario}'
    env_configs = []
    baseline_configs = []
    encoders = []
    target_dims = []
    is_val = []
    config_env = cfg_env(task_config)
    # Validation: load the full val dataset once and split it by episode so
    # each val process gets a disjoint subset.
    print('Loading val dataset (partition by episode)...')
    datasetfile_path = config_env.DATASET.POINTNAVV1.DATA_PATH.format(split='val')
    dataset = PointNavDatasetV1()
    with gzip.open(datasetfile_path, 'rt') as f:
        dataset.from_json(f.read())
    val_datasets = get_splits(dataset, max(num_val_processes, 1))
    print('Loaded.')
    # Training: datasets are constructed lazily per process (None placeholder);
    # scenes are partitioned round-robin across train processes below.
    print('Loading train dataset (partition by building)...')
    train_datasets = []
    if ((num_processes - num_val_processes) > 0):
        train_datasets = [None for _ in range((num_processes - num_val_processes))]
    print('Loaded.')
    if (num_processes > num_val_processes):
        train_process_scenes = [[] for _ in range((num_processes - num_val_processes))]
        if (train_scenes is None):
            train_scenes = PointNavDatasetV1.get_scenes_to_load(config_env.DATASET)
            random.shuffle(train_scenes)
        for (i, scene) in enumerate(train_scenes):
            train_process_scenes[(i % len(train_process_scenes))].append(scene)
        # If there are more processes than scenes, give empty processes the
        # full scene list rather than nothing.
        for (j, process) in enumerate(train_process_scenes):
            if (len(process) == 0):
                train_process_scenes[j] = list(train_scenes)
    # Map a dataset to the distinct scene ids (with counts) of its episodes.
    get_scenes = (lambda d: list(Counter([e.scene_id.split('/')[(- 1)].split('.')[0] for e in d.episodes]).items()))
    # Build one frozen habitat config per process (train first, then val).
    for i in range(num_processes):
        config_env = cfg_env(task_config)
        config_env.defrost()
        if (i < (num_processes - num_val_processes)):
            config_env.DATASET.SPLIT = 'train'
            config_env.DATASET.POINTNAVV1.CONTENT_SCENES = train_process_scenes[i]
        else:
            val_i = (i - (num_processes - num_val_processes))
            config_env.DATASET.SPLIT = 'val'
            if (val_scenes is not None):
                config_env.DATASET.POINTNAVV1.CONTENT_SCENES = val_scenes
            else:
                config_env.DATASET.POINTNAVV1.CONTENT_SCENES = get_scenes(val_datasets[val_i])
        print('Env {}:'.format(i), config_env.DATASET.POINTNAVV1.CONTENT_SCENES)
        # Round-robin GPU assignment across processes.
        config_env.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = gpu_devices[(i % len(gpu_devices))]
        config_env.SIMULATOR.SCENE = os.path.join(habitat_path, config_env.SIMULATOR.SCENE)
        config_env.SIMULATOR.AGENT_0.SENSORS = ['RGB_SENSOR']
        config_env.TASK.MEASUREMENTS.append('COLLISIONS')
        config_env.freeze()
        env_configs.append(config_env)
        config_baseline = cfg_baseline()
        baseline_configs.append(config_baseline)
        encoders.append(preprocessing_fn)
        target_dims.append(target_dim)
    # Record video only from the first train process and the first val process.
    should_record = [((i == 0) or (i == (num_processes - num_val_processes))) for i in range(num_processes)]
    if debug_mode:
        # Single (non-vectorized) env for debugging.
        env = make_env_fn(scenario, env_configs[0], baseline_configs[0], 0, 0, 1, target_dim, log_dir, visdom_name, visdom_log_file, vis_interval, visdom_server, visdom_port, swap_building_k_episodes, map_kwargs, reward_kwargs, True, seed, test_mode, (train_datasets + val_datasets)[0], scenario_kwargs)
        envs = PreprocessEnv(env, preprocessing_fn=preprocessing_fn)
    else:
        # Fan the per-process argument lists out into make_env_fn.
        envs = HabitatPreprocessVectorEnv(make_env_fn=make_env_fn, env_fn_args=tuple(tuple(zip([scenario for _ in range(num_processes)], env_configs, baseline_configs, range(num_processes), [num_val_processes for _ in range(num_processes)], [num_processes for _ in range(num_processes)], target_dims, [log_dir for _ in range(num_processes)], [visdom_name for _ in range(num_processes)], [visdom_log_file for _ in range(num_processes)], [vis_interval for _ in range(num_processes)], [visdom_server for _ in range(num_processes)], [visdom_port for _ in range(num_processes)], [swap_building_k_episodes for _ in range(num_processes)], [map_kwargs for _ in range(num_processes)], [reward_kwargs for _ in range(num_processes)], should_record, [(seed + i) for i in range(num_processes)], [test_mode for _ in range(num_processes)], (train_datasets + val_datasets), [scenario_kwargs for _ in range(num_processes)]))), preprocessing_fn=preprocessing_fn)
    envs.observation_space = envs.observation_spaces[0]
    envs.action_space = spaces.Discrete(3)
    envs.reward_range = None
    envs.metadata = None
    envs.is_embodied = True
    return envs
|
def make_env_fn(scenario, config_env, config_baseline, rank, num_val_processes, num_processes, target_dim, log_dir, visdom_name, visdom_log_file, vis_interval, visdom_server, visdom_port, swap_building_k_episodes, map_kwargs, reward_kwargs, should_record, seed, test_mode, dataset, scenario_kwargs):
    # Construct a single Habitat RL env for one process. For train splits the
    # dataset is loaded here (per process); for val, the caller supplies a
    # pre-split dataset.
    if (config_env.DATASET.SPLIT == 'train'):
        dataset = PointNavDatasetV1(config_env.DATASET)
    # Rewrite episode scene paths to absolute paths rooted at the habitat
    # package location, and point the simulator at the first episode's scene.
    habitat_path = os.path.dirname(os.path.dirname(habitat.__file__))
    for ep in dataset.episodes:
        ep.scene_id = os.path.join(habitat_path, ep.scene_id)
    config_env.defrost()
    config_env.SIMULATOR.SCENE = dataset.episodes[0].scene_id
    config_env.freeze()
    if (scenario == 'PointNav'):
        # Keep only episodes shorter than the configured geodesic cutoff.
        dataset.episodes = [epi for epi in dataset.episodes if (epi.info['geodesic_distance'] < scenario_kwargs['max_geodesic_dist'])]
        env = MidlevelNavRLEnv(config_env=config_env, config_baseline=config_baseline, dataset=dataset, target_dim=target_dim, map_kwargs=map_kwargs, reward_kwargs=reward_kwargs, loop_episodes=(not test_mode), scenario_kwargs=scenario_kwargs)
    elif (scenario == 'Exploration'):
        env = ExplorationRLEnv(config_env=config_env, config_baseline=config_baseline, dataset=dataset, map_kwargs=map_kwargs, reward_kwargs=reward_kwargs, loop_episodes=(not test_mode), scenario_kwargs=scenario_kwargs)
    else:
        assert False, f'do not recognize scenario {scenario}'
    if test_mode:
        # Keep episode order deterministic in test mode (no-op assignment).
        env.episodes = env.episodes
    else:
        env.episodes = shuffle_episodes(env, swap_every_k=swap_building_k_episodes)
    env.seed(seed)
    if (should_record and (visdom_log_file is not None)):
        # Wrap with a visdom monitor that records a video every vis_interval
        # episodes.
        print(f'Recording videos from env {rank} every {vis_interval} episodes (via visdom)')
        env = VisdomMonitor(env, directory=os.path.join(log_dir, visdom_name), video_callable=(lambda x: ((x % vis_interval) == 0)), uid=str(rank), server=visdom_server, port=visdom_port, visdom_log_file=visdom_log_file, visdom_env=visdom_name)
    return env
|
def get_splits(dataset, num_splits: int, episodes_per_split: int=None, remove_unused_episodes: bool=False, collate_scene_ids: bool=True, sort_by_episode_id: bool=False, allow_uneven_splits: bool=True):
    """Return ``num_splits`` shallow copies of ``dataset``, each holding a
    disjoint subset of the original episodes (no episode is duplicated).

    Args:
        num_splits: how many sub-datasets to create.
        episodes_per_split: if given, each split holds exactly this many
            episodes (requires ``allow_uneven_splits=False``).
        remove_unused_episodes: if True, replace ``dataset.episodes`` with
            only the episodes that ended up in some split (saves memory).
        collate_scene_ids: if True, episodes sharing a scene_id are placed
            next to each other to reduce scene-switch overhead.
        sort_by_episode_id: if True, each split's episodes are sorted by id.
        allow_uneven_splits: if True, the last split may be shorter so that
            every episode is assigned exactly once.
    Returns:
        list of dataset copies, each with its own episode subset.
    """
    assert (len(dataset.episodes) >= num_splits), 'Not enough episodes to create this many splits.'
    if (episodes_per_split is not None):
        assert (not allow_uneven_splits), "You probably don't want to specify allow_uneven_splits and episodes_per_split."
        assert ((num_splits * episodes_per_split) <= len(dataset.episodes))
    # Decide how many episodes each split receives.
    if allow_uneven_splits:
        stride = int(np.ceil(len(dataset.episodes) * 1.0 / num_splits))
        split_lengths = [stride] * (num_splits - 1)
        split_lengths.append(len(dataset.episodes) - stride * (num_splits - 1))
    else:
        stride = episodes_per_split if episodes_per_split is not None else len(dataset.episodes) // num_splits
        split_lengths = [stride] * num_splits
    total_needed = sum(split_lengths)
    chosen = np.random.choice(len(dataset.episodes), total_needed, replace=False)
    if collate_scene_ids:
        # Group indices by scene, preserving first-appearance order of scenes.
        by_scene = {}
        for idx in chosen:
            by_scene.setdefault(dataset.episodes[idx].scene_id, []).append(idx)
        chosen = [idx for group in by_scene.values() for idx in group]
    new_datasets = []
    used_episodes = []
    cursor = 0
    for length in split_lengths:
        split = copy.copy(dataset)
        split.episodes = [dataset.episodes[i] for i in chosen[cursor:cursor + length]]
        cursor += length
        if sort_by_episode_id:
            split.episodes.sort(key=(lambda ep: ep.episode_id))
        used_episodes.extend(split.episodes)
        new_datasets.append(split)
    if remove_unused_episodes:
        dataset.episodes = used_episodes
    return new_datasets
|
def transform_target(target):
    """Convert a polar pointgoal (r, theta) into the vector [cos(theta), sin(theta), r]."""
    r, theta = target[0], target[1]
    return np.array([np.cos(theta), np.sin(theta), r])
|
def transform_observations(observations, target_dim=16, omap=None):
    """Rewrite a raw habitat observation dict (in place) into the agent's sensor keys.

    'rgb' is duplicated into 'rgb_filled' and 'taskonomy', the pointgoal is
    tiled into a (3, target_dim, target_dim) 'target' channel block, and, if
    an occupancy map is provided, 'map'/'global_pos' are added. The original
    'rgb' key is removed.
    """
    new_obs = observations
    rgb = observations['rgb']
    new_obs['rgb_filled'] = rgb
    new_obs['taskonomy'] = rgb
    tiled_goal = np.tile(transform_target(observations['pointgoal']), (target_dim, target_dim, 1))
    # Channels-first layout for the target block.
    new_obs['target'] = np.moveaxis(tiled_goal, -1, 0)
    if omap is not None:
        new_obs['map'] = omap.construct_occupancy_map()
        new_obs['global_pos'] = omap.get_current_global_xy_pos()
    del new_obs['rgb']
    return new_obs
|
def get_obs_space(image_dim=256, target_dim=16, map_dim=None, use_depth=False):
    """Build the gym Dict observation space matching transform_observations' output keys."""
    sensor_spaces = {
        'taskonomy': spaces.Box(low=0, high=255, shape=(image_dim, image_dim, 3), dtype=np.uint8),
        'rgb_filled': spaces.Box(low=0.0, high=255.0, shape=(image_dim, image_dim, 3), dtype=np.uint8),
        'target': spaces.Box(low=-np.inf, high=np.inf, shape=(3, target_dim, target_dim), dtype=np.float32),
        'pointgoal': spaces.Box(low=-np.inf, high=np.inf, shape=(2,), dtype=np.float32),
    }
    if map_dim is not None:
        sensor_spaces['map'] = spaces.Box(low=0.0, high=255.0, shape=(map_dim, map_dim, 3), dtype=np.uint8)
        sensor_spaces['global_pos'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32)
    if use_depth:
        sensor_spaces['depth'] = spaces.Box(low=0.0, high=1.0, shape=(image_dim, image_dim, 3), dtype=np.float32)
    return spaces.Dict(sensor_spaces)
|
class NavRLEnv(habitat.RLEnv):
    """PointGoal navigation RLEnv: dense progress-toward-goal reward plus a success bonus."""

    def __init__(self, config_env, config_baseline, dataset):
        config_env.TASK.MEASUREMENTS.append('TOP_DOWN_MAP')
        config_env.TASK.SENSORS.append('HEADING_SENSOR')
        self._config_env = config_env.TASK
        self._config_baseline = config_baseline
        self._previous_target_distance = None
        self._previous_action = None
        self._episode_distance_covered = None
        super().__init__(config_env, dataset)

    def reset(self):
        self._previous_action = None
        observations = super().reset()
        # Seed the progress reward with the episode's full geodesic distance.
        self._previous_target_distance = self.habitat_env.current_episode.info['geodesic_distance']
        return observations

    def step(self, action):
        # Auto-issue STOP once the agent is inside the success radius.
        if self._distance_target() < self._config_env.SUCCESS_DISTANCE:
            action = SimulatorActions.STOP.value
        self._previous_action = action
        return super().step(action)

    def get_reward_range(self):
        low = self._config_baseline.BASELINE.RL.SLACK_REWARD - 1.0
        high = self._config_baseline.BASELINE.RL.SUCCESS_REWARD + 1.0
        return (low, high)

    def get_reward(self, observations):
        rl_cfg = self._config_baseline.BASELINE.RL
        reward = rl_cfg.SLACK_REWARD
        distance_now = self._distance_target()
        # Reward equals the geodesic progress made this step.
        reward += self._previous_target_distance - distance_now
        self._previous_target_distance = distance_now
        if self._episode_success():
            reward += rl_cfg.SUCCESS_REWARD
        return reward

    def _distance_target(self):
        agent_position = self._env.sim.get_agent_state().position.tolist()
        goal_position = self._env.current_episode.goals[0].position
        return self._env.sim.geodesic_distance(agent_position, goal_position)

    def _episode_success(self):
        stopped = (self._previous_action == SimulatorActions.STOP.value)
        return bool(stopped and (self._distance_target() < self._config_env.SUCCESS_DISTANCE))

    def get_done(self, observations):
        return bool(self._env.episode_over or self._episode_success())

    def get_info(self, observations):
        info = self.habitat_env.get_metrics()
        if self.get_done(observations):
            # SPL > 0 iff the episode succeeded, so ceil yields a 0/1 flag.
            info['success'] = np.ceil(info['spl'])
            info['episode_info'] = {'geodesic_distance': self._env.current_episode.info['geodesic_distance'], 'scene_id': self._env.current_episode.scene_id, 'episode_id': self._env.current_episode.episode_id}
        return info
|
class MidlevelNavRLEnv(NavRLEnv):
    """NavRLEnv whose observations are transformed for mid-level-representation
    agents (taskonomy/rgb_filled/target keys, optional occupancy map and depth),
    with a configurable reward and an rgb_array render mode.
    """
    metadata = {'render.modes': ['rgb_array']}

    def __init__(self, config_env, config_baseline, dataset, target_dim=7, map_kwargs=None, reward_kwargs=None, loop_episodes=True, scenario_kwargs=None):
        # BUG FIX: use None sentinels instead of shared mutable default dicts.
        map_kwargs = {} if map_kwargs is None else map_kwargs
        reward_kwargs = {} if reward_kwargs is None else reward_kwargs
        scenario_kwargs = {} if scenario_kwargs is None else scenario_kwargs
        if scenario_kwargs['use_depth']:
            config_env.SIMULATOR.AGENT_0.SENSORS.append('DEPTH_SENSOR')
        super().__init__(config_env, config_baseline, dataset)
        self.target_dim = target_dim
        self.image_dim = 256
        self.use_map = (map_kwargs['map_building_size'] > 0)
        self.map_dim = (84 if self.use_map else None)
        self.map_kwargs = map_kwargs
        self.reward_kwargs = reward_kwargs
        self.scenario_kwargs = scenario_kwargs
        self.last_map = None
        self.observation_space = get_obs_space(self.image_dim, self.target_dim, self.map_dim, scenario_kwargs['use_depth'])
        self.omap = None
        if self.use_map:
            self.omap = OccupancyMap(map_kwargs=map_kwargs)
        self.loop_episodes = loop_episodes
        self.n_episodes_completed = 0

    def get_reward(self, observations):
        """Slack + (optional) dense progress + (optional) revisit penalty + success bonus."""
        reward = self.reward_kwargs['slack_reward']
        if not self.reward_kwargs['sparse']:
            current_target_distance = self._distance_target()
            reward += ((self._previous_target_distance - current_target_distance) * self.reward_kwargs['dist_coef'])
            self._previous_target_distance = current_target_distance
        if (self.reward_kwargs['use_visit_penalty'] and (len(self.omap.history) > 5)):
            reward += (self.reward_kwargs['visit_penalty_coef'] * self.omap.compute_eps_ball_ratio(self.reward_kwargs['penalty_eps']))
        if self._episode_success():
            reward += self.reward_kwargs['success_reward']
        return reward

    def reset(self):
        self.obs = self._reset()
        return self.obs

    def _reset(self):
        self.info = None
        self.obs = super().reset()
        if self.use_map:
            # Re-anchor the occupancy map on the new episode's pointgoal.
            self.omap = OccupancyMap(initial_pg=self.obs['pointgoal'], map_kwargs=self.map_kwargs)
        self.obs = transform_observations(self.obs, target_dim=self.target_dim, omap=self.omap)
        if ('map' in self.obs):
            self.last_map = self.obs['map']
        return self.obs

    def step(self, action):
        # Once every episode has been played (and we are not looping),
        # return a frozen no-op transition instead of stepping further.
        if ((self.n_episodes_completed >= len(self.episodes)) and (not self.loop_episodes)):
            return (self.obs, 0.0, False, self.info)
        (self.obs, reward, done, self.info) = super().step(action)
        if self.use_map:
            self.omap.add_pointgoal(self.obs['pointgoal'])
            self.omap.step(action)
        self.obs = transform_observations(self.obs, target_dim=self.target_dim, omap=self.omap)
        if ('map' in self.obs):
            self.last_map = self.obs['map']
        if done:
            self.n_episodes_completed += 1
        return (self.obs, reward, done, self.info)

    def render(self, mode='human'):
        """'rgb_array': horizontally concat RGB | (depth) | top-down map | (occupancy map)."""
        if (mode == 'rgb_array'):
            im = self.obs['rgb_filled']
            to_concat = [im]
            if ('depth' in self.obs):
                depth_im = gray_to_rgb((self.obs['depth'] * 255)).astype(np.uint8)
                to_concat.append(depth_im)
            if (self.info is not None):
                top_down_map = draw_top_down_map(self.info, self.obs['heading'], im.shape[0])
                top_down_map = np.array(Image.fromarray(top_down_map).resize((256, 256)))
            else:
                top_down_map = np.zeros((256, 256, 3), dtype=np.uint8)
            to_concat.append(top_down_map)
            if ('map' in self.obs):
                occupancy_map = np.copy(self.obs['map'])
                (h, w, _) = occupancy_map.shape
                # Mark the agent at the center of the occupancy map (blue channel).
                occupancy_map[(int((h // 2)), int((w // 2)), 2)] = 255
                occupancy_map = np.array(Image.fromarray(occupancy_map).resize((256, 256)))
                to_concat.append(occupancy_map)
            return np.concatenate(to_concat, axis=1)
        else:
            # BUG FIX: propagate the parent's render result instead of dropping it.
            return super().render(mode=mode)
|
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
|
def shuffle_episodes(env, swap_every_k=10):
    """Shuffle env.episodes at the granularity of ``swap_every_k``-sized groups.

    Episodes stay contiguous within a group (cheap scene switching); groups are
    sampled in random order. NOTE: any trailing remainder group beyond
    ``len(episodes) // swap_every_k`` full groups is dropped by the sampling.
    """
    all_episodes = env.episodes
    n_groups = len(all_episodes) // swap_every_k
    groups = random.sample(list(chunks(all_episodes, swap_every_k)), n_groups)
    env.episodes = flatten(groups)
    return env.episodes
|
def draw_top_down_map(info, heading, output_size):
    """Render the episode's colorized top-down map with the agent drawn in.

    Returns None when ``info`` is None (no metrics yet this episode).
    """
    if info is None:
        return
    topdown = maps.colorize_topdown_map(info['top_down_map']['map'])
    orig_size = topdown.shape[:2]
    # Keep the aspect ratio while making the first dimension output_size.
    scale = np.array((1, orig_size[1] * 1.0 / orig_size[0]))
    new_size = np.round(output_size * scale).astype(np.int32)
    topdown = cv2.resize(topdown, (new_size[1], new_size[0]))
    agent_pos = info['top_down_map']['agent_map_coord']
    agent_pos = np.round(agent_pos * new_size / orig_size).astype(np.int32)
    topdown = maps.draw_agent(topdown, agent_pos, heading - np.pi / 2, agent_radius_px=topdown.shape[0] / 40)
    return topdown
|
def gray_to_rgb(img_arr):
    """Replicate a grayscale image across 3 channels; pass 3-channel input through unchanged."""
    if img_arr.ndim == 3 and img_arr.shape[2] == 3:
        return img_arr
    if img_arr.ndim == 3:
        # Collapse a trailing singleton channel: (H, W, 1) -> (H, W).
        img_arr = img_arr.squeeze(2)
    return np.dstack((img_arr, img_arr, img_arr))
|
class HabitatPreprocessVectorEnv(habitat.VectorEnv):
    """habitat.VectorEnv that applies an optional observation transform and
    collates per-env observations into preallocated numpy buffers.

    If ``collate_obs_before_transform`` is set, raw observations are first
    stacked across envs and the transform is applied once to the whole batch.
    """

    def __init__(self, make_env_fn, env_fn_args, preprocessing_fn=None, auto_reset_done: bool=True, multiprocessing_start_method: str='forkserver'):
        super().__init__(make_env_fn, env_fn_args, auto_reset_done, multiprocessing_start_method)
        obs_space = self.observation_spaces[0]
        self.transform = None
        if (preprocessing_fn is not None):
            (self.transform, obs_space) = preprocessing_fn(obs_space)
            # Every sub-env now reports the transformed observation space.
            for i in range(self.num_envs):
                self.observation_spaces[i] = obs_space
        self.collate_obs_before_transform = False
        self.keys = []
        (shapes, dtypes) = ({}, {})
        for (key, box) in obs_space.spaces.items():
            shapes[key] = box.shape
            dtypes[key] = box.dtype
            self.keys.append(key)
        self.buf_obs = {k: np.zeros(((self.num_envs,) + tuple(shapes[k])), dtype=dtypes[k]) for k in self.keys}
        # BUG FIX: np.bool was deprecated and removed in NumPy 1.24; use builtin bool.
        self.buf_dones = np.zeros((self.num_envs,), dtype=bool)
        self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
        self.buf_infos = [{} for _ in range(self.num_envs)]

    def reset(self):
        observation_list = super().reset()
        if self.collate_obs_before_transform:
            # Stack first, then run one batched transform.
            self._save_init_obs(observation_list)
            if (self.transform is not None):
                obs = self.transform(self.buf_init_obs)
                self._save_all_obs(obs)
        else:
            for (e, obs) in enumerate(observation_list):
                if (self.transform is not None):
                    obs = self.transform(obs)
                self._save_obs(e, obs)
        return self._obs_from_buf()

    def step(self, action):
        results_list = super().step(action)
        for (e, result) in enumerate(results_list):
            self.buf_rews[e] = result[1]
            self.buf_dones[e] = result[2]
            self.buf_infos[e] = result[3]
        if self.collate_obs_before_transform:
            self._save_init_obs([r[0] for r in results_list])
            if (self.transform is not None):
                obs = self.transform(self.buf_init_obs)
                self._save_all_obs(obs)
        else:
            for (e, (obs, _, _, _)) in enumerate(results_list):
                if (self.transform is not None):
                    obs = self.transform(obs)
                self._save_obs(e, obs)
        return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones), self.buf_infos.copy())

    def _save_init_obs(self, all_obs):
        # Stack the raw per-env observations into torch tensors, keyed by sensor.
        self.buf_init_obs = {}
        for k in all_obs[0].keys():
            if (k is None):
                self.buf_init_obs[k] = torch.stack([torch.Tensor(o) for o in all_obs])
            else:
                self.buf_init_obs[k] = torch.stack([torch.Tensor(o[k]) for o in all_obs])

    def _save_obs(self, e, obs):
        k = None
        try:
            for k in self.keys:
                if (k is None):
                    self.buf_obs[k][e] = obs
                else:
                    self.buf_obs[k][e] = obs[k]
        except Exception as err:
            # BUG FIX: the exception variable previously shadowed the env index `e`,
            # so the debug print showed the exception where the index was intended.
            print(k, err)
            raise

    def _save_all_obs(self, obs):
        for k in self.keys:
            if (k is None):
                self.buf_obs[k] = obs
            else:
                self.buf_obs[k] = obs[k]

    def _obs_from_buf(self):
        # A single non-dict observation space is stored under the sentinel key None.
        if (self.keys == [None]):
            return self.buf_obs[None]
        else:
            return self.buf_obs
|
class PreprocessEnv(habitat.RLEnv):
    """Single-env wrapper that applies a preprocessing transform to each
    observation and batches every output with a leading dimension of 1."""

    def __init__(self, env, preprocessing_fn=None):
        self.env = env
        self.transform = None
        self.observation_space = self.env.observation_space
        if preprocessing_fn is not None:
            self.transform, self.observation_space = preprocessing_fn(self.env.observation_space)

    def reset(self):
        self.done = False
        obs = copy.deepcopy(self.env.reset())
        if self.transform is not None:
            obs = self.transform(obs)
        return self.wrap(obs)

    def step(self, action):
        # Accept a length-1 action list (vector-env convention) or a scalar action.
        if isinstance(action, list):
            action = action[0]
        obs, reward, self.done, info = self.env.step(action)
        obs = copy.deepcopy(obs)
        if self.transform is not None:
            obs = self.transform(obs)
        return (self.wrap(obs), np.array([reward], dtype=np.float32), np.array([self.done]), [info])

    def wrap(self, x):
        """Prepend a batch dimension of 1 to every sensor value in the dict."""
        assert isinstance(x, dict)
        for key, value in x.items():
            if isinstance(value, torch.Tensor):
                x[key] = value.unsqueeze(0)
            elif isinstance(value, np.ndarray):
                x[key] = np.expand_dims(value, axis=0)
            elif isinstance(value, list):
                x[key] = [x[key]]
            else:
                print(f'Habitat Single Env Wrapper: not wrapping {key}')
        return x

    def close(self):
        self.env.close()
|
def tile_images(img_nhwc):
    """
    Tile N images into one big PxQ image.
    (P,Q) are chosen to be as close as possible, and if N is square, then P=Q.

    input: img_nhwc, list or array of images, ndim=4 once turned into array
        n = batch index, h = height, w = width, c = channel
    returns:
        bigim_HWc, ndarray with ndim=3
    """
    img_nhwc = np.asarray(img_nhwc)
    n_images, height, width, channels = img_nhwc.shape
    grid_h = int(np.ceil(np.sqrt(n_images)))
    grid_w = int(np.ceil(float(n_images) / grid_h))
    # Pad the batch with all-black images so it fills the grid exactly.
    padding = [img_nhwc[0] * 0 for _ in range(n_images, grid_h * grid_w)]
    img_nhwc = np.array(list(img_nhwc) + padding)
    grid = img_nhwc.reshape(grid_h, grid_w, height, width, channels)
    grid = grid.transpose(0, 2, 1, 3, 4)
    return grid.reshape(grid_h * height, grid_w * width, channels)
|
class DummyVecEnv(VecEnv):
    """Single-process VecEnv that steps its sub-envs sequentially and collates
    results into preallocated numpy buffers (auto-resetting finished envs)."""

    def __init__(self, env_fns):
        self.envs = [fn() for fn in env_fns]
        env = self.envs[0]
        VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
        (shapes, dtypes) = ({}, {})
        self.keys = []
        obs_space = env.observation_space
        if isinstance(obs_space, spaces.Dict):
            assert isinstance(obs_space.spaces, OrderedDict)
            subspaces = obs_space.spaces
        else:
            # Non-dict spaces are stored under the sentinel key None.
            subspaces = {None: obs_space}
        for (key, box) in subspaces.items():
            shapes[key] = box.shape
            dtypes[key] = box.dtype
            self.keys.append(key)
        self.buf_obs = {k: np.zeros(((self.num_envs,) + tuple(shapes[k])), dtype=dtypes[k]) for k in self.keys}
        # BUG FIX: np.bool was deprecated and removed in NumPy 1.24; use builtin bool.
        self.buf_dones = np.zeros((self.num_envs,), dtype=bool)
        self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
        self.buf_infos = [{} for _ in range(self.num_envs)]
        self.actions = None

    def step_async(self, actions):
        self.actions = actions

    def step_wait(self):
        for e in range(self.num_envs):
            (obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e]) = self.envs[e].step(self.actions[e])
            if self.buf_dones[e]:
                # Auto-reset finished envs so the buffer always holds a live obs.
                obs = self.envs[e].reset()
            self._save_obs(e, obs)
        return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones), self.buf_infos.copy())

    def reset(self):
        for e in range(self.num_envs):
            obs = self.envs[e].reset()
            self._save_obs(e, obs)
        return self._obs_from_buf()

    def close(self):
        return

    def render(self, mode='human'):
        return [e.render(mode=mode) for e in self.envs]

    def _save_obs(self, e, obs):
        for k in self.keys:
            if (k is None):
                self.buf_obs[k][e] = obs
            else:
                self.buf_obs[k][e] = obs[k]

    def _obs_from_buf(self):
        if (self.keys == [None]):
            return self.buf_obs[None]
        else:
            return self.buf_obs
|
class ProcessObservationWrapper(gym.ObservationWrapper):
    """Wraps an environment so that instead of
        obs = env.step(),
        obs = transform(env.step())

    Args:
        transform: a function that transforms obs
        obs_space: the observation space describing the transformed obs
    """

    def __init__(self, env, transform, obs_space):
        super().__init__(env)
        self.transform = transform
        self.observation_space = obs_space

    def observation(self, observation):
        return self.transform(observation)
|
class SensorEnvWrapper(gym.ObservationWrapper):
    """Wraps a typical gym environment so observations become a one-key SensorDict:
        obs = env.step(),
        obs = {sensor_name: env.step()}

    Parameters:
        name: the sensor name under which the raw observation is stored
    """

    def __init__(self, env, name='obs'):
        super().__init__(env)
        self.name = name
        wrapped_space = spaces.Dict({self.name: self.observation_space})
        self.observation_space = wrapped_space

    def observation(self, observation):
        return SensorDict({self.name: observation})
|
def SkipWrapper(repeat_count):
    """Build a frame-skip wrapper class that repeats each action ``repeat_count`` extra times."""
    class SkipWrapper(gym.Wrapper):
        """
        Generic common frame skipping wrapper
        Will perform action for `x` additional steps
        """

        def __init__(self, env):
            super(SkipWrapper, self).__init__(env)
            self.repeat_count = repeat_count
            self.stepcount = 0

        def step(self, action):
            done = False
            total_reward = 0
            steps_taken = 0
            while steps_taken < self.repeat_count + 1 and not done:
                self.stepcount += 1
                if steps_taken < self.repeat_count:
                    # Intermediate repeats advance physics only (no new observation).
                    (_, reward, done, info) = self.env.step_physics(action)
                else:
                    (self.obs, reward, done, info) = self.env.step(action)
                total_reward += reward
                steps_taken += 1
            if 'skip.stepcount' in info:
                raise gym.error.Error('Key "skip.stepcount" already in info. Make sure you are not stacking the SkipWrapper wrappers.')
            info['skip.stepcount'] = self.stepcount
            info['skip.repeat_count'] = self.repeat_count
            return (self.obs, total_reward, done, info)

        def reset(self):
            self.stepcount = 0
            self.obs = self.env.reset()
            return self.obs
    return SkipWrapper
|
class VisdomMonitor(Monitor):
    """gym Monitor subclass that also streams each recorded episode video to visdom."""

    def __init__(self, env, directory, video_callable=None, force=False, resume=False, write_upon_reset=False, uid=None, mode=None, server='localhost', visdom_env='main', port=8097, visdom_log_file=None):
        # Visdom connection details; the actual logger is created lazily per video.
        self.visdom_env = visdom_env
        self.server = server
        self.port = port
        self.video_logger = None
        self.visdom_log_file = visdom_log_file
        super(VisdomMonitor, self).__init__(env, directory, video_callable=video_callable, force=force, resume=resume, write_upon_reset=write_upon_reset, uid=uid, mode=mode)

    def step_physics(self, action):
        # Mirrors Monitor.step() bookkeeping but routes through the wrapped
        # env's physics-only step when it provides one.
        self._before_step(action)
        if hasattr(self.env, 'step_physics'):
            (observation, reward, done, info) = self.env.step_physics(action)
        else:
            (observation, reward, done, info) = self.env.step(action)
        done = self._after_step(observation, reward, done, info)
        return (observation, reward, done, info)

    def reset(self, **kwargs):
        self._before_reset()
        if ('save_replay_file_path' not in kwargs):
            if self.enabled:
                kwargs['save_replay_file_path'] = self.episode_base_path
        try:
            observation = self.env.reset(**kwargs)
        except TypeError:
            # The wrapped env does not accept save_replay_file_path; retry without it.
            kwargs.pop('save_replay_file_path')
            observation = self.env.reset(**kwargs)
        self._after_reset(observation)
        return observation

    def reset_video_recorder(self):
        # Close any in-flight recording, then start a fresh recorder + visdom logger.
        if self.video_recorder:
            self._close_video_recorder()
        self.video_recorder = VideoRecorder(env=self.env, base_path=self.episode_base_path, metadata={'episode_id': self.episode_id}, enabled=self._video_enabled())
        self._create_video_logger()
        self.video_recorder.capture_frame()

    def _create_video_logger(self):
        self.video_logger = VisdomLogger('video', env=self.visdom_env, server=self.server, port=self.port, log_to_filename=self.visdom_log_file, opts={'title': 'env{}.ep{:06}'.format(self.file_infix, self.episode_id)})

    def _close_video_recorder(self):
        self.video_recorder.close()
        if self.video_recorder.functional:
            # Push the finished video file to visdom.
            (path, metadata_path) = (self.video_recorder.path, self.video_recorder.metadata_path)
            self.videos.append((path, metadata_path))
            self.video_logger.log(videofile=path)

    @property
    def episode_base_path(self):
        # Shared filename stem for the video file and its metadata.
        return os.path.join(self.directory, '{}.video.{}.video{:06}'.format(self.file_prefix, self.file_infix, self.episode_id))
|
class VideoRecorder(video_recorder.VideoRecorder):
    """gym VideoRecorder whose image encoder is created lazily on the first frame."""

    def _encode_image_frame(self, frame):
        if not self.encoder:
            # Build the encoder lazily so the frame shape is known.
            self.encoder = video_recorder.ImageEncoder(self.path, frame.shape, self.frames_per_sec)
            self.metadata['encoder_version'] = self.encoder.version_info
        try:
            self.encoder.capture_frame(frame)
        except error.InvalidFrame as exc:
            logger.warn('Tried to pass invalid video frame, marking as broken: %s', exc)
            self.broken = True
        else:
            self.empty = False
|
class SingleSensorModule(nn.Module):
    """Applies a wrapped module to a single named sensor from an observation dict.

    Args:
        module: the nn.Module to apply.
        sensor_name: key of the observation dict fed to ``module``.
    """

    def __init__(self, module, sensor_name):
        super().__init__()
        self.module = module
        self.sensor_name = sensor_name

    def forward(self, obs):
        # BUG FIX: override forward() rather than __call__ so nn.Module's
        # call machinery (hooks, etc.) still runs; calling the instance is unchanged.
        return self.module(obs[self.sensor_name])
|
class ActorCriticModule(nn.Module):
    """Abstract interface for actor-critic models.

    Subclasses must provide ``internal_state_size``, ``output_size``, and
    ``forward(inputs, states, masks)``.
    """

    def __init__(self):
        super().__init__()

    @property
    def internal_state_size(self):
        # Size of the recurrent/internal state vector; must be overridden.
        raise NotImplementedError('internal_state_size not implemented in abstract class LearnerModel')

    @property
    def output_size(self):
        # Size of the feature vector fed to the action/value heads; must be overridden.
        raise NotImplementedError('output_size not implemented in abstract class LearnerModel')

    def forward(self, inputs, states, masks):
        ' value, actor_features, states = self.base(inputs, states, masks) '
        raise NotImplementedError('forward not implemented in abstract class LearnerModel')
|
class NaivelyRecurrentACModule(ActorCriticModule):
    """ consists of a perception unit, a recurrent unit. 
    The perception unit produces a state representation P of shape internal_state_shape 
    The recurrent unit learns a function f(P) to generate a new internal_state
    The action and value should both be linear combinations of the internal state
    """

    def __init__(self, perception_unit, use_gru=False, internal_state_size=512, perception_unit_class=None, perception_unit_kwargs=None):
        super(NaivelyRecurrentACModule, self).__init__()
        # BUG FIX: None sentinel avoids a shared mutable default dict.
        perception_unit_kwargs = {} if perception_unit_kwargs is None else perception_unit_kwargs
        self._internal_state_size = internal_state_size
        if use_gru:
            self.gru = nn.GRUCell(input_size=internal_state_size, hidden_size=internal_state_size)
        if (perception_unit is None):
            # SECURITY NOTE: eval() of a config-supplied class name — only safe
            # with trusted configs; consider a registry lookup instead.
            self.perception_unit = eval(perception_unit_class)(**perception_unit_kwargs)
        else:
            self.perception_unit = perception_unit
        init_ = (lambda m: init(m, nn.init.orthogonal_, (lambda x: nn.init.constant_(x, 0))))
        self.critic_linear = init_(nn.Linear(internal_state_size, 1))

    @property
    def internal_state_size(self):
        return self._internal_state_size

    @property
    def output_size(self):
        return self.internal_state_size

    def forward(self, observations, internal_states, masks, cache=None):
        """ 
        Returns:
            values: estimates of the values of new states
            dist_params: Something which paramaterizes the distribtion that gives action_log_probabilities
            internal_states: next states
        """
        try:
            x = self.perception_unit(observations, cache)
        except TypeError:
            # BUG FIX: was a bare `except:` that swallowed every error. Only
            # fall back when the perception unit's signature rejects `cache`.
            x = self.perception_unit(observations)
        if hasattr(self, 'gru'):
            N = internal_states.size(0)
            if (observations.size(0) == N):
                # Single-step rollout: one GRU update, masking done episodes.
                x = internal_states = self.gru(x, (internal_states * masks))
            else:
                # Flattened (T*N) batch: unroll the GRU over T timesteps.
                T = int((x.size(0) / N))
                x = x.view(T, N, x.size(1))
                masks = masks.view(T, N, 1)
                outputs = []
                for i in range(T):
                    hx = self.gru(x[i], (internal_states * masks[i]))
                    internal_states = hx
                    outputs.append(hx)
                x = torch.stack(outputs, dim=0)
                x = x.view((T * N), (- 1))
        return (self.critic_linear(x), x, internal_states)
|
class ForwardInverseACModule(NaivelyRecurrentACModule):
    """ 
    This Module adds a forward-inverse model on top of the perception unit. 
    """

    def __init__(self, perception_unit, forward_model, inverse_model, use_recurrency=False, internal_state_size=512):
        super().__init__(perception_unit, use_gru=use_recurrency, internal_state_size=internal_state_size)
        # Dynamics heads used alongside the base actor-critic.
        self.forward_model = forward_model
        self.inverse_model = inverse_model
|
class AlexNet(nn.Module):
    """Standard AlexNet whose forward pass can stop at an intermediate layer.

    ``stop_layer`` is an index into the flattened [features..., avgpool/flatten,
    classifier...] module list; the named aliases 'conv5' (11) and 'fc7' (19)
    are also accepted. A stop_layer fixed at construction time overrides the
    per-call argument.
    """

    def __init__(self, num_classes=1000, normalize_outputs=False, eval_only=True, train=False, stop_layer=None):
        super(AlexNet, self).__init__()
        assert (normalize_outputs == False), 'AlexNet cannot set normalize_outputs to True'
        self.features = nn.Sequential(nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2), nn.Conv2d(64, 192, kernel_size=5, padding=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2), nn.Conv2d(192, 384, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2))
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(nn.Dropout(), nn.Linear(((256 * 6) * 6), 4096), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(inplace=True), nn.Linear(4096, num_classes))
        self.stop_layer = stop_layer
        self.eval_only = eval_only
        if self.eval_only:
            self.eval()

    def forward(self, x, stop_layer=19, cache=None):
        # `cache` is accepted for API compatibility but unused.
        # BUG FIX: its default was a mutable dict.
        if self.stop_layer is not None:
            stop_layer = self.stop_layer
        if stop_layer == 'conv5':
            stop_layer = 11
        elif stop_layer == 'fc7':
            stop_layer = 19
        featuremodulelist = list(self.features.modules())[1:]
        classifiermodulelist = list(self.classifier.modules())[1:]
        if stop_layer is None:
            # Run the whole network.
            stop_layer = len(featuremodulelist) + 1 + len(classifiermodulelist)
        for l in featuremodulelist[:stop_layer]:
            x = l(x)
        if stop_layer <= (len(featuremodulelist) - 1):
            # Stopped inside the feature extractor: return the conv feature map.
            return x
        x = self.avgpool(x)
        x = x.view(x.size(0), ((256 * 6) * 6))
        if stop_layer == len(featuremodulelist):
            return x
        for l in classifiermodulelist[:(stop_layer - (len(featuremodulelist) + 1))]:
            x = l(x)
        return x

    def train(self, val):
        if val and self.eval_only:
            # BUG FIX: the warning previously named TaskonomyEncoder; also
            # return self per the nn.Module.train convention.
            warnings.warn("Ignoring 'train()' in AlexNet since 'eval_only' was set during initialization.", RuntimeWarning)
            return self
        return super().train(val)
|
def alexnet(pretrained=False, load_path=None, **kwargs):
    """AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = AlexNet(**kwargs)
    if not pretrained:
        return model
    if load_path is None:
        # Fetch the canonical ImageNet weights.
        state_dict = model_zoo.load_url(model_urls['alexnet'])
    else:
        state_dict = torch.load(load_path)
    model.load_state_dict(state_dict)
    return model
|
def alexnet_transform(output_size, dtype=np.float32):
    """Build a preprocessing thunk: obs_space -> (pipeline, Box space).

    The pipeline center-crops to a square, resizes to output_size's spatial
    dims, converts to a tensor, and applies ImageNet mean/std normalization.
    """
    def _rescale_centercrop_resize_thunk(obs_space):
        shortest_side = min(obs_space.shape[:2])
        output_wh = output_size[-2:]
        transform = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.CenterCrop([shortest_side, shortest_side]),
            vision.transforms.Resize(output_wh),
            vision.transforms.ToTensor(),
            vision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        def pipeline(im):
            return transform(im)
        return (pipeline, spaces.Box(-1, 1, output_size, dtype))
    return _rescale_centercrop_resize_thunk
|
def alexnet_features_transform(task_path, dtype=np.float32):
    """Build a preprocessing thunk: obs_space -> (pipeline, Box space) that
    maps images to AlexNet conv5 features (computed on GPU, returned on CPU).

    'pixels_as_state' is explicitly unsupported for this transform.
    """
    net = alexnet(pretrained=True, load_path=task_path).cuda()
    net.eval()

    def encode(x):
        if task_path == 'pixels_as_state':
            return x
        with torch.no_grad():
            return net(x, 'conv5')

    def _taskonomy_features_transform_thunk(obs_space):
        if task_path == 'pixels_as_state':
            raise NotImplementedError('pixels_as_state not defined for alexnet transform')
        def pipeline(x):
            return encode(torch.Tensor(x).cuda()).cpu()
        return (pipeline, spaces.Box(-1, 1, (256, 13, 13), dtype))
    return _taskonomy_features_transform_thunk
|
class AlexNetFeaturesOnlyNet(nn.Module):
    """Policy trunk consuming precomputed AlexNet conv5 features (the
    'taskonomy' sensor), optionally fused with a tiled target block and an
    occupancy-map tower, projected to ``output_size`` features.
    """

    def __init__(self, n_frames, n_map_channels=0, use_target=True, output_size=512, extra_kwargs=None):
        super(AlexNetFeaturesOnlyNet, self).__init__()
        # BUG FIX: avoid a shared mutable default dict; extra_kwargs is
        # accepted for config compatibility but unused here.
        extra_kwargs = {} if extra_kwargs is None else extra_kwargs
        self.n_frames = n_frames
        self.use_target = use_target
        self.use_map = (n_map_channels > 0)
        self.map_channels = n_map_channels
        self.output_size = output_size
        if self.use_map:
            self.map_tower = atari_conv((self.n_frames * self.map_channels))
        if self.use_target:
            self.target_channels = 3
        else:
            self.target_channels = 0
        self.conv1 = nn.Conv2d((self.n_frames * (256 + self.target_channels)), 64, 4, stride=2)
        self.flatten = Flatten()
        # fc1 input: optional 32x7x7 map features plus 64x5x5 image features.
        self.fc1 = init_(nn.Linear(((((32 * 7) * 7) * self.use_map) + (((64 * 5) * 5) * 1)), 1024))
        self.fc2 = init_(nn.Linear(1024, self.output_size))
        self.groupnorm = nn.GroupNorm(8, 8, affine=False)

    def forward(self, x, cache=None):
        # BUG FIX: `cache` had a mutable default dict; it is unused.
        x_taskonomy = self.groupnorm(x['taskonomy'])
        if self.use_target:
            x_taskonomy = torch.cat([x_taskonomy, x['target']], dim=1)
        x_taskonomy = F.relu(self.conv1(x_taskonomy))
        if self.use_map:
            x_map = self.map_tower(x['map'])
            x_taskonomy = torch.cat([self.flatten(x_taskonomy), self.flatten(x_map)], dim=1)
        x = self.flatten(x_taskonomy)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return x
|
def atari_nature(num_inputs, num_outputs=512):
    """Nature-DQN CNN: three convs + flatten + linear head, orthogonally initialized."""
    def init_(m):
        return init(m, nn.init.orthogonal_, (lambda x: nn.init.constant_(x, 0)), nn.init.calculate_gain('relu'))
    layers = [
        init_(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),
        init_(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
        init_(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(),
        Flatten(),
        init_(nn.Linear(32 * 7 * 7, num_outputs)), nn.ReLU(),
    ]
    return nn.Sequential(*layers)
|
def atari_conv(num_inputs):
    """Three-layer Atari conv trunk (no flatten/linear head), orthogonally initialized."""
    def init_(m):
        return init(m, nn.init.orthogonal_, (lambda x: nn.init.constant_(x, 0)), nn.init.calculate_gain('relu'))
    layers = [
        init_(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),
        init_(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
        init_(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(),
    ]
    return nn.Sequential(*layers)
|
def atari_small_conv(num_inputs):
    """Two-layer Atari conv trunk, orthogonally initialized."""
    def init_(m):
        return init(m, nn.init.orthogonal_, (lambda x: nn.init.constant_(x, 0)), nn.init.calculate_gain('relu'))
    layers = [
        init_(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),
        init_(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
    ]
    return nn.Sequential(*layers)
|
def atari_match_conv(num_frames, num_inputs_per_frame):
    """Two-layer conv trunk whose output channel count scales with num_frames."""
    total_inputs = num_frames * num_inputs_per_frame
    def init_(m):
        return init(m, nn.init.orthogonal_, (lambda x: nn.init.constant_(x, 0)), nn.init.calculate_gain('relu'))
    layers = [
        init_(nn.Conv2d(total_inputs, 64, 8, stride=4)), nn.ReLU(),
        init_(nn.Conv2d(64, 8 * num_frames, 5, stride=1)), nn.ReLU(),
    ]
    return nn.Sequential(*layers)
|
def atari_big_conv(num_frames, num_inputs_per_frame):
    """Three-conv variant of atari_match_conv with an extra 64-channel layer."""
    def init_(m):
        return init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0),
                    nn.init.calculate_gain('relu'))
    in_channels = num_frames * num_inputs_per_frame
    return nn.Sequential(
        init_(nn.Conv2d(in_channels, 64, 8, stride=4)),
        nn.ReLU(),
        init_(nn.Conv2d(64, 64, 5, stride=1)),
        nn.ReLU(),
        # 8 output channels per stacked frame, matching atari_match_conv.
        init_(nn.Conv2d(64, 8 * num_frames, 5, stride=1)),
        nn.ReLU(),
    )
|
def atari_nature_vae(num_inputs, num_outputs=512):
    """Nature-DQN style encoder for the VAE variant.

    Bug fix: the original constructed the nn.Sequential but never returned
    it, so the function always returned None (compare atari_nature above).
    """
    init_ = (lambda m: init(m, nn.init.orthogonal_, (lambda x: nn.init.constant_(x, 0)), nn.init.calculate_gain('relu')))
    return nn.Sequential(
        init_(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),
        init_(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
        init_(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(),
        Flatten(),
        init_(nn.Linear(32 * 7 * 7, num_outputs)), nn.ReLU(),
    )
|
def is_cuda(model):
    """Return True if the model's parameters live on a CUDA device.

    Only the first parameter is inspected; assumes the whole model is on
    one device.
    """
    first_param = next(model.parameters())
    return first_param.is_cuda
|
def task_encoder(checkpoint_path):
    """Build a TaskonomyEncoder in eval mode, optionally loading weights.

    Args:
        checkpoint_path: path to a .pth checkpoint containing a 'state_dict'
            entry, or None to return a randomly initialized encoder.

    Returns:
        A TaskonomyEncoder in eval mode.
    """
    net = TaskonomyEncoder()
    net.eval()
    print(checkpoint_path)
    if checkpoint_path is not None:  # fix: identity comparison, not `!= None`
        # map_location='cpu' keeps loading robust on CUDA-less machines;
        # the state_dict is applied to the freshly built (CPU) model anyway.
        # (Dropped the single-argument os.path.join no-op.)
        checkpoint = torch.load(checkpoint_path, map_location='cpu')
        net.load_state_dict(checkpoint['state_dict'])
    return net
|
class AtariNet(nn.Module):
    """Atari policy trunk over stacked RGB frames, optional map/target inputs.

    Args:
        n_frames: number of stacked observation frames.
        n_map_channels: channels per frame of the optional map input
            (0 disables the map tower).
        use_target: whether a 3-channel-per-frame target is concatenated
            onto the image features before conv1.
        output_size: dimensionality of the final feature vector.
    """

    def __init__(self, n_frames, n_map_channels=0, use_target=True, output_size=512):
        super(AtariNet, self).__init__()
        self.n_frames = n_frames
        self.use_target = use_target
        self.use_map = n_map_channels > 0
        self.map_channels = n_map_channels
        self.output_size = output_size
        # Bug fix: init_ was used below for fc1/fc2 but never defined in this
        # scope — every other constructor/builder in this file defines it
        # locally like this, so referencing it here raised a NameError.
        init_ = (lambda m: init(m, nn.init.orthogonal_,
                                (lambda x: nn.init.constant_(x, 0)),
                                nn.init.calculate_gain('relu')))
        if self.use_map:
            self.map_tower = atari_conv(num_inputs=(self.n_frames * self.map_channels))
        else:
            self.map_channels = 0
        if self.use_target:
            self.target_channels = 3
        else:
            self.target_channels = 0
        self.image_tower = atari_small_conv(num_inputs=(self.n_frames * 3))
        self.conv1 = nn.Conv2d((64 + (self.n_frames * self.target_channels)), 32, 3, stride=1)
        self.flatten = Flatten()
        # use_map is a bool; (use_map + 1) doubles the fc input when the map
        # tower contributes a second flattened stream.
        self.fc1 = init_(nn.Linear((((32 * 7) * 7) * (self.use_map + 1)), 1024))
        self.fc2 = init_(nn.Linear(1024, self.output_size))

    def forward(self, x):
        """Fuse rgb (plus optional target/map) inputs into a feature vector.

        Args:
            x: dict with key 'rgb_filled'; also 'target' when use_target and
               'map' when use_map.

        Returns:
            Tensor of shape (batch, output_size).
        """
        x_rgb = x['rgb_filled']
        x_rgb = self.image_tower(x_rgb)
        if self.use_target:
            x_rgb = torch.cat([x_rgb, x['target']], dim=1)
        x_rgb = F.relu(self.conv1(x_rgb))
        if self.use_map:
            x_map = x['map']
            x_map = self.map_tower(x_map)
            # Concatenate along the channel dim; flattened jointly below.
            x_rgb = torch.cat([x_rgb, x_map], dim=1)
        x = self.flatten(x_rgb)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return x
|
class AtariNatureEncoder(nn.Module):
    """VAE encoder built from the Nature-DQN convolutional stack."""

    def __init__(self, img_channels, latent_size):
        super().__init__()
        self.latent_size = latent_size
        self.img_channels = img_channels

        def init_fn(module):
            return init(module, nn.init.orthogonal_,
                        lambda w: nn.init.constant_(w, 0),
                        nn.init.calculate_gain('relu'))

        self.conv1 = init_fn(nn.Conv2d(img_channels, 32, 8, stride=4))
        self.conv2 = init_fn(nn.Conv2d(32, 64, 4, stride=2))
        self.conv3 = init_fn(nn.Conv2d(64, 32, 3, stride=1))
        self.flatten = Flatten()
        self.fc1 = init_fn(nn.Linear(32 * 7 * 7, latent_size))

    def forward(self, x):
        """Map an image batch to a ReLU-activated latent of size latent_size."""
        out = x
        for conv in (self.conv1, self.conv2, self.conv3):
            out = F.relu(conv(out))
        return F.relu(self.fc1(self.flatten(out)))
|
class FrameStacked(nn.Module):
    """Apply `net` independently to each of `n_stack` chunks of the input.

    The input (a tensor, or a dict of tensors) is split into `n_stack`
    pieces along dim 1, `net` runs on each piece, and the outputs are
    concatenated back along dim 1. With `parallel=True` and a small enough
    input, the pieces are stacked along dim 0 and run through `net` in a
    single call instead.
    """

    def __init__(self, net, n_stack, parallel=False, max_parallel=50):
        super().__init__()
        self.net = net
        self.n_stack = n_stack
        self.parallel = parallel
        self.max_parallel = max_parallel

    def _prepare_inputs(self, x):
        # Split along dim 1; dict inputs are split per key and regrouped so
        # each piece is a dict with the same keys.
        if not isinstance(x, dict):
            return torch.chunk(x, self.n_stack, dim=1)
        chunked = {key: torch.chunk(val, self.n_stack, dim=1)
                   for key, val in x.items()}
        return [{key: parts[i] for key, parts in chunked.items()}
                for i in range(self.n_stack)]

    def forward(self, x):
        pieces = self._prepare_inputs(x)
        # NOTE(review): for dict inputs len(x) counts keys, not batch size —
        # preserved exactly as the original behaved.
        if self.parallel and len(x) <= self.max_parallel:
            stacked = torch.cat(pieces, dim=0)
            outputs = torch.chunk(self.net(stacked), self.n_stack, dim=0)
        else:
            outputs = [self.net(piece) for piece in pieces]
        return torch.cat(outputs, dim=1)
|
class MemoryFrameStacked(FrameStacked):
    """FrameStacked variant that records chosen attributes of `net` per chunk.

    After `net` processes each chunk, the current value of each attribute
    named in `attrs_to_remember` is appended to `cache[name]`, letting the
    caller inspect per-frame internal state after the forward pass.

    Note: chunks are always processed sequentially here; the inherited
    `parallel` flag is not consulted by this forward().
    """

    def __init__(self, net, n_stack, parallel=False, max_parallel=50, attrs_to_remember=None):
        super().__init__(net, n_stack, parallel=parallel, max_parallel=max_parallel)
        # Fix: the original used the mutable default attrs_to_remember=[],
        # which is shared across every instance constructed with the default.
        self.attrs_to_remember = [] if attrs_to_remember is None else attrs_to_remember

    def forward(self, x, cache=None):
        """Run each chunk through net; optionally record attributes in `cache`.

        Args:
            x: tensor or dict of tensors, split along dim 1 into n_stack chunks.
            cache: optional dict; for each name in attrs_to_remember a list of
                per-chunk attribute values is appended under cache[name].

        Returns:
            The per-chunk outputs concatenated along dim 1.
        """
        pieces = self._prepare_inputs(x)
        results = []
        if cache is not None:
            assert isinstance(cache, dict)
            for name in self.attrs_to_remember:
                cache.setdefault(name, [])
        for piece in pieces:
            results.append(self.net(piece))
            if cache is not None:
                # Snapshot the requested attributes right after this chunk.
                for name in self.attrs_to_remember:
                    cache[name].append(getattr(self.net, name))
        return torch.cat(results, dim=1)
|
class FramestackResnet(nn.Module):
    """Run a pretrained ResNet-18 over each frame of a channel-stacked input.

    Input is (batch, 3 * n_frames, H, W); frames are folded into the batch
    dimension, featurized, then folded back so each sample gets the
    concatenation of its per-frame features.
    """

    def __init__(self, n_frames):
        super(FramestackResnet, self).__init__()
        self.n_frames = n_frames
        self.resnet = resnet18(pretrained=True)

    def forward(self, x):
        assert x.shape[1] / 3 == self.n_frames, 'Dimensionality mismatch of input, is n_frames set right?'
        batch = x.shape[0]
        # Fold frames into the batch axis: (B * n_frames, 3, H, W).
        frames = x.reshape((batch * self.n_frames, 3, x.shape[2], x.shape[3]))
        feats = self.resnet(frames)
        # Unfold: total features per original sample, recovered by division.
        per_sample = (feats.shape[0] * feats.shape[1]) // batch
        return feats.reshape((batch, per_sample))
|
# NOTE(review): the lines below are webpage-scrape residue (dataset-viewer
# boilerplate), not Python code; commented out so the module parses.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.