code
stringlengths
17
6.64M
@ex.named_config
def cifar10_data():
    # Single-task CIFAR-10 classification: SGD with momentum and a MultiStepLR decay schedule.
    cfg = {
        'learner': {
            'lr': 0.1,
            'optimizer_class': 'optim.SGD',
            'optimizer_kwargs': {'momentum': 0.9, 'weight_decay': 0.0001},
            'lr_scheduler_method': 'optim.lr_scheduler.MultiStepLR',
            'lr_scheduler_method_kwargs': {'milestones': [100, 150]},
            'max_grad_norm': None,
            'use_feedback': False,
        },
        'training': {
            'dataloader_fn': 'icifar_dataset.get_cifar_dataloaders',
            'dataloader_fn_kwargs': {
                'data_path': '/mnt/data/cifar10',
                'num_workers': 8,
                'pin_memory': True,
                'epochlength': 20000,
                'batch_size': 128,
                'batch_size_val': 256,
            },
            'loss_fn': 'softmax_cross_entropy',
            'loss_kwargs': {},
            'use_masks': False,
            'sources': [['rgb']],
            'targets': [['cifar10']],
            'masks': None,
            'task_is_classification': [True],
            'num_epochs': 1000,
        },
        'saving': {'ticks_per_epoch': 5, 'log_interval': 1, 'save_interval': 200},
    }
@ex.named_config
def icifar_data():
    # Incremental CIFAR-100: split the 100 classes into N_TASKS contiguous chunks,
    # one classification task per chunk (e.g. 'cifar0-9', 'cifar10-19', ...).
    n_epochs = 4
    n_classes = 100
    n_tasks = N_TASKS
    n = 100 // n_tasks
    chunked_classes = []
    for i in range((n_classes + n - 1) // n):
        chunked_classes.append(np.arange(i * n, (i + 1) * n))
    chunked_names = [[f'cifar{cs.min()}-{cs.max()}'] for cs in chunked_classes]
    cfg = {
        'training': {
            'dataloader_fn': 'icifar_dataset.get_dataloaders',
            'dataloader_fn_kwargs': {
                'data_path': '/mnt/data/cifar100',
                'load_to_mem': False,
                'num_workers': 8,
                'pin_memory': True,
                'epochlength': 5000 * n_epochs,
                'epochs_until_cycle': 0,
                'batch_size': 128,
                'batch_size_val': 256,
            },
            'loss_fn': 'softmax_cross_entropy',
            'loss_kwargs': {},
            'use_masks': False,
            'sources': [['rgb']] * len(chunked_classes),
            'targets': chunked_names,
            'masks': None,
            'task_is_classification': [True] * len(chunked_classes),
            'num_epochs': N_TASKS,
        },
        'saving': {'ticks_per_epoch': 1, 'log_interval': 1, 'save_interval': 10},
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {'dataset': 'icifar'},
            'use_feedback': False,
        },
    }
    # Scratch variables must be deleted so sacred does not record them as config entries.
    del n, n_tasks, n_classes, chunked_classes, i, chunked_names, n_epochs
@ex.named_config
def icifar0_10_data():
    # Repeat the same CIFAR-100 chunk (classes 0-9) for all N_TASKS tasks.
    cfg = {
        'training': {
            'dataloader_fn': 'icifar_dataset.get_dataloaders',
            'dataloader_fn_kwargs': {
                'data_path': '/mnt/data/cifar100',
                'load_to_mem': False,
                'num_workers': 8,
                'pin_memory': True,
                'epochlength': 20000,
                'batch_size': 128,
                'batch_size_val': 256,
            },
            'loss_fn': 'softmax_cross_entropy',
            'loss_kwargs': {},
            'use_masks': False,
            'sources': [['rgb']] * N_TASKS,
            'targets': [['cifar0-9']] * N_TASKS,
            'masks': None,
            'task_is_classification': [True] * N_TASKS,
        },
    }
@ex.named_config
def cifar_hp():
    # CIFAR hyperparameter override: lower learning rate, no weight decay.
    uuid = 'no_uuid'
    cfg = {}
    cfg['learner'] = {'lr': 0.001, 'optimizer_kwargs': {'weight_decay': 0.0}}
@ex.named_config
def debug_cifar100():
    # Debug CIFAR-100: one pass over the 50k training set per epoch, 100-way head.
    cfg = {
        'training': {'dataloader_fn_kwargs': {'epochlength': 50000 // 128}},
        'learner': {'model_kwargs': {'num_classes': 100}},
    }
@ex.named_config
def model_resnet_cifar():
    # Resume training from a pretrained ResNet-44 (no final linear) CIFAR checkpoint.
    cfg = {
        'learner': {'model': 'ResnetiCifar44'},
        'training': {
            'resume_from_checkpoint_path': '/mnt/models/resnet44-nolinear-cifar.pth',
            'resume_training': True,
        },
    }
@ex.named_config
def init_lowenergy_cifar():
    # Initialize the FCN4 side network from low-energy distilled weights.
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {
                'side_class': 'FCN4Reshaped',
                'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar-lowenergy.pth',
            },
        },
    }
@ex.named_config
def init_xavier():
    # No pretrained side weights: use the model's default (Xavier) initialization.
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {'side_weights_path': None},
        },
    }
@ex.named_config
def bsp_cifar():
    # Binary superposition (BSP) enabled on both base and side of a nested sidetune base.
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {
                'base_class': 'GenericSidetuneNetwork',
                'base_kwargs': {
                    'base_weights_path': '/mnt/models/resnet44-nolinear-cifar-bsp.pth',
                    'base_kwargs': {'bsp': True, 'period': 10},
                    'side_kwargs': {'bsp': True, 'period': 10},
                },
            },
        },
    }
@ex.named_config
def bsp_norecurse_cifar():
    # BSP applied to the base network only (no nested sidetune wrapper).
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {
                'base_weights_path': '/mnt/models/resnet44-nolinear-cifar-bsp.pth',
                'base_kwargs': {'bsp': True, 'period': 10},
            },
        },
    }
@ex.named_config
def bsp_debug():
    # Enable BSP debug instrumentation in the base network.
    cfg = {'learner': {'model_kwargs': {'base_kwargs': {'bsp': True, 'debug': True}}}}
@ex.named_config
def model_boosted_cifar():
    # Boosted network without a base: pretrained FCN4 side network + linear transfer head.
    # (Removed dead local `n_channels_out = 3`, which was assigned and immediately deleted.)
    cfg = {
        'learner': {
            'model': 'BoostedNetwork',
            'model_kwargs': {
                'base_class': None,
                'use_baked_encoding': False,
                'side_class': 'FCN4Reshaped',
                'side_kwargs': {},
                'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth',
                'transfer_class': 'nn.Linear',
                'transfer_kwargs': {'in_features': 64, 'out_features': 10},
                'transfer_weights_path': None,
                'decoder_class': None,
                'decoder_weights_path': None,
                'decoder_kwargs': {},
            },
        },
    }
@ex.named_config
def model_boosted_wbase_cifar():
    # Boosted network with a frozen ResNet-44 base plus a pretrained FCN4 side network.
    # (Removed dead local `n_channels_out = 3`, which was assigned and immediately deleted.)
    cfg = {
        'learner': {
            'model': 'BoostedNetwork',
            'model_kwargs': {
                'base_class': 'ResnetiCifar44NoLinear',
                'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth',
                'base_kwargs': {'eval_only': True},
                'use_baked_encoding': False,
                'side_class': 'FCN4Reshaped',
                'side_kwargs': {},
                'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth',
                'transfer_class': 'nn.Linear',
                'transfer_kwargs': {'in_features': 64, 'out_features': 10},
                'transfer_weights_path': None,
                'decoder_class': None,
                'decoder_weights_path': None,
                'decoder_kwargs': {},
            },
        },
    }
@ex.named_config
def model_resnet_icifar0_10():
    # Trainable ResNet-44 base, no side network, 10-way linear head.
    # (Removed dead local `n_channels_out = 3`, which was assigned and immediately deleted.)
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {
                'base_class': 'ResnetiCifar44NoLinear',
                'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth',
                'base_kwargs': {'eval_only': False},
                'use_baked_encoding': False,
                'side_class': None,
                'transfer_class': 'nn.Linear',
                'transfer_kwargs': {'in_features': 64, 'out_features': 10},
                'transfer_weights_path': None,
                'decoder_class': None,
                'decoder_weights_path': None,
                'decoder_kwargs': {},
            },
        },
    }
@ex.named_config
def model_lifelong_independent_cifar():
    # Independent-models baseline: each task gets its own GenericSidetuneNetwork side.
    # (Removed dead local `n_channels_out = 3`, which was assigned and immediately deleted.)
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {
                'side_class': 'GenericSidetuneNetwork',
                'side_kwargs': {
                    'n_channels_in': 3,
                    'n_channels_out': 8,
                    'base_class': 'ResnetiCifar44NoLinear',
                    'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth',
                    'base_kwargs': {'eval_only': False},
                    'use_baked_encoding': False,
                    'side_class': 'FCN4Reshaped',
                    'side_kwargs': {'eval_only': False},
                    'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth',
                },
                'transfer_class': 'nn.Linear',
                'transfer_kwargs': {'in_features': 64, 'out_features': 10},
            },
        },
    }
@ex.named_config
def model_lifelong_independent_resnet_cifar():
    # Independent per-task ResNet-44 side networks; no shared base.
    # (Removed dead local `n_channels_out = 3`, which was assigned and immediately deleted.)
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {
                'base_class': None,
                'base_weights_path': None,
                'base_kwargs': {},
                'use_baked_encoding': False,
                'side_class': 'ResnetiCifar44NoLinear',
                'side_kwargs': {'eval_only': False},
                'side_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth',
                'transfer_class': 'nn.Linear',
                'transfer_kwargs': {'in_features': 64, 'out_features': 10},
                'transfer_weights_path': None,
                'decoder_class': None,
                'decoder_weights_path': None,
                'decoder_kwargs': {},
            },
        },
    }
@ex.named_config
def model_lifelong_independent_fcn4_cifar():
    # Independent per-task FCN4 side networks; no shared base.
    # (Removed dead local `n_channels_out = 3`, which was assigned and immediately deleted.)
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {
                'base_class': None,
                'base_weights_path': None,
                'base_kwargs': {},
                'use_baked_encoding': False,
                'side_class': 'FCN4Reshaped',
                'side_kwargs': {'eval_only': False},
                'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth',
                'transfer_class': 'nn.Linear',
                'transfer_kwargs': {'in_features': 64, 'out_features': 10},
                'transfer_weights_path': None,
                'decoder_class': None,
                'decoder_weights_path': None,
                'decoder_kwargs': {},
            },
        },
    }
@ex.named_config
def model_lifelong_finetune_cifar():
    # Finetune baseline: shared, fully trainable GenericSidetuneNetwork base (ResNet-44 + FCN4).
    # (Removed dead local `n_channels_out = 3`, which was assigned and immediately deleted.)
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {
                'base_class': 'GenericSidetuneNetwork',
                'base_kwargs': {
                    'n_channels_in': 3,
                    'n_channels_out': 8,
                    'base_class': 'ResnetiCifar44NoLinear',
                    'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth',
                    'base_kwargs': {'eval_only': False},
                    'use_baked_encoding': False,
                    'side_class': 'FCN4Reshaped',
                    'side_kwargs': {'eval_only': False},
                    'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth',
                },
                'use_baked_encoding': False,
                'transfer_class': 'nn.Linear',
                'transfer_kwargs': {'in_features': 64, 'out_features': 10},
            },
        },
    }
@ex.named_config
def model_lifelong_finetune_resnet44_cifar():
    # Finetune a ResNet-44 base directly (no side network).
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {
                'base_class': 'ResnetiCifar44NoLinear',
                'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth',
                'base_kwargs': {'eval_only': False},
                'use_baked_encoding': False,
                'transfer_class': 'nn.Linear',
                'transfer_kwargs': {'in_features': 64, 'out_features': 10},
                'transfer_weights_path': None,
            },
        },
    }
@ex.named_config
def model_lifelong_finetune_fcn4_cifar():
    # Finetune a pretrained FCN4 base directly (no side network).
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {
                'base_class': 'FCN4Reshaped',
                'base_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth',
                'base_kwargs': {'eval_only': False},
                'use_baked_encoding': False,
                'transfer_class': 'nn.Linear',
                'transfer_kwargs': {'in_features': 64, 'out_features': 10},
                'transfer_weights_path': None,
            },
        },
    }
@ex.named_config
def model_lifelong_sidetune_cifar():
    # Sidetuning: frozen ResNet-44 base + trainable FCN4 side network.
    # (Removed dead local `n_channels_out = 3`, which was assigned and immediately deleted.)
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {
                'base_class': 'ResnetiCifar44NoLinear',
                'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth',
                'base_kwargs': {'eval_only': True},
                'use_baked_encoding': False,
                'side_class': 'FCN4Reshaped',
                'side_kwargs': {'eval_only': False},
                'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth',
                'transfer_class': 'nn.Linear',
                'transfer_kwargs': {'in_features': 64, 'out_features': 10},
                'transfer_weights_path': None,
            },
        },
    }
@ex.named_config
def model_lifelong_features_cifar():
    # Features baseline: frozen ResNet-44 base, no side network, only the linear head trains.
    # (Removed dead local `n_channels_out = 3`, which was assigned and immediately deleted.)
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {
                'base_class': 'ResnetiCifar44NoLinear',
                'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth',
                'base_kwargs': {'eval_only': True},
                'use_baked_encoding': False,
                'side_class': None,
                'side_kwargs': {},
                'side_weights_path': None,
                'transfer_class': 'nn.Linear',
                'transfer_kwargs': {'in_features': 64, 'out_features': 10},
                'transfer_weights_path': None,
            },
        },
    }
@ex.named_config
def pnn_v2_cifar():
    # Progressive Neural Network (variant 2): cached base + FCN4Progressive side columns.
    cfg = {
        'learner': {
            'model_kwargs': {
                'base_class': 'ResnetiCifar44NoLinearWithCache',
                'side_class': 'FCN4Progressive',
                'side_kwargs': {},
                'pnn': True,
            },
        },
    }
@ex.named_config
def pnn_v4_cifar():
    # Progressive Neural Network (variant 4): cached base + FCN4ProgressiveH side columns.
    cfg = {
        'learner': {
            'model_kwargs': {
                'base_class': 'ResnetiCifar44NoLinearWithCache',
                'side_class': 'FCN4ProgressiveH',
                'side_kwargs': {},
                'pnn': True,
            },
        },
    }
@ex.named_config
def model_lifelong_sidetune_reverse_cifar():
    # Reverse sidetuning: frozen FCN4 base + trainable ResNet-44 side network.
    # (Removed dead local `n_channels_out = 3`, which was assigned and immediately deleted.)
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {
                'base_class': 'FCN4Reshaped',
                'base_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth',
                'base_kwargs': {'eval_only': True},
                'use_baked_encoding': False,
                'side_class': 'ResnetiCifar44NoLinear',
                'side_kwargs': {'eval_only': False},
                'side_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth',
                'transfer_class': 'nn.Linear',
                'transfer_kwargs': {'in_features': 64, 'out_features': 10},
                'transfer_weights_path': None,
                'decoder_class': None,
                'decoder_weights_path': None,
                'decoder_kwargs': {},
            },
        },
    }
@ex.named_config
def model_lifelong_sidetune_double_resnet_cifar():
    # Sidetuning with ResNet-44 for BOTH base (frozen) and side (trainable), same init weights.
    # (Removed dead local `n_channels_out = 3`, which was assigned and immediately deleted.)
    cfg = {
        'learner': {
            'model': 'LifelongSidetuneNetwork',
            'model_kwargs': {
                'base_class': 'ResnetiCifar44NoLinear',
                'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth',
                'base_kwargs': {'eval_only': True},
                'use_baked_encoding': False,
                'side_class': 'ResnetiCifar44NoLinear',
                'side_kwargs': {'eval_only': False},
                'side_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth',
                'transfer_class': 'nn.Linear',
                'transfer_kwargs': {'in_features': 64, 'out_features': 10},
                'transfer_weights_path': None,
                'decoder_class': None,
                'decoder_weights_path': None,
                'decoder_kwargs': {},
            },
        },
    }
@ex.named_config
def imitation_learning():
    # Behavior cloning on expert trajectories; RLSidetuneWrapper perception with an FCN5 side.
    cfg = {}
    cfg['learner'] = {
        'model': 'PolicyWithBase',
        'lr': 0.0002,
        'optimizer_kwargs': {'weight_decay': 3.8e-07},
        'model_kwargs': {
            'base': None,
            'action_space': spaces.Discrete(3),
            'base_class': 'NaivelyRecurrentACModule',
            'base_kwargs': {
                'use_gru': False,
                'internal_state_size': 512,
                'perception_unit': None,
                'perception_unit_class': 'RLSidetuneWrapper',
                'perception_unit_kwargs': {
                    'n_frames': 4,
                    'n_map_channels': 3,
                    'use_target': True,
                    'blind': False,
                    'extra_kwargs': {
                        'main_perception_network': 'TaskonomyFeaturesOnlyNet',
                        'sidetune_kwargs': {
                            'n_channels_in': 3,
                            'n_channels_out': 8,
                            'normalize_pre_transfer': False,
                            'base_class': None,
                            'base_weights_path': None,
                            'base_kwargs': {},
                            'side_class': 'FCN5',
                            'side_kwargs': {'eval_only': False, 'normalize_outputs': False},
                            'side_weights_path': None,
                        },
                    },
                },
            },
        },
    }
    cfg['training'] = {
        'sources': ['rgb_filled', 'map', 'target', 'taskonomy'],
        'sources_as_dict': True,
        'targets': ['action'],
    }
@ex.named_config
def expert_data():
    # Expert trajectory dataset (large split) for imitation learning.
    cfg = {
        'training': {
            'dataloader_fn': 'expert_dataset.get_dataloaders',
            'dataloader_fn_kwargs': {
                'data_path': '/mnt/data/expert_trajs/large',
                'num_frames': 4,
                'load_to_mem': False,
                'num_workers': 8,
                'pin_memory': False,
                'batch_size': 64,
                'batch_size_val': 64,
                'remove_last_step_in_traj': True,
            },
            'loss_fn': 'softmax_cross_entropy',
            'loss_kwargs': {},
            'use_masks': False,
        },
    }
@ex.named_config
def il_source_rmtt():
    # IL inputs: rgb, map, target, and taskonomy features (delivered as a dict).
    cfg = {}
    cfg['training'] = {
        'sources': ['rgb_filled', 'map', 'target', 'taskonomy'],
        'sources_as_dict': True,
    }
@ex.named_config
def il_source_rmt():
    # IL inputs: rgb, map, and target only (delivered as a dict).
    cfg = {}
    cfg['training'] = {
        'sources': ['rgb_filled', 'map', 'target'],
        'sources_as_dict': True,
    }
@ex.named_config
def il_blind():
    # Blind IL agent: no visual base or side network; only map and target inputs.
    cfg = {}
    cfg['learner'] = {
        'model_kwargs': {
            'base_kwargs': {
                'perception_unit_kwargs': {
                    'extra_kwargs': {
                        'main_perception_network': 'TaskonomyFeaturesOnlyNet',
                        'sidetune_kwargs': {
                            'base_class': None,
                            'base_weights_path': None,
                            'base_kwargs': {},
                            'side_class': None,
                            'side_weights_path': None,
                            'side_kwargs': {},
                        },
                    },
                },
            },
        },
    }
    cfg['training'] = {'sources': ['map', 'target']}
@ex.named_config
def il_sidetune():
    # IL sidetuning: frozen TaskonomyEncoder base + trainable FCN5 side, alpha-blended.
    cfg = {}
    cfg['learner'] = {
        'model_kwargs': {
            'base_kwargs': {
                'perception_unit_kwargs': {
                    'extra_kwargs': {
                        'sidetune_kwargs': {
                            'n_channels_in': 3,
                            'n_channels_out': 8,
                            'normalize_pre_transfer': False,
                            'base_class': 'TaskonomyEncoder',
                            'base_weights_path': None,
                            'base_kwargs': {'eval_only': True, 'normalize_outputs': False},
                            'side_class': 'FCN5',
                            'side_kwargs': {'eval_only': False, 'normalize_outputs': False},
                            'side_weights_path': None,
                            'alpha_blend': True,
                        },
                        'attrs_to_remember': ['base_encoding', 'side_output', 'merged_encoding'],
                    },
                },
            },
        },
    }
@ex.named_config
def il_debug():
    # Quick IL debug run on the tiny expert split.
    cfg = {
        'training': {'dataloader_fn_kwargs': {'data_path': '/mnt/data/expert_trajs/tiny'},
                     'num_epochs': 10},
        'saving': {'ticks_per_epoch': 1, 'log_interval': 5, 'save_interval': 5},
    }
@ex.named_config
def il_tiny():
    # IL on the tiny expert split; many epochs to compensate for the small dataset.
    cfg = {
        'training': {'dataloader_fn_kwargs': {'data_path': '/mnt/data/expert_trajs/tiny'},
                     'num_epochs': 3000},
        'saving': {'ticks_per_epoch': 1, 'log_interval': 500, 'save_interval': 100},
    }
@ex.named_config
def il_small():
    # IL on the small expert split.
    cfg = {
        'training': {'dataloader_fn_kwargs': {'data_path': '/mnt/data/expert_trajs/small'},
                     'num_epochs': 600},
        'saving': {'ticks_per_epoch': 1, 'log_interval': 20, 'save_interval': 20},
    }
@ex.named_config
def il_medium():
    # IL on the medium expert split.
    cfg = {
        'training': {'dataloader_fn_kwargs': {'data_path': '/mnt/data/expert_trajs/medium'},
                     'num_epochs': 100},
        'saving': {'ticks_per_epoch': 1, 'log_interval': 10, 'save_interval': 10},
    }
@ex.named_config
def il_large():
    # IL on the large expert split; few epochs suffice.
    cfg = {
        'training': {'dataloader_fn_kwargs': {'data_path': '/mnt/data/expert_trajs/large'},
                     'num_epochs': 12},
        'saving': {'ticks_per_epoch': 1, 'log_interval': 1, 'save_interval': 1},
    }
@ex.named_config
def il_largeplus():
    # IL on the largeplus expert split; sub-epoch (fractional) logging.
    cfg = {
        'training': {'dataloader_fn_kwargs': {'data_path': '/mnt/data/expert_trajs/largeplus'},
                     'num_epochs': 5},
        'saving': {'ticks_per_epoch': 100, 'log_interval': 0.1, 'save_interval': 1},
    }
@ex.named_config
def ilgsn_base_resnet50():
    # IL GSN base: frozen TaskonomyEncoder (ResNet-50).
    cfg = {}
    cfg['learner'] = {'model_kwargs': {'base_kwargs': {'perception_unit_kwargs': {'extra_kwargs': {
        'sidetune_kwargs': {
            'base_class': 'TaskonomyEncoder',
            'base_weights_path': None,
            'base_kwargs': {'eval_only': True, 'normalize_outputs': False},
        }}}}}}
@ex.named_config
def ilgsn_base_fcn5():
    # IL GSN base: frozen FCN5.
    cfg = {}
    cfg['learner'] = {'model_kwargs': {'base_kwargs': {'perception_unit_kwargs': {'extra_kwargs': {
        'sidetune_kwargs': {
            'base_class': 'FCN5',
            'base_weights_path': None,
            'base_kwargs': {'eval_only': True, 'normalize_outputs': False},
        }}}}}}
@ex.named_config
def ilgsn_base_learned():
    # IL GSN: make the base network trainable.
    cfg = {}
    cfg['learner'] = {'model_kwargs': {'base_kwargs': {'perception_unit_kwargs': {'extra_kwargs': {
        'sidetune_kwargs': {'base_kwargs': {'eval_only': False}}}}}}}
@ex.named_config
def ilgsn_side_resnet50():
    # IL GSN side: trainable TaskonomyEncoder (ResNet-50).
    cfg = {}
    cfg['learner'] = {'model_kwargs': {'base_kwargs': {'perception_unit_kwargs': {'extra_kwargs': {
        'sidetune_kwargs': {
            'side_class': 'TaskonomyEncoder',
            'side_weights_path': None,
            'side_kwargs': {'eval_only': False, 'normalize_outputs': False},
        }}}}}}
@ex.named_config
def ilgsn_side_fcn5():
    # IL GSN side: trainable FCN5.
    cfg = {}
    cfg['learner'] = {'model_kwargs': {'base_kwargs': {'perception_unit_kwargs': {'extra_kwargs': {
        'sidetune_kwargs': {
            'side_class': 'FCN5',
            'side_weights_path': None,
            'side_kwargs': {'eval_only': False, 'normalize_outputs': False},
        }}}}}}
@ex.named_config
def ilgsn_no_side():
    # IL GSN: disable the side network and alpha blending entirely.
    cfg = {}
    cfg['learner'] = {'model_kwargs': {'base_kwargs': {'perception_unit_kwargs': {'extra_kwargs': {
        'sidetune_kwargs': {
            'side_class': None,
            'side_weights_path': None,
            'side_kwargs': {},
            'alpha_blend': False,
        }}}}}}
@ex.named_config
def ilgsn_side_frozen():
    # IL GSN: freeze the side network.
    # FIX: the original put 'side_kwargs' directly under 'extra_kwargs', unlike every
    # sibling config (ilgsn_side_*, ilgsn_no_side, rlgsn_side_frozen) which nests it
    # inside 'sidetune_kwargs' — as written the flag never reached the side network.
    cfg = {}
    cfg['learner'] = {'model_kwargs': {'base_kwargs': {'perception_unit_kwargs': {'extra_kwargs': {
        'sidetune_kwargs': {'side_kwargs': {'eval_only': True}}}}}}}
@ex.named_config
def alpha_blend():
    # Enable learned alpha blending, set via both kwarg paths used across model types.
    cfg = {}
    cfg['learner'] = {
        'perception_network_kwargs': {'extra_kwargs': {'sidetune_kwargs': {'alpha_blend': True}}},
        'model_kwargs': {'base_kwargs': {'perception_unit_kwargs': {'extra_kwargs': {
            'sidetune_kwargs': {'alpha_blend': True}}}}},
    }
@ex.named_config
def alpha8():
    # Alpha blending with init_value 1.39 (presumably pre-sigmoid, giving alpha ~0.8 — verify).
    cfg = {}
    cfg['learner'] = {
        'perception_network_kwargs': {'extra_kwargs': {'sidetune_kwargs': {
            'alpha_blend': True, 'alpha_kwargs': {'init_value': 1.39}}}},
        'model_kwargs': {'base_kwargs': {'perception_unit_kwargs': {'extra_kwargs': {
            'sidetune_kwargs': {'alpha_blend': True, 'alpha_kwargs': {'init_value': 1.39}}}}}},
    }
@ex.named_config
def dreg_il():
    # Perceptual (decoder-based) regularization for IL; MSE against curvature decoder outputs.
    cfg = {}
    cfg['training'] = {
        'regularizer_fn': 'perceptual_regularizer',
        'regularizer_kwargs': {
            'coef': 0.001,
            'decoder_path': '/mnt/models/curvature_decoder.dat',
            'use_transfer': False,
            'reg_loss_fn': 'F.mse_loss',
        },
        'loss_list': ['standard', 'final', 'weight_tying'],
        'batch_size': 16,
    }
    cfg['learner'] = {'model_kwargs': {'base_kwargs': {'perception_unit_kwargs': {'extra_kwargs': {
        'attrs_to_remember': ['base_encoding', 'merged_encoding', 'side_output']}}}}}
@ex.named_config
def treg_il():
    # Transfer regularization for IL: L1 loss on transferred encodings.
    cfg = {}
    cfg['training'] = {
        'regularizer_fn': 'transfer_regularizer',
        'regularizer_kwargs': {'coef': 0.001, 'reg_loss_fn': 'F.l1_loss'},
        'loss_list': ['standard', 'final', 'weight_tying'],
    }
    cfg['learner'] = {'model_kwargs': {'base_kwargs': {'perception_unit_kwargs': {'extra_kwargs': {
        'attrs_to_remember': ['base_encoding', 'transfered_encoding']}}}}}
@ex.named_config
def expert():
    # Scripted expert for Habitat PointNav; single process in debug mode.
    uuid = 'habitat_expert'
    cfg = {}
    override = {}
    cfg['learner'] = {
        'algo': 'expert',
        'algo_class': 'Expert',
        'algo_kwargs': {
            'data_dir': '/mnt/data/expert_trajs/large',
            'compare_with_saved_trajs': False,
            'follower': None,
        },
    }
    cfg['env'] = {'env_name': 'Habitat_PointNav'}
    override['env'] = {
        'num_processes': 1,
        'num_val_processes': 1,
        'env_specific_kwargs': {'debug_mode': True},
    }
@ex.named_config
def taskonomy_features():
    """Agent acting on frozen mid-level (Taskonomy) visual features.

    See: Sax et al., "Learning to Navigate Using Mid-Level Visual Priors" (2019),
    and Zamir et al., "Taskonomy: Disentangling Task Transfer Learning" (2018).
    The encoder used here is the curvature encoder.
    """
    uuid = 'habitat_taskonomy_feature'
    cfg = {}
    cfg['learner'] = {
        'perception_network': 'TaskonomyFeaturesOnlyNet',
        'perception_network_kwargs': {'extra_kwargs': {'main_perception_network': 'TaskonomyFeaturesOnlyNet'}},
    }
    cfg['env'] = {
        'env_specific_kwargs': {'target_dim': 16},
        'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
        'transform_fn_pre_aggregation_kwargs': {
            'names_to_transforms': {'taskonomy': 'rescale_centercrop_resize((3,256,256))'},
        },
        'transform_fn_post_aggregation_fn': 'TransformFactory.independent',
        'transform_fn_post_aggregation_kwargs': {
            'names_to_transforms': {'taskonomy': "taskonomy_features_transform('/mnt/models/curvature_encoder.dat')"},
            'keep_unnamed': True,
        },
    }
@ex.named_config
def blind():
    """Blinded agent: no visual input; can still reason about movement via path integration."""
    uuid = 'blind'
    cfg = {}
    cfg['learner'] = {'perception_network': 'TaskonomyFeaturesOnlyNet'}
    cfg['env'] = {
        'env_specific_kwargs': {'target_dim': 16},
        'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
        'transform_fn_pre_aggregation_kwargs': {
            'names_to_transforms': {'taskonomy': 'blind((8,16,16))'},
        },
    }
@ex.named_config
def midtune():
    # Midtuning: reinitialize the perception network; FCN5 base + FCN5 side, both trainable.
    uuid = 'habitat_midtune'
    cfg = {}
    cfg['learner'] = {
        'perception_network_reinit': True,
        'rollout_value_batch_multiplier': 1,
        'perception_network': 'RLSidetuneWrapper',
        'perception_network_kwargs': {'extra_kwargs': {
            'main_perception_network': 'TaskonomyFeaturesOnlyNet',
            'sidetune_kwargs': {
                'n_channels_in': 3,
                'n_channels_out': 8,
                'normalize_pre_transfer': False,
                'base_class': 'FCN5',
                'base_kwargs': {'normalize_outputs': False},
                'base_weights_path': None,
                'side_class': 'FCN5',
                'side_kwargs': {'normalize_outputs': False},
                'side_weights_path': None,
            },
        }},
    }
    cfg['saving'] = {'checkpoint': None}
    cfg['env'] = {
        'env_specific_kwargs': {'target_dim': 16},
        'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
        'transform_fn_pre_aggregation_kwargs': {
            'names_to_transforms': {'rgb_filled': 'rescale_centercrop_resize((3,256,256))'},
        },
    }
@ex.named_config
def finetune():
    # Finetuning: trainable FCN5 side network, no frozen base.
    uuid = 'habitat_finetune'
    cfg = {}
    cfg['learner'] = {
        'perception_network': 'RLSidetuneWrapper',
        'perception_network_kwargs': {'extra_kwargs': {
            'main_perception_network': 'TaskonomyFeaturesOnlyNet',
            'sidetune_kwargs': {
                'n_channels_in': 3,
                'n_channels_out': 8,
                'normalize_pre_transfer': False,
                'side_class': 'FCN5',
                'side_kwargs': {'normalize_outputs': False},
                'side_weights_path': None,
            },
        }},
        'rollout_value_batch_multiplier': 1,
    }
    cfg['env'] = {
        'env_specific_kwargs': {'target_dim': 16},
        'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
        'transform_fn_pre_aggregation_kwargs': {
            'names_to_transforms': {'rgb_filled': 'rescale_centercrop_resize((3,256,256))'},
        },
    }
@ex.named_config
def sidetune():
    # RL sidetuning: frozen TaskonomyEncoder base + trainable FCN5 side, alpha-blended.
    uuid = 'habitat_sidetune'
    cfg = {}
    cfg['learner'] = {
        'perception_network': 'RLSidetuneWrapper',
        'perception_network_kwargs': {'extra_kwargs': {
            'sidetune_kwargs': {
                'n_channels_in': 3,
                'n_channels_out': 8,
                'normalize_pre_transfer': False,
                'base_class': 'TaskonomyEncoder',
                'base_weights_path': None,
                'base_kwargs': {'eval_only': True, 'normalize_outputs': False},
                'side_class': 'FCN5',
                'side_kwargs': {'normalize_outputs': False},
                'side_weights_path': None,
                'alpha_blend': True,
            },
            'attrs_to_remember': ['base_encoding', 'side_output', 'merged_encoding'],
        }},
        'rollout_value_batch_multiplier': 1,
    }
    cfg['env'] = {
        'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
        'transform_fn_pre_aggregation_kwargs': {
            'names_to_transforms': {'rgb_filled': 'rescale_centercrop_resize((3,256,256))'},
        },
    }
@ex.named_config
def rlgsn_base_resnet50():
    # RL GSN base: frozen TaskonomyEncoder (ResNet-50).
    cfg = {}
    cfg['learner'] = {
        'perception_network': 'RLSidetuneWrapper',
        'perception_network_kwargs': {'extra_kwargs': {'sidetune_kwargs': {
            'base_class': 'TaskonomyEncoder',
            'base_weights_path': None,
            'base_kwargs': {'eval_only': True, 'normalize_outputs': False},
        }}},
    }
@ex.named_config
def rlgsn_base_fcn5s():
    # RL GSN base: frozen FCN5.
    cfg = {}
    cfg['learner'] = {
        'perception_network': 'RLSidetuneWrapper',
        'perception_network_kwargs': {'extra_kwargs': {'sidetune_kwargs': {
            'base_class': 'FCN5',
            'base_weights_path': None,
            'base_kwargs': {'eval_only': True, 'normalize_outputs': False},
        }}},
    }
@ex.named_config
def rlgsn_base_learned():
    # RL GSN: make the base network trainable.
    cfg = {}
    cfg['learner'] = {
        'perception_network': 'RLSidetuneWrapper',
        'perception_network_kwargs': {'extra_kwargs': {'sidetune_kwargs': {
            'base_kwargs': {'eval_only': False},
        }}},
    }
@ex.named_config
def rlgsn_side_resnet50():
    # RL GSN side: trainable TaskonomyEncoder (ResNet-50).
    cfg = {}
    cfg['learner'] = {
        'perception_network': 'RLSidetuneWrapper',
        'perception_network_kwargs': {'extra_kwargs': {'sidetune_kwargs': {
            'side_class': 'TaskonomyEncoder',
            'side_weights_path': None,
            'side_kwargs': {'eval_only': False, 'normalize_outputs': False},
        }}},
    }
@ex.named_config
def rlgsn_side_fcn5s():
    # RL GSN side: trainable FCN5.
    cfg = {}
    cfg['learner'] = {
        'perception_network': 'RLSidetuneWrapper',
        'perception_network_kwargs': {'extra_kwargs': {'sidetune_kwargs': {
            'side_class': 'FCN5',
            'side_weights_path': None,
            'side_kwargs': {'eval_only': False, 'normalize_outputs': False},
        }}},
    }
@ex.named_config
def rlgsn_side_frozen():
    # RL GSN: freeze the side network.
    cfg = {}
    cfg['learner'] = {
        'perception_network': 'RLSidetuneWrapper',
        'perception_network_kwargs': {'extra_kwargs': {'sidetune_kwargs': {
            'side_kwargs': {'eval_only': True},
        }}},
    }
@ex.named_config
def alexnet():
    """Agent acting on frozen AlexNet conv features."""
    uuid = 'habitat_alexnet_feature'
    cfg = {}
    cfg['learner'] = {
        'perception_network': 'AlexNetFeaturesOnlyNet',
        'perception_network_kwargs': {'extra_kwargs': {'main_perception_network': 'AlexNetFeaturesOnlyNet'}},
    }
    cfg['env'] = {
        'env_specific_kwargs': {'target_dim': 13},
        'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
        'transform_fn_pre_aggregation_kwargs': {
            'names_to_transforms': {'taskonomy': 'alexnet_transform((3, 224, 224))'},
        },
        'transform_fn_post_aggregation_fn': 'TransformFactory.independent',
        'transform_fn_post_aggregation_kwargs': {
            'names_to_transforms': {
                'taskonomy': "alexnet_features_transform('{load_path}')".format(load_path='/mnt/models/alexnet-owt-4df8aa71.pth'),
            },
            'keep_unnamed': True,
        },
    }
@ex.named_config
def slam():
    # Classical SLAM baseline (ground-truth depth) for Habitat PointNav; single debug process.
    uuid = 'habitat_slam'
    cfg = {}
    cfg['learner'] = {
        'algo': 'slam',
        'num_stack': 1,
        'slam_class': 'DepthMapperAndPlanner',
        'slam_kwargs': {
            'map_size_cm': 1200,
            'mark_locs': False,
            'reset_if_drift': False,
            'count': -1,
            'close_small_openings': False,
            'recover_on_collision': False,
            'fix_thrashing': False,
            'goal_f': 1.1,
            'point_cnt': 2,
        },
    }
    cfg['env'] = {
        'env_name': 'Habitat_PointNav',
        'env_specific_kwargs': {'scenario_kwargs': {'use_depth': True}},
        'transform_fn_pre_aggregation': None,
        'transform_fn_post_aggregation': "\n                TransformFactory.independent(\n                {{\n                    'rgb_filled':identity_transform(),\n                    'depth':identity_transform(),\n                    'pointgoal':identity_transform(),\n                    'global_pos':identity_transform(),\n                }},\n                keep_unnamed=False)\n                ".translate(remove_whitespace).format(taskonomy_encoder='/mnt/models/normal_encoder.dat'),
    }
    override = {}
    override['env'] = {
        'num_processes': 1,
        'num_val_processes': 1,
        'env_specific_kwargs': {'debug_mode': True},
    }
@ex.named_config
def slam_estimated():
    # SLAM baseline with network-estimated depth (no ground-truth depth sensor).
    uuid = 'habitat_slam_estimated_depth'
    cfg = {}
    cfg['learner'] = {
        'algo': 'slam',
        'num_stack': 1,
        'slam_class': 'TaskonomyDepthMapperAndPlanner',
        'slam_kwargs': {
            'map_size_cm': 1200,
            'out_dir': None,
            'mark_locs': True,
            'reset_if_drift': True,
            'count': -1,
            'close_small_openings': True,
            'recover_on_collision': False,
            'fix_thrashing': False,
            'goal_f': 1.1,
            'point_cnt': 2,
            'depth_estimator_kwargs': {
                'load_encoder_path': '/mnt/models/depth_euclidean_encoder.dat',
                'load_decoder_path': '/mnt/models/depth_euclidean_decoder.dat',
            },
        },
    }
    cfg['env'] = {
        'env_name': 'Habitat_PointNav',
        'env_specific_kwargs': {'scenario_kwargs': {'use_depth': False}},
        'transform_fn_pre_aggregation': None,
        'transform_fn_post_aggregation': "\n                TransformFactory.independent(\n                {{\n                    'rgb_filled':identity_transform(),\n                    'pointgoal':identity_transform(),\n                    'global_pos':identity_transform(),\n                }},\n                keep_unnamed=False)\n                ".translate(remove_whitespace).format(taskonomy_encoder='/mnt/models/normal_encoder.dat'),
    }
    override = {}
    override['env'] = {
        'num_processes': 1,
        'num_val_processes': 1,
        'env_specific_kwargs': {'debug_mode': True},
    }
@ex.named_config
def srl_features():
    """Agent acting on pretrained SRL (state representation learning) features.

    NOTE(review): the original docstring and uuid said "alexnet" — apparently
    copy-pasted from the alexnet config. The uuid is kept byte-identical for
    compatibility with existing experiment records; confirm before renaming.
    """
    uuid = 'habitat_alexnet_feature'
    cfg = {}
    cfg['learner'] = {
        'perception_network': 'BaseModelAutoEncoder',
        'perception_network_kwargs': {'n_map_channels': 1, 'use_target': False},
    }
    cfg['env'] = {
        'env_specific_kwargs': {'target_dim': 6},
        'transform_fn_pre_aggregation': "\n                TransformFactory.independent(\n                {\n                    'taskonomy': rescale_centercrop_resize((3,224,224)),\n                },\n                keep_unnamed=True)\n                ".translate(remove_whitespace),
        'transform_fn_post_aggregation': "\n                TransformFactory.independent(\n                {{\n                    'taskonomy':srl_features_transform('{load_path}'),\n                    'map':identity_transform(),\n                    'global_pos':identity_transform(),\n                }},\n                keep_unnamed=False)\n                ".translate(remove_whitespace).format(load_path='/mnt/share/midlevel_control/baselines/srl_models/HabitatPlanning/forward_inverse/srl_model.pth'),
    }
@ex.named_config
def curiosity():
    # Intrinsic curiosity: reward and forward/inverse model loss coefficients.
    uuid = 'habitat_curiosity'
    cfg = {}
    cfg['learner'] = {
        'curiosity_reward_coef': 0.1,
        'forward_loss_coef': 0.2,
        'inverse_loss_coef': 0.8,
    }
@ex.named_config
def scratch():
    # Train a small AtariNet perception network from scratch on raw RGB.
    uuid = 'habitat_scratch_map'
    cfg = {}
    cfg['learner'] = {'perception_network': 'AtariNet'}
    cfg['env'] = {
        'env_specific_kwargs': {'target_dim': 9},
        'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
        'transform_fn_pre_aggregation_kwargs': {
            'names_to_transforms': {'rgb_filled': 'rescale_centercrop_resize((3,84,84))'},
        },
    }
@ex.named_config
def all_features_resnet():
    # All Taskonomy features, every encoder as a full ResNet (no distilled models).
    # NOTE(review): 'denoising' appears twice in this list (so its encoder loads twice) — verify intent.
    features_list_resnet = ['denoising', 'egomotion', 'fixated_pose', 'jigsaw', 'nonfixated_pose',
                            'point_matching', 'room_layout', 'segment_unsup25d', 'segment_unsup2d',
                            'segment_semantic', 'class_scene', 'inpainting', 'vanishing_point',
                            'autoencoding', 'class_object', 'curvature', 'denoising', 'depth_euclidean',
                            'depth_zbuffer', 'edge_occlusion', 'edge_texture', 'keypoints2d',
                            'keypoints3d', 'normal', 'reshading']
    features_list_distil = []
    features_paths = ([f'/mnt/models/{feat}_encoder.dat' for feat in features_list_resnet]
                      + [f'/mnt/models/{feat}-distilled.pth' for feat in features_list_distil])
    uuid = 'many_features'
    cfg = {}
    cfg['learner'] = {
        'perception_network': 'TaskonomyFeaturesOnlyNet',
        'perception_network_kwargs': {'n_map_channels': 3, 'use_target': True,
                                      'num_tasks': len(features_paths)},
    }
    cfg['env'] = {
        'transform_fn_pre_aggregation': "\n                TransformFactory.independent(\n                {\n                    'taskonomy':rescale_centercrop_resize((3,256,256)),\n                },\n                keep_unnamed=True)\n                ".translate(remove_whitespace),
        'transform_fn_post_aggregation': "\n                TransformFactory.independent(\n                {{\n                    'taskonomy':taskonomy_multi_features_transform({encoder_paths}),\n                    'target':identity_transform(),\n                    'map':map_pool_collated((3,84,84)),\n                    'global_pos':identity_transform(),\n                }},\n                keep_unnamed=False)\n                ".translate(remove_whitespace).format(encoder_paths=features_paths),
    }
    # Scratch variables must be deleted so sacred does not record them as config entries.
    del features_list_resnet, features_list_distil, features_paths
@ex.named_config
def all_features():
    # All Taskonomy features: 13 full-ResNet encoders plus 12 distilled ones.
    features_list_resnet = ['denoising', 'egomotion', 'fixated_pose', 'jigsaw', 'nonfixated_pose',
                            'point_matching', 'room_layout', 'segment_unsup25d', 'segment_unsup2d',
                            'segment_semantic', 'class_scene', 'inpainting', 'vanishing_point']
    features_list_distil = ['autoencoding', 'class_object', 'curvature', 'denoising',
                            'depth_euclidean', 'depth_zbuffer', 'edge_occlusion', 'edge_texture',
                            'keypoints2d', 'keypoints3d', 'normal', 'reshading']
    features_paths = ([f'/mnt/models/{feat}_encoder.dat' for feat in features_list_resnet]
                      + [f'/mnt/models/{feat}-distilled.pth' for feat in features_list_distil])
    uuid = 'many_features'
    cfg = {}
    cfg['learner'] = {
        'perception_network': 'TaskonomyFeaturesOnlyNet',
        'perception_network_kwargs': {'n_map_channels': 3, 'use_target': True,
                                      'num_tasks': len(features_paths)},
    }
    cfg['env'] = {
        'transform_fn_pre_aggregation': "\n                TransformFactory.independent(\n                {\n                    'taskonomy':rescale_centercrop_resize((3,256,256)),\n                },\n                keep_unnamed=True)\n                ".translate(remove_whitespace),
        'transform_fn_post_aggregation': "\n                TransformFactory.independent(\n                {{\n                    'taskonomy':taskonomy_multi_features_transform({encoder_paths}),\n                    'target':identity_transform(),\n                    'map':map_pool_collated((3,84,84)),\n                    'global_pos':identity_transform(),\n                }},\n                keep_unnamed=False)\n                ".translate(remove_whitespace).format(encoder_paths=features_paths),
    }
    # Scratch variables must be deleted so sacred does not record them as config entries.
    del features_list_resnet, features_list_distil, features_paths
@ex.named_config
def many_features():
    # Agent fed a large (but not complete) feature subset; note 'random' is a
    # randomly-initialized encoder baseline included alongside trained ones.
    features_list_resnet = ['fixated_pose', 'jigsaw', 'random', 'room_layout',
                            'segment_unsup25d', 'segment_unsup2d', 'segment_semantic', 'class_scene']
    features_list_distil = ['autoencoding', 'class_object', 'curvature', 'denoising',
                            'depth_euclidean', 'depth_zbuffer', 'edge_occlusion',
                            'keypoints3d', 'normal', 'reshading']
    # ResNet encoders live at *_encoder.dat; distilled checkpoints at *-distilled.pth.
    features_paths = ([f'/mnt/models/{feat}_encoder.dat' for feat in features_list_resnet]
                      + [f'/mnt/models/{feat}-distilled.pth' for feat in features_list_distil])
    uuid = 'many_features'
    cfg = {}
    # One task slot per encoder checkpoint.
    cfg['learner'] = {'perception_network': 'TaskonomyFeaturesOnlyNet',
                      'perception_network_kwargs': {'n_map_channels': 3, 'use_target': True,
                                                    'num_tasks': len(features_paths)}}
    # Pre-aggregation resizes the raw observation; post-aggregation runs every encoder.
    cfg['env'] = {'transform_fn_pre_aggregation': "\n TransformFactory.independent(\n {\n 'taskonomy':rescale_centercrop_resize((3,256,256)),\n },\n keep_unnamed=True)\n ".translate(remove_whitespace),
                  'transform_fn_post_aggregation': "\n TransformFactory.independent(\n {{\n 'taskonomy':taskonomy_multi_features_transform({encoder_paths}),\n 'target':identity_transform(),\n 'map':map_pool_collated((3,84,84)),\n 'global_pos':identity_transform(),\n }},\n keep_unnamed=False)\n ".translate(remove_whitespace).format(encoder_paths=features_paths)}
    # Delete temporaries so sacred does not record them as config entries.
    del features_list_resnet, features_list_distil, features_paths
@ex.named_config
def max_coverage_perception():
    ' Implements an agent with a Max-Coverage Min-Distance Featureset\n From the paper:\n Mid-Level Visual Representations Improve Generalization and Sample Efficiency for Learning Visuomotor Policies\n Alexander Sax, Bradley Emi, Amir R. Zamir, Silvio Savarese, Leonidas Guibas, Jitendra Malik.\n 2018\n '
    uuid = 'habitat_max_coverage_featureset'
    cfg = {}
    # Four-feature variant: num_tasks matches the four encoder paths below.
    cfg['learner'] = {'perception_network': 'TaskonomyFeaturesOnlyNet',
                      'perception_network_kwargs': {'n_map_channels': 3, 'use_target': True,
                                                    'num_tasks': 4}}
    # Pre-aggregation resizes the raw observation; post-aggregation runs the encoders.
    cfg['env'] = {'transform_fn_pre_aggregation': "\n TransformFactory.independent(\n {\n 'taskonomy':rescale_centercrop_resize((3,256,256)),\n },\n keep_unnamed=True)\n ".translate(remove_whitespace),
                  'transform_fn_post_aggregation': "\n TransformFactory.independent(\n {{\n 'taskonomy':taskonomy_multi_features_transform({encoder_paths}),\n 'target':identity_transform(),\n 'map':map_pool_collated((3,84,84)),\n 'global_pos':identity_transform(),\n }},\n keep_unnamed=False)\n ".translate(remove_whitespace).format(encoder_paths=['/mnt/models/normal_encoder.dat', '/mnt/models/keypoints2d_encoder.dat', '/mnt/models/segment_unsup2d_encoder.dat', '/mnt/models/segment_semantic_encoder.dat'])}
@ex.named_config
def max_coverage_perception3():
    ' Implements an agent with a Max-Coverage Min-Distance Featureset\n From the paper:\n Mid-Level Visual Representations Improve Generalization and Sample Efficiency for Learning Visuomotor Policies\n Alexander Sax, Bradley Emi, Amir R. Zamir, Silvio Savarese, Leonidas Guibas, Jitendra Malik.\n 2018\n '
    uuid = 'habitat_max_coverage_featureset'
    cfg = {}
    # Three-feature variant: num_tasks matches the three encoder paths below.
    cfg['learner'] = {'perception_network': 'TaskonomyFeaturesOnlyNet',
                      'perception_network_kwargs': {'n_map_channels': 3, 'use_target': True,
                                                    'num_tasks': 3}}
    # Pre-aggregation resizes the raw observation; post-aggregation runs the encoders.
    cfg['env'] = {'transform_fn_pre_aggregation': "\n TransformFactory.independent(\n {\n 'taskonomy':rescale_centercrop_resize((3,256,256)),\n },\n keep_unnamed=True)\n ".translate(remove_whitespace),
                  'transform_fn_post_aggregation': "\n TransformFactory.independent(\n {{\n 'taskonomy':taskonomy_multi_features_transform({encoder_paths}),\n 'target':identity_transform(),\n 'map':map_pool_collated((3,84,84)),\n 'global_pos':identity_transform(),\n }},\n keep_unnamed=False)\n ".translate(remove_whitespace).format(encoder_paths=['/mnt/models/edge_texture_encoder.dat', '/mnt/models/curvature_encoder.dat', '/mnt/models/reshading_encoder.dat'])}
@ex.named_config
def max_coverage_perception2():
    ' Implements an agent with a Max-Coverage Min-Distance Featureset\n From the paper:\n Mid-Level Visual Representations Improve Generalization and Sample Efficiency for Learning Visuomotor Policies\n Alexander Sax, Bradley Emi, Amir R. Zamir, Silvio Savarese, Leonidas Guibas, Jitendra Malik.\n 2018\n '
    uuid = 'habitat_max_coverage_featureset'
    cfg = {}
    # Two-feature variant: num_tasks matches the two encoder paths below.
    cfg['learner'] = {'perception_network': 'TaskonomyFeaturesOnlyNet',
                      'perception_network_kwargs': {'n_map_channels': 3, 'use_target': True,
                                                    'num_tasks': 2}}
    # Pre-aggregation resizes the raw observation; post-aggregation runs the encoders.
    cfg['env'] = {'transform_fn_pre_aggregation': "\n TransformFactory.independent(\n {\n 'taskonomy':rescale_centercrop_resize((3,256,256)),\n },\n keep_unnamed=True)\n ".translate(remove_whitespace),
                  'transform_fn_post_aggregation': "\n TransformFactory.independent(\n {{\n 'taskonomy':taskonomy_multi_features_transform({encoder_paths}),\n 'target':identity_transform(),\n 'map':map_pool_collated((3,84,84)),\n 'global_pos':identity_transform(),\n }},\n keep_unnamed=False)\n ".translate(remove_whitespace).format(encoder_paths=['/mnt/models/segment_unsup2d_encoder.dat', '/mnt/models/segment_unsup25d_encoder.dat'])}
@ex.named_config
def taskonomy_features_nomap():
    ' Implements an agent with some mid-level feature.\n From the paper:\n Taskonomy: Disentangling Task Transfer Learning\n Amir R. Zamir, Alexander Sax*, William B. Shen*, Leonidas Guibas, Jitendra Malik, Silvio Savarese.\n 2018\n Viable feature options are:\n []\n '
    uuid = 'habitat_taskonomy_feature'
    cfg = {}
    cfg['learner'] = {'perception_network': 'TaskonomyFeaturesOnlyNet',
                      'perception_network_kwargs': {'n_map_channels': 3, 'use_target': True}}
    # Single-feature variant; the 'map' channel is blinded (zeroed out) so the
    # agent sees only the taskonomy feature, target, and global position.
    cfg['env'] = {'env_specific_kwargs': {'target_dim': 16},
                  'transform_fn_pre_aggregation': "\n TransformFactory.independent(\n {\n 'taskonomy':rescale_centercrop_resize((3,256,256)),\n },\n keep_unnamed=True)\n ".translate(remove_whitespace),
                  'transform_fn_post_aggregation': "\n TransformFactory.independent(\n {{\n 'taskonomy':taskonomy_features_transform('{taskonomy_encoder}'),\n 'target':identity_transform(),\n 'map':blind((3,84,84)),\n 'global_pos':identity_transform(),\n }},\n keep_unnamed=False)\n ".translate(remove_whitespace).format(taskonomy_encoder='/mnt/models/normal_encoder.dat')}
@ex.named_config
def scratch_nomap():
    # From-scratch (AtariNet on raw pixels) agent with the map channel blinded.
    # NOTE(review): uuid says 'habitat_scratch_map' although this is the no-map
    # variant -- confirm the uuid is intentional before relying on it in logs.
    uuid = 'habitat_scratch_map'
    cfg = {}
    cfg['learner'] = {'perception_network': 'AtariNet',
                      'perception_network_kwargs': {'n_map_channels': 3, 'use_target': True}}
    # 'map' is blinded; rgb is rescaled straight to the 84x84 policy input.
    cfg['env'] = {'env_specific_kwargs': {'target_dim': 9},
                  'transform_fn_pre_aggregation': "\n TransformFactory.independent(\n {\n 'map':blind((3,84,84)),\n 'rgb_filled':rescale_centercrop_resize((3,84,84)),\n 'target':identity_transform(),\n 'global_pos':identity_transform(),\n },\n keep_unnamed=False)\n ".translate(remove_whitespace),
                  'transform_fn_post_aggregation': None}
@ex.named_config
def blind_nomap():
    ' Implements a blinded agent. This has no visual input, but is still able to reason about its movement\n via path integration.\n '
    uuid = 'blind'
    cfg = {}
    cfg['learner'] = {'perception_network': 'TaskonomyFeaturesOnlyNet',
                      'perception_network_kwargs': {'n_map_channels': 3, 'use_target': True}}
    # Both the taskonomy feature map and the map channel are blinded; only the
    # target vector and global position carry information.
    cfg['env'] = {'env_specific_kwargs': {'target_dim': 16},
                  'transform_fn_pre_aggregation': "\n TransformFactory.independent(\n {\n 'taskonomy': blind((8,16,16)),\n 'target': identity_transform(),\n 'map': blind((3, 84, 84)),\n 'global_pos':identity_transform(),\n },\n keep_unnamed=False)\n ".translate(remove_whitespace)}
@ex.named_config
def buildings1():
    """Restrict training to the first scene in `all_buildings`."""
    cfg = dict(env=dict(env_specific_kwargs=dict(train_scenes=all_buildings[:1])))
@ex.named_config
def buildings2():
    """Restrict training to the first 2 scenes in `all_buildings`."""
    cfg = dict(env=dict(env_specific_kwargs=dict(train_scenes=all_buildings[:2])))
@ex.named_config
def buildings4():
    """Restrict training to the first 4 scenes in `all_buildings`."""
    cfg = dict(env=dict(env_specific_kwargs=dict(train_scenes=all_buildings[:4])))
@ex.named_config
def buildings8():
    """Restrict training to the first 8 scenes in `all_buildings`."""
    cfg = dict(env=dict(env_specific_kwargs=dict(train_scenes=all_buildings[:8])))
@ex.named_config
def buildings16():
    """Restrict training to the first 16 scenes in `all_buildings`."""
    cfg = dict(env=dict(env_specific_kwargs=dict(train_scenes=all_buildings[:16])))
@ex.named_config
def buildings32():
    """Restrict training to the first 32 scenes in `all_buildings`."""
    cfg = dict(env=dict(env_specific_kwargs=dict(train_scenes=all_buildings[:32])))
@ex.named_config
def buildings72():
    """Train on every scene in `all_buildings` (the full set)."""
    cfg = dict(env=dict(env_specific_kwargs=dict(train_scenes=all_buildings)))
@ex.named_config
def short3m():
    """Habitat PointNav with episodes capped at a 3m geodesic distance."""
    uuid = 'habitat_planning_3m'
    cfg = {
        'env': {
            'env_name': 'Habitat_PointNav',
            'env_specific_kwargs': {'scenario_kwargs': {'max_geodesic_dist': 3}},
        },
    }
@ex.named_config
def short5m():
    """Habitat PointNav with episodes capped at a 5m geodesic distance."""
    uuid = 'habitat_planning_5m'
    cfg = {
        'env': {
            'env_name': 'Habitat_PointNav',
            'env_specific_kwargs': {'scenario_kwargs': {'max_geodesic_dist': 5}},
        },
    }
@ex.named_config
def short7m():
    """Habitat PointNav with episodes capped at a 7m geodesic distance."""
    uuid = 'habitat_planning_7m'
    cfg = {
        'env': {
            'env_name': 'Habitat_PointNav',
            'env_specific_kwargs': {'scenario_kwargs': {'max_geodesic_dist': 7}},
        },
    }
@ex.named_config
def taskonomy_decoding():
    ' Implements an agent with some mid-level decoding.\n From the paper:\n Taskonomy: Disentangling Task Transfer Learning\n Amir R. Zamir, Alexander Sax*, William B. Shen*, Leonidas Guibas, Jitendra Malik, Silvio Savarese.\n 2018\n Viable feature options are:\n []\n '
    uuid = 'habitat_taskonomy_decoding'
    cfg = {}
    cfg['learner'] = {'perception_network': 'AtariNet',
                      'perception_network_kwargs': {'n_map_channels': 3, 'use_target': True}}
    # rgb_filled is passed through a frozen taskonomy encoder+decoder pair
    # (surface normals) on GPU, then fed to the scratch AtariNet policy.
    cfg['env'] = {'env_specific_kwargs': {'target_dim': 9},
                  'transform_fn_pre_aggregation': "\n TransformFactory.independent(\n {\n 'rgb_filled': rescale_centercrop_resize((3,256,256)),\n },\n keep_unnamed=True)\n ".translate(remove_whitespace),
                  'transform_fn_post_aggregation': "\n TransformFactory.independent(\n {{\n 'rgb_filled':cross_modal_transform(TaskonomyNetwork(load_encoder_path='{encoder}', load_decoder_path='{decoder}').cuda()),\n 'taskonomy': identity_transform(),\n 'target':identity_transform(),\n 'map':map_pool_collated((3,84,84)),\n 'global_pos':identity_transform(),\n }},\n keep_unnamed=False)\n ".translate(remove_whitespace).format(encoder='/mnt/models/normal_encoder.dat', decoder='/mnt/models/normal_decoder.dat')}
@ex.named_config
def cfg_taskonomy_decoding():
    """Run rgb_filled through a frozen taskonomy normal encoder/decoder
    (uncollated observations) and train a scratch perception network on top.
    """
    cfg = {}
    cfg['learner'] = {'perception_network': 'scratch'}
    # BUGFIX: this .format(...) previously also passed
    # encoder_type=cfg['learner']['encoder_type'] and
    # taskonomy_encoder=cfg['learner']['taskonomy_encoder'], even though the
    # template has no such placeholders. Since cfg['learner'] was assigned
    # *after* this line, evaluating the named config raised KeyError. Only the
    # {image_dim} placeholder is actually used.
    cfg['env'] = {'collate_env_obs': False,
                  'transform_fn': "\n TransformFactory.independent(\n {{\n 'rgb_filled':cross_modal_transform(TaskonomyNetwork(load_encoder_path='/mnt/models/normal_encoder.dat',\n load_decoder_path='/mnt/models/normal_decoder.dat').cuda().eval(),\n (3,{image_dim}, {image_dim})),\n 'target':identity_transform()\n }},\n keep_unnamed=False)\n ".format(image_dim=84)}
@ex.named_config
def cfg_taskonomy_decoding_collate():
    """Run rgb_filled through a frozen taskonomy normal encoder/decoder on
    collated (batched) env observations, training a scratch perception network.
    """
    # image_dim is intentionally left in scope so sacred records it as a config entry.
    image_dim = 84
    cfg = {}
    cfg['learner'] = {'perception_network': 'scratch',
                      'taskonomy_encoder': '/mnt/models/normal_encoder.dat',
                      'encoder_type': 'taskonomy'}
    # CLEANUP: the .format(...) previously also passed encoder_type=... and
    # taskonomy_encoder=... kwargs that have no matching placeholders in the
    # template; they were dead arguments and have been removed. Only
    # {image_dim} is substituted.
    cfg['env'] = {'collate_env_obs': True,
                  'transform_fn': "\n TransformFactory.independent(\n {{\n 'rgb_filled':cross_modal_transform_collated(TaskonomyNetwork(\n load_encoder_path='/mnt/models/normal_encoder.dat',\n load_decoder_path='/mnt/models/normal_decoder.dat').cuda().eval(),\n (3,{image_dim},{image_dim})),\n 'target':identity_transform()\n }},\n keep_unnamed=False)".format(image_dim=image_dim)}
@ex.named_config
def cfg_unet_decoding():
    """Run rgb_filled through a UNet checkpoint (homoscedastic normal
    regression) and train a scratch perception network on the decoded output.
    """
    # image_dim is intentionally left in scope so sacred records it as a config entry.
    image_dim = 84
    cfg = {}
    cfg['learner'] = {'perception_network': 'scratch'}
    # BUGFIX: this .format(...) previously also passed
    # encoder_type=cfg['learner']['encoder_type'] and
    # taskonomy_encoder=cfg['learner']['taskonomy_encoder'], even though the
    # template has no such placeholders. Since cfg['learner'] was assigned
    # *after* this line, evaluating the named config raised KeyError. Only the
    # {image_dim} placeholder is actually used.
    cfg['env'] = {'collate_env_obs': False,
                  'transform_fn': "\n TransformFactory.independent(\n {{\n 'rgb_filled':cross_modal_transform(load_from_file(\n UNet(),\n '/mnt/logdir/homoscedastic_normal_regression-checkpoints-ckpt-4.dat').cuda().eval(),\n (3,{image_dim}, {image_dim})),\n 'target':identity_transform()\n }},\n keep_unnamed=False)".format(image_dim=image_dim)}
@ex.named_config
def treg_l1():
    """Sidetune RL agent with an L1 penalty tying the transfered encoding to
    the base encoding (coefficient 0.1)."""
    uuid = 'habitat_regularization_transfer_l1'
    cfg = {
        'learner': {
            'perception_network': 'RLSidetuneWrapper',
            'perception_network_kwargs': {
                'extra_kwargs': {'attrs_to_remember': ['base_encoding', 'transfered_encoding']},
            },
            'loss_kwargs': {
                'intrinsic_loss_coefs': [0.1],
                'intrinsic_loss_types': ['transfer_l1'],
            },
        },
    }
@ex.named_config
def treg_l2():
    """Sidetune RL agent with an L2 penalty tying the transfered encoding to
    the base encoding (coefficient 0.1)."""
    uuid = 'habitat_regularization_transfer_l2'
    cfg = {
        'learner': {
            'perception_network': 'RLSidetuneWrapper',
            'perception_network_kwargs': {
                'extra_kwargs': {'attrs_to_remember': ['base_encoding', 'transfered_encoding']},
            },
            'loss_kwargs': {
                'intrinsic_loss_coefs': [0.1],
                'intrinsic_loss_types': ['transfer_l2'],
            },
        },
    }
@ex.named_config
def dreg_t():
    """Sidetune RL agent with a perceptual-transfer regularizer, decoded via
    the curvature decoder checkpoint (coefficient 0.1)."""
    uuid = 'habitat_regularization_perceptual_transfer'
    cfg = {
        'learner': {
            'perception_network': 'RLSidetuneWrapper',
            'perception_network_kwargs': {
                'extra_kwargs': {'attrs_to_remember': ['base_encoding', 'transfered_encoding']},
            },
            'loss_kwargs': {
                'intrinsic_loss_coefs': [0.1],
                'intrinsic_loss_types': ['perceptual_transfer'],
                'decoder_path': '/mnt/models/curvature_decoder.dat',
            },
        },
    }
@ex.named_config
def dreg():
    """Sidetune RL agent with an MSE perceptual regularizer over the merged
    encoding and side output, decoded via the curvature decoder checkpoint."""
    uuid = 'habitat_regularization_perceptual'
    cfg = {
        'learner': {
            'perception_network': 'RLSidetuneWrapper',
            'perception_network_kwargs': {
                'extra_kwargs': {'attrs_to_remember': ['base_encoding', 'merged_encoding', 'side_output']},
            },
            'loss_kwargs': {
                'loss_fn': 'F.mse_loss',
                'intrinsic_loss_coefs': [0.1],
                'intrinsic_loss_types': ['perceptual'],
                'decoder_path': '/mnt/models/curvature_decoder.dat',
            },
        },
    }
@ex.named_config
def unorm_t_only():
    """Disable taskonomy-feature normalization in the perception network."""
    cfg = {
        'learner': {
            'perception_network_kwargs': {
                'extra_kwargs': {'normalize_taskonomy': False},
            },
        },
    }
@ex.named_config
def features_double():
    """Enable the `features_double` option on the perception network."""
    cfg = {
        'learner': {
            'perception_network_kwargs': {
                'extra_kwargs': {'features_double': True},
            },
        },
    }
@ex.named_config
def transform_rgb256():
    """Pre-aggregation transform rescaling rgb_filled to 3x256x256."""
    cfg = {
        'env': {
            'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
            'transform_fn_pre_aggregation_kwargs': {
                'names_to_transforms': {'rgb_filled': 'rescale_centercrop_resize((3,256,256))'},
            },
        },
    }
@ex.named_config
def taskonomy_base_data():
    # Base data/learner/saving settings for lifelong sidetuning on the
    # taskonomy 'tiny' split.
    # NOTE(review): `epochs_per_task` is not defined in this function; it is
    # presumably a module-level value -- confirm it is in scope when this
    # named config is evaluated.
    cfg = {'training': {'dataloader_fn': 'taskonomy_dataset.get_lifelong_dataloaders',
                        'dataloader_fn_kwargs': {'data_path': '/mnt/data/tiny', 'split': 'tiny',
                                                 'load_to_mem': False, 'num_workers': 8,
                                                 'pin_memory': True,
                                                 'epochs_per_task': epochs_per_task,
                                                 'epochs_until_cycle': 0, 'batch_size': 32,
                                                 'batch_size_val': 64}},
           'learner': {'model': 'LifelongSidetuneNetwork',
                       'model_kwargs': {'dataset': 'taskonomy'},
                       'optimizer_class': 'optim.Adam', 'lr': 0.0001,
                       'optimizer_kwargs': {'weight_decay': 2e-06},
                       'use_feedback': False},
           # log_interval scales inversely with epochs_per_task so logging
           # frequency per task stays constant.
           'saving': {'ticks_per_epoch': 100, 'log_interval': (1 / epochs_per_task),
                      'save_interval': 1}}
@ex.named_config
def taskonomy_3_data():
    """Three-task lifelong setup on the 'debug2' split: normal -> rgb -> normal,
    each trained with weighted L1 loss and masks."""
    uuid = 'louis_taskonomy'
    n_tasks = 3
    cfg = {'training': {'num_epochs': 3,
                        'dataloader_fn_kwargs': {'split': 'debug2'},
                        'loss_fn': ['weighted_l1_loss', 'weighted_l1_loss', 'weighted_l1_loss'],
                        # BUGFIX: was `[{}] * n_tasks`, which aliases ONE shared
                        # dict across every task's loss kwargs -- a mutation for
                        # one task would silently leak into the others. Build
                        # independent dicts (matching task_specific_side_kwargs).
                        'loss_kwargs': [{} for _ in range(n_tasks)],
                        'sources': ([['rgb']] * n_tasks),
                        'targets': [['normal'], ['rgb'], ['normal']],
                        'masks': [True, True, True],
                        'use_masks': [True, True, True],
                        'task_is_classification': [False, False, False]},
           'learner': {'model_kwargs': {'tasks': list(range(n_tasks)),
                                        'task_specific_side_kwargs': [{} for _ in range(n_tasks)],
                                        'task_specific_transfer_kwargs': [{'out_channels': 3, 'is_decoder_mlp': False},
                                                                          {'out_channels': 3, 'is_decoder_mlp': False},
                                                                          {'out_channels': 3, 'is_decoder_mlp': False}]}}}
    # Delete the temporary so sacred does not record it as a config entry.
    del n_tasks
@ex.named_config
def taskonomy_shuffle3_data():
    # Like taskonomy_3_data but with the task ORDER shuffled at config time.
    # NOTE(review): random.shuffle makes this named config nondeterministic
    # unless the RNG is seeded before config evaluation -- confirm that is
    # acceptable for reproducibility.
    tasks = ['normal', 'rgb', 'normal']
    random.shuffle(tasks)
    n_tasks = 3
    cfg = {'training': {'num_epochs': n_tasks,
                        'dataloader_fn_kwargs': {'split': 'debug2'},
                        'sources': ([['rgb']] * n_tasks)},
           'learner': {'model_kwargs': {'tasks': list(range(n_tasks))}}}
    cfg['training']['targets'] = [[t] for t in tasks]
    # Merge in each task's per-task kwargs from the module-level
    # tasks_to_kwargs table, in the shuffled order.
    for task in tasks:
        cfg_task = tasks_to_kwargs[task]
        cfg = append_dict(cfg, cfg_task, stop_recurse_keys=stop_recurse_keys)
    # Delete temporaries so sacred does not record them as config entries.
    del task, cfg_task, tasks, n_tasks