code stringlengths 17 6.64M |
|---|
@ex.named_config
def taskonomy_12_data():
    # 12-task Taskonomy lifelong setup: each task reads 'rgb' and predicts one
    # target; per-task loss fns, masks, and transfer-head output channels are
    # given positionally, aligned with the 'targets' list below.
    n_tasks = 12
    # segment_semantic and class_object are the two classification-style tasks
    # (cross-entropy losses, no masks, task_is_classification=True for class_object).
    cfg = {'training': {'num_epochs': n_tasks, 'loss_fn': ['weighted_l2_loss', 'softmax_cross_entropy', 'weighted_l1_loss', 'weighted_l1_loss', 'weighted_l1_loss', 'weighted_l1_loss', 'weighted_l1_loss', 'weighted_l1_loss', 'weighted_l1_loss', 'weighted_l1_loss', 'dense_cross_entropy', 'weighted_l1_loss'], 'loss_kwargs': ([{}] * n_tasks), 'sources': ([['rgb']] * n_tasks), 'targets': [['curvature'], ['segment_semantic'], ['reshading'], ['keypoints3d'], ['keypoints2d'], ['edge_texture'], ['edge_occlusion'], ['depth_zbuffer'], ['depth_euclidean'], ['normal'], ['class_object'], ['rgb']], 'masks': [True, False, True, True, True, True, True, True, True, True, False, True], 'use_masks': [True, False, True, True, True, True, True, True, True, True, False, True], 'task_is_classification': [False, False, False, False, False, False, False, False, False, False, True, False]}, 'learner': {'model_kwargs': {'tasks': list(range(n_tasks)), 'task_specific_side_kwargs': [{} for _ in range(n_tasks)], 'task_specific_transfer_kwargs': [{'out_channels': 2, 'is_decoder_mlp': False}, {'out_channels': 18, 'is_decoder_mlp': False}, {'out_channels': 1, 'is_decoder_mlp': False}, {'out_channels': 1, 'is_decoder_mlp': False}, {'out_channels': 1, 'is_decoder_mlp': False}, {'out_channels': 1, 'is_decoder_mlp': False}, {'out_channels': 1, 'is_decoder_mlp': False}, {'out_channels': 1, 'is_decoder_mlp': False}, {'out_channels': 1, 'is_decoder_mlp': False}, {'out_channels': 3, 'is_decoder_mlp': False}, {'out_channels': 1000, 'is_decoder_mlp': True}, {'out_channels': 3, 'is_decoder_mlp': False}]}}}
    # Helper local; delete so sacred does not record it as a config entry.
    del n_tasks
|
@ex.named_config
def taskonomy_shuffle12_data():
    # Same 12 Taskonomy tasks, but in a random order.
    # NOTE(review): random.shuffle mutates the module-level `tasks` list in
    # place — presumably the RNG is seeded elsewhere for reproducibility; confirm.
    random.shuffle(tasks)
    n_tasks = len(tasks)
    cfg = {'training': {'num_epochs': n_tasks, 'sources': ([['rgb']] * n_tasks)}, 'learner': {'model_kwargs': {'tasks': list(range(n_tasks))}}}
    cfg['training']['targets'] = [[t] for t in tasks]
    # Merge each task's preset kwargs (from tasks_to_kwargs) into cfg.
    for task in tasks:
        cfg_task = tasks_to_kwargs[task]
        cfg = append_dict(cfg, cfg_task, stop_recurse_keys=stop_recurse_keys)
    # Delete helper locals so they do not become sacred config entries
    # (`tasks` is module-level here, so it is not deleted).
    del task, cfg_task, n_tasks
|
@ex.named_config
def taskonomy_12cls_data():
    # The 12 Taskonomy tasks in a fixed permutation (index order into tasks_np).
    order = [11, 7, 0, 2, 8, 9, 5, 10, 4, 1, 3, 6]
    tasks = list(tasks_np[order])
    n_tasks = len(tasks)
    cfg = {}
    cfg['training'] = {'num_epochs': n_tasks,
                       'sources': [['rgb']] * n_tasks,
                       'targets': [[t] for t in tasks]}
    cfg['learner'] = {'model_kwargs': {'tasks': list(range(n_tasks))}}
    # Fold each task's preset kwargs into the config.
    for task in tasks:
        cfg_task = tasks_to_kwargs[task]
        cfg = append_dict(cfg, cfg_task, stop_recurse_keys=stop_recurse_keys)
    # Helper locals must not leak into the sacred config.
    del task, cfg_task, n_tasks, order, tasks
|
@ex.named_config
def taskonomy_12txtr_data():
    # The 12 Taskonomy tasks in another fixed permutation (index order into tasks_np).
    order = [5, 8, 9, 10, 1, 3, 6, 2, 11, 4, 0, 7]
    tasks = list(tasks_np[order])
    n_tasks = len(tasks)
    cfg = {}
    cfg['training'] = {'num_epochs': n_tasks,
                       'sources': [['rgb']] * n_tasks,
                       'targets': [[t] for t in tasks]}
    cfg['learner'] = {'model_kwargs': {'tasks': list(range(n_tasks))}}
    # Fold each task's preset kwargs into the config.
    for task in tasks:
        cfg_task = tasks_to_kwargs[task]
        cfg = append_dict(cfg, cfg_task, stop_recurse_keys=stop_recurse_keys)
    # Helper locals must not leak into the sacred config.
    del task, cfg_task, n_tasks, order, tasks
|
@ex.named_config
def taskonomy_12rgb_data():
    # The 12 Taskonomy tasks in yet another fixed permutation (index order into tasks_np).
    order = [1, 0, 5, 7, 2, 6, 4, 3, 9, 10, 8, 11]
    tasks = list(tasks_np[order])
    n_tasks = len(tasks)
    cfg = {}
    cfg['training'] = {'num_epochs': n_tasks,
                       'sources': [['rgb']] * n_tasks,
                       'targets': [[t] for t in tasks]}
    cfg['learner'] = {'model_kwargs': {'tasks': list(range(n_tasks))}}
    # Fold each task's preset kwargs into the config.
    for task in tasks:
        cfg_task = tasks_to_kwargs[task]
        cfg = append_dict(cfg, cfg_task, stop_recurse_keys=stop_recurse_keys)
    # Helper locals must not leak into the sacred config.
    del task, cfg_task, n_tasks, order, tasks
|
@ex.named_config
def taskonomy_louis_gtnormal_data():
    # Stack the 'normal' sensor alongside 'rgb' as input for every task.
    cfg = {}
    cfg['training'] = {'sources': [['rgb', 'normal']] * N_TASKONOMY_TASKS}
    cfg['learner'] = {'model_kwargs': {'base_class': 'SampleGroupStackModule',
                                       'base_weights_path': None,
                                       'base_uses_other_sensors': True}}
|
@ex.named_config
def taskonomy_gtcurv_data():
    # Stack the 'curvature' sensor alongside 'rgb' as input for every task.
    # Consistency fix: use N_TASKONOMY_TASKS instead of the hard-coded 12, matching
    # taskonomy_louis_gtnormal_data (same structure, same task count).
    cfg = {'training': {'sources': ([['rgb', 'curvature']] * N_TASKONOMY_TASKS)},
           'learner': {'model_kwargs': {'base_class': 'SampleGroupStackModule',
                                        'base_weights_path': None,
                                        'base_uses_other_sensors': True}}}
|
@ex.named_config
def debug2():
    # Use the 'debug2' dataset split.
    cfg = {}
    cfg['training'] = {'dataloader_fn_kwargs': {'split': 'debug2'}}
|
@ex.named_config
def model_lifelong_independent_std_taskonomy():
    # "Independent" baseline: the whole GenericSidetuneNetwork (TaskonomyEncoder
    # base + FCN5 side, both trainable with curvature-pretrained weights) sits in
    # the *side* slot of LifelongSidetuneNetwork; no separate base is configured.
    # Presumably this yields a per-task network — confirm in LifelongSidetuneNetwork.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'side_class': 'GenericSidetuneNetwork', 'side_kwargs': {'n_channels_in': 3, 'n_channels_out': 8, 'base_class': 'TaskonomyEncoder', 'base_kwargs': {'eval_only': False, 'normalize_outputs': False}, 'base_weights_path': '/mnt/models/curvature_encoder.dat', 'use_baked_encoding': False, 'normalize_pre_transfer': False, 'side_class': 'FCN5', 'side_kwargs': {'eval_only': False, 'normalize_outputs': False}, 'side_weights_path': '/mnt/models/curvature_encoder_student.dat'}, 'normalize_pre_transfer': True}}, 'training': {'dataloader_fn': 'taskonomy_dataset.get_lifelong_dataloaders', 'dataloader_fn_kwargs': {'speedup_no_rigidity': True, 'batch_size': 16}}}
|
@ex.named_config
def projected():
    # Turn on 'projected' for both the base and side sub-networks of the
    # GenericSidetuneNetwork used as the side model.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'side_class': 'GenericSidetuneNetwork',
            'side_kwargs': {
                'base_kwargs': {'projected': True},
                'side_kwargs': {'projected': True},
            },
        },
    }
|
@ex.named_config
def model_lifelong_sidetune_std_taskonomy():
    # Standard sidetuning: frozen TaskonomyEncoder base (eval_only=True) plus a
    # trainable FCN5 side network, merged with a learned alpha blend.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'TaskonomyEncoder', 'base_weights_path': '/mnt/models/curvature_encoder.dat', 'base_kwargs': {'eval_only': True, 'normalize_outputs': False}, 'use_baked_encoding': False, 'side_class': 'FCN5', 'side_weights_path': '/mnt/models/curvature_encoder_student.dat', 'side_kwargs': {'eval_only': False, 'normalize_outputs': False}, 'normalize_pre_transfer': True, 'merge_method': 'merge_operators.Alpha'}}, 'training': {'dataloader_fn_kwargs': {'speedup_no_rigidity': True}}}
|
@ex.named_config
def model_lifelong_sidetune_nobase_taskonomy():
    # Sidetuning ablation with no base network: only the FCN5 side model runs.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'base_class': None,
            'use_baked_encoding': False,
            'side_class': 'FCN5',
            'side_weights_path': '/mnt/models/curvature_encoder_student.dat',
            'side_kwargs': {'eval_only': False, 'normalize_outputs': False},
            'normalize_pre_transfer': True,
        },
    }
    cfg['training'] = {'dataloader_fn_kwargs': {'speedup_no_rigidity': True}}
|
@ex.named_config
def model_lifelong_finetune_std_taskonomy():
    # Finetuning baseline: the full GenericSidetuneNetwork (trainable encoder +
    # FCN5 side) is placed in the *base* slot of LifelongSidetuneNetwork.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'GenericSidetuneNetwork', 'base_kwargs': {'n_channels_in': 3, 'n_channels_out': 8, 'base_class': 'TaskonomyEncoder', 'base_kwargs': {'eval_only': False, 'normalize_outputs': False}, 'base_weights_path': '/mnt/models/curvature_encoder.dat', 'use_baked_encoding': False, 'normalize_pre_transfer': False, 'side_class': 'FCN5', 'side_kwargs': {'eval_only': False, 'normalize_outputs': False}, 'side_weights_path': '/mnt/models/curvature_encoder_student.dat'}, 'normalize_pre_transfer': True}}}
|
@ex.named_config
def model_lifelong_features_std_taskonomy():
    # Frozen-features baseline: frozen TaskonomyEncoder base only, no side network.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'TaskonomyEncoder', 'base_weights_path': '/mnt/models/curvature_encoder.dat', 'base_kwargs': {'eval_only': True, 'normalize_outputs': False}, 'use_baked_encoding': False, 'side_class': None, 'side_weights_path': None, 'normalize_pre_transfer': True}}, 'training': {'dataloader_fn_kwargs': {'speedup_no_rigidity': True}}}
|
@ex.named_config
def pnn_v4_mlp():
    # PNN variant 4 with an MLP merge operator (dense lateral connections).
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'use_baked_encoding': False,
            'base_class': 'TaskonomyEncoderWithCache',
            'side_class': 'FCN5ProgressiveH',
            'pnn': True,
            'dense': True,
            'merge_method': 'merge_operators.MLP',
        },
    }
|
@ex.named_config
def model_learned_decoder():
    # Transfer head = TransferConv3 followed by a trainable TaskonomyDecoder.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'transfer_class': 'PreTransferedDecoder',
            'transfer_kwargs': {
                'transfer_class': 'TransferConv3',
                'transfer_weights_path': None,
                'transfer_kwargs': {'n_channels': 8, 'residual': True},
                'decoder_class': 'TaskonomyDecoder',
                'decoder_weights_path': None,
                'decoder_kwargs': {'eval_only': False},
            },
        },
    }
|
@ex.named_config
def full_feedback1():
    # Enable feedback iterations with a mid-feedback FCN5 side network (side only).
    cfg = {}
    cfg['learner'] = {
        'use_feedback': True,
        'feedback_kwargs': {'num_feedback_iter': 5},
        'model_kwargs': {
            'side_class': 'FCN5MidFeedback',
            'merge_method': 'side_only',
            'side_kwargs': {'kernel_size': 1},
        },
    }
|
@ex.named_config
def with_feedback1():
    # Late-feedback FCN5 side network with 1x1 kernels, side output only.
    cfg = {}
    cfg['learner'] = {
        'model_kwargs': {
            'merge_method': 'side_only',
            'side_class': 'FCN5LateFeedback',
            'side_kwargs': {'kernel_size': 1},
        },
    }
|
@ex.named_config
def with_feedback3():
    # Late-feedback FCN5 side network with 3x3 kernels, side output only.
    cfg = {}
    cfg['learner'] = {
        'model_kwargs': {
            'merge_method': 'side_only',
            'side_class': 'FCN5LateFeedback',
            'side_kwargs': {'kernel_size': 3},
        },
    }
|
@ex.named_config
def taskonomy_single_data():
    # Single-task Taskonomy dataloader pointed at the 'debug' folders.
    cfg = {}
    cfg['training'] = {
        'dataloader_fn': 'taskonomy_dataset.get_dataloaders',
        'dataloader_fn_kwargs': {
            'data_path': '/mnt/data/',
            'train_folders': 'debug',
            'val_folders': 'debug',
            'test_folders': 'debug',
            'load_to_mem': False,
            'num_workers': 8,
            'pin_memory': True,
        },
    }
|
@ex.named_config
def model_taskonomy():
    # Plain trainable TaskonomyNetwork with a 3-channel output head.
    cfg = {}
    cfg['learner'] = {'model': 'TaskonomyNetwork',
                      'model_kwargs': {'out_channels': 3, 'eval_only': False}}
|
@ex.named_config
def model_taskonomy_class():
    # TaskonomyNetwork with a 1000-way MLP decoder, trained for object classification.
    cfg = {}
    cfg['learner'] = {'model': 'TaskonomyNetwork',
                      'model_kwargs': {'out_channels': 1000,
                                       'trainable': True,
                                       'is_decoder_mlp': True}}
    cfg['training'] = {'sources': ['rgb'], 'targets': ['class_object']}
|
@ex.named_config
def init_lowenergy():
    # Initialize the FCN5 side network from the low-energy student checkpoint.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'side_class': 'FCN5',
            'side_weights_path': '/mnt/models/curvature_encoder_student_lowenergy.dat',
        },
    }
|
@ex.named_config
def fcn8():
    # Use an FCN8 side network (no inner base) inside a GenericSidetuneNetwork;
    # batch size lowered to 32.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'side_class': 'GenericSidetuneNetwork',
            'side_kwargs': {
                'base_class': None,
                'base_weights_path': None,
                'side_class': 'FCN8',
                'side_weights_path': '/mnt/models/curvature_base_fcn8.dat',
            },
        },
    }
    cfg['training'] = {'dataloader_fn_kwargs': {'batch_size': 32}}
|
@ex.named_config
def model_frozen_decoder():
    # Same transfer head as model_learned_decoder but with the TaskonomyDecoder
    # frozen (eval_only=True, train=False).
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'transfer_class': 'PreTransferedDecoder',
            'transfer_kwargs': {
                'transfer_class': 'TransferConv3',
                'transfer_weights_path': None,
                'transfer_kwargs': {'n_channels': 8, 'residual': True},
                'decoder_class': 'TaskonomyDecoder',
                'decoder_weights_path': None,
                'decoder_kwargs': {'eval_only': True, 'train': False},
            },
        },
    }
|
@ex.named_config
def model_lifelong_independent_resnet_taskonomy():
    # Independent baseline with a trainable TaskonomyEncoder (ResNet) as the side network.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'side_class': 'TaskonomyEncoder',
            'side_kwargs': {'eval_only': False, 'normalize_outputs': False},
            'side_weights_path': '/mnt/models/curvature_encoder.dat',
            'normalize_pre_transfer': True,
        },
    }
|
@ex.named_config
def model_lifelong_independent_fcn5s_taskonomy():
    # Independent baseline with a trainable FCN5 as the side network.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'side_class': 'FCN5',
            'side_kwargs': {'eval_only': False, 'normalize_outputs': False},
            'side_weights_path': '/mnt/models/curvature_encoder_student.dat',
            'normalize_pre_transfer': True,
        },
    }
|
@ex.named_config
def ensemble_side():
    # Side network = ensemble of 3 FCN5 models, each initialized from the
    # curvature student checkpoint.
    cfg = {}
    cfg['learner'] = {
        'model_kwargs': {
            'side_class': 'EnsembleNet',
            'side_weights_path': None,
            'side_kwargs': {
                'n_models': 3,
                'model_class': 'FCN5',
                'model_weights_path': '/mnt/models/curvature_encoder_student.dat',
            },
        },
    }
|
@ex.named_config
def model_lifelong_sidetune_double_resnet_taskonomy():
    # Sidetuning where base AND side are TaskonomyEncoders: base frozen with
    # baked encodings (sources include 'curvature_encoding'), side trainable.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'TaskonomyEncoder', 'base_weights_path': '/mnt/models/curvature_encoder.dat', 'base_kwargs': {'eval_only': True, 'train': False, 'normalize_outputs': False}, 'use_baked_encoding': True, 'side_class': 'TaskonomyEncoder', 'side_weights_path': '/mnt/models/curvature_encoder.dat', 'side_kwargs': {'eval_only': False, 'train': True, 'normalize_outputs': False}, 'normalize_pre_transfer': True}}, 'training': {'sources': ([['rgb', 'curvature_encoding']] * N_TASKONOMY_TASKS)}}
|
@ex.named_config
def model_lifelong_sidetune_double_fcn5s_taskonomy():
    # Sidetuning where base AND side are FCN5s: base frozen, side trainable.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'FCN5', 'base_weights_path': '/mnt/models/curvature_encoder_student.dat', 'base_kwargs': {'eval_only': True, 'train': False, 'normalize_outputs': False}, 'use_baked_encoding': False, 'side_class': 'FCN5', 'side_weights_path': '/mnt/models/curvature_encoder_student.dat', 'side_kwargs': {'eval_only': False, 'train': True, 'normalize_outputs': False}, 'normalize_pre_transfer': True}}}
|
@ex.named_config
def model_lifelong_sidetune_double_open_resnet_taskonomy():
    # "Open" double-ResNet sidetuning: both base and side TaskonomyEncoders are trainable.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'TaskonomyEncoder', 'base_weights_path': '/mnt/models/curvature_encoder.dat', 'base_kwargs': {'eval_only': False, 'train': True, 'normalize_outputs': False}, 'use_baked_encoding': False, 'side_class': 'TaskonomyEncoder', 'side_weights_path': '/mnt/models/curvature_encoder.dat', 'side_kwargs': {'eval_only': False, 'train': True, 'normalize_outputs': False}, 'normalize_pre_transfer': True}}}
|
@ex.named_config
def model_lifelong_sidetune_double_open_fcn5s_taskonomy():
    # "Open" double-FCN5 sidetuning: both base and side FCN5s are trainable.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'FCN5', 'base_weights_path': '/mnt/models/curvature_encoder_student.dat', 'base_kwargs': {'eval_only': False, 'train': True, 'normalize_outputs': False}, 'use_baked_encoding': False, 'side_class': 'FCN5', 'side_weights_path': '/mnt/models/curvature_encoder_student.dat', 'side_kwargs': {'eval_only': False, 'train': True, 'normalize_outputs': False}, 'normalize_pre_transfer': True}}}
|
@ex.named_config
def model_lifelong_finetune_double_fcn5s_taskonomy():
    # Finetuning variant: a GenericSidetuneNetwork of two trainable FCN5s
    # (base + side) placed in the base slot of LifelongSidetuneNetwork.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'GenericSidetuneNetwork', 'base_kwargs': {'n_channels_in': 3, 'n_channels_out': 8, 'base_class': 'FCN5', 'base_weights_path': '/mnt/models/curvature_encoder_student.dat', 'base_kwargs': {'eval_only': False, 'train': True, 'normalize_outputs': False}, 'use_baked_encoding': False, 'normalize_pre_transfer': False, 'side_class': 'FCN5', 'side_weights_path': '/mnt/models/curvature_encoder_student.dat', 'side_kwargs': {'eval_only': False, 'train': True, 'normalize_outputs': False}}, 'use_baked_encoding': False, 'normalize_pre_transfer': True}}}
|
@ex.named_config
def model_lifelong_finetune_double_resnet_taskonomy():
    # Finetuning variant: a GenericSidetuneNetwork of two trainable
    # TaskonomyEncoders (base + side) placed in the base slot.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'GenericSidetuneNetwork', 'base_kwargs': {'n_channels_in': 3, 'n_channels_out': 8, 'base_class': 'TaskonomyEncoder', 'base_weights_path': '/mnt/models/curvature_encoder.dat', 'base_kwargs': {'eval_only': False, 'train': True, 'normalize_outputs': False}, 'use_baked_encoding': False, 'normalize_pre_transfer': False, 'side_class': 'TaskonomyEncoder', 'side_weights_path': '/mnt/models/curvature_encoder.dat', 'side_kwargs': {'eval_only': False, 'train': True, 'normalize_outputs': False}}, 'use_baked_encoding': False, 'normalize_pre_transfer': True}}}
|
@ex.named_config
def pnn_v1():
    # PNN variant 1: progressive FCN5 side network that adds no new parameters.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'use_baked_encoding': False,
            'base_class': 'TaskonomyEncoderWithCache',
            'side_class': 'FCN5ProgressiveNoNewParam',
            'pnn': True,
        },
    }
|
@ex.named_config
def pnn_v2a():
    # PNN variant 2a: progressive FCN5, linear adapters, k=3, no dense laterals.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'use_baked_encoding': False,
            'base_class': 'TaskonomyEncoderWithCache',
            'side_class': 'FCN5Progressive',
            'side_kwargs': {'dense': False, 'k': 3, 'adapter': 'linear'},
            'pnn': True,
        },
    }
|
@ex.named_config
def pnn_v2b():
    # PNN variant 2b: progressive FCN5, linear adapters, k=1, no dense laterals.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'use_baked_encoding': False,
            'base_class': 'TaskonomyEncoderWithCache',
            'side_class': 'FCN5Progressive',
            'side_kwargs': {'dense': False, 'k': 1, 'adapter': 'linear'},
            'pnn': True,
        },
    }
|
@ex.named_config
def pnn_v2c():
    # PNN variant 2c: progressive FCN5, MLP adapters, k=1, no dense laterals.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'use_baked_encoding': False,
            'base_class': 'TaskonomyEncoderWithCache',
            'side_class': 'FCN5Progressive',
            'side_kwargs': {'dense': False, 'k': 1, 'adapter': 'mlp'},
            'pnn': True,
        },
    }
|
@ex.named_config
def pnn_v2d():
    # PNN variant 2d: like v2c but with an extra adapter enabled.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'use_baked_encoding': False,
            'base_class': 'TaskonomyEncoderWithCache',
            'side_class': 'FCN5Progressive',
            'side_kwargs': {'dense': False, 'k': 1, 'adapter': 'mlp', 'extra_adapter': True},
            'pnn': True,
        },
    }
|
@ex.named_config
def pnn_v3():
    # PNN variant 3: dense lateral connections, linear adapters, k=3.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'use_baked_encoding': False,
            'base_class': 'TaskonomyEncoderWithCache',
            'side_class': 'FCN5Progressive',
            'side_kwargs': {'dense': True, 'k': 3, 'adapter': 'linear'},
            'pnn': True,
            'dense': True,
        },
    }
|
@ex.named_config
def pnn_v4():
    # PNN variant 4: FCN5ProgressiveH side network with dense laterals.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'use_baked_encoding': False,
            'base_class': 'TaskonomyEncoderWithCache',
            'side_class': 'FCN5ProgressiveH',
            'pnn': True,
            'dense': True,
        },
    }
|
@ex.named_config
def pnn_early_fusion():
    # PNN with early fusion in the side network; only the side output is used.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'use_baked_encoding': False,
            'base_class': 'TaskonomyEncoderWithCache',
            'side_class': 'FCN5ProgressiveNoNewParam',
            'side_kwargs': {'early_fusion': True},
            'pnn': True,
            'merge_method': 'merge_operators.SideOnly',
        },
    }
|
@ex.named_config
def pnn_rigidity():
    # PNN variant 4 topology but with dense laterals disabled.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'use_baked_encoding': False,
            'base_class': 'TaskonomyEncoderWithCache',
            'side_class': 'FCN5ProgressiveH',
            'pnn': True,
            'dense': False,
        },
    }
|
@ex.named_config
def bsp_simple_base():
    # Enable 'bsp' on base and side kwargs directly on the lifelong model.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'base_kwargs': {'bsp': True},
            'side_kwargs': {'bsp': True},
        },
    }
|
@ex.named_config
def bsp():
    # Enable 'bsp' (period 12) on the inner base/side of a GenericSidetuneNetwork base.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'base_class': 'GenericSidetuneNetwork',
            'base_kwargs': {
                'base_kwargs': {'bsp': True, 'period': 12},
                'side_kwargs': {'bsp': True, 'period': 12},
            },
        },
    }
|
@ex.named_config
def bsp_small():
    # Same as `bsp` but with a shorter period of 3.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'base_class': 'GenericSidetuneNetwork',
            'base_kwargs': {
                'base_kwargs': {'bsp': True, 'period': 3},
                'side_kwargs': {'bsp': True, 'period': 3},
            },
        },
    }
|
@ex.named_config
def untrack_bn_simple_base():
    # Disable BatchNorm running-stat tracking on the base network.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {'base_kwargs': {'track_running_stats': False}},
    }
|
@ex.named_config
def untrack_bn():
    # Disable BatchNorm running-stat tracking on the inner base of a
    # GenericSidetuneNetwork base.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'base_class': 'GenericSidetuneNetwork',
            'base_kwargs': {'base_kwargs': {'track_running_stats': False}},
        },
    }
|
@ex.named_config
def ewc():
    # EWC regularization, Fisher terms averaged across tasks (avg_tasks=True).
    cfg = {
        'training': {
            'regularizer_fn': 'EWC',
            'regularizer_kwargs': {'coef': 100000.0,
                                   'n_samples_fisher': 5000,
                                   'avg_tasks': True},
            'loss_list': ['total', 'weight_tying'],
        }
    }
|
@ex.named_config
def ewc_n_terms():
    # EWC with one Fisher term per task (avg_tasks=False) and a smaller coefficient.
    cfg = {
        'training': {
            'regularizer_fn': 'EWC',
            'regularizer_kwargs': {'coef': 10000.0,
                                   'n_samples_fisher': 5000,
                                   'avg_tasks': False},
            'loss_list': ['total', 'weight_tying'],
        }
    }
|
@ex.named_config
def loss_perceptual():
    # Perceptual L1 loss against normal encodings, decoded via a frozen
    # normal decoder; the annotator (frozen normal encoder) is configured but
    # suppress_target_and_use_annotator is False.
    cfg = {}
    cfg['training'] = {'loss_fn': 'perceptual_l1', 'targets': ['normal_encoding'], 'loss_kwargs': {'decoder_path': '/mnt/models/normal_decoder.dat', 'bake_decodings': True}, 'suppress_target_and_use_annotator': False, 'annotator_class': 'TaskonomyEncoder', 'annotator_weights_path': '/mnt/models/normal_encoder.dat', 'annotator_kwargs': {'train': False, 'eval_only': True, 'normalize_outputs': False}}
|
@ex.named_config
def loss_perceptual_l2():
    # Same setup as loss_perceptual but with an L2 perceptual loss.
    cfg = {}
    cfg['training'] = {'loss_fn': 'perceptual_l2', 'targets': ['normal_encoding'], 'loss_kwargs': {'decoder_path': '/mnt/models/normal_decoder.dat', 'bake_decodings': True}, 'suppress_target_and_use_annotator': False, 'annotator_class': 'TaskonomyEncoder', 'annotator_weights_path': '/mnt/models/normal_encoder.dat', 'annotator_kwargs': {'train': False, 'eval_only': True, 'normalize_outputs': False}}
|
@ex.named_config
def loss_perceptual_cross_entropy():
    # Perceptual cross-entropy on class_object encodings.
    # NOTE(review): decoder/annotator paths use 'class_scene_*' while the target
    # is 'class_object_encoding' — confirm the checkpoint names are intentional.
    cfg = {}
    cfg['training'] = {'loss_fn': 'perceptual_cross_entropy', 'targets': ['class_object_encoding'], 'loss_kwargs': {'decoder_path': '/mnt/models/class_scene_decoder.dat', 'bake_decodings': True}, 'suppress_target_and_use_annotator': False, 'annotator_class': 'TaskonomyEncoder', 'annotator_weights_path': '/mnt/models/class_scene_encoder.dat', 'annotator_kwargs': {'train': False, 'eval_only': True, 'normalize_outputs': False}}
|
@ex.named_config
def transfer_reg():
    # Regularize via the transfer network with a small coefficient.
    cfg = {
        'training': {
            'regularizer_fn': 'transfer_regularizer',
            'regularizer_kwargs': {'coef': 0.001},
            'loss_list': ['l1', 'total', 'weight_tying'],
        }
    }
|
@ex.named_config
def perceptual_reg():
    # Perceptual regularizer using the frozen curvature decoder.
    cfg = {
        'training': {
            'regularizer_fn': 'perceptual_regularizer',
            'regularizer_kwargs': {'coef': 0.001,
                                   'decoder_path': '/mnt/models/curvature_decoder.dat'},
            'loss_list': ['l1', 'total', 'weight_tying'],
        }
    }
|
@ex.named_config
def perceptual_reg_no_transfer():
    # Perceptual regularizer as above, but bypassing the transfer network.
    cfg = {
        'training': {
            'regularizer_fn': 'perceptual_regularizer',
            'regularizer_kwargs': {'coef': 0.001,
                                   'decoder_path': '/mnt/models/curvature_decoder.dat',
                                   'use_transfer': False},
            'loss_list': ['l1', 'total', 'weight_tying'],
        }
    }
|
@ex.named_config
def taskonomy_hp():
    # Learning-rate / weight-decay hyperparameters for Taskonomy runs.
    uuid = 'no_uuid'
    cfg = {'learner': {'lr': 0.0001,
                       'optimizer_kwargs': {'weight_decay': 2e-06}}}
|
@ex.named_config
def scheduler_reduce_on_plateau():
    # ReduceLROnPlateau: cut LR by 10x after 5 epochs without improvement.
    cfg = {}
    cfg['learner'] = {
        'lr_scheduler_method': 'lr_scheduler.ReduceLROnPlateau',
        'lr_scheduler_method_kwargs': {'factor': 0.1, 'patience': 5},
    }
|
@ex.named_config
def scheduler_step_lr():
    # StepLR: decay LR by 10x every 30 epochs.
    cfg = {}
    cfg['learner'] = {
        'lr_scheduler_method': 'lr_scheduler.StepLR',
        'lr_scheduler_method_kwargs': {'lr_decay_epochs': 30, 'gamma': 0.1},
    }
|
@ex.named_config
def radam():
    # Swap the optimizer for RAdam.
    cfg = {'learner': {'optimizer_class': 'RAdam'}}
|
@ex.named_config
def reckless():
    # Start fresh: do not resume training and wipe any existing logs.
    cfg = {'training': {'resume_training': False},
           'saving': {'obliterate_logs': True}}
|
@ex.config
def cfg_base():
    # Base sacred config applied to every run: learner/optimizer defaults,
    # training-loop settings, and logging/saving locations.
    uuid = 'no_uuid'
    cfg = {}
    # Model + optimizer defaults (Adam, lr 1e-3, weight decay 1e-4).
    cfg['learner'] = {'model': 'atari_residual', 'model_kwargs': {}, 'eps': 1e-05, 'lr': 0.001, 'optimizer_class': 'optim.Adam', 'optimizer_kwargs': {'weight_decay': 0.0001}, 'lr_scheduler_method': None, 'lr_scheduler_method_kwargs': {}, 'max_grad_norm': 1.0, 'scheduler': 'plateau'}
    # NOTE(review): both 'epochs' and 'num_epochs' are set to 100 — confirm
    # which one the training loop actually reads.
    cfg['training'] = {'amp': False, 'post_aggregation_transform_fn': None, 'post_aggregation_transform_fn_kwargs': {}, 'cuda': True, 'loss_fn': 'L1', 'loss_kwargs': {}, 'loss_list': ['total'], 'regularizer_fn': None, 'regularizer_kwargs': {}, 'epochs': 100, 'num_epochs': 100, 'resume_from_checkpoint_path': None, 'resume_training': False, 'seed': 269, 'suppress_target_and_use_annotator': False, 'sources': ['rgb'], 'targets': ['normal_decoding'], 'train': True, 'pretrain': False, 'test': False, 'use_masks': True, 'dataloader_fn': None, 'dataloader_fn_kwargs': {}, 'sources_as_dict': False}
    # Logging/checkpoint paths rooted at LOG_DIR; visdom/tensorboard settings.
    cfg['saving'] = {'obliterate_logs': False, 'log_dir': LOG_DIR, 'log_interval': 0.25, 'ticks_per_epoch': 100, 'logging_type': 'tensorboard', 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'save_interval': 1, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'visdom_server': 'localhost', 'visdom_port': '8097', 'in_background': False}
|
@ex.named_config
def merge_alpha():
    # Merge base and side outputs with the Alpha operator.
    cfg = {}
    cfg['learner'] = {'model': 'LifelongSidetuneNetwork',
                      'model_kwargs': {'merge_method': 'merge_operators.Alpha'}}
|
@ex.named_config
def merge_film():
    # Merge base and side outputs with the FiLM operator.
    cfg = {}
    cfg['learner'] = {'model': 'LifelongSidetuneNetwork',
                      'model_kwargs': {'merge_method': 'merge_operators.FiLM'}}
|
@ex.named_config
def merge_product():
    # Merge base and side outputs with the Product operator.
    cfg = {}
    cfg['learner'] = {'model': 'LifelongSidetuneNetwork',
                      'model_kwargs': {'merge_method': 'merge_operators.Product'}}
|
@ex.named_config
def merge_resmlp2():
    # Merge base and side outputs with the ResMLP2 operator.
    cfg = {}
    cfg['learner'] = {'model': 'LifelongSidetuneNetwork',
                      'model_kwargs': {'merge_method': 'merge_operators.ResMLP2'}}
|
@ex.named_config
def merge_mlp():
    # Merge base and side outputs with the MLP operator.
    cfg = {}
    cfg['learner'] = {'model': 'LifelongSidetuneNetwork',
                      'model_kwargs': {'merge_method': 'merge_operators.MLP'}}
|
@ex.named_config
def merge_mlp2():
    # Merge base and side outputs with the MLP2 operator.
    cfg = {}
    cfg['learner'] = {'model': 'LifelongSidetuneNetwork',
                      'model_kwargs': {'merge_method': 'merge_operators.MLP2'}}
|
@ex.named_config
def merge_mlp_hidden_a():
    # MLPHidden merge; final activation disabled on both base and side.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'merge_method': 'merge_operators.MLPHidden',
            'base_kwargs': {'final_act': False},
            'side_kwargs': {'final_act': False},
        },
    }
|
@ex.named_config
def merge_mlp_hidden_b():
    # MLPHidden merge; final activation kept on base, disabled on side.
    cfg = {}
    cfg['learner'] = {
        'model': 'LifelongSidetuneNetwork',
        'model_kwargs': {
            'merge_method': 'merge_operators.MLPHidden',
            'base_kwargs': {'final_act': True},
            'side_kwargs': {'final_act': False},
        },
    }
|
@ex.named_config
def dense():
    # Enable dense connections on the lifelong sidetune model.
    cfg = {}
    cfg['learner'] = {'model': 'LifelongSidetuneNetwork',
                      'model_kwargs': {'dense': True}}
|
@ex.named_config
def merge_dense_mlp():
    # Dense connections combined with the MLP merge operator.
    cfg = {}
    cfg['learner'] = {'model': 'LifelongSidetuneNetwork',
                      'model_kwargs': {'dense': True,
                                       'merge_method': 'merge_operators.MLP'}}
|
@ex.named_config
def test():
    # Evaluation-only mode: skip training, run the test split.
    cfg = {}
    cfg['training'] = {'train': False, 'test': True}
|
@ex.named_config
def imagenet_data():
    # ImageNet (ILSVRC2012) classification data with softmax cross-entropy loss.
    cfg = {}
    cfg['training'] = {
        'split_to_use': 'splits.taskonomy_no_midlevel["debug"]',
        'dataloader_fn': 'imagenet_dataset.get_dataloaders',
        'dataloader_fn_kwargs': {
            'data_path': '/mnt/data/ILSVRC2012',
            'load_to_mem': False,
            'num_workers': 8,
            'pin_memory': False,
        },
        'loss_fn': 'softmax_cross_entropy',
        'loss_kwargs': {},
        'use_masks': False,
    }
|
@ex.named_config
def rotating_data():
    # Rotation-augmented inputs via a post-aggregation batch transform, fed to a
    # dummy 1000-way classification network.
    cfg = {}
    cfg['training'] = {
        'suppress_target_and_use_annotator': False,
        'sources': ['rgb', 'rotation'],
        'post_aggregation_transform_fn': 'imagenet_dataset.RotateBatch()',
        'post_aggregation_transform_fn_kwargs': {},
    }
    cfg['learner'] = {
        'model': 'DummyLifelongTaskonomyNetwork',
        'model_kwargs': {'out_channels': 1000,
                         'trainable': True,
                         'is_decoder_mlp': True},
    }
|
@ex.named_config
def model_fcn5():
    # FCN5Residual without the residual connection.
    cfg = {}
    cfg['learner'] = {'model': 'FCN5Residual',
                      'model_kwargs': {'num_groups': 2,
                                       'use_residual': False,
                                       'normalize_outputs': False}}
|
@ex.named_config
def model_fcn5_residual():
    # FCN5Residual with the residual connection enabled.
    cfg = {}
    cfg['learner'] = {'model': 'FCN5Residual',
                      'model_kwargs': {'num_groups': 2,
                                       'use_residual': True,
                                       'normalize_outputs': False}}
|
@ex.named_config
def model_fcn5_skip():
    # Plain FCN5 (skip-connection variant) without residuals.
    cfg = {}
    cfg['learner'] = {'model': 'FCN5',
                      'model_kwargs': {'num_groups': 2,
                                       'use_residual': False,
                                       'normalize_outputs': False}}
|
@ex.named_config
def model_fcn5_skip_residual():
    # Plain FCN5 (skip-connection variant) with residuals enabled.
    cfg = {}
    cfg['learner'] = {'model': 'FCN5',
                      'model_kwargs': {'num_groups': 2,
                                       'use_residual': True,
                                       'normalize_outputs': False}}
|
@ex.named_config
def model_fcn3():
    # Smaller FCN3 model.
    cfg = {}
    cfg['learner'] = {'model': 'FCN3',
                      'model_kwargs': {'num_groups': 2,
                                       'normalize_outputs': False}}
|
@ex.named_config
def model_taskonomy_net():
    # Trainable TaskonomyNetwork with a 3-channel head (same settings as
    # the model_taskonomy named config).
    cfg = {}
    cfg['learner'] = {'model': 'TaskonomyNetwork',
                      'model_kwargs': {'out_channels': 3, 'eval_only': False}}
|
@ex.named_config
def model_sidetune_encoding():
    # Sidetune on baked encodings: frozen TaskonomyEncoder base + trainable FCN5
    # side, decoded by a trainable TaskonomyDecoder.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'train': False, 'eval_only': True, 'normalize_outputs': True}, 'use_baked_encoding': True, 'side_class': 'FCN5', 'side_kwargs': {'normalize_outputs': True}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': False}}}}
    # Helper local; delete so it is not recorded as a config entry.
    del n_channels_out
|
@ex.named_config
def model_nosidetune_encoding():
    # Ablation of model_sidetune_encoding with the side network removed:
    # frozen encoder + trainable decoder only.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'train': False, 'eval_only': True, 'normalize_outputs': True}, 'use_baked_encoding': True, 'side_class': None, 'side_kwargs': {}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': False}}}}
    # Helper local; delete so it is not recorded as a config entry.
    del n_channels_out
|
@ex.named_config
def model_pix_only_side():
    # Pixel-input variant with no base encoder: trainable FCN5 side network
    # feeding a trainable TaskonomyDecoder.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': None, 'base_weights_path': None, 'base_kwargs': {}, 'use_baked_encoding': False, 'side_class': 'FCN5', 'side_kwargs': {'normalize_outputs': True}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': False}}}}
    # Helper local; delete so it is not recorded as a config entry.
    del n_channels_out
|
@ex.named_config
def model_pix_only_base():
    # Pixel-input variant with no side network: fully trainable TaskonomyEncoder
    # base feeding a trainable TaskonomyDecoder.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'train': True, 'eval_only': False, 'normalize_outputs': True}, 'use_baked_encoding': False, 'side_class': None, 'side_kwargs': {}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': False}}}}
    # Helper local; delete so it is not recorded as a config entry.
    del n_channels_out
|
@ex.named_config
def model_transfer_sidetune_encoding():
    # Like model_sidetune_encoding but the decoder is frozen (eval_only=True)
    # and a trainable TransferConv3 transfer network is inserted before it.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'train': False, 'eval_only': True, 'normalize_outputs': True}, 'use_baked_encoding': True, 'side_class': 'FCN5', 'side_kwargs': {'normalize_outputs': True}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': True}, 'transfer_class': 'TransferConv3', 'transfer_weights_path': None, 'transfer_kwargs': {'n_channels': 8}}}}
    # Helper local; delete so it is not recorded as a config entry.
    del n_channels_out
|
@ex.named_config
def model_transfer_nosidetune_encoding():
    # Transfer-network variant without a side network: frozen encoder -> trainable
    # TransferConv3 -> frozen decoder.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'train': False, 'eval_only': True, 'normalize_outputs': True}, 'use_baked_encoding': True, 'side_class': None, 'side_kwargs': {}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': True}, 'transfer_class': 'TransferConv3', 'transfer_weights_path': None, 'transfer_kwargs': {'n_channels': 8}}}}
    # Helper local; delete so it is not recorded as a config entry.
    del n_channels_out
|
@ex.named_config
def model_transfer_pix_only_base():
    # Transfer-network variant with a trainable encoder base, no side network,
    # and a frozen decoder behind a trainable TransferConv3.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'train': True, 'eval_only': False, 'normalize_outputs': True}, 'use_baked_encoding': False, 'side_class': None, 'side_kwargs': {}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': True}, 'transfer_class': 'TransferConv3', 'transfer_weights_path': None, 'transfer_kwargs': {'n_channels': 8}}}}
    # Helper local; delete so it is not recorded as a config entry.
    del n_channels_out
|
@ex.named_config
def model_transfer_pix_only_side():
    # Transfer-network variant with no base (base_kwargs is None here, unlike the
    # {} used elsewhere): FCN5 side -> trainable TransferConv3 -> frozen decoder.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': None, 'base_weights_path': None, 'base_kwargs': None, 'use_baked_encoding': False, 'side_class': 'FCN5', 'side_kwargs': {'normalize_outputs': True}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': True}, 'transfer_class': 'TransferConv3', 'transfer_weights_path': None, 'transfer_kwargs': {'n_channels': 8}}}}
    # Helper local; delete so it is not recorded as a config entry.
    del n_channels_out
|
@ex.named_config
def model_taskonomy_decoder():
    # Standalone trainable TaskonomyDecoder with a 3-channel output.
    cfg = {}
    cfg['learner'] = {'model': 'TaskonomyDecoder',
                      'model_kwargs': {'out_channels': 3, 'eval_only': False}}
|
@ex.named_config
def model_blind():
    # Blind baseline: always output the precomputed median image.
    cfg = {}
    cfg['learner'] = {'model': 'ConstantModel',
                      'model_kwargs': {'data': '/mnt/data/normal/median_tiny.png'}}
    cfg['training'] = {'sources': ['rgb']}
|
@ex.named_config
def model_unet():
    # Plain UNet with 6 downsampling stages.
    # Fix: key was misspelled 'dcwnsample'; the sibling UNet configs
    # (model_unet_heteroscedastic, model_unet_hetero_pooled) use 'downsample',
    # so the typo'd key would have been silently ignored or rejected.
    cfg = {'learner': {'model': 'UNet', 'model_kwargs': {'downsample': 6}}}
|
@ex.named_config
def model_unet_heteroscedastic():
    # Heteroscedastic UNet with 6 downsampling stages.
    cfg = {}
    cfg['learner'] = {'model': 'UNetHeteroscedastic',
                      'model_kwargs': {'downsample': 6}}
|
@ex.named_config
def model_unet_hetero_pooled():
    # Pooled heteroscedastic UNet with 6 downsampling stages.
    cfg = {}
    cfg['learner'] = {'model': 'UNetHeteroscedasticPooled',
                      'model_kwargs': {'downsample': 6}}
|
@ex.named_config
def gsn_base_resnet50():
    # GenericSidetuneNetwork base = frozen TaskonomyEncoder with baked encodings.
    cfg = {}
    cfg['learner'] = {
        'model': 'GenericSidetuneNetwork',
        'model_kwargs': {
            'base_class': 'TaskonomyEncoder',
            'base_weights_path': None,
            'base_kwargs': {'train': False, 'eval_only': True, 'normalize_outputs': False},
            'use_baked_encoding': True,
        },
    }
|
@ex.named_config
def gsn_base_fcn5s():
    # GenericSidetuneNetwork base = frozen FCN5 with baked encodings.
    cfg = {}
    cfg['learner'] = {
        'model': 'GenericSidetuneNetwork',
        'model_kwargs': {
            'base_class': 'FCN5',
            'base_weights_path': None,
            'base_kwargs': {'img_channels': 3, 'train': False, 'eval_only': True, 'normalize_outputs': False},
            'use_baked_encoding': True,
        },
    }
|
@ex.named_config
def gsn_base_learned():
    # Make the base trainable (and stop using baked encodings).
    cfg = {}
    cfg['learner'] = {
        'model': 'GenericSidetuneNetwork',
        'model_kwargs': {
            'base_kwargs': {'train': True, 'eval_only': False},
            'use_baked_encoding': False,
        },
    }
|
@ex.named_config
def gsn_side_resnet50():
    # GenericSidetuneNetwork side = trainable TaskonomyEncoder.
    cfg = {}
    cfg['learner'] = {
        'model': 'GenericSidetuneNetwork',
        'model_kwargs': {
            'side_class': 'TaskonomyEncoder',
            'side_weights_path': None,
            'side_kwargs': {'train': True, 'eval_only': False, 'normalize_outputs': False},
        },
    }
|
@ex.named_config
def gsn_side_fcn5s():
    # GenericSidetuneNetwork side = trainable FCN5.
    cfg = {}
    cfg['learner'] = {
        'model': 'GenericSidetuneNetwork',
        'model_kwargs': {
            'side_class': 'FCN5',
            'side_weights_path': None,
            'side_kwargs': {'img_channels': 3, 'train': True, 'eval_only': False, 'normalize_outputs': False},
        },
    }
|
@ex.named_config
def gsn_side_frozen():
    # Freeze the side network of the GenericSidetuneNetwork.
    cfg = {}
    cfg['learner'] = {
        'model': 'GenericSidetuneNetwork',
        'model_kwargs': {'side_kwargs': {'train': False, 'eval_only': True}},
    }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.