# Deepfake-Detector/projects/ctrgcn/configs/ctrgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py
| _base_ = 'mmaction::_base_/default_runtime.py' | |
| custom_imports = dict(imports='models') | |
| model = dict( | |
| type='RecognizerGCN', | |
| backbone=dict( | |
| type='CTRGCN', graph_cfg=dict(layout='nturgb+d', mode='spatial')), | |
| cls_head=dict(type='GCNHead', num_classes=60, in_channels=256)) | |
# ---------------------------------------------------------------------------
# Data: NTU60 3D skeletons, cross-subject split, joint ('j') feature stream.
# ---------------------------------------------------------------------------
dataset_type = 'PoseDataset'
ann_file = 'data/skeleton/ntu60_3d.pkl'

# Training: uniformly sample 100 frames per clip, up to 2 persons per sample.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Validation: deterministic sampling (test_mode), a single clip per sample.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Testing: 10-clip evaluation for score averaging.
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=10,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# RepeatDataset x5: with max_epochs=16 this yields ~80 effective epochs,
# matching the '80e' in this config's filename.
train_dataloader = dict(
    batch_size=16,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_val',
        test_mode=True))
# batch_size=1 at test time: each sample already expands to 10 clips.
test_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_val',
        test_mode=True))

# Top-1/Top-5 accuracy metric for both validation and testing.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator
# ---------------------------------------------------------------------------
# Schedule: 16 real epochs (x5 dataset repeats = ~80 effective), validate
# every epoch from the first.
# ---------------------------------------------------------------------------
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay of the LR to 0 over the full run; T_max matches max_epochs.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

# SGD with Nesterov momentum, the standard recipe for GCN skeleton models.
optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

# Save a checkpoint every epoch; log every 100 iterations.
default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
#   or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)