cheyul commited on
Commit
c83c4e7
·
0 Parent(s):

First commit

Browse files
Files changed (40) hide show
  1. __init__.py +13 -0
  2. disposition_classification.py +97 -0
  3. experiment_0/DUET_experiment_0_config.py +102 -0
  4. experiment_1/DUET_experiment_1_config.py +102 -0
  5. experiment_10/DUET_experiment_10_config.py +102 -0
  6. experiment_11/DUET_experiment_11_config.py +102 -0
  7. experiment_12/DUET_experiment_12_config.py +102 -0
  8. experiment_13/DUET_experiment_13_config.py +102 -0
  9. experiment_14/DUET_experiment_14_config.py +102 -0
  10. experiment_15/DUET_experiment_15_config.py +102 -0
  11. experiment_16/DUET_experiment_16_config.py +102 -0
  12. experiment_17/DUET_experiment_17_config.py +102 -0
  13. experiment_18/DUET_experiment_18_config.py +102 -0
  14. experiment_19/DUET_experiment_19_config.py +102 -0
  15. experiment_2/DUET_experiment_2_config.py +102 -0
  16. experiment_20/DUET_experiment_20_config.py +102 -0
  17. experiment_21/DUET_experiment_21_config.py +102 -0
  18. experiment_22/DUET_experiment_22_config.py +102 -0
  19. experiment_23/DUET_experiment_23_config.py +102 -0
  20. experiment_24/DUET_experiment_24_config.py +102 -0
  21. experiment_25/DUET_experiment_25_config.py +102 -0
  22. experiment_26/DUET_experiment_26_config.py +102 -0
  23. experiment_27/DUET_experiment_27_config.py +102 -0
  24. experiment_28/DUET_experiment_28_config.py +102 -0
  25. experiment_29/DUET_experiment_29_config.py +102 -0
  26. experiment_3/DUET_experiment_3_config.py +102 -0
  27. experiment_4/DUET_experiment_4_config.py +102 -0
  28. experiment_5/DUET_experiment_5_config.py +102 -0
  29. experiment_6/DUET_experiment_6_config.py +102 -0
  30. experiment_7/DUET_experiment_7_config.py +102 -0
  31. experiment_8/DUET_experiment_8_config.py +102 -0
  32. experiment_9/DUET_experiment_9_config.py +102 -0
  33. helper/.vscode/settings.json +3 -0
  34. helper/CNN.py +69 -0
  35. helper/__init__.py +12 -0
  36. helper/__pycache__/CNN.cpython-311.pyc +0 -0
  37. helper/__pycache__/__init__.cpython-311.pyc +0 -0
  38. helper/__pycache__/helper.cpython-311.pyc +0 -0
  39. helper/helper.py +156 -0
  40. requirements.txt +78 -0
__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import subprocess
3
+ import glob
4
+ import torch
5
+ import os
6
+ import sys
7
+ import numpy as np
8
+ import pickle
9
+
10
+ from torch import nn
11
+ from torch.utils.data import TensorDataset, DataLoader
12
+ from helper import CNN
13
+ from helper import helper
disposition_classification.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import subprocess
3
+ import glob
4
+ import torch
5
+ import os
6
+ import sys
7
+ import numpy as np
8
+ import pickle
9
+
10
+ from torch import nn
11
+ from torch.utils.data import TensorDataset, DataLoader
12
+ from helper import CNN
13
+ from helper import helper
14
+
15
def main():
    """Run the full DUET disposition-classification experiment sweep.

    For each of ``num_subsets`` experiments: sample a random subset of
    activities, extract their 3D-skeleton keypoints, train an ST-GCN via
    mmaction2 in a subprocess, extract hidden features from the best
    checkpoint, train a small CNN on those features to classify the five
    disposition categories, and record the final test accuracy.  All
    results are pickled to ``experiment_results.pkl``.
    """
    raw_data_path = r"D:\3D joints"
    # Activity IDs grouped by disposition category.
    categories = {
        "emblems": [0, 1, 2],
        "illustrators": [3, 4],
        "regulators": [5, 6, 7],
        "adaptors": [8],
        "affect display": [9, 10, 11]
    }

    num_subsets = 30
    num_actions_low = 5
    num_actions_high = 12

    result_dict = {}

    # Pre-draw how many activities each experiment samples (inclusive bounds).
    action_numbers = [random.randint(num_actions_low, num_actions_high)
                      for _ in range(num_subsets)]

    # Subject IDs held out as the test split.
    test_list = ['CC01', 'CM10']

    for idx in range(num_subsets):
        # os.path.join avoids the invalid "\e"/"\w" escape sequences the old
        # ".\experiment_..." literals produced (SyntaxWarning on 3.12+).
        experiment_dir = f"experiment_{idx}"

        actions = helper.select_samples(categories, action_numbers[idx])
        helper.extract_keypoints(actions, raw_data_path, test_list, idx)
        print("------------------------------")
        print(f"Experiment {idx}:")
        print(f"Activities: {actions}")
        print(f"Finished extracting keypoint data for experiment_{idx}...")

        config_file_path = os.path.join(experiment_dir, f"DUET_experiment_{idx}_config.py")
        # check=True surfaces a failed training run immediately instead of
        # silently continuing with a stale or missing checkpoint.
        subprocess.run(
            [sys.executable, os.path.join("mmaction2", "tools", "train.py"),
             config_file_path, '--seed', '42'],
            check=True)
        print(f"Finished training ST-GCN for experiment_{idx}...")

        # mmaction2 saves the best checkpoint under work_dirs/<config name>/.
        checkpoint_candidates = glob.glob(
            os.path.join("work_dirs", f"DUET_experiment_{idx}_config",
                         "best_acc_top1_epoch_*.pth"))
        if not checkpoint_candidates:
            # Fail loudly rather than hitting an opaque IndexError below.
            raise FileNotFoundError(
                f"No best-accuracy checkpoint found for experiment_{idx}")
        device = "cuda"
        pickle_path = os.path.join(experiment_dir, f"experiment_{idx}.pkl")
        helper.extract_features(config_file_path, checkpoint_candidates[0],
                                pickle_path, device, idx, test_list)
        print(f"Finished extracting hidden features for experiment_{idx}...")

        # Hidden features and raw activity labels dumped by extract_features.
        features_train = np.load(os.path.join(experiment_dir, "train.npy"))
        activity_train = np.load(os.path.join(experiment_dir, "train_label.npy"))
        features_test = np.load(os.path.join(experiment_dir, "gtest.npy"))
        activity_test = np.load(os.path.join(experiment_dir, "g_label.npy"))
        # Insert a channel axis: (N, F) -> (N, 1, F) for the downstream CNN.
        features_train = np.reshape(
            features_train, (features_train.shape[0], 1, features_train.shape[1]))
        features_test = np.reshape(
            features_test, (features_test.shape[0], 1, features_test.shape[1]))

        # Map each of the 12 raw activity IDs onto its 5-way disposition label
        # (matches the `categories` grouping above).
        taxonomy = {0: 4, 1: 4, 2: 4, 3: 3, 4: 3, 5: 0, 6: 0, 7: 0,
                    8: 2, 9: 1, 10: 1, 11: 1}
        labels_train = np.copy(activity_train)
        labels_test = np.copy(activity_test)
        for key, value in taxonomy.items():  # iter() wrapper was redundant
            labels_train[activity_train == key] = value
            labels_test[activity_test == key] = value

        train_set = TensorDataset(torch.tensor(features_train), torch.tensor(labels_train))
        test_set = TensorDataset(torch.tensor(features_test), torch.tensor(labels_test))
        train_dataloader = DataLoader(train_set, batch_size=32, shuffle=False)
        test_dataloader = DataLoader(test_set, batch_size=32, shuffle=False)

        # Small CNN head over the extracted features, 5 disposition classes.
        model = CNN.NeuralNetwork(features_train.shape[2], num_classes=5)
        model.to(device)
        loss_fn = nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

        epochs = 5
        test_accuracy = None
        for t in range(epochs):
            print(f"Epoch {t+1}\n-------------------------------")
            CNN.train(train_dataloader, model, loss_fn, optimizer)
            test_accuracy = CNN.test(test_dataloader, model, loss_fn)

        # Only the final epoch's accuracy is recorded for this experiment.
        result_dict[idx] = {"experiment_num": idx,
                            "num_activities": len(actions),
                            "actions": actions,
                            "accuracy": test_accuracy}
        print(f"Finished training CNN for experiment_{idx}...")

    with open("experiment_results.pkl", "wb") as file:
        pickle.dump(result_dict, file)


if __name__ == "__main__":
    main()
experiment_0/DUET_experiment_0_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 (mmengine-style) config: trains an ST-GCN skeleton-based action
# recognizer for experiment 0 of the DUET disposition-classification sweep.
# Module-level variable names (model, *_dataloader, train_cfg, ...) are read
# by mmengine's Config loader and are part of the framework contract.

# Inherit runtime defaults (logging, checkpoint hooks) from mmaction2.
# NOTE(review): absolute Windows path — assumes a fixed checkout location.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# ST-GCN backbone on the NTU RGB+D skeleton layout with a GCN classification
# head over the 12 raw activity classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# Pickled skeleton annotations produced for this experiment by the driver
# script (disposition_classification.py).
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_0\experiment_0.pkl'
# Training pipeline: normalize 3D poses, build joint ('j') features,
# uniformly sample 100 frames per clip, then pack for the GCN recognizer.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Validation pipeline: same as training but with deterministic sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Test pipeline: identical to the validation pipeline.
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Each epoch repeats the training split 5x (RepeatDataset), presumably to
# compensate for the small per-experiment sample count — TODO confirm.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy is the only tracked metric; the same evaluator is reused for test.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating after every epoch from the first.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine LR decay to 0 across the full 16-epoch schedule.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

# Checkpoint every epoch; log every 100 iterations.
default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_1/DUET_experiment_1_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 (mmengine-style) config: trains an ST-GCN skeleton-based action
# recognizer for experiment 1 of the DUET disposition-classification sweep.
# Module-level variable names (model, *_dataloader, train_cfg, ...) are read
# by mmengine's Config loader and are part of the framework contract.

# Inherit runtime defaults (logging, checkpoint hooks) from mmaction2.
# NOTE(review): absolute Windows path — assumes a fixed checkout location.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# ST-GCN backbone on the NTU RGB+D skeleton layout with a GCN classification
# head over the 12 raw activity classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# Pickled skeleton annotations produced for this experiment by the driver
# script (disposition_classification.py).
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_1\experiment_1.pkl'
# Training pipeline: normalize 3D poses, build joint ('j') features,
# uniformly sample 100 frames per clip, then pack for the GCN recognizer.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Validation pipeline: same as training but with deterministic sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Test pipeline: identical to the validation pipeline.
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Each epoch repeats the training split 5x (RepeatDataset), presumably to
# compensate for the small per-experiment sample count — TODO confirm.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy is the only tracked metric; the same evaluator is reused for test.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating after every epoch from the first.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine LR decay to 0 across the full 16-epoch schedule.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

# Checkpoint every epoch; log every 100 iterations.
default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_10/DUET_experiment_10_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 (mmengine-style) config: trains an ST-GCN skeleton-based action
# recognizer for experiment 10 of the DUET disposition-classification sweep.
# Module-level variable names (model, *_dataloader, train_cfg, ...) are read
# by mmengine's Config loader and are part of the framework contract.

# Inherit runtime defaults (logging, checkpoint hooks) from mmaction2.
# NOTE(review): absolute Windows path — assumes a fixed checkout location.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# ST-GCN backbone on the NTU RGB+D skeleton layout with a GCN classification
# head over the 12 raw activity classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# Pickled skeleton annotations produced for this experiment by the driver
# script (disposition_classification.py).
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_10\experiment_10.pkl'
# Training pipeline: normalize 3D poses, build joint ('j') features,
# uniformly sample 100 frames per clip, then pack for the GCN recognizer.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Validation pipeline: same as training but with deterministic sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Test pipeline: identical to the validation pipeline.
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Each epoch repeats the training split 5x (RepeatDataset), presumably to
# compensate for the small per-experiment sample count — TODO confirm.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy is the only tracked metric; the same evaluator is reused for test.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating after every epoch from the first.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine LR decay to 0 across the full 16-epoch schedule.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

# Checkpoint every epoch; log every 100 iterations.
default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_11/DUET_experiment_11_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 (mmengine-style) config: trains an ST-GCN skeleton-based action
# recognizer for experiment 11 of the DUET disposition-classification sweep.
# Module-level variable names (model, *_dataloader, train_cfg, ...) are read
# by mmengine's Config loader and are part of the framework contract.

# Inherit runtime defaults (logging, checkpoint hooks) from mmaction2.
# NOTE(review): absolute Windows path — assumes a fixed checkout location.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# ST-GCN backbone on the NTU RGB+D skeleton layout with a GCN classification
# head over the 12 raw activity classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# Pickled skeleton annotations produced for this experiment by the driver
# script (disposition_classification.py).
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_11\experiment_11.pkl'
# Training pipeline: normalize 3D poses, build joint ('j') features,
# uniformly sample 100 frames per clip, then pack for the GCN recognizer.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Validation pipeline: same as training but with deterministic sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Test pipeline: identical to the validation pipeline.
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Each epoch repeats the training split 5x (RepeatDataset), presumably to
# compensate for the small per-experiment sample count — TODO confirm.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy is the only tracked metric; the same evaluator is reused for test.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating after every epoch from the first.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine LR decay to 0 across the full 16-epoch schedule.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

# Checkpoint every epoch; log every 100 iterations.
default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_12/DUET_experiment_12_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 (mmengine-style) config: trains an ST-GCN skeleton-based action
# recognizer for experiment 12 of the DUET disposition-classification sweep.
# Module-level variable names (model, *_dataloader, train_cfg, ...) are read
# by mmengine's Config loader and are part of the framework contract.

# Inherit runtime defaults (logging, checkpoint hooks) from mmaction2.
# NOTE(review): absolute Windows path — assumes a fixed checkout location.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# ST-GCN backbone on the NTU RGB+D skeleton layout with a GCN classification
# head over the 12 raw activity classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# Pickled skeleton annotations produced for this experiment by the driver
# script (disposition_classification.py).
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_12\experiment_12.pkl'
# Training pipeline: normalize 3D poses, build joint ('j') features,
# uniformly sample 100 frames per clip, then pack for the GCN recognizer.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Validation pipeline: same as training but with deterministic sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Test pipeline: identical to the validation pipeline.
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Each epoch repeats the training split 5x (RepeatDataset), presumably to
# compensate for the small per-experiment sample count — TODO confirm.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy is the only tracked metric; the same evaluator is reused for test.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating after every epoch from the first.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine LR decay to 0 across the full 16-epoch schedule.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

# Checkpoint every epoch; log every 100 iterations.
default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_13/DUET_experiment_13_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 (mmengine-style) config: trains an ST-GCN skeleton-based action
# recognizer for experiment 13 of the DUET disposition-classification sweep.
# Module-level variable names (model, *_dataloader, train_cfg, ...) are read
# by mmengine's Config loader and are part of the framework contract.

# Inherit runtime defaults (logging, checkpoint hooks) from mmaction2.
# NOTE(review): absolute Windows path — assumes a fixed checkout location.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# ST-GCN backbone on the NTU RGB+D skeleton layout with a GCN classification
# head over the 12 raw activity classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# Pickled skeleton annotations produced for this experiment by the driver
# script (disposition_classification.py).
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_13\experiment_13.pkl'
# Training pipeline: normalize 3D poses, build joint ('j') features,
# uniformly sample 100 frames per clip, then pack for the GCN recognizer.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Validation pipeline: same as training but with deterministic sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Test pipeline: identical to the validation pipeline.
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Each epoch repeats the training split 5x (RepeatDataset), presumably to
# compensate for the small per-experiment sample count — TODO confirm.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy is the only tracked metric; the same evaluator is reused for test.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating after every epoch from the first.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine LR decay to 0 across the full 16-epoch schedule.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

# Checkpoint every epoch; log every 100 iterations.
default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_14/DUET_experiment_14_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 (mmengine-style) config: trains an ST-GCN skeleton-based action
# recognizer for experiment 14 of the DUET disposition-classification sweep.
# Module-level variable names (model, *_dataloader, train_cfg, ...) are read
# by mmengine's Config loader and are part of the framework contract.

# Inherit runtime defaults (logging, checkpoint hooks) from mmaction2.
# NOTE(review): absolute Windows path — assumes a fixed checkout location.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# ST-GCN backbone on the NTU RGB+D skeleton layout with a GCN classification
# head over the 12 raw activity classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# Pickled skeleton annotations produced for this experiment by the driver
# script (disposition_classification.py).
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_14\experiment_14.pkl'
# Training pipeline: normalize 3D poses, build joint ('j') features,
# uniformly sample 100 frames per clip, then pack for the GCN recognizer.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Validation pipeline: same as training but with deterministic sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Test pipeline: identical to the validation pipeline.
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Each epoch repeats the training split 5x (RepeatDataset), presumably to
# compensate for the small per-experiment sample count — TODO confirm.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy is the only tracked metric; the same evaluator is reused for test.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating after every epoch from the first.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine LR decay to 0 across the full 16-epoch schedule.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

# Checkpoint every epoch; log every 100 iterations.
default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_15/DUET_experiment_15_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config for DUET experiment 15: skeleton-based action
# recognition with an ST-GCN backbone on 3D pose data (NTU RGB+D joint layout).
# NOTE(review): absolute Windows paths tie this config to one machine —
# consider making them relative or configurable.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer: ST-GCN backbone with spatial graph partitioning,
# 12-class classification head on 256-channel features.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_15\experiment_15.pkl'

# Pipelines: normalize 3D skeletons, extract joint ('j') features on the
# NTU layout, sample 100-frame clips, pack up to 2 persons per sample.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val and test pipelines are identical: deterministic single-clip sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training data is repeated 5x per epoch; evaluation uses the xsub_test split.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy metric for both validation and testing.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 training epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay of the learning rate to 0 across the full 16-epoch run.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

# SGD with Nesterov momentum.
optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
#   or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_16/DUET_experiment_16_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config for DUET experiment 16: skeleton-based action
# recognition with an ST-GCN backbone on 3D pose data (NTU RGB+D joint layout).
# NOTE(review): absolute Windows paths tie this config to one machine —
# consider making them relative or configurable.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer: ST-GCN backbone with spatial graph partitioning,
# 12-class classification head on 256-channel features.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_16\experiment_16.pkl'

# Pipelines: normalize 3D skeletons, extract joint ('j') features on the
# NTU layout, sample 100-frame clips, pack up to 2 persons per sample.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val and test pipelines are identical: deterministic single-clip sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training data is repeated 5x per epoch; evaluation uses the xsub_test split.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy metric for both validation and testing.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 training epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay of the learning rate to 0 across the full 16-epoch run.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

# SGD with Nesterov momentum.
optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
#   or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_17/DUET_experiment_17_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config for DUET experiment 17: skeleton-based action
# recognition with an ST-GCN backbone on 3D pose data (NTU RGB+D joint layout).
# NOTE(review): absolute Windows paths tie this config to one machine —
# consider making them relative or configurable.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer: ST-GCN backbone with spatial graph partitioning,
# 12-class classification head on 256-channel features.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_17\experiment_17.pkl'

# Pipelines: normalize 3D skeletons, extract joint ('j') features on the
# NTU layout, sample 100-frame clips, pack up to 2 persons per sample.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val and test pipelines are identical: deterministic single-clip sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training data is repeated 5x per epoch; evaluation uses the xsub_test split.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy metric for both validation and testing.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 training epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay of the learning rate to 0 across the full 16-epoch run.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

# SGD with Nesterov momentum.
optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
#   or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_18/DUET_experiment_18_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config for DUET experiment 18: skeleton-based action
# recognition with an ST-GCN backbone on 3D pose data (NTU RGB+D joint layout).
# NOTE(review): absolute Windows paths tie this config to one machine —
# consider making them relative or configurable.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer: ST-GCN backbone with spatial graph partitioning,
# 12-class classification head on 256-channel features.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_18\experiment_18.pkl'

# Pipelines: normalize 3D skeletons, extract joint ('j') features on the
# NTU layout, sample 100-frame clips, pack up to 2 persons per sample.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val and test pipelines are identical: deterministic single-clip sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training data is repeated 5x per epoch; evaluation uses the xsub_test split.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy metric for both validation and testing.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 training epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay of the learning rate to 0 across the full 16-epoch run.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

# SGD with Nesterov momentum.
optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
#   or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_19/DUET_experiment_19_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config for DUET experiment 19: skeleton-based action
# recognition with an ST-GCN backbone on 3D pose data (NTU RGB+D joint layout).
# NOTE(review): absolute Windows paths tie this config to one machine —
# consider making them relative or configurable.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer: ST-GCN backbone with spatial graph partitioning,
# 12-class classification head on 256-channel features.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_19\experiment_19.pkl'

# Pipelines: normalize 3D skeletons, extract joint ('j') features on the
# NTU layout, sample 100-frame clips, pack up to 2 persons per sample.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val and test pipelines are identical: deterministic single-clip sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training data is repeated 5x per epoch; evaluation uses the xsub_test split.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy metric for both validation and testing.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 training epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay of the learning rate to 0 across the full 16-epoch run.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

# SGD with Nesterov momentum.
optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
#   or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_2/DUET_experiment_2_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config for DUET experiment 2: skeleton-based action
# recognition with an ST-GCN backbone on 3D pose data (NTU RGB+D joint layout).
# NOTE(review): absolute Windows paths tie this config to one machine —
# consider making them relative or configurable.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer: ST-GCN backbone with spatial graph partitioning,
# 12-class classification head on 256-channel features.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_2\experiment_2.pkl'

# Pipelines: normalize 3D skeletons, extract joint ('j') features on the
# NTU layout, sample 100-frame clips, pack up to 2 persons per sample.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val and test pipelines are identical: deterministic single-clip sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training data is repeated 5x per epoch; evaluation uses the xsub_test split.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy metric for both validation and testing.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 training epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay of the learning rate to 0 across the full 16-epoch run.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

# SGD with Nesterov momentum.
optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
#   or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_20/DUET_experiment_20_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config for DUET experiment 20: skeleton-based action
# recognition with an ST-GCN backbone on 3D pose data (NTU RGB+D joint layout).
# NOTE(review): absolute Windows paths tie this config to one machine —
# consider making them relative or configurable.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer: ST-GCN backbone with spatial graph partitioning,
# 12-class classification head on 256-channel features.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_20\experiment_20.pkl'

# Pipelines: normalize 3D skeletons, extract joint ('j') features on the
# NTU layout, sample 100-frame clips, pack up to 2 persons per sample.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val and test pipelines are identical: deterministic single-clip sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training data is repeated 5x per epoch; evaluation uses the xsub_test split.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy metric for both validation and testing.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 training epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay of the learning rate to 0 across the full 16-epoch run.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

# SGD with Nesterov momentum.
optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
#   or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_21/DUET_experiment_21_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config for DUET experiment 21: skeleton-based action
# recognition with an ST-GCN backbone on 3D pose data (NTU RGB+D joint layout).
# NOTE(review): absolute Windows paths tie this config to one machine —
# consider making them relative or configurable.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer: ST-GCN backbone with spatial graph partitioning,
# 12-class classification head on 256-channel features.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_21\experiment_21.pkl'

# Pipelines: normalize 3D skeletons, extract joint ('j') features on the
# NTU layout, sample 100-frame clips, pack up to 2 persons per sample.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val and test pipelines are identical: deterministic single-clip sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training data is repeated 5x per epoch; evaluation uses the xsub_test split.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy metric for both validation and testing.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 training epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay of the learning rate to 0 across the full 16-epoch run.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

# SGD with Nesterov momentum.
optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
#   or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_22/DUET_experiment_22_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config for DUET experiment 22: skeleton-based action
# recognition with an ST-GCN backbone on 3D pose data (NTU RGB+D joint layout).
# NOTE(review): absolute Windows paths tie this config to one machine —
# consider making them relative or configurable.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer: ST-GCN backbone with spatial graph partitioning,
# 12-class classification head on 256-channel features.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_22\experiment_22.pkl'

# Pipelines: normalize 3D skeletons, extract joint ('j') features on the
# NTU layout, sample 100-frame clips, pack up to 2 persons per sample.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val and test pipelines are identical: deterministic single-clip sampling.
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training data is repeated 5x per epoch; evaluation uses the xsub_test split.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Accuracy metric for both validation and testing.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 training epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay of the learning rate to 0 across the full 16-epoch run.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

# SGD with Nesterov momentum.
optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
#   or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_23/DUET_experiment_23_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config: ST-GCN skeleton-based disposition classification,
# DUET experiment 23. Reconstructed as valid Python from diff-mangled text
# (line numbers and '+' prefixes removed); all values preserved.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer with an ST-GCN backbone on the NTU RGB+D joint layout,
# classifying into 12 disposition classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# NOTE(review): absolute Windows path — assumes this exact checkout location.
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_23\experiment_23.pkl'

# Training pipeline: joint ('j') features, 100-frame uniformly sampled clips.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val/test pipelines sample deterministically (test_mode=True, single clip).
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training split is repeated 5x per epoch via RepeatDataset.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating every epoch; cosine LR anneals over the same horizon.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_24/DUET_experiment_24_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config: ST-GCN skeleton-based disposition classification,
# DUET experiment 24. Reconstructed as valid Python from diff-mangled text
# (line numbers and '+' prefixes removed); all values preserved.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer with an ST-GCN backbone on the NTU RGB+D joint layout,
# classifying into 12 disposition classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# NOTE(review): absolute Windows path — assumes this exact checkout location.
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_24\experiment_24.pkl'

# Training pipeline: joint ('j') features, 100-frame uniformly sampled clips.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val/test pipelines sample deterministically (test_mode=True, single clip).
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training split is repeated 5x per epoch via RepeatDataset.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating every epoch; cosine LR anneals over the same horizon.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_25/DUET_experiment_25_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config: ST-GCN skeleton-based disposition classification,
# DUET experiment 25. Reconstructed as valid Python from diff-mangled text
# (line numbers and '+' prefixes removed); all values preserved.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer with an ST-GCN backbone on the NTU RGB+D joint layout,
# classifying into 12 disposition classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# NOTE(review): absolute Windows path — assumes this exact checkout location.
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_25\experiment_25.pkl'

# Training pipeline: joint ('j') features, 100-frame uniformly sampled clips.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val/test pipelines sample deterministically (test_mode=True, single clip).
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training split is repeated 5x per epoch via RepeatDataset.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating every epoch; cosine LR anneals over the same horizon.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_26/DUET_experiment_26_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config: ST-GCN skeleton-based disposition classification,
# DUET experiment 26. Reconstructed as valid Python from diff-mangled text
# (line numbers and '+' prefixes removed); all values preserved.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer with an ST-GCN backbone on the NTU RGB+D joint layout,
# classifying into 12 disposition classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# NOTE(review): absolute Windows path — assumes this exact checkout location.
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_26\experiment_26.pkl'

# Training pipeline: joint ('j') features, 100-frame uniformly sampled clips.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val/test pipelines sample deterministically (test_mode=True, single clip).
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training split is repeated 5x per epoch via RepeatDataset.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating every epoch; cosine LR anneals over the same horizon.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_27/DUET_experiment_27_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config: ST-GCN skeleton-based disposition classification,
# DUET experiment 27. Reconstructed as valid Python from diff-mangled text
# (line numbers and '+' prefixes removed); all values preserved.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer with an ST-GCN backbone on the NTU RGB+D joint layout,
# classifying into 12 disposition classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# NOTE(review): absolute Windows path — assumes this exact checkout location.
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_27\experiment_27.pkl'

# Training pipeline: joint ('j') features, 100-frame uniformly sampled clips.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val/test pipelines sample deterministically (test_mode=True, single clip).
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training split is repeated 5x per epoch via RepeatDataset.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating every epoch; cosine LR anneals over the same horizon.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_28/DUET_experiment_28_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config: ST-GCN skeleton-based disposition classification,
# DUET experiment 28. Reconstructed as valid Python from diff-mangled text
# (line numbers and '+' prefixes removed); all values preserved.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer with an ST-GCN backbone on the NTU RGB+D joint layout,
# classifying into 12 disposition classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# NOTE(review): absolute Windows path — assumes this exact checkout location.
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_28\experiment_28.pkl'

# Training pipeline: joint ('j') features, 100-frame uniformly sampled clips.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val/test pipelines sample deterministically (test_mode=True, single clip).
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training split is repeated 5x per epoch via RepeatDataset.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating every epoch; cosine LR anneals over the same horizon.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_29/DUET_experiment_29_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config: ST-GCN skeleton-based disposition classification,
# DUET experiment 29. Reconstructed as valid Python from diff-mangled text
# (line numbers and '+' prefixes removed); all values preserved.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer with an ST-GCN backbone on the NTU RGB+D joint layout,
# classifying into 12 disposition classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# NOTE(review): absolute Windows path — assumes this exact checkout location.
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_29\experiment_29.pkl'

# Training pipeline: joint ('j') features, 100-frame uniformly sampled clips.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val/test pipelines sample deterministically (test_mode=True, single clip).
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training split is repeated 5x per epoch via RepeatDataset.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating every epoch; cosine LR anneals over the same horizon.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_3/DUET_experiment_3_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config: ST-GCN skeleton-based disposition classification,
# DUET experiment 3. Reconstructed as valid Python from diff-mangled text
# (line numbers and '+' prefixes removed); all values preserved.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer with an ST-GCN backbone on the NTU RGB+D joint layout,
# classifying into 12 disposition classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# NOTE(review): absolute Windows path — assumes this exact checkout location.
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_3\experiment_3.pkl'

# Training pipeline: joint ('j') features, 100-frame uniformly sampled clips.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val/test pipelines sample deterministically (test_mode=True, single clip).
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training split is repeated 5x per epoch via RepeatDataset.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating every epoch; cosine LR anneals over the same horizon.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_4/DUET_experiment_4_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mmaction2 / mmengine config: ST-GCN skeleton-based disposition classification,
# DUET experiment 4. Reconstructed as valid Python from diff-mangled text
# (line numbers and '+' prefixes removed); all values preserved.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# GCN recognizer with an ST-GCN backbone on the NTU RGB+D joint layout,
# classifying into 12 disposition classes.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

dataset_type = 'PoseDataset'
# NOTE(review): absolute Windows path — assumes this exact checkout location.
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_4\experiment_4.pkl'

# Training pipeline: joint ('j') features, 100-frame uniformly sampled clips.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Val/test pipelines sample deterministically (test_mode=True, single clip).
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Training split is repeated 5x per epoch via RepeatDataset.
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating every epoch; cosine LR anneals over the same horizon.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_5/DUET_experiment_5_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""mmaction2/MMEngine config: ST-GCN skeleton action recognition, DUET experiment 5.

Every top-level variable in this module is a config field consumed by the
mmengine runner. Only ``ann_file`` differs between the per-experiment
copies of this config.
"""
# NOTE(review): the absolute Windows paths below tie this config to one
# machine -- confirm/relativize before running elsewhere.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# ST-GCN backbone on the NTU RGB+D 25-joint skeleton layout with a
# 12-class GCN classification head.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

# Pickled skeleton annotations (produced by helper.extract_keypoints).
dataset_type = 'PoseDataset'
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_5\experiment_5.pkl'
# Training: joint ('j') features, one 100-frame uniformly sampled clip.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Validation: deterministic single-clip sampling (test_mode=True).
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Test: identical to val_pipeline; kept separate so it can be tuned alone.
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Each training epoch iterates the train split 5x (RepeatDataset, times=5).
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
# NOTE(review): val and test share the 'xsub_test' split, so the final test
# metric is not an independent hold-out.
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Top-1 accuracy for both validation and test.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay to 0 over all 16 epochs, stepped per iteration.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

# Checkpoint every epoch; log every 100 iterations.
default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_6/DUET_experiment_6_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""mmaction2/MMEngine config: ST-GCN skeleton action recognition, DUET experiment 6.

Every top-level variable in this module is a config field consumed by the
mmengine runner. Only ``ann_file`` differs between the per-experiment
copies of this config.
"""
# NOTE(review): the absolute Windows paths below tie this config to one
# machine -- confirm/relativize before running elsewhere.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# ST-GCN backbone on the NTU RGB+D 25-joint skeleton layout with a
# 12-class GCN classification head.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

# Pickled skeleton annotations (produced by helper.extract_keypoints).
dataset_type = 'PoseDataset'
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_6\experiment_6.pkl'
# Training: joint ('j') features, one 100-frame uniformly sampled clip.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Validation: deterministic single-clip sampling (test_mode=True).
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Test: identical to val_pipeline; kept separate so it can be tuned alone.
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Each training epoch iterates the train split 5x (RepeatDataset, times=5).
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
# NOTE(review): val and test share the 'xsub_test' split, so the final test
# metric is not an independent hold-out.
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Top-1 accuracy for both validation and test.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay to 0 over all 16 epochs, stepped per iteration.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

# Checkpoint every epoch; log every 100 iterations.
default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_7/DUET_experiment_7_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""mmaction2/MMEngine config: ST-GCN skeleton action recognition, DUET experiment 7.

Every top-level variable in this module is a config field consumed by the
mmengine runner. Only ``ann_file`` differs between the per-experiment
copies of this config.
"""
# NOTE(review): the absolute Windows paths below tie this config to one
# machine -- confirm/relativize before running elsewhere.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# ST-GCN backbone on the NTU RGB+D 25-joint skeleton layout with a
# 12-class GCN classification head.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

# Pickled skeleton annotations (produced by helper.extract_keypoints).
dataset_type = 'PoseDataset'
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_7\experiment_7.pkl'
# Training: joint ('j') features, one 100-frame uniformly sampled clip.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Validation: deterministic single-clip sampling (test_mode=True).
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Test: identical to val_pipeline; kept separate so it can be tuned alone.
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Each training epoch iterates the train split 5x (RepeatDataset, times=5).
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
# NOTE(review): val and test share the 'xsub_test' split, so the final test
# metric is not an independent hold-out.
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Top-1 accuracy for both validation and test.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay to 0 over all 16 epochs, stepped per iteration.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

# Checkpoint every epoch; log every 100 iterations.
default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_8/DUET_experiment_8_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""mmaction2/MMEngine config: ST-GCN skeleton action recognition, DUET experiment 8.

Every top-level variable in this module is a config field consumed by the
mmengine runner. Only ``ann_file`` differs between the per-experiment
copies of this config.
"""
# NOTE(review): the absolute Windows paths below tie this config to one
# machine -- confirm/relativize before running elsewhere.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# ST-GCN backbone on the NTU RGB+D 25-joint skeleton layout with a
# 12-class GCN classification head.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

# Pickled skeleton annotations (produced by helper.extract_keypoints).
dataset_type = 'PoseDataset'
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_8\experiment_8.pkl'
# Training: joint ('j') features, one 100-frame uniformly sampled clip.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Validation: deterministic single-clip sampling (test_mode=True).
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Test: identical to val_pipeline; kept separate so it can be tuned alone.
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Each training epoch iterates the train split 5x (RepeatDataset, times=5).
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
# NOTE(review): val and test share the 'xsub_test' split, so the final test
# metric is not an independent hold-out.
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Top-1 accuracy for both validation and test.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay to 0 over all 16 epochs, stepped per iteration.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

# Checkpoint every epoch; log every 100 iterations.
default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
experiment_9/DUET_experiment_9_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""mmaction2/MMEngine config: ST-GCN skeleton action recognition, DUET experiment 9.

Every top-level variable in this module is a config field consumed by the
mmengine runner. Only ``ann_file`` differs between the per-experiment
copies of this config.
"""
# NOTE(review): the absolute Windows paths below tie this config to one
# machine -- confirm/relativize before running elsewhere.
_base_ = r'C:\dev\disposition_classification\skeleton_feature_extraction\mmaction2\configs\_base_\default_runtime.py'

# ST-GCN backbone on the NTU RGB+D 25-joint skeleton layout with a
# 12-class GCN classification head.
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')),
    cls_head=dict(type='GCNHead', num_classes=12, in_channels=256))

# Pickled skeleton annotations (produced by helper.extract_keypoints).
dataset_type = 'PoseDataset'
ann_file = r'C:\dev\disposition_classification\skeleton_feature_extraction\experiment_9\experiment_9.pkl'
# Training: joint ('j') features, one 100-frame uniformly sampled clip.
train_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(type='UniformSampleFrames', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Validation: deterministic single-clip sampling (test_mode=True).
val_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]
# Test: identical to val_pipeline; kept separate so it can be tuned alone.
test_pipeline = [
    dict(type='PreNormalize3D'),
    dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']),
    dict(
        type='UniformSampleFrames', clip_len=100, num_clips=1,
        test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='PackActionInputs')
]

# Each training epoch iterates the train split 5x (RepeatDataset, times=5).
train_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file,
            pipeline=train_pipeline,
            split='xsub_train')))
val_dataloader = dict(
    batch_size=16,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=val_pipeline,
        split='xsub_test',
        test_mode=True))
# NOTE(review): val and test share the 'xsub_test' split, so the final test
# metric is not an independent hold-out.
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file,
        pipeline=test_pipeline,
        split='xsub_test',
        test_mode=True))

# Top-1 accuracy for both validation and test.
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator

# 16 epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Cosine decay to 0 over all 16 epochs, stepped per iteration.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        eta_min=0,
        T_max=16,
        by_epoch=True,
        convert_to_iter_based=True)
]

optim_wrapper = dict(
    optimizer=dict(
        type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True))

# Checkpoint every epoch; log every 100 iterations.
default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100))

# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
helper/.vscode/settings.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "ros.distro": "melodic"
3
+ }
helper/CNN.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import torch
3
+
4
+ from torch import nn
5
+ from torch.nn import functional
6
+ from torch.utils.data import TensorDataset, DataLoader
7
+
8
class NeuralNetwork(nn.Module):
    """1-D CNN classifier over fixed-length feature vectors.

    Expects input of shape (batch, 1, latent_dim) and returns per-class
    log-probabilities of shape (batch, num_classes).
    """

    def __init__(self, latent_dim, num_classes=5):
        super().__init__()
        # Two conv + batch-norm stages (channels 1 -> 64 -> 128); kernel_size=3
        # with padding=1 keeps the sequence length at latent_dim.
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=64, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm1d(64)
        self.conv2 = nn.Conv1d(in_channels=64, out_channels=128, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm1d(128)
        # Classifier head over the flattened (128 * latent_dim) activations.
        self.fc1 = nn.Linear(latent_dim*128, 256)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(256, num_classes)

    def forward(self, x):
        """Return log-softmax class scores for a batch ``x``."""
        features = functional.relu(self.bn1(self.conv1(x)))
        features = functional.relu(self.bn2(self.conv2(features)))
        flat = features.view(features.size(0), -1)
        hidden = self.dropout(functional.relu(self.fc1(flat)))
        logits = self.fc2(hidden)
        return functional.log_softmax(logits, dim=1)
30
+
31
+
32
def train(dataloader, model, loss_fn, optimizer, device=None):
    """Run one training epoch of ``model`` over ``dataloader``.

    Args:
        dataloader: yields (X, y) batches; its dataset length is used for
            progress printing.
        model: network to optimize (put into train mode here); assumed to
            already live on ``device``.
        loss_fn: criterion applied to (model(X), y).
        optimizer: optimizer holding ``model``'s parameters.
        device: target device string. Defaults to 'cuda' when available,
            otherwise 'cpu'. (Fixes the original hard-coded 'cuda', which
            was also re-evaluated inside the loop.)
    """
    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    size = len(dataloader.dataset)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        # Classification losses (NLLLoss/CrossEntropyLoss) need int64 targets.
        y = y.type(torch.LongTensor)
        X, y = X.to(device), y.to(device)

        pred = model(X)
        loss = loss_fn(pred, y)

        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if batch % 100 == 0:
            # Use a separate name for the float so the loss tensor isn't shadowed.
            loss_value, current = loss.item(), (batch + 1) * len(X)
            print(f"loss: {loss_value:>7f} [{current:>5d}/{size:>5d}]")
51
+
52
+ def test(dataloader, model, loss_fn):
53
+ size = len(dataloader.dataset)
54
+ num_batches = len(dataloader)
55
+ model.eval()
56
+ test_loss, correct = 0, 0
57
+ with torch.no_grad():
58
+ for X, y in dataloader:
59
+ device = 'cuda'
60
+
61
+ y = y.type(torch.LongTensor)
62
+ X, y = X.to(device), y.to(device)
63
+ pred = model(X)
64
+ test_loss += loss_fn(pred, y).item()
65
+ correct += (pred.argmax(1) == y).type(torch.float).sum().item()
66
+ test_loss /= num_batches
67
+ correct /= size
68
+ print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
69
+ return correct
helper/__init__.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pickle
3
+ import numpy as np
4
+ import torch
5
+ import random
6
+
7
+ from torch import nn
8
+ from torch.nn import functional
9
+ from torch.utils.data import TensorDataset, DataLoader
10
+ from transformers import CLIPProcessor, CLIPModel
11
+ from mmaction.apis import inference_recognizer, init_recognizer
12
+ from mmaction.engine.hooks import OutputHook
helper/__pycache__/CNN.cpython-311.pyc ADDED
Binary file (5.42 kB). View file
 
helper/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (780 Bytes). View file
 
helper/__pycache__/helper.cpython-311.pyc ADDED
Binary file (10.6 kB). View file
 
helper/helper.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pickle
3
+ import numpy as np
4
+ import random
5
+
6
+ from transformers import CLIPProcessor, CLIPModel
7
+ from mmaction.apis import inference_recognizer, init_recognizer
8
+ from mmaction.engine.hooks import OutputHook
9
+
10
def extract_keypoints(actions, path, test_list, experiment_num):
    """Convert raw joint CSV dumps into an mmaction2 ``PoseDataset`` pickle.

    Walks the session folders under ``path`` (folder names encode
    ``<location:2><label:2><subject:2>`` -- see the slices below), keeps only
    folders whose 0-based label is in ``actions``, remaps each frame's
    32-joint skeleton to the 25-joint NTU RGB+D layout, and writes
    ``experiment_<experiment_num>/experiment_<experiment_num>.pkl`` with the
    ``{"split": {...}, "annotations": [...]}`` structure PoseDataset expects.

    Args:
        actions: collection of 0-based action labels to keep.
        path: root directory containing one folder per recording session.
        test_list: ``f"{location}{subject}"`` strings whose clips go to the
            'xsub_test' split; all other clips go to 'xsub_train'.
        experiment_num: suffix for the output folder and pickle file name.
    """
    # Source 32-joint index (right of '->') for each NTU RGB+D 25-joint
    # slot (left of ':'):
    # 0: Base of the spine -> 0
    # 1: Middle of the spine -> 1
    # 2: Neck -> 3
    # 3: Head -> 26
    # 4: Left shoulder -> 5
    # 5: Left elbow -> 6
    # 6: Left wrist -> 7
    # 7: Left hand -> 8
    # 8: Right shoulder -> 12
    # 9: Right elbow -> 13
    # 10: Right wrist -> 14
    # 11: Right hand -> 15
    # 12: Left hip -> 18
    # 13: Left knee -> 19
    # 14: Left ankle -> 20
    # 15: Left foot -> 21
    # 16: Right hip -> 22
    # 17: Right knee -> 23
    # 18: Right ankle -> 24
    # 19: Right foot -> 25
    # 20: Spine -> 2
    # 21: Left hand tip -> 9
    # 22: Left thumb -> 10
    # 23: Right hand tip -> 16
    # 24: Right thumb -> 17
    keypoint_pair = {0:0, 1:1, 2:3, 3:26, 4:5, 5:6, 6:7, 7:8, 8:12, 9:13, 10:14, 11:15, 12:18, 13:19,
                     14:20, 15:21, 16:22, 17:23, 18:24, 19:25, 20:2, 21:9, 22:10, 23:16, 24:17}
    folder_list = os.listdir(path)
    dataset = {"split": {"xsub_train":[], "xsub_test":[]}, "annotations": []}
    for folder in folder_list:
        # Folder name layout: <location:2><label:2><subject:2>; the on-disk
        # label is 1-based, converted to 0-based here.
        location = folder[0:2]
        label = int(folder[2:4]) - 1
        subject = folder[4:6]
        data_file_list = os.listdir(os.path.join(path, folder))
        if label in actions:
            for data_file in data_file_list:
                # 192 data columns = 2 subjects x 32 joints x 3 coords;
                # column 0 is skipped (presumably a timestamp -- TODO confirm).
                joints = np.loadtxt(os.path.join(path, folder, data_file), delimiter=",", dtype=float, usecols=range(1,193))
                if joints.ndim < 2:
                    # A single-row file loads as 1-D; normalize to (frames, cols).
                    joints = joints.reshape((-1, joints.shape[0]))
                # File name encodes the clip's time span: "<start>_<end>".
                time_start, time_end = data_file.split("_")
                num_frames = joints.shape[0]
                num_subjects = 2
                num_keypoints = 25
                num_coords = 3

                # Discard clips shorter than 30 frames.
                if num_frames < 30:
                    continue

                print(f"Processing {location}{label:02d}{subject} between {time_start} and {time_end}...")

                # Re-index every (subject, frame) joint triple into the NTU layout.
                keypoints = np.zeros((num_subjects, num_frames, num_keypoints, num_coords))
                for sub in range(num_subjects):
                    for frame in range(num_frames):
                        for keypoint_key, keypoint_value in keypoint_pair.items():
                            keypoints[sub, frame, keypoint_key, 0] = joints[frame, sub*32*num_coords+keypoint_value*num_coords]
                            keypoints[sub, frame, keypoint_key, 1] = joints[frame, sub*32*num_coords+keypoint_value*num_coords+1]
                            keypoints[sub, frame, keypoint_key, 2] = joints[frame, sub*32*num_coords+keypoint_value*num_coords+2]
                annotation = {"frame_dir": f"{location}{label:02d}{subject}/{time_start}_{time_end}", "label": label, "total_frames": num_frames,
                              "keypoint": keypoints}
                dataset["annotations"].append(annotation)

                # Cross-subject split: sessions listed in test_list go to the
                # test split, everything else to train.
                if f"{location}{subject}" in test_list:
                    dataset["split"]["xsub_test"].append(f"{location}{label:02d}{subject}/{time_start}_{time_end}")
                else:
                    dataset["split"]["xsub_train"].append(f"{location}{label:02d}{subject}/{time_start}_{time_end}")


    os.makedirs(f"experiment_{experiment_num}", exist_ok=True)
    pickle_filename = os.path.join(f"experiment_{experiment_num}", f"experiment_{experiment_num}.pkl")

    print("Writing to pickle file...")
    with open(pickle_filename, "wb") as file:
        pickle.dump(dataset, file)
85
+
86
+
87
def select_samples(categories, n):
    """Pick ``n`` distinct samples covering every category at least once.

    One representative is chosen at random per category, then the remainder
    is drawn without replacement from the leftover pool so that exactly
    ``n`` samples are returned.

    Args:
        categories: mapping of category -> sequence of hashable samples.
        n: total number of samples to return; must be >= len(categories).

    Returns:
        List of ``n`` distinct samples.

    Raises:
        ValueError: if ``n`` < len(categories), or (via random.sample) if
            the pool holds fewer than ``n`` distinct samples.
    """
    if n < len(categories):
        raise ValueError("n must be at least the number of categories to include each category.")

    # One random representative per category. When categories share samples
    # the representatives can collide, so count the *distinct* ones below.
    selected = {cat: random.choice(samples) for cat, samples in categories.items()}
    chosen_samples = set(selected.values())

    all_samples = [s for cat_samples in categories.values() for s in cat_samples]
    remaining_samples = list(set(all_samples) - chosen_samples)

    # Fix: size the top-up by len(chosen_samples), not len(categories), so the
    # result always has exactly n entries even when representatives collide.
    extra_samples = random.sample(remaining_samples, n - len(chosen_samples))

    return list(chosen_samples) + extra_samples
100
+
101
+
102
def extract_features(config_file_path, checkpoint_file_path, data_path, device, experiment_num, test_list_subject):
    """Run a trained recognizer over every annotation and dump pooled features.

    Loads the model from ``config_file_path``/``checkpoint_file_path``, runs
    inference on each sample in the pickle at ``data_path``, captures the
    cls_head 'pool' activations via OutputHook, splits them into train/test
    by ``f"{location}{subject}"`` membership in ``test_list_subject``, and
    saves four .npy arrays under ``experiment_<experiment_num>``.

    NOTE(review): the first 300 train / 200 test rows are written into
    zero-initialized arrays; if fewer samples arrive, unused rows remain
    all-zero with label 0 -- verify downstream consumers tolerate this.
    """
    model = init_recognizer(config_file_path, checkpoint_file_path, device=device)

    with open(data_path, 'rb') as pickle_file:
        data = pickle.load(pickle_file)

    num_samples = len(data['annotations'])
    # Preallocated capacities; overflow is handled by appending rows below.
    num_train_sample = 300
    num_test_sample = 200
    labels_train = np.zeros(num_train_sample, dtype=np.int8)
    labels_test = np.zeros(num_test_sample, dtype=np.int8)
    features_train = np.zeros((num_train_sample, 512), dtype=np.float32)
    features_test = np.zeros((num_test_sample, 512), dtype=np.float32)
    train_tracker = 0
    test_tracker = 0

    # The hook records the listed cls_head layer outputs on every forward pass.
    with OutputHook(model.cls_head, outputs=['loss_cls', 'pool', 'fc']) as hook:
        for index in range(num_samples):
            # The prediction itself is unused; inference populates
            # hook.layer_outputs as a side effect.
            result = inference_recognizer(model, data['annotations'][index])

            # frame_dir format: "<loc:2><label:2><subject:2>/<start>_<end>".
            frame_dir = data['annotations'][index]['frame_dir'].split('/')
            loc = frame_dir[0][0:2]
            subject = frame_dir[0][-2:]
            # (Unused below; labels are read directly from the annotation.)
            label = data['annotations'][index]['label']

            if f'{loc}{subject}' in test_list_subject:
                if test_tracker < num_test_sample:
                    # assumes the flattened 'pool' output has 512 elements,
                    # matching the preallocated width -- TODO confirm
                    num_rows, num_cols, depth_1, depth_2 = hook.layer_outputs['pool'].shape
                    features_test[test_tracker, :] = np.reshape(hook.layer_outputs['pool'], (-1, num_rows*num_cols))
                    labels_test[test_tracker] = data['annotations'][index]['label']
                else:
                    # Past preallocated capacity: grow by appending rows.
                    num_rows, num_cols, depth_1, depth_2 = hook.layer_outputs['pool'].shape
                    features_test = np.vstack((features_test, np.reshape(hook.layer_outputs['pool'], (-1, num_rows*num_cols))))
                    labels_test = np.concatenate((labels_test, np.array([data['annotations'][index]['label']])))
                test_tracker = test_tracker + 1

            else:
                if train_tracker < num_train_sample:
                    num_rows, num_cols, depth_1, depth_2 = hook.layer_outputs['pool'].shape
                    features_train[train_tracker, :] = np.reshape(hook.layer_outputs['pool'], (-1, num_rows*num_cols))
                    labels_train[train_tracker] = data['annotations'][index]['label']
                else:
                    num_rows, num_cols, depth_1, depth_2 = hook.layer_outputs['pool'].shape
                    features_train = np.vstack((features_train, np.reshape(hook.layer_outputs['pool'], (-1, num_rows*num_cols))))
                    labels_train = np.concatenate((labels_train, np.array([data['annotations'][index]['label']])))
                train_tracker = train_tracker + 1

    # NOTE(review): these literals rely on "\e" not being an escape sequence;
    # newer Pythons emit a SyntaxWarning -- consider raw strings or plain
    # relative paths.
    np.save(os.path.join(f".\experiment_{experiment_num}", "train.npy"), features_train)
    np.save(os.path.join(f".\experiment_{experiment_num}", "train_label.npy"), labels_train)
    np.save(os.path.join(f".\experiment_{experiment_num}", "gtest.npy"), features_test)
    np.save(os.path.join(f".\experiment_{experiment_num}", "g_label.npy"), labels_test)

if __name__ == "__main__":
    pass
requirements.txt ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ addict==2.4.0
2
+ aliyun-python-sdk-core==2.16.0
3
+ aliyun-python-sdk-kms==2.16.5
4
+ certifi==2025.1.31
5
+ cffi==1.17.1
6
+ charset-normalizer==3.4.1
7
+ click==8.1.8
8
+ colorama==0.4.6
9
+ contourpy==1.3.1
10
+ crcmod==1.7
11
+ cryptography==44.0.1
12
+ cycler==0.12.1
13
+ decord==0.6.0
14
+ einops==0.8.1
15
+ filelock==3.14.0
16
+ fonttools==4.56.0
17
+ fsspec==2024.6.1
18
+ huggingface-hub==0.29.2
19
+ idna==3.10
20
+ importlib_metadata==8.6.1
21
+ Jinja2==3.1.4
22
+ jmespath==0.10.0
23
+ kiwisolver==1.4.8
24
+ Markdown==3.7
25
+ markdown-it-py==3.0.0
26
+ MarkupSafe==2.1.5
27
+ matplotlib==3.10.0
28
+ mdurl==0.1.2
29
+ mmaction2==1.0.0
30
+ mmcv==2.0.0
31
+ mmdet==3.3.0
32
+ mmengine==0.10.6
33
+ model-index==0.1.11
34
+ mpmath==1.3.0
35
+ networkx==3.3
36
+ numpy==1.26.4
37
+ opencv-contrib-python==4.11.0.86
38
+ opencv-python==4.11.0.86
39
+ opendatalab==0.0.10
40
+ openmim==0.3.9
41
+ openxlab==0.1.2
42
+ ordered-set==4.1.0
43
+ oss2==2.17.0
44
+ packaging==24.2
45
+ pandas==2.2.3
46
+ pillow==11.1.0
47
+ platformdirs==4.3.6
48
+ pycocotools==2.0.8
49
+ pycparser==2.22
50
+ pycryptodome==3.21.0
51
+ Pygments==2.19.1
52
+ pykinect_azure @ git+https://github.com/ibaiGorordo/pyKinectAzure.git@fdfd70ee0fc4287e0750d6b99a658210cf53cf7c
53
+ pyparsing==3.2.1
54
+ python-dateutil==2.9.0.post0
55
+ pytz==2023.4
56
+ pywin32==308
57
+ PyYAML==6.0.2
58
+ regex==2024.11.6
59
+ requests==2.28.2
60
+ rich==13.4.2
61
+ safetensors==0.5.3
62
+ scipy==1.15.2
63
+ shapely==2.0.7
64
+ six==1.17.0
65
+ sympy==1.13.1
66
+ tabulate==0.9.0
67
+ termcolor==2.5.0
68
+ terminaltables==3.1.10
69
+ tokenizers==0.21.0
70
+ torch==2.6.0+cu118
71
+ torchvision==0.21.0+cu118
72
+ tqdm==4.65.2
73
+ transformers==4.49.0
74
+ typing_extensions==4.12.2
75
+ tzdata==2025.1
76
+ urllib3==1.26.20
77
+ yapf==0.43.0
78
+ zipp==3.21.0