shanaka95 committed on
Commit 43fea7e · verified · 1 Parent(s): da2ec96

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. .gitattributes +1 -0
  2. __pycache__/inference.cpython-313.pyc +0 -0
  3. config/custom/balanced.yaml +85 -0
  4. config/custom/default.yaml +52 -0
  5. config/custom/enhanced.yaml +83 -0
  6. config/custom/improved copy.yaml +86 -0
  7. config/custom/improved.yaml +86 -0
  8. config/custom/optimized.yaml +93 -0
  9. feeders/__pycache__/__init__.cpython-313.pyc +0 -0
  10. feeders/__pycache__/__init__.cpython-36.pyc +0 -0
  11. feeders/__pycache__/__init__.cpython-39.pyc +0 -0
  12. feeders/__pycache__/bone_pairs.cpython-36.pyc +0 -0
  13. feeders/__pycache__/feeder_custom.cpython-313.pyc +0 -0
  14. feeders/__pycache__/feeder_custom.cpython-39.pyc +0 -0
  15. feeders/__pycache__/feeder_ntu.cpython-313.pyc +0 -0
  16. feeders/__pycache__/feeder_ntu.cpython-39.pyc +0 -0
  17. feeders/__pycache__/feeder_ucla.cpython-313.pyc +3 -0
  18. feeders/__pycache__/feeder_ucla.cpython-36.pyc +0 -0
  19. feeders/__pycache__/feeder_ucla.cpython-39.pyc +0 -0
  20. feeders/__pycache__/tools.cpython-313.pyc +0 -0
  21. feeders/__pycache__/tools.cpython-36.pyc +0 -0
  22. feeders/__pycache__/tools.cpython-39.pyc +0 -0
  23. feeders/tools.py +234 -0
  24. graph/__pycache__/__init__.cpython-313.pyc +0 -0
  25. graph/__pycache__/__init__.cpython-36.pyc +0 -0
  26. graph/__pycache__/__init__.cpython-39.pyc +0 -0
  27. graph/__pycache__/custom_17j.cpython-313.pyc +0 -0
  28. graph/__pycache__/custom_17j.cpython-39.pyc +0 -0
  29. graph/__pycache__/ntu_rgb_d.cpython-313.pyc +0 -0
  30. graph/__pycache__/ntu_rgb_d.cpython-36.pyc +0 -0
  31. graph/__pycache__/ntu_rgb_d.cpython-39.pyc +0 -0
  32. graph/__pycache__/tools.cpython-313.pyc +0 -0
  33. graph/__pycache__/tools.cpython-36.pyc +0 -0
  34. graph/__pycache__/tools.cpython-39.pyc +0 -0
  35. graph/__pycache__/ucla.cpython-313.pyc +0 -0
  36. graph/__pycache__/ucla.cpython-36.pyc +0 -0
  37. graph/__pycache__/ucla.cpython-39.pyc +0 -0
  38. graph/custom_17j.py +46 -0
  39. graph/tools.py +80 -0
  40. inference.py +177 -0
  41. model/__pycache__/__init__.cpython-313.pyc +0 -0
  42. model/__pycache__/__init__.cpython-36.pyc +0 -0
  43. model/__pycache__/__init__.cpython-39.pyc +0 -0
  44. model/__pycache__/ctrgcn.cpython-313.pyc +0 -0
  45. model/__pycache__/ctrgcn.cpython-36.pyc +0 -0
  46. model/__pycache__/ctrgcn.cpython-39.pyc +0 -0
  47. model/ctrgcn.py +344 -0
  48. requirements.txt +3 -0
  49. torchlight/setup.py +8 -0
  50. torchlight/torchlight.egg-info/PKG-INFO +5 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+feeders/__pycache__/feeder_ucla.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
__pycache__/inference.cpython-313.pyc ADDED
Binary file (9.79 kB).
config/custom/balanced.yaml ADDED
@@ -0,0 +1,85 @@
+work_dir: ./work_dir/custom/ctrgcn_balanced
+
+# feeder
+feeder: feeders.feeder_custom.Feeder
+train_feeder_args:
+  data_path: data/train
+  label_path: data/train_labels.txt
+  split: train
+  debug: False
+  random_choose: False
+  random_shift: False
+  random_move: False
+  window_size: 64
+  normalization: False
+  random_rot: True  # Keep rotation augmentation - it's proven effective
+  p_interval: [0.5, 1]
+  vel: False
+  bone: False
+
+test_feeder_args:
+  data_path: data/test
+  label_path: data/test_labels.txt
+  split: test
+  window_size: 64
+  p_interval: [0.95]
+  vel: False
+  bone: False
+  debug: False
+
+# model
+model: model.ctrgcn.Model
+model_args:
+  num_class: 52
+  num_point: 17
+  num_person: 1
+  graph: graph.custom_17j.Graph
+  graph_args:
+    labeling_mode: 'spatial'
+  drop_out: 0.2  # Moderate dropout (was 0.5, now 0.2)
+
+# Model weights (optional)
+weights: null
+ignore_weights: []
+
+# optim
+weight_decay: 0.0006  # Modest increase from original 0.0004 (was 0.001)
+base_lr: 0.08  # Moderate reduction from original 0.1 (was 0.05)
+lr_decay_rate: 0.1
+step: [35, 55]  # Back to original schedule (was [40, 60])
+warm_up_epoch: 5
+optimizer: SGD
+
+# training
+device: [0]
+batch_size: 48  # Compromise between 32 and 64
+test_batch_size: 64
+num_epoch: 80
+nesterov: True
+start_epoch: 0
+phase: train
+save_score: False
+seed: 1
+log_interval: 100
+save_interval: 1
+save_epoch: 30
+eval_interval: 1
+print_log: True
+show_topk: [1, 5]
+num_worker: 24
+model_saved_name: ./work_dir/custom/ctrgcn_balanced/runs
+
+# Early stopping (keep but with longer patience)
+early_stopping: True
+patience: 15  # Increased from 10 to 15
+min_delta: 0.001
+
+# Remove excessive regularization
+use_joint_stream: True
+use_bone_stream: True
+use_motion_stream: True
+
+# Mild label smoothing (was 0.1, now 0.05)
+label_smoothing: 0.05
+
+# Remove gradient clipping - not needed with other regularization
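For context: these YAML files are flat key/value dictionaries, and the training entry point that consumes them is not part of this commit. A minimal sketch of how the feeder/model keys are plausibly resolved, modeled on the import_class helper that inference.py (later in this diff) defines; the exact training-script behavior is an assumption:

import yaml

def import_class(import_str):
    # same helper as defined in inference.py below
    mod_str, _sep, class_str = import_str.rpartition('.')
    __import__(mod_str)
    return getattr(__import__(mod_str, fromlist=[class_str]), class_str)

with open('config/custom/balanced.yaml') as f:
    cfg = yaml.safe_load(f)

Model = import_class(cfg['model'])              # model.ctrgcn.Model
model = Model(**cfg['model_args'])              # num_class=52, num_point=17, num_person=1, ...
Feeder = import_class(cfg['feeder'])            # feeders.feeder_custom.Feeder
train_set = Feeder(**cfg['train_feeder_args'])  # data_path, window_size, augmentation flags, ...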
config/custom/default.yaml ADDED
@@ -0,0 +1,52 @@
+work_dir: ./work_dir/custom/ctrgcn_joint
+
+# feeder
+feeder: feeders.feeder_custom.Feeder
+train_feeder_args:
+  data_path: data/train
+  label_path: data/train_labels.txt
+  split: train
+  debug: False
+  random_choose: False
+  random_shift: False
+  random_move: False
+  window_size: 64
+  normalization: False
+  random_rot: True
+  p_interval: [0.5, 1]
+  vel: False
+  bone: False
+
+test_feeder_args:
+  data_path: data/test
+  label_path: data/test_labels.txt
+  split: test
+  window_size: 64
+  p_interval: [0.95]
+  vel: False
+  bone: False
+  debug: False
+
+# model
+model: model.ctrgcn.Model
+model_args:
+  num_class: 52
+  num_point: 17
+  num_person: 1
+  graph: graph.custom_17j.Graph
+  graph_args:
+    labeling_mode: 'spatial'
+
+# optim
+weight_decay: 0.0004
+base_lr: 0.1
+lr_decay_rate: 0.1
+step: [35, 55]
+warm_up_epoch: 5
+
+# training
+device: [0]
+batch_size: 64
+test_batch_size: 64
+num_epoch: 65
+nesterov: True
config/custom/enhanced.yaml ADDED
@@ -0,0 +1,83 @@
+work_dir: ./work_dir/custom/ctrgcn_enhanced
+
+# feeder
+feeder: feeders.feeder_custom.Feeder
+train_feeder_args:
+  data_path: data/train
+  label_path: data/train_labels.txt
+  split: train
+  debug: False
+  random_choose: True   # Enable random temporal cropping
+  random_shift: True    # Enable temporal shifting
+  random_move: True     # Enable spatial transformations (rotation, scaling, translation)
+  window_size: 64
+  normalization: False  # Keep disabled for compatibility
+  random_rot: True      # Enable 3D rotations - very effective
+  p_interval: [0.75, 1] # More aggressive temporal cropping
+  vel: False
+  bone: False
+
+test_feeder_args:
+  data_path: data/test
+  label_path: data/test_labels.txt
+  split: test
+  window_size: 64
+  p_interval: [0.95]  # Stable test-time cropping
+  vel: False
+  bone: False
+  debug: False
+
+# model
+model: model.ctrgcn.Model
+model_args:
+  num_class: 52
+  num_point: 17
+  num_person: 1
+  graph: graph.custom_17j.Graph
+  graph_args:
+    labeling_mode: 'spatial'
+  drop_out: 0.3  # Slightly increased dropout for better generalization
+
+# Model weights (optional)
+weights: null
+ignore_weights: []
+
+# optim
+weight_decay: 0.0008  # Slightly increased for better regularization
+base_lr: 0.09  # Slightly higher LR to compensate for stronger augmentation
+lr_decay_rate: 0.1
+step: [35, 55]  # Original schedule
+warm_up_epoch: 5
+optimizer: SGD
+
+# training
+device: [0]
+batch_size: 56  # Larger batch size for stable training with augmentation
+test_batch_size: 64
+num_epoch: 80
+nesterov: True
+start_epoch: 0
+phase: train
+save_score: False
+seed: 1
+log_interval: 100
+save_interval: 1
+save_epoch: 30
+eval_interval: 1
+print_log: True
+show_topk: [1, 5]
+num_worker: 24
+model_saved_name: ./work_dir/custom/ctrgcn_enhanced/runs
+
+# Early stopping with longer patience due to augmentation noise
+early_stopping: True
+patience: 18  # Longer patience for augmented training
+min_delta: 0.001
+
+# Multi-stream training
+use_joint_stream: True
+use_bone_stream: True
+use_motion_stream: True
+
+# Very mild label smoothing with strong augmentation
+label_smoothing: 0.03
config/custom/improved copy.yaml ADDED
@@ -0,0 +1,86 @@
+work_dir: ./work_dir/custom/ctrgcn_improved_final
+
+# feeder
+feeder: feeders.feeder_custom.Feeder
+train_feeder_args:
+  data_path: data/train
+  label_path: data/train_labels.txt
+  split: train
+  debug: False
+  random_choose: False  # Disable temporal sampling (compatibility issue)
+  random_shift: False   # Disable temporal shifting (compatibility issue)
+  random_move: False    # Disable spatial perturbations (compatibility issue)
+  window_size: 64
+  normalization: False  # Disable normalization (compatibility issue)
+  random_rot: False     # Disable rotation augmentation (compatibility issue)
+  p_interval: [0.5, 1]  # Keep probability interval
+  vel: False
+  bone: False
+
+test_feeder_args:
+  data_path: data/test
+  label_path: data/test_labels.txt
+  split: test
+  window_size: 64
+  p_interval: [0.95]
+  vel: False
+  bone: False
+  debug: False
+
+# model
+model: model.ctrgcn.Model
+model_args:
+  num_class: 52
+  num_point: 17
+  num_person: 1
+  graph: graph.custom_17j.Graph
+  graph_args:
+    labeling_mode: 'spatial'
+  drop_out: 0.5  # Add dropout for regularization
+
+# Model weights (optional)
+weights: null       # No pretrained weights
+ignore_weights: []  # No weights to ignore
+
+# optim
+weight_decay: 0.001  # Increase weight decay for better regularization
+base_lr: 0.05        # Reduce initial learning rate
+lr_decay_rate: 0.1
+step: [40, 60]       # Adjust LR schedule - later decay
+warm_up_epoch: 5
+optimizer: SGD       # Optimizer type
+
+# training
+device: [0]
+batch_size: 32       # Reduce batch size for better generalization
+test_batch_size: 64
+num_epoch: 80        # Increase epochs with early stopping
+nesterov: True
+start_epoch: 0       # Starting epoch
+phase: train         # Training phase
+save_score: False    # Don't save prediction scores
+seed: 1              # Random seed
+log_interval: 100    # Log every 100 iterations
+save_interval: 1     # Save model every epoch
+save_epoch: 30       # Start saving after epoch 30
+eval_interval: 1     # Evaluate every epoch
+print_log: True      # Print logs
+show_topk: [1, 5]    # Show top-1 and top-5 accuracy
+num_worker: 24       # Number of data loading workers
+model_saved_name: ./work_dir/custom/ctrgcn_improved_final/runs
+
+# Early stopping
+early_stopping: True
+patience: 10         # Stop if no improvement for 10 epochs
+min_delta: 0.001     # Minimum improvement threshold
+
+# Multi-stream training for better generalization
+use_joint_stream: True
+use_bone_stream: True
+use_motion_stream: True
+
+# Label smoothing for regularization
+label_smoothing: 0.1
+
+# Additional regularization
+gradient_clip: 1.0  # Gradient clipping
config/custom/improved.yaml ADDED
@@ -0,0 +1,86 @@
+work_dir: ./work_dir/custom/ctrgcn_improved
+
+# feeder
+feeder: feeders.feeder_custom.Feeder
+train_feeder_args:
+  data_path: data/train
+  label_path: data/train_labels.txt
+  split: train
+  debug: False
+  random_choose: True   # Enable temporal sampling
+  random_shift: True    # Enable temporal shifting
+  random_move: True     # Enable spatial perturbations
+  window_size: 64
+  normalization: False  # Disable normalization (compatibility issue)
+  random_rot: True      # Enable rotation augmentation
+  p_interval: [0.5, 1]  # Keep probability interval
+  vel: False
+  bone: False
+
+test_feeder_args:
+  data_path: data/test
+  label_path: data/test_labels.txt
+  split: test
+  window_size: 64
+  p_interval: [0.95]
+  vel: False
+  bone: False
+  debug: False
+
+# model
+model: action_recognition.ctrgcn.model.ctrgcn.Model
+model_args:
+  num_class: 52
+  num_point: 17
+  num_person: 1
+  graph: action_recognition.ctrgcn.graph.custom_17j.Graph
+  graph_args:
+    labeling_mode: 'spatial'
+  drop_out: 0.5  # Add dropout for regularization
+
+# Model weights (optional)
+weights: null       # No pretrained weights
+ignore_weights: []  # No weights to ignore
+
+# optim
+weight_decay: 0.001  # Increase weight decay for better regularization
+base_lr: 0.01        # Reduce initial learning rate
+lr_decay_rate: 0.1
+step: [40, 60]       # Adjust LR schedule - later decay
+warm_up_epoch: 5
+optimizer: SGD       # Optimizer type
+
+# training
+device: [0]
+batch_size: 32       # Reduce batch size for better generalization
+test_batch_size: 64
+num_epoch: 80        # Increase epochs with early stopping
+nesterov: True
+start_epoch: 0       # Starting epoch
+phase: train         # Training phase
+save_score: False    # Don't save prediction scores
+seed: 1              # Random seed
+log_interval: 100    # Log every 100 iterations
+save_interval: 1     # Save model every epoch
+save_epoch: 30       # Start saving after epoch 30
+eval_interval: 1     # Evaluate every epoch
+print_log: True      # Print logs
+show_topk: [1, 5]    # Show top-1 and top-5 accuracy
+num_worker: 24       # Number of data loading workers
+model_saved_name: ./work_dir/custom/ctrgcn_improved/runs
+
+# Early stopping
+early_stopping: True
+patience: 10         # Stop if no improvement for 10 epochs
+min_delta: 0.001     # Minimum improvement threshold
+
+# Multi-stream training for better generalization
+use_joint_stream: True
+use_bone_stream: True
+use_motion_stream: True
+
+# Label smoothing for regularization
+label_smoothing: 0.1
+
+# Additional regularization
+gradient_clip: 1.0  # Gradient clipping
config/custom/optimized.yaml ADDED
@@ -0,0 +1,93 @@
+# Optimized CTR-GCN Configuration
+# Balanced approach - moderate augmentation with stability
+
+work_dir: ./work_dir/custom/ctrgcn_optimized
+
+# feeder
+feeder: feeders.feeder_custom.Feeder
+train_feeder_args:
+  data_path: data/train
+  label_path: data/train_labels.txt
+  split: train
+  debug: False
+  # Moderate augmentation - not too aggressive
+  random_choose: True
+  random_shift: True
+  random_move: True
+  random_rot: True
+  window_size: 64
+  normalization: False
+  # More conservative temporal cropping
+  p_interval: [0.85, 1]  # Less aggressive than enhanced (0.75)
+  vel: False
+  bone: False
+
+# data loader for testing
+test_feeder: feeders.feeder_custom.Feeder
+test_feeder_args:
+  data_path: data/test
+  label_path: data/test_labels.txt
+  split: test
+  window_size: 64
+  p_interval: [0.95]  # Conservative for testing
+  vel: False
+  bone: False
+  debug: False
+
+# model
+model: model.ctrgcn.Model
+model_args:
+  num_class: 52
+  num_point: 17
+  num_person: 1
+  graph: graph.custom_17j.Graph
+  graph_args:
+    labeling_mode: spatial
+  drop_out: 0.25  # Between balanced (0.2) and enhanced (0.3)
+
+# training
+device: [0]
+batch_size: 52  # Compromise between balanced (48) and enhanced (56)
+test_batch_size: 64
+num_epoch: 80
+
+# optimizer
+weight_decay: 0.0007  # Between balanced (0.0006) and enhanced (0.0008)
+base_lr: 0.085  # Between balanced (0.08) and enhanced (0.09)
+lr_decay_rate: 0.1
+step: [35, 55]
+warm_up_epoch: 5
+
+# training configuration
+nesterov: True
+start_epoch: 0
+save_interval: 1
+save_epoch: 30
+eval_interval: 1
+save_score: False
+show_topk: [1, 5]
+
+# Early stopping
+early_stopping: True
+patience: 16  # Between balanced (15) and enhanced (18)
+min_delta: 0.001
+
+# Multi-stream configuration
+use_joint_stream: True
+use_bone_stream: True
+use_motion_stream: True
+
+# Regularization
+label_smoothing: 0.04  # Between balanced (0.05) and enhanced (0.03)
+
+# Environment
+num_worker: 24
+seed: 1
+
+# Model loading
+weights: null
+ignore_weights: []
+start_epoch: 0
+save_score: false
+print_log: true
+log_interval: 100
feeders/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (255 Bytes).

feeders/__pycache__/__init__.cpython-36.pyc ADDED
Binary file (309 Bytes).

feeders/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (244 Bytes).

feeders/__pycache__/bone_pairs.cpython-36.pyc ADDED
Binary file (1.05 kB).

feeders/__pycache__/feeder_custom.cpython-313.pyc ADDED
Binary file (8.63 kB).

feeders/__pycache__/feeder_custom.cpython-39.pyc ADDED
Binary file (5.05 kB).

feeders/__pycache__/feeder_ntu.cpython-313.pyc ADDED
Binary file (6.79 kB).

feeders/__pycache__/feeder_ntu.cpython-39.pyc ADDED
Binary file (4.54 kB).
feeders/__pycache__/feeder_ucla.cpython-313.pyc ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea6638edd777cbf5375f679a9a8be266963a58987aa26d796fa8527e2795ecbd
+size 119680
feeders/__pycache__/feeder_ucla.cpython-36.pyc ADDED
Binary file (49.4 kB).

feeders/__pycache__/feeder_ucla.cpython-39.pyc ADDED
Binary file (49.3 kB).

feeders/__pycache__/tools.cpython-313.pyc ADDED
Binary file (13.9 kB).

feeders/__pycache__/tools.cpython-36.pyc ADDED
Binary file (8.84 kB).

feeders/__pycache__/tools.cpython-39.pyc ADDED
Binary file (6.77 kB).
feeders/tools.py ADDED
@@ -0,0 +1,234 @@
+import random
+import matplotlib.pyplot as plt
+import numpy as np
+import pdb
+
+import torch
+import torch.nn.functional as F
+
+
+def valid_crop_resize(data_numpy, valid_frame_num, p_interval, window):
+    # input: C,T,V,M
+    C, T, V, M = data_numpy.shape
+    begin = 0
+    end = valid_frame_num
+    valid_size = end - begin
+
+    # crop
+    if len(p_interval) == 1:
+        p = p_interval[0]
+        bias = int((1 - p) * valid_size / 2)
+        data = data_numpy[:, begin + bias:end - bias, :, :]  # center crop
+        cropped_length = data.shape[1]
+    else:
+        p = np.random.rand(1) * (p_interval[1] - p_interval[0]) + p_interval[0]
+        # constrain cropped_length to a lower bound of 64 frames
+        cropped_length = np.minimum(np.maximum(int(np.floor(valid_size * p)), 64), valid_size)
+        bias = np.random.randint(0, valid_size - cropped_length + 1)
+        data = data_numpy[:, begin + bias:begin + bias + cropped_length, :, :]
+        if data.shape[1] == 0:
+            print(cropped_length, bias, valid_size)
+
+    # resize
+    data = torch.tensor(data, dtype=torch.float)
+    data = data.permute(0, 2, 3, 1).contiguous().view(C * V * M, cropped_length)
+    data = data[None, None, :, :]
+    # bilinear interpolation performs both up-sampling and down-sampling along time
+    data = F.interpolate(data, size=(C * V * M, window), mode='bilinear', align_corners=False).squeeze()
+    data = data.contiguous().view(C, V, M, window).permute(0, 3, 1, 2).contiguous().numpy()
+
+    return data
+
+
+def downsample(data_numpy, step, random_sample=True):
+    # input: C,T,V,M
+    begin = np.random.randint(step) if random_sample else 0
+    return data_numpy[:, begin::step, :, :]
+
+
+def temporal_slice(data_numpy, step):
+    # input: C,T,V,M
+    C, T, V, M = data_numpy.shape
+    return data_numpy.reshape(C, T // step, step, V, M).transpose(
+        (0, 1, 3, 2, 4)).reshape(C, T // step, V, step * M)
+
+
+def mean_subtractor(data_numpy, mean):
+    # input: C,T,V,M
+    # naive version
+    if mean == 0:
+        return data_numpy
+    C, T, V, M = data_numpy.shape
+    valid_frame = (data_numpy != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0
+    begin = valid_frame.argmax()
+    end = len(valid_frame) - valid_frame[::-1].argmax()
+    data_numpy[:, :end, :, :] = data_numpy[:, :end, :, :] - mean
+    return data_numpy
+
+
+def auto_pading(data_numpy, size, random_pad=False):
+    C, T, V, M = data_numpy.shape
+    if T < size:
+        begin = random.randint(0, size - T) if random_pad else 0
+        data_numpy_paded = np.zeros((C, size, V, M))
+        data_numpy_paded[:, begin:begin + T, :, :] = data_numpy
+        return data_numpy_paded
+    else:
+        return data_numpy
+
+
+def random_choose(data_numpy, size, auto_pad=True):
+    # input: C,T,V,M
+    # Randomly selects a temporal segment; not entirely sound, since padded zeros may be picked.
+    C, T, V, M = data_numpy.shape
+    if T == size:
+        return data_numpy
+    elif T < size:
+        if auto_pad:
+            return auto_pading(data_numpy, size, random_pad=True)
+        else:
+            return data_numpy
+    else:
+        begin = random.randint(0, T - size)
+        return data_numpy[:, begin:begin + size, :, :]
+
+
+def random_move(data_numpy,
+                angle_candidate=[-10., -5., 0., 5., 10.],
+                scale_candidate=[0.9, 1.0, 1.1],
+                transform_candidate=[-0.2, -0.1, 0.0, 0.1, 0.2],
+                move_time_candidate=[1]):
+    # input: C,T,V,M
+    C, T, V, M = data_numpy.shape
+    move_time = random.choice(move_time_candidate)
+    node = np.arange(0, T, T * 1.0 / move_time).round().astype(int)
+    node = np.append(node, T)
+    num_node = len(node)
+
+    A = np.random.choice(angle_candidate, num_node)
+    S = np.random.choice(scale_candidate, num_node)
+    T_x = np.random.choice(transform_candidate, num_node)
+    T_y = np.random.choice(transform_candidate, num_node)
+
+    a = np.zeros(T)
+    s = np.zeros(T)
+    t_x = np.zeros(T)
+    t_y = np.zeros(T)
+
+    # linearly interpolate the transform parameters between key frames
+    for i in range(num_node - 1):
+        a[node[i]:node[i + 1]] = np.linspace(
+            A[i], A[i + 1], node[i + 1] - node[i]) * np.pi / 180
+        s[node[i]:node[i + 1]] = np.linspace(S[i], S[i + 1],
+                                             node[i + 1] - node[i])
+        t_x[node[i]:node[i + 1]] = np.linspace(T_x[i], T_x[i + 1],
+                                               node[i + 1] - node[i])
+        t_y[node[i]:node[i + 1]] = np.linspace(T_y[i], T_y[i + 1],
+                                               node[i + 1] - node[i])
+
+    theta = np.array([[np.cos(a) * s, -np.sin(a) * s],
+                      [np.sin(a) * s, np.cos(a) * s]])
+
+    # perform transformation
+    for i_frame in range(T):
+        xy = data_numpy[0:2, i_frame, :, :]
+        new_xy = np.dot(theta[:, :, i_frame], xy.reshape(2, -1))
+        new_xy[0] += t_x[i_frame]
+        new_xy[1] += t_y[i_frame]
+        data_numpy[0:2, i_frame, :, :] = new_xy.reshape(2, V, M)
+
+    return data_numpy
+
+
+def random_shift(data_numpy):
+    C, T, V, M = data_numpy.shape
+    data_shift = np.zeros(data_numpy.shape)
+    valid_frame = (data_numpy != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0
+    begin = valid_frame.argmax()
+    end = len(valid_frame) - valid_frame[::-1].argmax()
+
+    size = end - begin
+    bias = random.randint(0, T - size)
+    data_shift[:, bias:bias + size, :, :] = data_numpy[:, begin:end, :, :]
+
+    return data_shift
+
+
+def _rot(rot):
+    """
+    rot: T,3
+    """
+    cos_r, sin_r = rot.cos(), rot.sin()  # T,3
+    zeros = torch.zeros(rot.shape[0], 1)  # T,1
+    ones = torch.ones(rot.shape[0], 1)  # T,1
+
+    r1 = torch.stack((ones, zeros, zeros), dim=-1)  # T,1,3
+    rx2 = torch.stack((zeros, cos_r[:, 0:1], sin_r[:, 0:1]), dim=-1)  # T,1,3
+    rx3 = torch.stack((zeros, -sin_r[:, 0:1], cos_r[:, 0:1]), dim=-1)  # T,1,3
+    rx = torch.cat((r1, rx2, rx3), dim=1)  # T,3,3
+
+    ry1 = torch.stack((cos_r[:, 1:2], zeros, -sin_r[:, 1:2]), dim=-1)
+    r2 = torch.stack((zeros, ones, zeros), dim=-1)
+    ry3 = torch.stack((sin_r[:, 1:2], zeros, cos_r[:, 1:2]), dim=-1)
+    ry = torch.cat((ry1, r2, ry3), dim=1)
+
+    rz1 = torch.stack((cos_r[:, 2:3], sin_r[:, 2:3], zeros), dim=-1)
+    r3 = torch.stack((zeros, zeros, ones), dim=-1)
+    rz2 = torch.stack((-sin_r[:, 2:3], cos_r[:, 2:3], zeros), dim=-1)
+    rz = torch.cat((rz1, rz2, r3), dim=1)
+
+    rot = rz.matmul(ry).matmul(rx)
+    return rot
+
+
+def random_rot(data_numpy, theta=0.3):
+    """
+    data_numpy: C,T,V,M
+    """
+    data_torch = torch.from_numpy(data_numpy).float()  # ensure float32
+    C, T, V, M = data_torch.shape
+    data_torch = data_torch.permute(1, 0, 2, 3).contiguous().view(T, C, V * M)  # T,3,V*M
+    rot = torch.zeros(3, dtype=torch.float32).uniform_(-theta, theta)  # ensure float32
+    rot = torch.stack([rot, ] * T, dim=0)
+    rot = _rot(rot)  # T,3,3
+    data_torch = torch.matmul(rot, data_torch)
+    data_torch = data_torch.view(T, C, V, M).permute(1, 0, 2, 3).contiguous()
+
+    return data_torch.numpy()  # convert back to numpy
+
+
+def openpose_match(data_numpy):
+    C, T, V, M = data_numpy.shape
+    assert (C == 3)
+    score = data_numpy[2, :, :, :].sum(axis=1)
+    # the rank of body confidence in each frame (shape: T-1, M)
+    rank = (-score[0:T - 1]).argsort(axis=1).reshape(T - 1, M)
+
+    # data of frame 1
+    xy1 = data_numpy[0:2, 0:T - 1, :, :].reshape(2, T - 1, V, M, 1)
+    # data of frame 2
+    xy2 = data_numpy[0:2, 1:T, :, :].reshape(2, T - 1, V, 1, M)
+    # square of distance between frame 1 & 2 (shape: T-1, M, M)
+    distance = ((xy2 - xy1) ** 2).sum(axis=2).sum(axis=0)
+
+    # match pose
+    forward_map = np.zeros((T, M), dtype=int) - 1
+    forward_map[0] = range(M)
+    for m in range(M):
+        choose = (rank == m)
+        forward = distance[choose].argmin(axis=1)
+        for t in range(T - 1):
+            distance[t, :, forward[t]] = np.inf
+        forward_map[1:][choose] = forward
+    assert (np.all(forward_map >= 0))
+
+    # string data
+    for t in range(T - 1):
+        forward_map[t + 1] = forward_map[t + 1][forward_map[t]]
+
+    # generate data
+    new_data_numpy = np.zeros(data_numpy.shape)
+    for t in range(T):
+        new_data_numpy[:, t, :, :] = data_numpy[:, t, :, forward_map[
+            t]].transpose(1, 2, 0)
+    data_numpy = new_data_numpy
+
+    # score sort
+    trace_score = data_numpy[2, :, :, :].sum(axis=1).sum(axis=0)
+    rank = (-trace_score).argsort()
+    data_numpy = data_numpy[:, :, :, rank]
+
+    return data_numpy
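For orientation, a minimal sketch of how the two transforms the configs actually exercise (valid_crop_resize via p_interval/window_size, and random_rot) compose on a dummy clip; the random data and the flat import path are illustrative:

import numpy as np
from feeders import tools

# Dummy pose clip in the C,T,V,M layout: 3 channels, 243 frames, 17 joints, 1 person
clip = np.random.randn(3, 243, 17, 1).astype(np.float32)

# Crop a random 50-100% window and resize to 64 frames,
# matching train_feeder_args (p_interval: [0.5, 1], window_size: 64)
clip64 = tools.valid_crop_resize(clip, 243, [0.5, 1], 64)
print(clip64.shape)  # (3, 64, 17, 1)

# Random 3D rotation, as applied when random_rot: True
clip64 = tools.random_rot(clip64, theta=0.3)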
graph/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (279 Bytes).

graph/__pycache__/__init__.cpython-36.pyc ADDED
Binary file (297 Bytes).

graph/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (267 Bytes).

graph/__pycache__/custom_17j.cpython-313.pyc ADDED
Binary file (2.41 kB).

graph/__pycache__/custom_17j.cpython-39.pyc ADDED
Binary file (1.77 kB).

graph/__pycache__/ntu_rgb_d.cpython-313.pyc ADDED
Binary file (2.12 kB).

graph/__pycache__/ntu_rgb_d.cpython-36.pyc ADDED
Binary file (3.76 kB).

graph/__pycache__/ntu_rgb_d.cpython-39.pyc ADDED
Binary file (1.68 kB).

graph/__pycache__/tools.cpython-313.pyc ADDED
Binary file (4.37 kB).

graph/__pycache__/tools.cpython-36.pyc ADDED
Binary file (2.56 kB).

graph/__pycache__/tools.cpython-39.pyc ADDED
Binary file (2.56 kB).

graph/__pycache__/ucla.cpython-313.pyc ADDED
Binary file (2.07 kB).

graph/__pycache__/ucla.cpython-36.pyc ADDED
Binary file (1.96 kB).

graph/__pycache__/ucla.cpython-39.pyc ADDED
Binary file (1.63 kB).
graph/custom_17j.py ADDED
@@ -0,0 +1,46 @@
+import numpy as np
+from action_recognition.ctrgcn.graph import tools
+
+num_node = 17
+self_link = [(i, i) for i in range(num_node)]
+inward_ori_index = [
+    (1, 0), (2, 1), (3, 2), (4, 3),  # spine chain
+    (5, 1), (6, 5), (7, 6),          # left arm
+    (8, 1), (9, 8), (10, 9),         # right arm
+    (11, 0), (12, 11), (13, 12),     # left leg
+    (14, 0), (15, 14), (16, 15)      # right leg
+]
+inward = [(i, j) for (i, j) in inward_ori_index]
+outward = [(j, i) for (i, j) in inward]
+neighbor = inward + outward
+
+
+class Graph:
+    def __init__(self, labeling_mode='spatial'):
+        self.num_node = num_node
+        self.self_link = self_link
+        self.inward = inward
+        self.outward = outward
+        self.neighbor = neighbor
+        self.A = self.get_adjacency_matrix(labeling_mode)
+
+    def get_adjacency_matrix(self, labeling_mode=None):
+        if labeling_mode is None:
+            return self.A
+        if labeling_mode == 'spatial':
+            A = tools.get_spatial_graph(num_node, self_link, inward, outward)
+        else:
+            raise ValueError()
+        return A
+
+
+if __name__ == '__main__':
+    import matplotlib.pyplot as plt
+    import os
+    # os.environ['DISPLAY'] = 'localhost:10.0'
+    A = Graph('spatial').get_adjacency_matrix()
+    for i, graph in enumerate(A):
+        plt.figure()
+        plt.imshow(graph, cmap='gray')
+        plt.show()
+    print("OK")
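As a quick sanity check on this skeleton definition: with spatial labeling the graph is one 17x17 matrix per subset (self-links, inward, outward). A minimal sketch, assuming the repository resolves under the action_recognition.ctrgcn package prefix that this file's own import (and improved.yaml) uses:

from action_recognition.ctrgcn.graph.custom_17j import Graph

g = Graph(labeling_mode='spatial')
print(g.A.shape)       # (3, 17, 17): identity, normalized inward, normalized outward
print(g.A[0].trace())  # 17.0 - the first subset is the identity built from self_link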
graph/tools.py ADDED
@@ -0,0 +1,80 @@
+import numpy as np
+
+
+def get_sgp_mat(num_in, num_out, link):
+    A = np.zeros((num_in, num_out))
+    for i, j in link:
+        A[i, j] = 1
+    A_norm = A / np.sum(A, axis=0, keepdims=True)
+    return A_norm
+
+
+def edge2mat(link, num_node):
+    A = np.zeros((num_node, num_node))
+    for i, j in link:
+        A[j, i] = 1
+    return A
+
+
+def get_k_scale_graph(scale, A):
+    if scale == 1:
+        return A
+    An = np.zeros_like(A)
+    A_power = np.eye(A.shape[0])
+    for k in range(scale):
+        A_power = A_power @ A
+        An += A_power
+    An[An > 0] = 1
+    return An
+
+
+def normalize_digraph(A):
+    Dl = np.sum(A, 0)
+    h, w = A.shape
+    Dn = np.zeros((w, w))
+    for i in range(w):
+        if Dl[i] > 0:
+            Dn[i, i] = Dl[i] ** (-1)
+    AD = np.dot(A, Dn)
+    return AD
+
+
+def get_spatial_graph(num_node, self_link, inward, outward):
+    I = edge2mat(self_link, num_node)
+    In = normalize_digraph(edge2mat(inward, num_node))
+    Out = normalize_digraph(edge2mat(outward, num_node))
+    A = np.stack((I, In, Out))
+    return A
+
+
+def normalize_adjacency_matrix(A):
+    node_degrees = A.sum(-1)
+    degs_inv_sqrt = np.power(node_degrees, -0.5)
+    norm_degs_matrix = np.eye(len(node_degrees)) * degs_inv_sqrt
+    return (norm_degs_matrix @ A @ norm_degs_matrix).astype(np.float32)
+
+
+def k_adjacency(A, k, with_self=False, self_factor=1):
+    assert isinstance(A, np.ndarray)
+    I = np.eye(len(A), dtype=A.dtype)
+    if k == 0:
+        return I
+    Ak = np.minimum(np.linalg.matrix_power(A + I, k), 1) \
+        - np.minimum(np.linalg.matrix_power(A + I, k - 1), 1)
+    if with_self:
+        Ak += (self_factor * I)
+    return Ak
+
+
+def get_multiscale_spatial_graph(num_node, self_link, inward, outward):
+    I = edge2mat(self_link, num_node)
+    A1 = edge2mat(inward, num_node)
+    A2 = edge2mat(outward, num_node)
+    A3 = k_adjacency(A1, 2)
+    A4 = k_adjacency(A2, 2)
+    A1 = normalize_digraph(A1)
+    A2 = normalize_digraph(A2)
+    A3 = normalize_digraph(A3)
+    A4 = normalize_digraph(A4)
+    A = np.stack((I, A1, A2, A3, A4))
+    return A
+
+
+def get_uniform_graph(num_node, self_link, neighbor):
+    A = normalize_digraph(edge2mat(neighbor + self_link, num_node))
+    return A
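To make normalize_digraph concrete, a worked toy example of my own on a 3-node chain 0-1-2: edge2mat writes A[j, i] = 1 for each edge (i, j), and normalize_digraph divides each column by its in-degree, so get_spatial_graph returns column-normalized inward/outward subsets plus the identity:

import numpy as np
from graph import tools  # flat module path, as the non-packaged configs use

self_link = [(i, i) for i in range(3)]
inward = [(1, 0), (2, 1)]               # child -> parent along the chain
outward = [(j, i) for (i, j) in inward]

A = tools.get_spatial_graph(3, self_link, inward, outward)
print(A.shape)  # (3, 3, 3)
print(A[1])     # normalized inward subset; each nonzero column sums to 1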
inference.py ADDED
@@ -0,0 +1,177 @@
+#!/usr/bin/env python3
+"""
+Inference Script for CTR-GCN
+============================
+
+This script performs inference on pose sequences using a trained CTR-GCN model.
+It reads pose data from poses.pkl (containing 243-frame segments) and predicts
+action labels using the configuration and checkpoint hardcoded below
+(CONFIG_PATH and WEIGHTS_PATH); edit those constants to point at a different run.
+
+Usage:
+    python inference.py
+
+Author: AI Assistant
+Date: 2025
+"""
+
+import os
+import yaml
+import numpy as np
+import torch
+import torch.nn.functional as F
+
+# Import required modules
+from action_recognition.ctrgcn.feeders import tools
+
+
+def import_class(import_str):
+    mod_str, _sep, class_str = import_str.rpartition('.')
+    __import__(mod_str)
+    return getattr(__import__(mod_str, fromlist=[class_str]), class_str)
+
+
+# Hardcoded config and checkpoint paths
+CONFIG_PATH = os.path.join(os.path.dirname(__file__), 'config/custom/improved.yaml')
+WEIGHTS_PATH = os.path.join(os.path.dirname(__file__), 'work_dir/custom/ctrgcn_improved/runs-56-19656.pt')
+
+# Device selection (CPU by default, use CUDA if available)
+DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+# Model/class cache
+_model = None
+_config = None
+_class_names = None
+
+
+def load_model():
+    global _model, _config
+    if _model is not None and _config is not None:
+        return _model, _config
+    with open(CONFIG_PATH, 'r') as f:
+        config = yaml.load(f, Loader=yaml.FullLoader)
+    Model = import_class(config['model'])
+    model = Model(**config['model_args'])
+    weights = torch.load(WEIGHTS_PATH, map_location='cpu')
+    # Strip the 'module.' prefix that DataParallel checkpoints carry
+    if any(key.startswith('module.') for key in weights.keys()):
+        weights = {key[7:]: value for key, value in weights.items()}
+    model.load_state_dict(weights)
+    model.eval()
+    model = model.to(DEVICE)
+    _model = model
+    _config = config
+    return model, config
+
+
+def preprocess_pose_data(pose_data, window_size=64, p_interval=[0.95]):
+    # Accepts (1, T, V, C) or (T, V, C) arrays and converts them to (C, T, V, M=1)
+    if pose_data.ndim == 4:
+        data = pose_data.squeeze(0).transpose(2, 0, 1)
+    elif pose_data.ndim == 3:
+        data = pose_data.transpose(2, 0, 1)
+    else:
+        raise ValueError(f"Unexpected pose_data shape: {pose_data.shape}")
+    data = data[:, :, :, np.newaxis]
+    C, T, V, M = data.shape
+    data = tools.valid_crop_resize(data, T, p_interval, window_size)
+    return data
+
+
+def predict_label(model, data):
+    data_tensor = torch.from_numpy(data).float().unsqueeze(0).to(DEVICE)
+    with torch.no_grad():
+        output = model(data_tensor)
+        probabilities = F.softmax(output, dim=1)
+        predicted_label = torch.argmax(output, dim=1).item()
+        confidence = probabilities[0, predicted_label].item()
+    return predicted_label, confidence, probabilities.cpu().numpy()[0]
+
+
+def load_class_names(config):
+    global _class_names
+    if _class_names is not None:
+        return _class_names
+    class_names = {}
+    label_path = config.get('test_feeder_args', {}).get('label_path', None)
+    if label_path and os.path.exists(label_path):
+        with open(label_path, 'r') as f:
+            for line in f:
+                parts = line.strip().split()
+                if len(parts) >= 2:
+                    label = int(parts[1])
+                    if label not in class_names:
+                        class_names[label] = f"Class_{label}"
+    for i in range(52):
+        if i not in class_names:
+            class_names[i] = f"Class_{i}"
+    _class_names = class_names
+    return class_names
+
+
+def extract_embeddings_from_segments(segments):
+    """
+    Extract feature embeddings from pose segments for clustering/analysis.
+
+    segments: List of np.ndarray, each of shape (243, 17, 3)
+    Returns: List of dicts with keys: sequence_id, embedding (numpy array)
+    """
+    model, config = load_model()
+    window_size = config.get('test_feeder_args', {}).get('window_size', 64)
+    results = []
+
+    for i, pose_data in enumerate(segments):
+        processed_data = preprocess_pose_data(pose_data, window_size=window_size)
+        data_tensor = torch.from_numpy(processed_data).float().unsqueeze(0).to(DEVICE)
+
+        with torch.no_grad():
+            embedding = model.extract_embedding(data_tensor)
+            embedding_np = embedding.cpu().numpy()[0]  # remove batch dimension
+
+        results.append({
+            'sequence_id': i,
+            'embedding': embedding_np
+        })
+
+    return results
+
+
+def extract_top5_labels_from_segments(segments):
+    """
+    Extract the top-5 action labels from pose segments.
+
+    segments: List of np.ndarray, each of shape (243, 17, 3)
+    Returns: List of dicts with keys: sequence_id, top5_labels (list of 5 integers)
+    """
+    model, config = load_model()
+    window_size = config.get('test_feeder_args', {}).get('window_size', 64)
+    results = []
+
+    for i, pose_data in enumerate(segments):
+        processed_data = preprocess_pose_data(pose_data, window_size=window_size)
+        data_tensor = torch.from_numpy(processed_data).float().unsqueeze(0).to(DEVICE)
+
+        with torch.no_grad():
+            output = model(data_tensor)
+            probabilities = F.softmax(output, dim=1)
+            # Get the top-5 predictions
+            top5_probs, top5_indices = torch.topk(probabilities, 5, dim=1)
+            top5_labels = top5_indices.cpu().numpy()[0]  # remove batch dimension
+
+        results.append({
+            'sequence_id': i,
+            'top5_labels': top5_labels.tolist()
+        })
+
+    return results
+
+
+def run_inference_on_segments(segments):
+    """
+    segments: List of np.ndarray, each of shape (243, 17, 3)
+    Returns: List of dicts with keys: sequence_id, predicted_label, confidence, class_name, probabilities
+    """
+    model, config = load_model()
+    class_names = load_class_names(config)
+    window_size = config.get('test_feeder_args', {}).get('window_size', 64)
+    results = []
+    for i, pose_data in enumerate(segments):
+        processed_data = preprocess_pose_data(pose_data, window_size=window_size)
+        pred_label, confidence, prob_distribution = predict_label(model, processed_data)
+        class_name = class_names.get(pred_label, f"Class_{pred_label}")
+        results.append({
+            'sequence_id': i,
+            'predicted_label': pred_label,
+            'confidence': confidence,
+            'class_name': class_name,
+            'probabilities': prob_distribution
+        })
+    return results
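A minimal usage sketch for the segment-level helpers above; the random arrays stand in for the 243x17x3 segments that poses.pkl would supply, and the call assumes the hardcoded checkpoint exists:

import numpy as np
from inference import run_inference_on_segments, extract_top5_labels_from_segments

# Two dummy 243-frame, 17-joint, 3D pose segments
segments = [np.random.randn(243, 17, 3).astype(np.float32) for _ in range(2)]

for r in run_inference_on_segments(segments):
    print(r['sequence_id'], r['class_name'], f"{r['confidence']:.3f}")

# Top-5 label IDs per segment, e.g. for downstream re-ranking
print(extract_top5_labels_from_segments(segments))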
model/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (151 Bytes).

model/__pycache__/__init__.cpython-36.pyc ADDED
Binary file (141 Bytes).

model/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (145 Bytes).

model/__pycache__/ctrgcn.cpython-313.pyc ADDED
Binary file (21 kB).

model/__pycache__/ctrgcn.cpython-36.pyc ADDED
Binary file (11.5 kB).

model/__pycache__/ctrgcn.cpython-39.pyc ADDED
Binary file (10.6 kB).
model/ctrgcn.py ADDED
@@ -0,0 +1,344 @@
+import math
+import pdb
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torch.autograd import Variable
+import importlib
+
+
+def import_class(name):
+    components = name.split('.')
+    module_path = '.'.join(components[:-1])
+    class_name = components[-1]
+    mod = importlib.import_module(module_path)
+    return getattr(mod, class_name)
+
+
+def conv_branch_init(conv, branches):
+    weight = conv.weight
+    n = weight.size(0)
+    k1 = weight.size(1)
+    k2 = weight.size(2)
+    nn.init.normal_(weight, 0, math.sqrt(2. / (n * k1 * k2 * branches)))
+    nn.init.constant_(conv.bias, 0)
+
+
+def conv_init(conv):
+    if conv.weight is not None:
+        nn.init.kaiming_normal_(conv.weight, mode='fan_out')
+    if conv.bias is not None:
+        nn.init.constant_(conv.bias, 0)
+
+
+def bn_init(bn, scale):
+    nn.init.constant_(bn.weight, scale)
+    nn.init.constant_(bn.bias, 0)
+
+
+def weights_init(m):
+    classname = m.__class__.__name__
+    if classname.find('Conv') != -1:
+        if hasattr(m, 'weight'):
+            nn.init.kaiming_normal_(m.weight, mode='fan_out')
+        if hasattr(m, 'bias') and m.bias is not None and isinstance(m.bias, torch.Tensor):
+            nn.init.constant_(m.bias, 0)
+    elif classname.find('BatchNorm') != -1:
+        if hasattr(m, 'weight') and m.weight is not None:
+            m.weight.data.normal_(1.0, 0.02)
+        if hasattr(m, 'bias') and m.bias is not None:
+            m.bias.data.fill_(0)
+
+
+class TemporalConv(nn.Module):
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1):
+        super(TemporalConv, self).__init__()
+        pad = (kernel_size + (kernel_size - 1) * (dilation - 1) - 1) // 2
+        self.conv = nn.Conv2d(
+            in_channels,
+            out_channels,
+            kernel_size=(kernel_size, 1),
+            padding=(pad, 0),
+            stride=(stride, 1),
+            dilation=(dilation, 1))
+
+        self.bn = nn.BatchNorm2d(out_channels)
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bn(x)
+        return x
+
+
+class MultiScale_TemporalConv(nn.Module):
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size=3,
+                 stride=1,
+                 dilations=[1, 2, 3, 4],
+                 residual=True,
+                 residual_kernel_size=1):
+
+        super().__init__()
+        assert out_channels % (len(dilations) + 2) == 0, '# out channels should be multiples of # branches'
+
+        # Multiple branches of temporal convolution
+        self.num_branches = len(dilations) + 2
+        branch_channels = out_channels // self.num_branches
+        if type(kernel_size) == list:
+            assert len(kernel_size) == len(dilations)
+        else:
+            kernel_size = [kernel_size] * len(dilations)
+        # Temporal Convolution branches
+        self.branches = nn.ModuleList([
+            nn.Sequential(
+                nn.Conv2d(
+                    in_channels,
+                    branch_channels,
+                    kernel_size=1,
+                    padding=0),
+                nn.BatchNorm2d(branch_channels),
+                nn.ReLU(inplace=True),
+                TemporalConv(
+                    branch_channels,
+                    branch_channels,
+                    kernel_size=ks,
+                    stride=stride,
+                    dilation=dilation),
+            )
+            for ks, dilation in zip(kernel_size, dilations)
+        ])
+
+        # Additional Max & 1x1 branch
+        self.branches.append(nn.Sequential(
+            nn.Conv2d(in_channels, branch_channels, kernel_size=1, padding=0),
+            nn.BatchNorm2d(branch_channels),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=(3, 1), stride=(stride, 1), padding=(1, 0)),
+            nn.BatchNorm2d(branch_channels)  # why add another BN here?
+        ))
+
+        self.branches.append(nn.Sequential(
+            nn.Conv2d(in_channels, branch_channels, kernel_size=1, padding=0, stride=(stride, 1)),
+            nn.BatchNorm2d(branch_channels)
+        ))
+
+        # Residual connection
+        if not residual:
+            self.residual = lambda x: 0
+        elif (in_channels == out_channels) and (stride == 1):
+            self.residual = lambda x: x
+        else:
+            self.residual = TemporalConv(in_channels, out_channels, kernel_size=residual_kernel_size, stride=stride)
+
+        # initialize
+        self.apply(weights_init)
+
+    def forward(self, x):
+        # Input dim: (N,C,T,V)
+        res = self.residual(x)
+        branch_outs = []
+        for tempconv in self.branches:
+            out = tempconv(x)
+            branch_outs.append(out)
+
+        out = torch.cat(branch_outs, dim=1)
+        out += res
+        return out
+
+
+class CTRGC(nn.Module):
+    def __init__(self, in_channels, out_channels, rel_reduction=8, mid_reduction=1):
+        super(CTRGC, self).__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        if in_channels == 3 or in_channels == 9:
+            self.rel_channels = 8
+            self.mid_channels = 16
+        else:
+            self.rel_channels = in_channels // rel_reduction
+            self.mid_channels = in_channels // mid_reduction
+        self.conv1 = nn.Conv2d(self.in_channels, self.rel_channels, kernel_size=1)
+        self.conv2 = nn.Conv2d(self.in_channels, self.rel_channels, kernel_size=1)
+        self.conv3 = nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1)
+        self.conv4 = nn.Conv2d(self.rel_channels, self.out_channels, kernel_size=1)
+        self.tanh = nn.Tanh()
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                conv_init(m)
+            elif isinstance(m, nn.BatchNorm2d):
+                bn_init(m, 1)
+
+    def forward(self, x, A=None, alpha=1):
+        # channel-wise topology refinement: pairwise joint differences gate the shared graph A
+        x1, x2, x3 = self.conv1(x).mean(-2), self.conv2(x).mean(-2), self.conv3(x)
+        x1 = self.tanh(x1.unsqueeze(-1) - x2.unsqueeze(-2))
+        x1 = self.conv4(x1) * alpha + (A.unsqueeze(0).unsqueeze(0) if A is not None else 0)  # N,C,V,V
+        x1 = torch.einsum('ncuv,nctv->nctu', x1, x3)
+        return x1
+
+
+class unit_tcn(nn.Module):
+    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
+        super(unit_tcn, self).__init__()
+        pad = int((kernel_size - 1) / 2)
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0),
+                              stride=(stride, 1))
+
+        self.bn = nn.BatchNorm2d(out_channels)
+        self.relu = nn.ReLU(inplace=True)
+        conv_init(self.conv)
+        bn_init(self.bn, 1)
+
+    def forward(self, x):
+        x = self.bn(self.conv(x))
+        return x
+
+
+class unit_gcn(nn.Module):
+    def __init__(self, in_channels, out_channels, A, coff_embedding=4, adaptive=True, residual=True):
+        super(unit_gcn, self).__init__()
+        inter_channels = out_channels // coff_embedding
+        self.inter_c = inter_channels
+        self.out_c = out_channels
+        self.in_c = in_channels
+        self.adaptive = adaptive
+        self.num_subset = A.shape[0]
+        self.convs = nn.ModuleList()
+        for i in range(self.num_subset):
+            self.convs.append(CTRGC(in_channels, out_channels))
+
+        if residual:
+            if in_channels != out_channels:
+                self.down = nn.Sequential(
+                    nn.Conv2d(in_channels, out_channels, 1),
+                    nn.BatchNorm2d(out_channels)
+                )
+            else:
+                self.down = lambda x: x
+        else:
+            self.down = lambda x: 0
+        if self.adaptive:
+            self.PA = nn.Parameter(torch.from_numpy(A.astype(np.float32)))
+        else:
+            self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)
+        self.alpha = nn.Parameter(torch.zeros(1))
+        self.bn = nn.BatchNorm2d(out_channels)
+        self.soft = nn.Softmax(-2)
+        self.relu = nn.ReLU(inplace=True)
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                conv_init(m)
+            elif isinstance(m, nn.BatchNorm2d):
+                bn_init(m, 1)
+        bn_init(self.bn, 1e-6)
+
+    def forward(self, x):
+        y = None
+        if self.adaptive:
+            A = self.PA
+        else:
+            A = self.A.cuda(x.get_device())
+        for i in range(self.num_subset):
+            z = self.convs[i](x, A[i], self.alpha)
+            y = z + y if y is not None else z
+        y = self.bn(y)
+        y += self.down(x)
+        y = self.relu(y)
+        return y
+
+
+class TCN_GCN_unit(nn.Module):
+    def __init__(self, in_channels, out_channels, A, stride=1, residual=True, adaptive=True, kernel_size=5, dilations=[1, 2]):
+        super(TCN_GCN_unit, self).__init__()
+        self.gcn1 = unit_gcn(in_channels, out_channels, A, adaptive=adaptive)
+        self.tcn1 = MultiScale_TemporalConv(out_channels, out_channels, kernel_size=kernel_size, stride=stride,
+                                            dilations=dilations, residual=False)
+        self.relu = nn.ReLU(inplace=True)
+        if not residual:
+            self.residual = lambda x: 0
+        elif (in_channels == out_channels) and (stride == 1):
+            self.residual = lambda x: x
+        else:
+            self.residual = unit_tcn(in_channels, out_channels, kernel_size=1, stride=stride)
+
+    def forward(self, x):
+        y = self.relu(self.tcn1(self.gcn1(x)) + self.residual(x))
+        return y
+
+
+class Model(nn.Module):
+    def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3,
+                 drop_out=0, adaptive=True):
+        super(Model, self).__init__()
+
+        if graph is None:
+            raise ValueError()
+        else:
+            Graph = import_class(graph)
+            self.graph = Graph(**graph_args)
+
+        A = self.graph.A  # (num_subset, V, V), e.g. (3, 17, 17) for the custom graph
+
+        self.num_class = num_class
+        self.num_point = num_point
+        self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)
+
+        base_channel = 64
+        self.l1 = TCN_GCN_unit(in_channels, base_channel, A, residual=False, adaptive=adaptive)
+        self.l2 = TCN_GCN_unit(base_channel, base_channel, A, adaptive=adaptive)
+        self.l3 = TCN_GCN_unit(base_channel, base_channel, A, adaptive=adaptive)
+        self.l4 = TCN_GCN_unit(base_channel, base_channel, A, adaptive=adaptive)
+        self.l5 = TCN_GCN_unit(base_channel, base_channel * 2, A, stride=2, adaptive=adaptive)
+        self.l6 = TCN_GCN_unit(base_channel * 2, base_channel * 2, A, adaptive=adaptive)
+        self.l7 = TCN_GCN_unit(base_channel * 2, base_channel * 2, A, adaptive=adaptive)
+        self.l8 = TCN_GCN_unit(base_channel * 2, base_channel * 4, A, stride=2, adaptive=adaptive)
+        self.l9 = TCN_GCN_unit(base_channel * 4, base_channel * 4, A, adaptive=adaptive)
+        self.l10 = TCN_GCN_unit(base_channel * 4, base_channel * 4, A, adaptive=adaptive)
+
+        self.fc = nn.Linear(base_channel * 4, num_class)
+        nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
+        bn_init(self.data_bn, 1)
+        if drop_out:
+            self.drop_out = nn.Dropout(drop_out)
+        else:
+            self.drop_out = lambda x: x
+
+    def extract_embedding(self, x):
+        """Extract the feature embedding before the final classification layer."""
+        if len(x.shape) == 3:
+            N, T, VC = x.shape
+            x = x.view(N, T, self.num_point, -1).permute(0, 3, 1, 2).contiguous().unsqueeze(-1)
+        N, C, T, V, M = x.size()
+
+        x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
+        x = self.data_bn(x)
+        x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)
+        x = self.l1(x)
+        x = self.l2(x)
+        x = self.l3(x)
+        x = self.l4(x)
+        x = self.l5(x)
+        x = self.l6(x)
+        x = self.l7(x)
+        x = self.l8(x)
+        x = self.l9(x)
+        x = self.l10(x)
+
+        # N*M,C,T,V
+        c_new = x.size(1)
+        x = x.view(N, M, c_new, -1)
+        x = x.mean(3).mean(1)
+        x = self.drop_out(x)
+
+        return x  # embedding (before the final classification layer)
+
+    def forward(self, x):
+        # Extract embedding
+        embedding = self.extract_embedding(x)
+        # Apply the final classification layer
+        return self.fc(embedding)
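To tie the pieces together, a minimal sketch that builds the model with the model_args the configs specify and runs one dummy batch; the packaged import prefix mirrors improved.yaml and is an assumption about how the repo is installed:

import torch
from action_recognition.ctrgcn.model.ctrgcn import Model

net = Model(num_class=52, num_point=17, num_person=1,
            graph='action_recognition.ctrgcn.graph.custom_17j.Graph',
            graph_args={'labeling_mode': 'spatial'},
            drop_out=0.5)
net.eval()

# One dummy clip shaped (N, C, T, V, M), as the feeder produces after cropping to 64 frames
x = torch.randn(1, 3, 64, 17, 1)
with torch.no_grad():
    emb = net.extract_embedding(x)  # (1, 256) pooled feature, used by extract_embeddings_from_segments
    logits = net(x)                 # (1, 52) class scores
print(emb.shape, logits.shape)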
requirements.txt ADDED
@@ -0,0 +1,3 @@
+numpy
+pyyaml
+torch
torchlight/setup.py ADDED
@@ -0,0 +1,8 @@
+from setuptools import find_packages, setup
+
+setup(
+    name='torchlight',
+    version='1.0',
+    description='A mini framework for pytorch',
+    packages=find_packages(),
+    install_requires=[])
torchlight/torchlight.egg-info/PKG-INFO ADDED
@@ -0,0 +1,5 @@
+Metadata-Version: 2.4
+Name: torchlight
+Version: 1.0
+Summary: A mini framework for pytorch
+Dynamic: summary