zehralx committed
Commit 9c5382a · verified · 1 Parent(s): feb33bb

Upload folder using huggingface_hub
README.md CHANGED
@@ -1,53 +1,70 @@
  ---
  library_name: transformers
- license: apache-2.0
- base_model: facebook/wav2vec2-base
  tags:
- - generated_from_trainer
- model-index:
- - name: deepfake_audio_wav2vec
-   results: []
+ - audio-classification
+ license: apache-2.0
  ---

- <!-- This model card has been generated automatically according to the information the Trainer had access to. You
- should probably proofread and complete it, then remove this comment. -->
-
- # deepfake_audio_wav2vec
-
- This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset.
-
- ## Model description
-
- More information needed
-
- ## Intended uses & limitations
-
- More information needed
-
- ## Training and evaluation data
-
- More information needed
-
- ## Training procedure
-
- ### Training hyperparameters
-
- The following hyperparameters were used during training:
- - learning_rate: 5e-05
- - train_batch_size: 16
- - eval_batch_size: 16
- - seed: 42
- - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
- - lr_scheduler_type: linear
- - num_epochs: 4.0
-
- ### Training results
-
-
-
- ### Framework versions
-
- - Transformers 4.52.0.dev0
- - Pytorch 2.6.0+cu124
- - Datasets 3.5.0
- - Tokenizers 0.21.1
+ # wav2vec2-audio-classification-v1-test
+
+ ## Model Index
+
+ ```json
+ {
+   "model-index": [
+     {
+       "name": "wav2vec2-audio-classification-v1-test",
+       "results": [
+         {
+           "task": {
+             "type": "audio-classification",
+             "name": "Audio Classification"
+           },
+           "dataset": {
+             "name": "./audio_classification",
+             "type": "audio",
+             "config": "default"
+           },
+           "metrics": [
+             {
+               "type": "accuracy",
+               "name": "Accuracy",
+               "value": 0.9633333333333334
+             }
+           ]
+         }
+       ]
+     }
+   ]
+ }
+ ```
+
+ ## Detailed Evaluation Metrics
+
+ **Confusion Matrix**
+
+ | true \ predicted | spoof | bona-fide |
+ |------------------|-------|-----------|
+ | spoof            | 95    | 11        |
+ | bona-fide        | 0     | 194       |
+
+ **Per-class Accuracy**
+
+ | Class     | Accuracy |
+ |-----------|----------|
+ | spoof     | 0.8962   |
+ | bona-fide | 1.0000   |
+
+ **Classification Report**
+
+ ```text
+               precision    recall  f1-score   support
+
+        spoof     1.0000    0.8962    0.9453       106
+    bona-fide     0.9463    1.0000    0.9724       194
+
+     accuracy                         0.9633       300
+    macro avg     0.9732    0.9481    0.9589       300
+ weighted avg     0.9653    0.9633    0.9628       300
+ ```
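A usage note for the updated card: the snippet below is a minimal inference sketch, not part of the commit. The repo id is a hypothetical placeholder for wherever this checkpoint is actually hosted; the labels come from the checkpoint's `id2label` mapping.

```python
# Minimal inference sketch (the repo id is assumed -- substitute the real one).
from transformers import pipeline

clf = pipeline(
    "audio-classification",
    model="zehralx/wav2vec2-audio-classification-v1-test",  # hypothetical id
)

# Accepts a path to an audio file; the bundled feature extractor expects
# 16 kHz mono input and the pipeline resamples on load.
print(clf("sample.wav"))
# e.g. [{"label": "bona-fide", "score": 0.98}, {"label": "spoof", "score": 0.02}]
```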
all_results.json CHANGED
@@ -1,13 +1,13 @@
  {
-   "epoch": 3.0,
-   "eval_accuracy": 0.94,
-   "eval_loss": 0.2446485459804535,
-   "eval_runtime": 10.2837,
-   "eval_samples_per_second": 9.724,
-   "eval_steps_per_second": 0.681,
-   "total_flos": 3.183758365185645e+17,
-   "train_loss": 0.0,
-   "train_runtime": 0.0042,
-   "train_samples_per_second": 7161.589,
-   "train_steps_per_second": 716.159
+   "epoch": 4.0,
+   "eval_accuracy": 0.9633333333333334,
+   "eval_loss": 0.17613795399665833,
+   "eval_runtime": 7.4955,
+   "eval_samples_per_second": 40.024,
+   "eval_steps_per_second": 2.535,
+   "total_flos": 4.337421477135794e+17,
+   "train_loss": 0.1324449266706194,
+   "train_runtime": 231.6013,
+   "train_samples_per_second": 17.271,
+   "train_steps_per_second": 1.088
  }
checkpoint-252/config.json ADDED
@@ -0,0 +1,126 @@
+ {
+   "activation_dropout": 0.0,
+   "adapter_attn_dim": null,
+   "adapter_kernel_size": 3,
+   "adapter_stride": 2,
+   "add_adapter": false,
+   "apply_spec_augment": true,
+   "architectures": [
+     "Wav2Vec2ForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "codevector_dim": 256,
+   "contrastive_logits_temperature": 0.1,
+   "conv_bias": false,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "diversity_loss_weight": 0.1,
+   "do_stable_layer_norm": false,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_norm": "group",
+   "feat_proj_dropout": 0.1,
+   "feat_quantizer_dropout": 0.0,
+   "final_dropout": 0.0,
+   "finetuning_task": "audio-classification",
+   "freeze_feat_extract_train": true,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "spoof",
+     "1": "bona-fide"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "bona-fide": "1",
+     "spoof": "0"
+   },
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_channel_length": 10,
+   "mask_channel_min_space": 1,
+   "mask_channel_other": 0.0,
+   "mask_channel_prob": 0.0,
+   "mask_channel_selection": "static",
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_min_space": 1,
+   "mask_time_other": 0.0,
+   "mask_time_prob": 0.05,
+   "mask_time_selection": "static",
+   "model_type": "wav2vec2",
+   "no_mask_channel_overlap": false,
+   "no_mask_time_overlap": false,
+   "num_adapter_layers": 3,
+   "num_attention_heads": 12,
+   "num_codevector_groups": 2,
+   "num_codevectors_per_group": 320,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 12,
+   "num_negatives": 100,
+   "output_hidden_size": 768,
+   "pad_token_id": 0,
+   "proj_codevector_dim": 256,
+   "tdnn_dilation": [
+     1,
+     2,
+     3,
+     1,
+     1
+   ],
+   "tdnn_dim": [
+     512,
+     512,
+     512,
+     512,
+     1500
+   ],
+   "tdnn_kernel": [
+     5,
+     3,
+     3,
+     1,
+     1
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.52.0.dev0",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32,
+   "xvector_output_dim": 512
+ }
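The config above registers a `Wav2Vec2ForSequenceClassification` head with the two labels. As a sketch of how such a head is typically instantiated from the base checkpoint (the actual training script is not part of this commit):

```python
# Sketch: recreating a 2-label classification head on the base model,
# mirroring the config above; not the author's training script.
from transformers import Wav2Vec2ForSequenceClassification

model = Wav2Vec2ForSequenceClassification.from_pretrained(
    "facebook/wav2vec2-base",
    num_labels=2,
    id2label={0: "spoof", 1: "bona-fide"},
    label2id={"spoof": 0, "bona-fide": 1},
)
# Per the config: hidden states (hidden_size=768) are pooled, projected to
# classifier_proj_size=256, then mapped to the 2 logits.
```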
checkpoint-252/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:825f4abce51209ba99ad3bb359e10154210a1cd4ce68f1950bfd0d594fdf98fd
+ size 378302360
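The `.safetensors`, `.pt`, `.pth`, and `.bin` entries in this commit are Git LFS pointer files: three lines giving the spec version, the SHA-256 of the real payload (`oid`), and its size in bytes. A small sketch (the local path is an assumption) for verifying a downloaded artifact against its pointer:

```python
# Verify a downloaded LFS object against the pointer's sha256 oid.
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected = "825f4abce51209ba99ad3bb359e10154210a1cd4ce68f1950bfd0d594fdf98fd"
assert sha256_of("checkpoint-252/model.safetensors") == expected
```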
checkpoint-252/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b6440d80f2cb3078f5983dc97e70fc267aa428ff30f8fb5f51766a3d5638d04
+ size 723121402
checkpoint-252/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
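This preprocessor config says inputs are 16 kHz mono waveforms, zero-mean/unit-variance normalized, with attention masks for padding. A sketch of applying it manually (the `librosa` resampling step and the file path are assumptions, not part of the checkpoint):

```python
# Sketch: preprocessing audio with this feature extractor config.
import librosa
from transformers import Wav2Vec2FeatureExtractor

fe = Wav2Vec2FeatureExtractor.from_pretrained("checkpoint-252")  # reads preprocessor_config.json
speech, _ = librosa.load("sample.wav", sr=fe.sampling_rate)      # resample to 16000 Hz, mono
inputs = fe(speech, sampling_rate=fe.sampling_rate, return_tensors="pt")
# inputs.input_values: normalized waveform (do_normalize=true)
# inputs.attention_mask: present because return_attention_mask=true
```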
checkpoint-252/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32f747925e09a3a31c942fe4738efe820d0f29e57b46a453d202b2d8faee84c7
+ size 14244
checkpoint-252/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d850a9a2833d791b56dac160ee0736dade1cf2c6c005c2726014f52e1cad95dc
+ size 1064
checkpoint-252/trainer_state.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 4.0,
+   "eval_steps": 500,
+   "global_step": 252,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [],
+   "logging_steps": 500,
+   "max_steps": 252,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 4,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 4.337421477135794e+17,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-252/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6471cecf94ed1837b7d642def8847c0639981212e25da640dc6634c090fb05f8
+ size 5368
confusion_matrix.png CHANGED
eval_detailed_metrics.json CHANGED
@@ -1,45 +1,45 @@
  {
    "confusion_matrix": [
      [
-       30,
-       5
+       95,
+       11
      ],
      [
-       1,
-       64
+       0,
+       194
      ]
    ],
    "classification_report": {
      "spoof": {
-       "precision": 0.967741935483871,
-       "recall": 0.8571428571428571,
-       "f1-score": 0.9090909090909091,
-       "support": 35.0
+       "precision": 1.0,
+       "recall": 0.8962264150943396,
+       "f1-score": 0.945273631840796,
+       "support": 106.0
      },
      "bona-fide": {
-       "precision": 0.927536231884058,
-       "recall": 0.9846153846153847,
-       "f1-score": 0.9552238805970149,
-       "support": 65.0
+       "precision": 0.9463414634146341,
+       "recall": 1.0,
+       "f1-score": 0.9724310776942355,
+       "support": 194.0
      },
-     "accuracy": 0.94,
+     "accuracy": 0.9633333333333334,
      "macro avg": {
-       "precision": 0.9476390836839645,
-       "recall": 0.9208791208791209,
-       "f1-score": 0.932157394843962,
-       "support": 100.0
+       "precision": 0.973170731707317,
+       "recall": 0.9481132075471699,
+       "f1-score": 0.9588523547675158,
+       "support": 300.0
      },
      "weighted avg": {
-       "precision": 0.9416082281439926,
-       "recall": 0.94,
-       "f1-score": 0.9390773405698779,
-       "support": 100.0
+       "precision": 0.9653008130081302,
+       "recall": 0.9633333333333334,
+       "f1-score": 0.9628354468260202,
+       "support": 300.0
      }
    },
-   "classification_report_str": " precision recall f1-score support\n\n spoof 0.9677 0.8571 0.9091 35\n bona-fide 0.9275 0.9846 0.9552 65\n\n accuracy 0.9400 100\n macro avg 0.9476 0.9209 0.9322 100\nweighted avg 0.9416 0.9400 0.9391 100\n",
+   "classification_report_str": " precision recall f1-score support\n\n spoof 1.0000 0.8962 0.9453 106\n bona-fide 0.9463 1.0000 0.9724 194\n\n accuracy 0.9633 300\n macro avg 0.9732 0.9481 0.9589 300\nweighted avg 0.9653 0.9633 0.9628 300\n",
    "per_class_accuracy": {
-     "spoof": 0.8571428571428571,
-     "bona-fide": 0.9846153846153847
+     "spoof": 0.8962264150943396,
+     "bona-fide": 1.0
    },
    "task": {
      "name": "Audio Classification",
@@ -49,7 +49,7 @@
      {
        "name": "Accuracy",
        "type": "accuracy",
-       "value": 0.94
+       "value": 0.9633333333333334
      }
    ]
  }
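The updated numbers are internally consistent: every figure in this file follows from the new confusion matrix alone (rows are true labels, columns predicted, in spoof/bona-fide order), as the sketch below reproduces.

```python
# Reproduce the reported metrics from the confusion matrix.
import numpy as np

cm = np.array([[95, 11],    # true spoof:     95 correct, 11 missed
               [0, 194]])   # true bona-fide: all 194 correct

print(cm.trace() / cm.sum())      # accuracy: 289/300 = 0.9633...
print(cm.diagonal() / cm.sum(1))  # per-class accuracy (= recall): 0.8962..., 1.0
print(cm.diagonal() / cm.sum(0))  # precision: spoof 95/95 = 1.0, bona-fide 194/205 = 0.9463...
```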
eval_results.json CHANGED
@@ -1,8 +1,8 @@
  {
-   "epoch": 3.0,
-   "eval_accuracy": 0.94,
-   "eval_loss": 0.2446485459804535,
-   "eval_runtime": 10.2837,
-   "eval_samples_per_second": 9.724,
-   "eval_steps_per_second": 0.681
+   "epoch": 4.0,
+   "eval_accuracy": 0.9633333333333334,
+   "eval_loss": 0.17613795399665833,
+   "eval_runtime": 7.4955,
+   "eval_samples_per_second": 40.024,
+   "eval_steps_per_second": 2.535
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
-   "epoch": 3.0,
-   "total_flos": 3.183758365185645e+17,
-   "train_loss": 0.0,
-   "train_runtime": 0.0042,
-   "train_samples_per_second": 7161.589,
-   "train_steps_per_second": 716.159
+   "epoch": 4.0,
+   "total_flos": 4.337421477135794e+17,
+   "train_loss": 0.1324449266706194,
+   "train_runtime": 231.6013,
+   "train_samples_per_second": 17.271,
+   "train_steps_per_second": 1.088
  }
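The new throughput figures hang together arithmetically, and they imply a training set of roughly 1000 clips; this is an inference from the reported numbers, not something stated in the commit.

```python
# Cross-check the reported training throughput.
steps, runtime, epochs, batch = 252, 231.6013, 4, 16

print(steps / runtime)            # 1.0881... ~ train_steps_per_second (1.088)
print(17.271 * runtime / epochs)  # ~1000 samples/epoch implied by samples/s
print(-(-1000 // batch) * epochs) # ceil(1000/16) = 63 steps/epoch * 4 = 252 = global_step
```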
trainer_state.json CHANGED
@@ -2,27 +2,27 @@
    "best_global_step": null,
    "best_metric": null,
    "best_model_checkpoint": null,
-   "epoch": 3.0,
+   "epoch": 4.0,
    "eval_steps": 500,
-   "global_step": 189,
+   "global_step": 252,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
    "log_history": [
      {
-       "epoch": 3.0,
-       "step": 189,
-       "total_flos": 3.183758365185645e+17,
-       "train_loss": 0.0,
-       "train_runtime": 0.0042,
-       "train_samples_per_second": 7161.589,
-       "train_steps_per_second": 716.159
+       "epoch": 4.0,
+       "step": 252,
+       "total_flos": 4.337421477135794e+17,
+       "train_loss": 0.1324449266706194,
+       "train_runtime": 231.6013,
+       "train_samples_per_second": 17.271,
+       "train_steps_per_second": 1.088
      }
    ],
    "logging_steps": 500,
-   "max_steps": 3,
+   "max_steps": 252,
    "num_input_tokens_seen": 0,
-   "num_train_epochs": 3,
+   "num_train_epochs": 4,
    "save_steps": 500,
    "stateful_callbacks": {
      "TrainerControl": {
@@ -36,7 +36,7 @@
        "attributes": {}
      }
    },
-   "total_flos": 3.183758365185645e+17,
+   "total_flos": 4.337421477135794e+17,
    "train_batch_size": 16,
    "trial_name": null,
    "trial_params": null