{
  "_name_or_path": "openai/whisper-large-v3-turbo",
  "activation_dropout": 0.0,
  "activation_function": "gelu",
  "additional_layer": false,
  "additional_self_attention_layer": true,
  "apply_spec_augment": false,
  "apply_target_amp_to_n_layers": -1,
  "architectures": [
    "WhisperForConditionalGenerationWithCTC"
  ],
  "attention_dropout": 0.0,
  "begin_suppress_tokens": [
    220,
    50256
  ],
  "blank_token_id": null,
  "bos_token_id": 50257,
  "classifier_proj_size": 256,
  "ctc_loss_reduction": "mean",
  "ctc_weight": 0.3,
  "ctc_zero_infinity": false,
  "d_model": 1280,
  "decoder_attention_heads": 20,
  "decoder_ffn_dim": 5120,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 4,
  "decoder_start_token_id": 50258,
  "dropout": 0.0,
  "encoder_attention_heads": 20,
  "encoder_ffn_dim": 5120,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 32,
  "eos_token_id": 50257,
  "final_dropout": 0.0,
  "forced_decoder_ids": null,
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "mask_feature_length": 10,
  "mask_feature_min_masks": 0,
  "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_min_masks": 2,
  "mask_time_prob": 0.05,
  "max_source_positions": 1500,
  "max_target_positions": 448,
  "median_filter_width": 7,
  "model_type": "whisper",
  "mt_num_speakers": 1,
  "n_soft_prompts": 16,
  "non_target_amplifier_value": 1.0,
  "num_hidden_layers": 32,
  "num_mel_bins": 128,
  "pad_token_id": 50257,
  "remove_timestamps_from_ctc": false,
  "scale_embedding": false,
  "sub_sample": true,
  "target_amp_bias_only": false,
  "target_amp_init": "disparagement",
  "target_amp_is_diagonal": true,
  "target_amp_use_non_target": true,
  "target_amp_use_overlap": true,
  "target_amp_use_silence": true,
  "target_amp_use_target": true,
  "torch_dtype": "float32",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "use_target_amplifiers": true,
  "use_weighted_layer_sum": false,
  "vocab_size": 51866
}
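
The JSON above is the config.json of a fine-tuned variant of openai/whisper-large-v3-turbo (32 encoder layers, a 4-layer decoder, 128 mel bins, vocab size 51866) whose architecture, WhisperForConditionalGenerationWithCTC, adds an auxiliary CTC head (ctc_weight 0.3) and target-speaker "amplifier" options on top of the standard encoder-decoder. That class is not part of stock Transformers, so loading the full model would need the repo's own modeling code; the base WhisperConfig can still parse the file, since model_type is "whisper" and unknown keys are kept as extra attributes. A minimal sketch for inspecting the config, assuming the file is saved locally as config.json:

import json
from transformers import WhisperConfig

# Parse the raw file; model_type "whisper" means WhisperConfig accepts it.
with open("config.json") as f:
    cfg_dict = json.load(f)

# Keys WhisperConfig does not declare (ctc_weight, target_amp_*, ...)
# are stored as plain attributes on the config object, not rejected.
config = WhisperConfig.from_dict(cfg_dict)

print(config.d_model)         # 1280
print(config.decoder_layers)  # 4 -- the shallow "turbo" decoder
print(config.ctc_weight)      # 0.3 -- weight of the auxiliary CTC loss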