viethq5 committed · verified
Commit 450693a · Parent(s): 92b2dda

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ 4090-whisper-large-v3-trt-bs4/decoder.engine filter=lfs diff=lfs merge=lfs -text
+ 4090-whisper-large-v3-trt-bs4/encoder.engine filter=lfs diff=lfs merge=lfs -text
+ 4090-whisper-large-v3-trt-bs8/decoder.engine filter=lfs diff=lfs merge=lfs -text
+ 4090-whisper-large-v3-trt-bs8/encoder.engine filter=lfs diff=lfs merge=lfs -text
4090-whisper-large-v3-trt-bs4/decoder.engine ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00eaed8abe5a70fadc4046cdbc01d8b0964f96d723a1bcb4687ca46d1cb77e31
+ size 1954679476
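
Since the engines are stored via Git LFS, the diff above shows only the pointer file: the SHA-256 and byte size of the actual binary. A minimal sketch (path and expected values copied from this commit, everything else standard library) for verifying that a downloaded engine matches its pointer:

```python
import hashlib

# Expected values copied from the LFS pointer above.
EXPECTED_OID = "00eaed8abe5a70fadc4046cdbc01d8b0964f96d723a1bcb4687ca46d1cb77e31"
EXPECTED_SIZE = 1954679476

path = "4090-whisper-large-v3-trt-bs4/decoder.engine"
h = hashlib.sha256()
size = 0
with open(path, "rb") as f:
    # Stream in 1 MiB chunks: the engine is ~1.95 GB, too large to read at once.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: got {size}"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
```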
4090-whisper-large-v3-trt-bs4/decoder_config.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "builder_config": {
+     "apply_query_key_layer_scaling": false,
+     "cross_attention": true,
+     "has_position_embedding": true,
+     "has_token_type_embedding": false,
+     "hidden_act": "gelu",
+     "hidden_size": 1280,
+     "int8": false,
+     "max_batch_size": 4,
+     "max_input_len": 14,
+     "max_output_len": 448,
+     "max_position_embeddings": 448,
+     "name": "decoder",
+     "num_heads": 20,
+     "num_layers": 32,
+     "precision": "float16",
+     "tensor_parallel": 1,
+     "use_refit": false,
+     "vocab_size": 51866
+   },
+   "plugin_config": {
+     "attention_qk_half_accumulation": false,
+     "bert_attention_plugin": null,
+     "context_fmha_type": 0,
+     "enable_xqa": false,
+     "gemm_plugin": "float16",
+     "gpt_attention_plugin": "float16",
+     "identity_plugin": null,
+     "layernorm_plugin": null,
+     "layernorm_quantization_plugin": null,
+     "lookup_plugin": null,
+     "lora_plugin": null,
+     "multi_block_mode": false,
+     "nccl_plugin": null,
+     "paged_kv_cache": false,
+     "quantize_per_token_plugin": false,
+     "quantize_tensor_plugin": false,
+     "remove_input_padding": false,
+     "rmsnorm_plugin": null,
+     "rmsnorm_quantization_plugin": null,
+     "selective_scan_plugin": false,
+     "smooth_quant_gemm_plugin": null,
+     "tokens_per_block": 0,
+     "use_context_fmha_for_generation": false,
+     "use_custom_all_reduce": false,
+     "use_paged_context_fmha": false,
+     "weight_only_groupwise_quant_matmul_plugin": null,
+     "weight_only_quant_matmul_plugin": null
+   }
+ }
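
The builder_config above is consistent with the Whisper large-v3 decoder: 32 layers, hidden size 1280, 20 attention heads, a 51866-token vocabulary, and 448 max positions. A short sketch, assuming one consumes the config with plain json, that loads it and checks the derived per-head dimension:

```python
import json

with open("4090-whisper-large-v3-trt-bs4/decoder_config.json") as f:
    cfg = json.load(f)["builder_config"]

# 1280 hidden / 20 heads -> 64-dimensional attention heads.
assert cfg["hidden_size"] // cfg["num_heads"] == 64

# This engine accepts at most 4 sequences of up to 448 output tokens, in fp16.
print(cfg["max_batch_size"], cfg["max_output_len"], cfg["precision"])
```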
4090-whisper-large-v3-trt-bs4/encoder.engine ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29e48a254ec42ce4d33d6b091608b3970b29c4058bf68a21838f8c439952cdb4
+ size 1278471588
4090-whisper-large-v3-trt-bs4/encoder_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "builder_config": {
+     "hidden_size": 1280,
+     "int8": false,
+     "max_batch_size": 4,
+     "n_mels": 128,
+     "name": "encoder",
+     "num_heads": 20,
+     "num_languages": 100,
+     "num_layers": 32,
+     "precision": "float16",
+     "tensor_parallel": 1,
+     "use_refit": false
+   },
+   "plugin_config": {
+     "attention_qk_half_accumulation": false,
+     "bert_attention_plugin": "float16",
+     "context_fmha_type": 0,
+     "enable_xqa": false,
+     "gemm_plugin": "float16",
+     "gpt_attention_plugin": null,
+     "identity_plugin": null,
+     "layernorm_plugin": null,
+     "layernorm_quantization_plugin": null,
+     "lookup_plugin": null,
+     "lora_plugin": null,
+     "multi_block_mode": false,
+     "nccl_plugin": null,
+     "paged_kv_cache": false,
+     "quantize_per_token_plugin": false,
+     "quantize_tensor_plugin": false,
+     "remove_input_padding": false,
+     "rmsnorm_plugin": null,
+     "rmsnorm_quantization_plugin": null,
+     "selective_scan_plugin": false,
+     "smooth_quant_gemm_plugin": null,
+     "tokens_per_block": 0,
+     "use_context_fmha_for_generation": false,
+     "use_custom_all_reduce": false,
+     "use_paged_context_fmha": false,
+     "weight_only_groupwise_quant_matmul_plugin": null,
+     "weight_only_quant_matmul_plugin": null
+   }
+ }
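
The .engine files themselves are serialized TensorRT engines. In principle they can be deserialized with the TensorRT Python runtime, with the caveat that a serialized engine only loads under a compatible TensorRT version and on the architecture it was built for (here an RTX 4090); in practice these are meant to be driven by a TensorRT-LLM Whisper runner. A minimal deserialization sketch under those assumptions:

```python
import tensorrt as trt

logger = trt.Logger(trt.Logger.WARNING)
runtime = trt.Runtime(logger)

with open("4090-whisper-large-v3-trt-bs4/encoder.engine", "rb") as f:
    engine = runtime.deserialize_cuda_engine(f.read())

# deserialize_cuda_engine returns None on a version/architecture mismatch.
assert engine is not None, "engine failed to deserialize (TRT/GPU mismatch?)"
```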
4090-whisper-large-v3-trt-bs4/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
4090-whisper-large-v3-trt-bs4/trt_build_args.json ADDED
@@ -0,0 +1 @@
+ {"max_batch_size": 4, "max_beam_width": 1, "max_input_len": 14, "max_output_len": 448, "world_size": 1, "dtype": "float16", "quantize_dir": "quantize/1-gpu", "use_gpt_attention_plugin": "float16", "use_bert_attention_plugin": "float16", "use_context_fmha_enc": false, "use_context_fmha_dec": false, "use_gemm_plugin": "float16", "use_layernorm_plugin": false, "remove_input_padding": false, "use_weight_only_enc": false, "use_weight_only_dec": false, "weight_only_precision": "int8", "int8_kv_cache": false, "debug_mode": false, "cuda_compute_capability": [8, 9], "cuda_device_name": "NVIDIA GeForce RTX 4090", "model_path": "models/large-v3/pt_ckpt.pt", "output_dir": "models/266044c7309ecb70c167ed90aee5bbf1"}
4090-whisper-large-v3-trt-bs8/decoder.engine ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c275ffc8a50e85830d771da5b66d5eb6e299bad3a439a3b9338b7f4f4bb01c4
+ size 1954647324
4090-whisper-large-v3-trt-bs8/decoder_config.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "builder_config": {
+     "apply_query_key_layer_scaling": false,
+     "cross_attention": true,
+     "has_position_embedding": true,
+     "has_token_type_embedding": false,
+     "hidden_act": "gelu",
+     "hidden_size": 1280,
+     "int8": false,
+     "max_batch_size": 8,
+     "max_input_len": 14,
+     "max_output_len": 448,
+     "max_position_embeddings": 448,
+     "name": "decoder",
+     "num_heads": 20,
+     "num_layers": 32,
+     "precision": "float16",
+     "tensor_parallel": 1,
+     "use_refit": false,
+     "vocab_size": 51866
+   },
+   "plugin_config": {
+     "attention_qk_half_accumulation": false,
+     "bert_attention_plugin": null,
+     "context_fmha_type": 0,
+     "enable_xqa": false,
+     "gemm_plugin": "float16",
+     "gpt_attention_plugin": "float16",
+     "identity_plugin": null,
+     "layernorm_plugin": null,
+     "layernorm_quantization_plugin": null,
+     "lookup_plugin": null,
+     "lora_plugin": null,
+     "multi_block_mode": false,
+     "nccl_plugin": null,
+     "paged_kv_cache": false,
+     "quantize_per_token_plugin": false,
+     "quantize_tensor_plugin": false,
+     "remove_input_padding": false,
+     "rmsnorm_plugin": null,
+     "rmsnorm_quantization_plugin": null,
+     "selective_scan_plugin": false,
+     "smooth_quant_gemm_plugin": null,
+     "tokens_per_block": 0,
+     "use_context_fmha_for_generation": false,
+     "use_custom_all_reduce": false,
+     "use_paged_context_fmha": false,
+     "weight_only_groupwise_quant_matmul_plugin": null,
+     "weight_only_quant_matmul_plugin": null
+   }
+ }
4090-whisper-large-v3-trt-bs8/encoder.engine ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be6afdcdbab6b7380ae0bfb0da389b2ff86c4851083a425c08993532b38bb455
+ size 1278764844
4090-whisper-large-v3-trt-bs8/encoder_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "builder_config": {
+     "hidden_size": 1280,
+     "int8": false,
+     "max_batch_size": 8,
+     "n_mels": 128,
+     "name": "encoder",
+     "num_heads": 20,
+     "num_languages": 100,
+     "num_layers": 32,
+     "precision": "float16",
+     "tensor_parallel": 1,
+     "use_refit": false
+   },
+   "plugin_config": {
+     "attention_qk_half_accumulation": false,
+     "bert_attention_plugin": "float16",
+     "context_fmha_type": 0,
+     "enable_xqa": false,
+     "gemm_plugin": "float16",
+     "gpt_attention_plugin": null,
+     "identity_plugin": null,
+     "layernorm_plugin": null,
+     "layernorm_quantization_plugin": null,
+     "lookup_plugin": null,
+     "lora_plugin": null,
+     "multi_block_mode": false,
+     "nccl_plugin": null,
+     "paged_kv_cache": false,
+     "quantize_per_token_plugin": false,
+     "quantize_tensor_plugin": false,
+     "remove_input_padding": false,
+     "rmsnorm_plugin": null,
+     "rmsnorm_quantization_plugin": null,
+     "selective_scan_plugin": false,
+     "smooth_quant_gemm_plugin": null,
+     "tokens_per_block": 0,
+     "use_context_fmha_for_generation": false,
+     "use_custom_all_reduce": false,
+     "use_paged_context_fmha": false,
+     "weight_only_groupwise_quant_matmul_plugin": null,
+     "weight_only_quant_matmul_plugin": null
+   }
+ }
4090-whisper-large-v3-trt-bs8/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
4090-whisper-large-v3-trt-bs8/trt_build_args.json ADDED
@@ -0,0 +1 @@
+ {"max_batch_size": 8, "max_beam_width": 1, "max_input_len": 14, "max_output_len": 448, "world_size": 1, "dtype": "float16", "quantize_dir": "quantize/1-gpu", "use_gpt_attention_plugin": "float16", "use_bert_attention_plugin": "float16", "use_context_fmha_enc": false, "use_context_fmha_dec": false, "use_gemm_plugin": "float16", "use_layernorm_plugin": false, "remove_input_padding": false, "use_weight_only_enc": false, "use_weight_only_dec": false, "weight_only_precision": "int8", "int8_kv_cache": false, "debug_mode": false, "cuda_compute_capability": [8, 9], "cuda_device_name": "NVIDIA GeForce RTX 4090", "model_path": "models/large-v3/pt_ckpt.pt", "output_dir": "models/e19300457c5c10b1afb8832c3b56157f"}