hyoungjoon committed
Commit 16bc953 · verified · 1 Parent(s): ff0a653

Upload folder using huggingface_hub
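The commit message points at huggingface_hub; a minimal, hypothetical sketch of how such a folder upload is typically produced (the folder path and repo_id below are illustrative placeholders, not taken from this commit):

```python
from huggingface_hub import HfApi

api = HfApi()
# Push every file in the local engine directory as a single commit.
# folder_path and repo_id are placeholders for this repo's actual values.
api.upload_folder(
    folder_path="./t5-base-trtllm",
    repo_id="user/t5-base-trtllm",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```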
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ decoder/t5-base_float32_tp1_rank0.engine filter=lfs diff=lfs merge=lfs -text
+ encoder/t5-base_float32_tp1_rank0.engine filter=lfs diff=lfs merge=lfs -text
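The two added rules route the TensorRT engine binaries through Git LFS, so git stores small pointer files instead of multi-hundred-megabyte blobs. The usual CLI route is `git lfs track <pattern>`; a rough Python equivalent of this exact change, assuming the file is simply appended in place:

```python
# Append LFS rules for the two engine files to .gitattributes,
# mirroring the diff above.
rules = [
    "decoder/t5-base_float32_tp1_rank0.engine filter=lfs diff=lfs merge=lfs -text",
    "encoder/t5-base_float32_tp1_rank0.engine filter=lfs diff=lfs merge=lfs -text",
]
with open(".gitattributes", "a", encoding="utf-8") as f:
    f.writelines(rule + "\n" for rule in rules)
```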
.gitattributes copy ADDED
@@ -0,0 +1,37 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ encoder/t5-base_float32_tp1_rank0.engine filter=lfs diff=lfs merge=lfs -text
+ decoder/t5-base_float32_tp1_rank0.engine filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "_name_or_path": "j5ng/et5-base",
+ "architectures": [
+ "T5ForConditionalGeneration"
+ ],
+ "d_ff": 3072,
+ "d_kv": 64,
+ "d_model": 768,
+ "decoder_start_token_id": 0,
+ "dense_act_fn": "gelu_new",
+ "dropout_rate": 0.1,
+ "eos_token_id": 1,
+ "feed_forward_proj": "gated-gelu",
+ "initializer_factor": 1.0,
+ "is_encoder_decoder": true,
+ "is_gated_act": true,
+ "layer_norm_epsilon": 1e-06,
+ "model_type": "t5",
+ "num_decoder_layers": 12,
+ "num_heads": 12,
+ "num_layers": 12,
+ "pad_token_id": 0,
+ "relative_attention_max_distance": 128,
+ "relative_attention_num_buckets": 32,
+ "tie_word_embeddings": false,
+ "tokenizer_class": "T5Tokenizer",
+ "torch_dtype": "float32",
+ "transformers_version": "4.28.0",
+ "use_cache": true,
+ "vocab_size": 45100
+ }
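This is the Hugging Face config for the underlying j5ng/et5-base checkpoint: a gated-GELU T5 variant with a 45,100-token vocabulary. A quick sanity check, sketched with the standard transformers API against a local clone of this repo:

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained(".")  # path to a local clone of this repo
assert cfg.model_type == "t5" and cfg.is_encoder_decoder
# Width is consistent with the attention geometry: 12 heads * 64 per head = 768.
assert cfg.num_heads * cfg.d_kv == cfg.d_model == 768
```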
decoder/config.json ADDED
@@ -0,0 +1,65 @@
+ {
+ "builder_config": {
+ "apply_query_key_layer_scaling": false,
+ "cross_attention": true,
+ "gather_context_logits": false,
+ "gather_generation_logits": false,
+ "gpus_per_node": 8,
+ "has_position_embedding": false,
+ "has_token_type_embedding": false,
+ "head_size": 64,
+ "hf_modules_to_trtllm_modules": null,
+ "hidden_act": "gelu_new",
+ "hidden_size": 768,
+ "int8": false,
+ "lora_target_modules": null,
+ "max_batch_size": 8,
+ "max_beam_width": 5,
+ "max_decoder_input_len": 1,
+ "max_encoder_input_len": 1024,
+ "max_output_len": 256,
+ "max_position_embeddings": 512,
+ "max_prompt_embedding_table_size": 0,
+ "name": "t5-base",
+ "num_heads": 12,
+ "num_layers": 12,
+ "parallel_build": false,
+ "pipeline_parallel": 1,
+ "precision": "float32",
+ "strongly_typed": false,
+ "tensor_parallel": 1,
+ "trtllm_modules_to_hf_modules": null,
+ "use_refit": false,
+ "vocab_size": 45100
+ },
+ "plugin_config": {
+ "attention_qk_half_accumulation": false,
+ "bert_attention_plugin": "float32",
+ "context_fmha": false,
+ "context_fmha_fp32_acc": false,
+ "dense_context_fmha": false,
+ "enable_xqa": false,
+ "gemm_plugin": "float32",
+ "gpt_attention_plugin": "float32",
+ "identity_plugin": null,
+ "layernorm_quantization_plugin": null,
+ "lookup_plugin": null,
+ "lora_plugin": null,
+ "moe_plugin": null,
+ "multi_block_mode": false,
+ "nccl_plugin": null,
+ "paged_kv_cache": false,
+ "pos_shift": false,
+ "quantize_per_token_plugin": false,
+ "quantize_tensor_plugin": false,
+ "remove_input_padding": true,
+ "rmsnorm_quantization_plugin": null,
+ "smooth_quant_gemm_plugin": null,
+ "tokens_per_block": 128,
+ "use_context_fmha_for_generation": false,
+ "use_custom_all_reduce": false,
+ "use_paged_context_fmha": false,
+ "weight_only_groupwise_quant_matmul_plugin": null,
+ "weight_only_quant_matmul_plugin": null
+ }
+ }
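The builder_config section records the limits the decoder engine was compiled with; TensorRT engines are static, so batch size, beam width, and sequence lengths beyond these values cannot be used at runtime. A small sketch reading them back with the standard library:

```python
import json

with open("decoder/config.json") as f:
    build = json.load(f)["builder_config"]

# Hard limits baked in at build time.
print(build["max_batch_size"], build["max_beam_width"])         # 8 5
print(build["max_encoder_input_len"], build["max_output_len"])  # 1024 256
# Geometry matches the HF config: 12 heads * 64 head_size = 768 hidden.
assert build["num_heads"] * build["head_size"] == build["hidden_size"]
```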
decoder/model.cache ADDED
Binary file (86.5 kB).
decoder/t5-base_float32_tp1_rank0.engine ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1669f9928afee144e0affacc725859e021bad453f024babd86db821bbf14a5b
+ size 845351052
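What the diff shows is not the engine itself but its Git LFS pointer: three `key value` lines giving the spec version, the SHA-256 of the real blob, and its size (~845 MB). A minimal pointer parser, assuming the file has not yet been smudged by `git lfs pull`:

```python
def read_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file: one 'key value' pair per line."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = read_lfs_pointer("decoder/t5-base_float32_tp1_rank0.engine")
print(ptr["oid"], int(ptr["size"]))  # sha256:d1669f99... 845351052
```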
encoder/config.json ADDED
@@ -0,0 +1,65 @@
+ {
+ "builder_config": {
+ "apply_query_key_layer_scaling": false,
+ "cross_attention": false,
+ "gather_context_logits": false,
+ "gather_generation_logits": false,
+ "gpus_per_node": 8,
+ "has_position_embedding": false,
+ "has_token_type_embedding": false,
+ "head_size": 64,
+ "hf_modules_to_trtllm_modules": null,
+ "hidden_act": "gelu_new",
+ "hidden_size": 768,
+ "int8": false,
+ "lora_target_modules": null,
+ "max_batch_size": 8,
+ "max_beam_width": 5,
+ "max_decoder_input_len": 1,
+ "max_encoder_input_len": 1024,
+ "max_output_len": 256,
+ "max_position_embeddings": 512,
+ "max_prompt_embedding_table_size": 0,
+ "name": "t5-base",
+ "num_heads": 12,
+ "num_layers": 12,
+ "parallel_build": false,
+ "pipeline_parallel": 1,
+ "precision": "float32",
+ "strongly_typed": false,
+ "tensor_parallel": 1,
+ "trtllm_modules_to_hf_modules": null,
+ "use_refit": false,
+ "vocab_size": 45100
+ },
+ "plugin_config": {
+ "attention_qk_half_accumulation": false,
+ "bert_attention_plugin": "float32",
+ "context_fmha": false,
+ "context_fmha_fp32_acc": false,
+ "dense_context_fmha": false,
+ "enable_xqa": false,
+ "gemm_plugin": "float32",
+ "gpt_attention_plugin": "float32",
+ "identity_plugin": null,
+ "layernorm_quantization_plugin": null,
+ "lookup_plugin": null,
+ "lora_plugin": null,
+ "moe_plugin": null,
+ "multi_block_mode": false,
+ "nccl_plugin": null,
+ "paged_kv_cache": false,
+ "pos_shift": false,
+ "quantize_per_token_plugin": false,
+ "quantize_tensor_plugin": false,
+ "remove_input_padding": true,
+ "rmsnorm_quantization_plugin": null,
+ "smooth_quant_gemm_plugin": null,
+ "tokens_per_block": 128,
+ "use_context_fmha_for_generation": false,
+ "use_custom_all_reduce": false,
+ "use_paged_context_fmha": false,
+ "weight_only_groupwise_quant_matmul_plugin": null,
+ "weight_only_quant_matmul_plugin": null
+ }
+ }
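The encoder build is configured identically to the decoder except for cross-attention, which only the decoder needs (it attends over the encoder's outputs). A sketch that confirms the single differing key:

```python
import json

enc = json.load(open("encoder/config.json"))["builder_config"]
dec = json.load(open("decoder/config.json"))["builder_config"]

# Every builder setting matches except cross_attention.
diff = {k: (enc[k], dec[k]) for k in enc if enc[k] != dec[k]}
print(diff)  # {'cross_attention': (False, True)}
```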
encoder/model.cache ADDED
Binary file (84.8 kB).
encoder/t5-base_float32_tp1_rank0.engine ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c39b63e64e9b3e0551f63266dbfd5ba4e4065d075b5d10018de96d675a5b02b9
+ size 592835748
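After `git lfs pull` replaces this pointer with the real ~593 MB engine, the download can be verified against the recorded oid. A sketch, assuming the file has been fully fetched:

```python
import hashlib

h = hashlib.sha256()
with open("encoder/t5-base_float32_tp1_rank0.engine", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == (
    "c39b63e64e9b3e0551f63266dbfd5ba4e4065d075b5d10018de96d675a5b02b9"
)
```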
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "_from_model_config": true,
+ "decoder_start_token_id": 0,
+ "eos_token_id": 1,
+ "pad_token_id": 0,
+ "transformers_version": "4.28.0"
+ }
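These defaults mirror the model config: token 0 doubles as pad and decoder start, token 1 is EOS. With transformers 4.26+ they load directly; a minimal sketch:

```python
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained(".")  # local clone of this repo
print(gen.decoder_start_token_id, gen.eos_token_id, gen.pad_token_id)  # 0 1 0
```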
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "eos_token": "</s>",
+ "pad_token": "<pad>",
+ "unk_token": "<unk>"
+ }
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ebba0a7b0fadda80677fe980264027fd8c51f822c486201b39dfdfe8804a570
+ size 1196021
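spiece.model is the SentencePiece vocabulary that backs T5Tokenizer (another LFS pointer here; ~1.2 MB once fetched). Loading it directly with the sentencepiece package, as a sketch:

```python
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="spiece.model")
# Compare against the model's vocab_size of 45100 from config.json.
print(sp.vocab_size())
```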
tokenizer_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+ "additional_special_tokens": null,
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "</s>",
+ "extra_ids": 0,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<pad>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "T5Tokenizer",
+ "unk_token": "<unk>"
+ }
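With extra_ids set to 0, no `<extra_id_N>` sentinel tokens are appended to the vocabulary, and model_max_length is the library's "effectively unlimited" sentinel value. A round-trip sketch through the shipped tokenizer (the input string is illustrative only):

```python
from transformers import T5Tokenizer

tok = T5Tokenizer.from_pretrained(".")  # local clone of this repo
ids = tok.encode("example input text")
print(ids[-1] == tok.eos_token_id)  # True: encode() appends </s> (id 1)
print(tok.decode(ids, skip_special_tokens=True))
```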