test_test / decoder / config.json
{
"builder_config": {
"apply_query_key_layer_scaling": false,
"cross_attention": true,
"gather_context_logits": false,
"gather_generation_logits": false,
"gpus_per_node": 8,
"has_position_embedding": false,
"has_token_type_embedding": false,
"head_size": 64,
"hf_modules_to_trtllm_modules": null,
"hidden_act": "gelu_new",
"hidden_size": 768,
"int8": false,
"lora_target_modules": null,
"max_batch_size": 8,
"max_beam_width": 5,
"max_decoder_input_len": 1,
"max_encoder_input_len": 1024,
"max_output_len": 256,
"max_position_embeddings": 512,
"max_prompt_embedding_table_size": 0,
"name": "t5-base",
"num_heads": 12,
"num_layers": 12,
"parallel_build": false,
"pipeline_parallel": 1,
"precision": "float32",
"strongly_typed": false,
"tensor_parallel": 1,
"trtllm_modules_to_hf_modules": null,
"use_refit": false,
"vocab_size": 45100
},
"plugin_config": {
"attention_qk_half_accumulation": false,
"bert_attention_plugin": "float32",
"context_fmha": false,
"context_fmha_fp32_acc": false,
"dense_context_fmha": false,
"enable_xqa": false,
"gemm_plugin": "float32",
"gpt_attention_plugin": "float32",
"identity_plugin": null,
"layernorm_quantization_plugin": null,
"lookup_plugin": null,
"lora_plugin": null,
"moe_plugin": null,
"multi_block_mode": false,
"nccl_plugin": null,
"paged_kv_cache": false,
"pos_shift": false,
"quantize_per_token_plugin": false,
"quantize_tensor_plugin": false,
"remove_input_padding": true,
"rmsnorm_quantization_plugin": null,
"smooth_quant_gemm_plugin": null,
"tokens_per_block": 128,
"use_context_fmha_for_generation": false,
"use_custom_all_reduce": false,
"use_paged_context_fmha": false,
"weight_only_groupwise_quant_matmul_plugin": null,
"weight_only_quant_matmul_plugin": null
}
}
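
This appears to be the config.json that TensorRT-LLM emits next to a compiled decoder engine: "cross_attention": true plus a separate "max_encoder_input_len" mark it as the decoder half of an encoder-decoder (T5-style) model. A minimal sketch of reading it with Python's standard library follows; the relative path is an assumption based on the folder name above, not something the repo guarantees.

import json

# Load the decoder build configuration (path is an assumption; adjust to
# wherever this repo is checked out).
with open("decoder/config.json") as f:
    cfg = json.load(f)

builder = cfg["builder_config"]
plugins = cfg["plugin_config"]

# Sanity-check one derived value: head_size should equal
# hidden_size / num_heads (768 / 12 = 64 in this file).
assert builder["head_size"] == builder["hidden_size"] // builder["num_heads"]

# Fields that identify the engine and its limits.
print(builder["name"])                   # t5-base
print(builder["precision"])              # float32
print(builder["max_batch_size"])         # 8
print(builder["max_encoder_input_len"])  # 1024
print(plugins["gemm_plugin"])            # float32

Note that the runtime limits baked in here ("max_batch_size", "max_beam_width", "max_output_len") are fixed at build time; serving requests beyond them requires rebuilding the engine.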