{
    "version": "0.17.0.post1",
    "pretrained_config": {
        "mlp_bias": false,
        "attn_bias": false,
        "rotary_base": 10000.0,
        "rotary_scaling": null,
        "residual_mlp": false,
        "disable_weight_only_quant_plugin": false,
        "moe": {
            "num_experts": 0,
            "shared_expert_intermediate_size": 0,
            "top_k": 0,
            "normalization_mode": null,
            "sparse_mixer_epsilon": 0.01,
            "tp_mode": 0,
            "device_limited_n_group": 0,
            "device_limited_topk_group": 0,
            "device_limited_routed_scaling_factor": 1.0
        },
        "remove_duplicated_kv_heads": false,
        "fc_after_embed": false,
        "use_input_layernorm_in_first_layer": true,
        "use_last_layernorm": true,
        "layer_idx_offset": 0,
        "embedding_multiplier": 1.0,
        "attention_multiplier": 1.0,
        "residual_multiplier": 1.0,
        "output_multiplier_scale": 1.0,
        "architecture": "LlamaForCausalLM",
        "dtype": "bfloat16",
        "vocab_size": 32000,
        "hidden_size": 2048,
        "num_hidden_layers": 22,
        "num_attention_heads": 32,
        "hidden_act": "silu",
        "logits_dtype": "float16",
        "norm_epsilon": 1e-05,
        "runtime_defaults": null,
        "position_embedding_type": "rope_gpt_neox",
        "num_key_value_heads": 4,
        "intermediate_size": 5632,
        "max_position_embeddings": 2048,
        "mapping": {
            "world_size": 1,
            "gpus_per_node": 8,
            "cp_size": 1,
            "tp_size": 1,
            "pp_size": 1,
            "moe_tp_size": 1,
            "moe_ep_size": 1,
            "auto_parallel": false
        },
        "quantization": {
            "quant_algo": "FP8",
            "kv_cache_quant_algo": "FP8",
            "group_size": 128,
            "smoothquant_val": 0.5,
            "clamp_val": null,
            "use_meta_recipe": false,
            "has_zero_point": false,
            "pre_quant_scale": false,
            "exclude_modules": [
                "transformer.layers.14.post_layernorm",
                "transformer.layers.4.post_layernorm",
                "transformer.layers.2.input_layernorm",
                "transformer.vocab_embedding",
                "transformer.layers.7.input_layernorm",
                "transformer.layers.18.input_layernorm",
                "lm_head",
                "transformer.layers.20.input_layernorm",
                "transformer.layers.10.post_layernorm",
                "transformer.layers.16.input_layernorm",
                "transformer.layers.8.input_layernorm",
                "transformer.layers.1.input_layernorm",
                "transformer.layers.20.post_layernorm",
                "transformer.layers.14.input_layernorm",
                "transformer.layers.3.post_layernorm",
                "transformer.layers.17.input_layernorm",
                "transformer.layers.17.post_layernorm",
                "transformer.layers.13.input_layernorm",
                "transformer.layers.21.input_layernorm",
                "transformer.layers.5.input_layernorm",
                "transformer.layers.6.post_layernorm",
                "transformer.layers.19.input_layernorm",
                "transformer.layers.12.post_layernorm",
                "transformer.layers.21.post_layernorm",
                "transformer.layers.11.input_layernorm",
                "transformer.layers.0.post_layernorm",
                "transformer.layers.16.post_layernorm",
                "transformer.layers.4.input_layernorm",
                "transformer.layers.11.post_layernorm",
                "transformer.layers.8.post_layernorm",
                "transformer.layers.6.input_layernorm",
                "transformer.layers.3.input_layernorm",
                "transformer.layers.10.input_layernorm",
                "transformer.layers.1.post_layernorm",
                "transformer.layers.15.input_layernorm",
                "transformer.layers.0.input_layernorm",
                "transformer.layers.9.input_layernorm",
                "transformer.layers.15.post_layernorm",
                "transformer.layers.18.post_layernorm",
                "transformer.layers.2.post_layernorm",
                "transformer.layers.13.post_layernorm",
                "transformer.layers.7.post_layernorm",
                "transformer.layers.12.input_layernorm",
                "transformer.ln_f",
                "transformer.layers.9.post_layernorm",
                "transformer.layers.5.post_layernorm",
                "transformer.layers.19.post_layernorm"
            ]
        },
        "use_parallel_embedding": true,
        "embedding_sharding_dim": 0,
        "head_size": 64,
        "qk_layernorm": false,
        "rotary_embedding_dim": 64,
        "producer": {
            "name": "modelopt",
            "version": "0.23.2"
        },
        "share_embedding_table": false,
        "bias": false,
        "rotary_pct": 1.0,
        "rank": 0,
        "decoder": "llama",
        "rmsnorm": true,
        "lm_head_bias": false,
        "tie_word_embeddings": false,
        "model_type": "llama"
    },
    "build_config": {
        "max_input_len": 4096,
        "max_seq_len": 4096,
        "opt_batch_size": 8,
        "max_batch_size": 1,
        "max_beam_width": 1,
        "max_num_tokens": 4096,
        "opt_num_tokens": null,
        "max_prompt_embedding_table_size": 0,
        "kv_cache_type": "PAGED",
        "gather_context_logits": false,
        "gather_generation_logits": false,
        "strongly_typed": true,
        "force_num_profiles": null,
        "profiling_verbosity": "layer_names_only",
        "enable_debug_output": false,
        "max_draft_len": 0,
        "speculative_decoding_mode": 1,
        "use_refit": false,
        "input_timing_cache": null,
        "output_timing_cache": "model.cache",
        "lora_config": {
            "lora_dir": [],
            "lora_ckpt_source": "hf",
            "max_lora_rank": 64,
            "lora_target_modules": [],
            "trtllm_modules_to_hf_modules": {}
        },
        "auto_parallel_config": {
            "world_size": 1,
            "gpus_per_node": 8,
            "cluster_key": null,
            "cluster_info": null,
            "sharding_cost_model": "alpha_beta",
            "comm_cost_model": "alpha_beta",
            "enable_pipeline_parallelism": false,
            "enable_shard_unbalanced_shape": false,
            "enable_shard_dynamic_shape": false,
            "enable_reduce_scatter": true,
            "builder_flags": null,
            "debug_mode": false,
            "infer_shape": true,
            "validation_mode": false,
            "same_buffer_io": {},
            "same_spec_io": {},
            "sharded_io_allowlist": [],
            "fill_weights": false,
            "parallel_config_cache": null,
            "profile_cache": null,
            "dump_path": null,
            "debug_outputs": []
        },
        "weight_sparsity": false,
        "weight_streaming": false,
        "plugin_config": {
            "dtype": "bfloat16",
            "bert_attention_plugin": "auto",
            "gpt_attention_plugin": "auto",
            "gemm_plugin": null,
            "explicitly_disable_gemm_plugin": false,
            "gemm_swiglu_plugin": null,
            "fp8_rowwise_gemm_plugin": null,
            "qserve_gemm_plugin": null,
            "identity_plugin": null,
            "nccl_plugin": null,
            "lora_plugin": null,
            "weight_only_groupwise_quant_matmul_plugin": null,
            "weight_only_quant_matmul_plugin": null,
            "smooth_quant_plugins": true,
            "smooth_quant_gemm_plugin": null,
            "layernorm_quantization_plugin": null,
            "rmsnorm_quantization_plugin": null,
            "quantize_per_token_plugin": false,
            "quantize_tensor_plugin": false,
            "moe_plugin": "auto",
            "mamba_conv1d_plugin": "auto",
            "low_latency_gemm_plugin": null,
            "low_latency_gemm_swiglu_plugin": null,
            "context_fmha": true,
            "bert_context_fmha_fp32_acc": false,
            "paged_kv_cache": true,
            "remove_input_padding": true,
            "reduce_fusion": false,
            "user_buffer": false,
            "tokens_per_block": 64,
            "use_paged_context_fmha": true,
            "use_fp8_context_fmha": true,
            "multiple_profiles": true,
            "paged_state": false,
            "streamingllm": false,
            "manage_weights": false,
            "use_fused_mlp": true,
            "pp_reduce_scatter": false
        },
        "use_strip_plan": false,
        "max_encoder_input_len": 1024,
        "monitor_memory": false,
        "use_mrope": false
    }
}
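
For reference, a minimal sketch (Python standard library only; the filename "config.json" is an assumption, not part of the config above) that loads this TensorRT-LLM engine config and prints its key model and build parameters:

# Minimal sketch: load and sanity-check the engine config shown above.
# Assumes it has been saved as "config.json" (hypothetical path).
import json

with open("config.json") as f:
    cfg = json.load(f)

pc = cfg["pretrained_config"]
bc = cfg["build_config"]

# head_size should equal hidden_size / num_attention_heads (2048 / 32 = 64).
assert pc["head_size"] == pc["hidden_size"] // pc["num_attention_heads"]

print(f'architecture : {pc["architecture"]} ({pc["dtype"]})')
print(f'quantization : {pc["quantization"]["quant_algo"]}, '
      f'kv cache {pc["quantization"]["kv_cache_quant_algo"]}')
print(f'layers/heads : {pc["num_hidden_layers"]} layers, '
      f'{pc["num_attention_heads"]} heads ({pc["num_key_value_heads"]} KV)')
print(f'build limits : max_batch_size={bc["max_batch_size"]}, '
      f'max_seq_len={bc["max_seq_len"]}, max_num_tokens={bc["max_num_tokens"]}')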