{
  "_name_or_path": "vinai/PhoGPT-4B-Chat",
  "architectures": [
    "MPTForCausalLM"
  ],
  "attn_config": {
    "alibi": true,
    "alibi_bias_max": 8,
    "attn_impl": "torch",
    "attn_pdrop": 0.0,
    "attn_type": "multihead_attention",
    "attn_uses_sequence_id": false,
    "clip_qkv": null,
    "prefix_lm": false,
    "qk_gn": false,
    "qk_ln": false,
    "rope": false,
    "rope_dail_config": {
      "pos_idx_in_fp32": true,
      "type": "original",
      "xpos_scale_base": 512
    },
    "rope_hf_config": {
      "factor": 1.0,
      "type": "no_scaling"
    },
    "rope_impl": "dail",
    "rope_theta": 10000,
    "sliding_window_size": -1,
    "softmax_scale": null
  },
  "auto_map": {
    "AutoConfig": "vinai/PhoGPT-4B-Chat--configuration_mpt.MPTConfig",
    "AutoModelForCausalLM": "vinai/PhoGPT-4B-Chat--modeling_mpt.MPTForCausalLM"
  },
  "d_model": 3072,
  "emb_pdrop": 0.0,
  "embedding_fraction": 1.0,
  "expansion_ratio": 4,
  "fc_type": "torch",
  "ffn_config": {
    "fc_type": "torch",
    "ffn_type": "mptmlp"
  },
  "init_config": {
    "emb_init_std": null,
    "emb_init_uniform_lim": null,
    "fan_mode": "fan_in",
    "init_div_is_residual": true,
    "init_gain": 0.0,
    "init_nonlinearity": "relu",
    "init_std": null,
    "name": "kaiming_normal_",
    "verbose": 0
  },
  "init_device": "cpu",
  "learned_pos_emb": false,
  "logit_scale": null,
  "max_seq_len": 8192,
  "model_type": "mpt",
  "n_heads": 24,
  "n_layers": 32,
  "no_bias": false,
  "norm_type": "low_precision_layernorm",
  "pretraining_tp": 1,
  "quantization_config": {
    "_load_in_4bit": true,
    "_load_in_8bit": false,
    "bnb_4bit_compute_dtype": "float16",
    "bnb_4bit_quant_storage": "uint8",
    "bnb_4bit_quant_type": "nf4",
    "bnb_4bit_use_double_quant": true,
    "llm_int8_enable_fp32_cpu_offload": false,
    "llm_int8_has_fp16_weight": false,
    "llm_int8_skip_modules": null,
    "llm_int8_threshold": 6.0,
    "load_in_4bit": true,
    "load_in_8bit": false,
    "quant_method": "bitsandbytes"
  },
  "resid_pdrop": 0.0,
  "torch_dtype": "float32",
  "transformers_version": "4.43.3",
  "use_cache": false,
  "use_pad_tok_in_ffn": true,
  "verbose": 0,
  "vocab_size": 20480
}
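
The `quantization_config` block above records bitsandbytes nf4 4-bit settings, and `auto_map` points at custom MPT code on the Hub, so loading requires `trust_remote_code=True`. A minimal loading sketch, assuming the standard transformers/bitsandbytes APIs (this snippet is illustrative, not taken from the model card; it needs `transformers`, `bitsandbytes`, `accelerate`, and a CUDA device):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Mirror the quantization_config values recorded in the JSON:
# 4-bit nf4 weights, double quantization, float16 compute dtype.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)

model = AutoModelForCausalLM.from_pretrained(
    "vinai/PhoGPT-4B-Chat",
    quantization_config=bnb_config,
    trust_remote_code=True,  # auto_map resolves to custom MPTConfig/MPTForCausalLM code
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(
    "vinai/PhoGPT-4B-Chat",
    trust_remote_code=True,
)
```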