{   "_name_or_path": "meta-llama/Llama-3.2-1B",   "architectures": [     "LlamaForCausalLM"   ],   "attention_bias": false,   "attention_dropout": 0.0,   "bos_token_id": 128000,   "eos_token_id": 128001,   "head_dim": 64,   "hidden_act": "silu",   "hidden_size": 2048,   "initializer_range": 0.02,   "intermediate_size": 8192,   "max_position_embeddings": 131072,   "mlp_bias": false,   "model_type": "llama",   "num_attention_heads": 32,   "num_hidden_layers": 16,   "num_key_value_heads": 8,   "pretraining_tp": 1,   "quantization_config": {     "_load_in_4bit": false,     "_load_in_8bit": true,     "bnb_4bit_compute_dtype": "float32",     "bnb_4bit_quant_storage": "uint8",     "bnb_4bit_quant_type": "fp4",     "bnb_4bit_use_double_quant": false,     "llm_int8_enable_fp32_cpu_offload": false,     "llm_int8_has_fp16_weight": false,     "llm_int8_skip_modules": null,     "llm_int8_threshold": 6.0,     "load_in_4bit": false,     "load_in_8bit": true,     "quant_method": "bitsandbytes"   },   "rms_norm_eps": 1e-05,   "rope_scaling": {     "factor": 32.0,     "high_freq_factor": 4.0,     "low_freq_factor": 1.0,     "original_max_position_embeddings": 8192,     "rope_type": "llama3"   },   "rope_theta": 500000.0,   "tie_word_embeddings": true,   "torch_dtype": "float16",   "transformers_version": "4.48.2",   "use_cache": true,   "vocab_size": 128256 }