{ "auto_mapping": null, "base_model_name_or_path": "meta-llama/Llama-3.2-1B-Instruct", "encoder_hidden_size": 512, "inference_mode": true, "num_attention_heads": 8, "num_layers": 16, "num_transformer_submodules": 1, "num_virtual_tokens": 20, "peft_type": "PREFIX_TUNING", "prefix_projection": false, "revision": null, "task_type": "CAUSAL_LM", "token_dim": 512 }