{ "auto_mapping": { "base_model_class": "TensorParallelPreTrainedModel", "parent_library": "tensor_parallel.pretrained_model" }, "base_model_name_or_path": "meta-llama/Llama-2-7b-hf", "bias": "none", "fan_in_fan_out": false, "inference_mode": true, "init_lora_weights": true, "layers_pattern": null, "layers_to_transform": null, "lora_alpha": 32, "lora_dropout": 0.05, "modules_to_save": null, "peft_type": "LORA", "r": 16, "revision": null, "target_modules": [ "wrapped_model.module_shards.0.model.layers.31.self_attn.tp_wrapped_module.q_proj", "wrapped_model.module_shards.0.model.layers.31.self_attn.tp_wrapped_module.k_proj", "wrapped_model.module_shards.0.model.layers.31.self_attn.tp_wrapped_module.v_proj", "wrapped_model.module_shards.0.model.layers.31.self_attn.tp_wrapped_module.o_proj", "wrapped_model.module_shards.0.model.layers.31.mlp.tp_wrapped_module.gate_proj", "wrapped_model.module_shards.0.model.layers.31.mlp.tp_wrapped_module.up_proj", "wrapped_model.module_shards.0.model.layers.31.mlp.tp_wrapped_module.down_proj", "wrapped_model.module_shards.1.model.layers.31.self_attn.tp_wrapped_module.q_proj", "wrapped_model.module_shards.1.model.layers.31.self_attn.tp_wrapped_module.k_proj", "wrapped_model.module_shards.1.model.layers.31.self_attn.tp_wrapped_module.v_proj", "wrapped_model.module_shards.1.model.layers.31.self_attn.tp_wrapped_module.o_proj", "wrapped_model.module_shards.1.model.layers.31.mlp.tp_wrapped_module.gate_proj", "wrapped_model.module_shards.1.model.layers.31.mlp.tp_wrapped_module.up_proj", "wrapped_model.module_shards.1.model.layers.31.mlp.tp_wrapped_module.down_proj" ], "task_type": null }