{
  "base_model": "Qwen/Qwen1.5-1.8B-Chat",
  "lora_config": {
    "r": 2,
    "lora_alpha": 4,
    "lora_dropout": 0.1,
    "target_modules": [
      "q_proj",
      "v_proj",
      "gate_proj",
      "up_proj"
    ],
    "bias": "none",
    "task_type": "CAUSAL_LM"
  },
  "generation_config": {
    "max_new_tokens": 64,
    "do_sample": true,
    "temperature": 0.7,
    "top_p": 0.9,
    "top_k": 50,
    "repetition_penalty": 1.1
  }
}