# ===============================
# AmorCoder AI - Advanced LoRA Training
# ===============================
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments, DataCollatorForLanguageModeling
from datasets import load_dataset
from peft import LoraConfig, get_peft_model, TaskType

# -------------------------------
# 1️⃣ Base model
# -------------------------------
MODEL_NAME = "codellama/CodeLlama-7b-hf"
print("Loading base model...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
tokenizer.pad_token = tokenizer.eos_token  # CodeLlama ships without a pad token; reuse EOS for padding
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="auto",
    torch_dtype=torch.float16
)
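# Note: device_map="auto" relies on the accelerate package being installed.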

# -------------------------------
# 2️⃣ LoRA configuration
# -------------------------------
print("Applying LoRA...")
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=16,
    lora_alpha=32,
    target_modules=["q_proj", "v_proj"],  # attention projections commonly targeted in LLMs
    lora_dropout=0.05,
    bias="none"
)
model = get_peft_model(model, lora_config)
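model.print_trainable_parameters()  # optional sanity check: report how few parameters the LoRA adapters train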

# -------------------------------
# 3️⃣ Dataset
# -------------------------------
print("Loading dataset...")
# tu_dataset.json is expected to contain records with "instruction" and "code" fields.
dataset = load_dataset("json", data_files={"train": "tu_dataset.json"}, split="train")

def preprocess(example):
    # Tokenize prompt and target code as a single sequence; the data collator
    # passed to the Trainer below derives the labels from input_ids.
    prompt = f"# Instruction:\n{example['instruction']}\n\n# Code:\n"
    text = prompt + example["code"] + tokenizer.eos_token
    return tokenizer(text, truncation=True, max_length=512)

dataset = dataset.map(preprocess, remove_columns=dataset.column_names)

# -------------------------------
# 4️⃣ Training arguments
# -------------------------------
training_args = TrainingArguments(
    output_dir="./lora_codellama",
    per_device_train_batch_size=1,   # use gradient accumulation to emulate larger batches
    gradient_accumulation_steps=4,
    num_train_epochs=3,              # can be raised to 5 for higher accuracy
    learning_rate=3e-4,
    fp16=True,
    logging_steps=10,
    save_steps=50,
    save_total_limit=3,
    report_to="none",                # avoids depending on wandb or another tracker
)
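# Effective batch size per device = per_device_train_batch_size × gradient_accumulation_steps = 1 × 4 = 4.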

# -------------------------------
# 5️⃣ Training
# -------------------------------
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),  # pads batches and derives labels
)

print("Training LoRA...")
trainer.train()

# -------------------------------
# 6️⃣ Save weights
# -------------------------------
model.save_pretrained("lora_codellama")
print("✅ Training complete. Weights saved to 'lora_codellama'.")
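
# -------------------------------
# 7️⃣ Quick inference check (optional)
# -------------------------------
# A minimal sketch of reloading the saved adapter for generation; the prompt
# below is only an illustrative example, not part of the training data.
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="auto",
    torch_dtype=torch.float16
)
model = PeftModel.from_pretrained(base, "lora_codellama")

prompt = "# Instruction:\nWrite a function that reverses a string.\n\n# Code:\n"
inputs = tokenizer(prompt, return_tensors="pt").to(base.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))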