import os
import gradio as gr
from huggingface_hub import login
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments, DataCollatorForLanguageModeling, pipeline
from peft import LoraConfig, PeftModel, get_peft_model
# ============================================================
# 🔐 Hugging Face authentication
# ============================================================
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)
else:
    print("⚠️ Token not found. Add 'HF_TOKEN' under Settings → Secrets.")
# ============================================================
# ⚙️ Model and dataset configuration
# ============================================================
BASE_MODEL = "bigcode/santacoder"  # Public model
LORA_PATH = "./lora_output"        # Folder where the LoRA adapter will be saved
DATASET_PATH = "tu_dataset.json"   # Change this to the name of your dataset
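# The training code below assumes a JSON / JSON Lines file whose records carry
# "prompt" and "completion" string fields, e.g. (illustrative record only):
# {"prompt": "def add(a, b):", "completion": "\n    return a + b"}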
# ============================================================
# 🔧 Initialize tokenizer and base model
# ============================================================
# santacoder ships custom modeling code, so trust_remote_code=True is needed to load it
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
base_model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, trust_remote_code=True)

# 🔧 Make sure there is a pad_token
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
# ============================================================
# 🧩 LoRA training function
# ============================================================
def train_lora(epochs, batch_size, learning_rate):
    try:
        dataset = load_dataset("json", data_files=DATASET_PATH)
        # batched=True passes lists of examples, so join prompt/completion pairwise
        tokenized = dataset.map(
            lambda e: tokenizer(
                [p + c for p, c in zip(e["prompt"], e["completion"])],
                truncation=True,
                padding="max_length",
                max_length=256
            ),
            batched=True,
            remove_columns=dataset["train"].column_names  # drop raw text columns so the collator only sees token fields
        )
        data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
        # Wrap the base model with LoRA adapters; without this the full model is trained
        # and PeftModel.from_pretrained() cannot load the saved output later
        lora_config = LoraConfig(
            r=8,
            lora_alpha=16,
            lora_dropout=0.05,
            task_type="CAUSAL_LM",
            # target_modules is inferred from the model type; set it explicitly if PEFT cannot infer it
        )
        model = get_peft_model(base_model, lora_config)
        training_args = TrainingArguments(
            output_dir=LORA_PATH,
            per_device_train_batch_size=int(batch_size),
            num_train_epochs=int(epochs),
            learning_rate=float(learning_rate),
            save_total_limit=1,
            logging_steps=10,
            push_to_hub=False
        )
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=tokenized["train"],
            data_collator=data_collator,
        )
        trainer.train()
        # Save the LoRA adapter and tokenizer
        model.save_pretrained(LORA_PATH)
        tokenizer.save_pretrained(LORA_PATH)
        return "✅ Training finished and saved to ./lora_output"
    except Exception as e:
        return f"❌ Error during training: {e}"
# ============================================================
# 🤖 Generate text using the LoRA adapter on top of the base model
# ============================================================
def generate_text(prompt_text):
    try:
        # Load the base model (reloaded on every call; cache it globally if this gets slow)
        tokenizer_gen = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
        base_model_gen = AutoModelForCausalLM.from_pretrained(BASE_MODEL, trust_remote_code=True)
        # Apply the LoRA adapter
        model = PeftModel.from_pretrained(base_model_gen, LORA_PATH)
        generator = pipeline("text-generation", model=model, tokenizer=tokenizer_gen)
        output = generator(prompt_text, max_new_tokens=100, temperature=0.7, top_p=0.9)
        return output[0]["generated_text"]
    except Exception as e:
        return f"❌ Error generating text: {e}"
# ============================================================
# 💻 Gradio interface
# ============================================================
with gr.Blocks(title="AmorCoderAI - LoRA Training") as demo:
    gr.Markdown("# 💙 AmorCoderAI - Training and Testing")
    gr.Markdown("Train and test your model based on `bigcode/santacoder` with LoRA")
    with gr.Tab("🧠 Train"):
        epochs = gr.Number(value=1, label="Epochs")
        batch_size = gr.Number(value=2, label="Batch size")
        learning_rate = gr.Number(value=5e-5, label="Learning rate")
        train_button = gr.Button("🚀 Start training")
        train_output = gr.Textbox(label="Result")
        train_button.click(train_lora, inputs=[epochs, batch_size, learning_rate], outputs=train_output)
    with gr.Tab("✨ Test the model"):
        prompt = gr.Textbox(label="Write a prompt")
        generate_button = gr.Button("💬 Generate text")
        output_box = gr.Textbox(label="Generated output")
        generate_button.click(generate_text, inputs=prompt, outputs=output_box)
# ============================================================
# 🚀 Launch the app
# ============================================================
if __name__ == "__main__":
    demo.launch()
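# Note: the Space also needs a requirements.txt covering the imports above.
# A minimal sketch (package names only, versions assumed/unpinned):
#   gradio
#   transformers
#   datasets
#   peft
#   torch
#   accelerate
# huggingface_hub is pulled in by transformers; pin versions as needed for your Space.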