import os
import gradio as gr
from huggingface_hub import login
from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    Trainer,
    TrainingArguments,
    DataCollatorForLanguageModeling,
    pipeline,
)
# ============================================================
# 🔐 Secure authentication with your token
# ============================================================
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)
else:
    print("⚠️ Token not found. Add 'HF_TOKEN' under Settings → Secrets → Add new secret")
# ============================================================
# ⚙️ Base model and dataset configuration
# ============================================================
MODEL_NAME = "bigcode/santacoder"  # Openly available model on Hugging Face
DATASET_PATH = "dataset.json"      # Dataset file uploaded to the Space
OUTPUT_DIR = "lora_output"         # Folder where the trained model is saved

# Create the output folder if it does not exist
os.makedirs(OUTPUT_DIR, exist_ok=True)
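# NOTE (assumption): the tokenization step below expects each record in
# dataset.json to provide "prompt" and "completion" string fields, e.g.:
#   {"prompt": "# Add two numbers\n", "completion": "def add(a, b):\n    return a + b"}
# The actual contents of dataset.json are not shown here; this only
# illustrates the schema the code relies on.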
# Load model and tokenizer
print("🔄 Loading base model...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=hf_token)
# bigcode/santacoder ships custom modeling code, hence trust_remote_code=True
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, token=hf_token, trust_remote_code=True)
# The tokenizer has no pad token by default; reuse EOS so padding below works
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
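# ------------------------------------------------------------
# Note: despite the "LoRA" naming, the Trainer below fine-tunes the full
# model. A minimal sketch of actual LoRA adapters, assuming the `peft`
# package is installed (the target module names are an assumption and
# should be checked against model.named_modules()):
#
#   from peft import LoraConfig, get_peft_model
#
#   lora_config = LoraConfig(
#       r=8,                 # low-rank adapter dimension
#       lora_alpha=16,       # adapter scaling factor
#       lora_dropout=0.05,
#       task_type="CAUSAL_LM",
#       # target_modules=["c_attn"],  # hypothetical; verify for santacoder
#   )
#   model = get_peft_model(model, lora_config)
#   model.print_trainable_parameters()  # shows how few parameters will train
# ------------------------------------------------------------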
# ============================================================
# 🧩 LoRA training function (simple and functional)
# ============================================================
def train_lora(epochs, batch_size, learning_rate):
    try:
        # Load the JSON dataset
        dataset = load_dataset("json", data_files=DATASET_PATH)

        # Tokenize the dataset; with batched=True each field is a list,
        # so concatenate prompt and completion pairwise
        def tokenize_fn(examples):
            texts = [p + c for p, c in zip(examples["prompt"], examples["completion"])]
            return tokenizer(
                texts,
                truncation=True,
                padding="max_length",
                max_length=256,
            )

        tokenized = dataset.map(tokenize_fn, batched=True)

        # Data collator for causal LM (mlm=False derives labels from input_ids)
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=tokenizer, mlm=False
        )
        # Training configuration
        training_args = TrainingArguments(
            output_dir=OUTPUT_DIR,
            per_device_train_batch_size=int(batch_size),
            num_train_epochs=int(epochs),
            learning_rate=float(learning_rate),
            logging_steps=10,
            save_total_limit=1,
            push_to_hub=False,
            report_to="none",
        )

        # Trainer
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=tokenized["train"],
            data_collator=data_collator,
        )

        # Train the model
        trainer.train()

        # Save the results
        model.save_pretrained(OUTPUT_DIR)
        tokenizer.save_pretrained(OUTPUT_DIR)

        return "✅ Training finished successfully. Model saved to ./lora_output"
    except Exception as e:
        return f"❌ Error during training: {str(e)}"
# ============================================================
# 🤖 Test function for the trained model
# ============================================================
def generate_text(prompt):
    try:
        # Load the fine-tuned weights from OUTPUT_DIR (run training first,
        # otherwise the folder will not contain a usable model)
        generator = pipeline(
            "text-generation",
            model=OUTPUT_DIR,
            tokenizer=tokenizer,
            trust_remote_code=True,
        )
        output = generator(prompt, max_new_tokens=100, temperature=0.7, top_p=0.9)
        return output[0]["generated_text"]
    except Exception as e:
        return f"⚠️ Error while generating text: {str(e)}"
# ============================================================
# 💻 User interface (Gradio)
# ============================================================
with gr.Blocks(title="💙 AmorCoderAI - LoRA Training") as demo:
    gr.Markdown("# 💙 AmorCoderAI - Training and Testing")
    gr.Markdown("Train and test your model based on `bigcode/santacoder` with LoRA.")

    with gr.Tab("🧠 Train"):
        epochs = gr.Number(value=1, label="Epochs")
        batch_size = gr.Number(value=2, label="Batch size")
        learning_rate = gr.Number(value=5e-5, label="Learning rate")
        train_button = gr.Button("🚀 Start training")
        train_output = gr.Textbox(label="Result", lines=3)
        train_button.click(train_lora, inputs=[epochs, batch_size, learning_rate], outputs=train_output)

    with gr.Tab("✨ Test model"):
        prompt = gr.Textbox(label="Write a prompt")
        generate_button = gr.Button("💬 Generate text")
        output_box = gr.Textbox(label="Generated output", lines=6)
        generate_button.click(generate_text, inputs=prompt, outputs=output_box)
# ============================================================
# 🚀 Launch the app
# ============================================================
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
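# ------------------------------------------------------------
# Assumed Space dependencies (requirements.txt), a sketch only:
#   gradio
#   transformers
#   datasets
#   torch
#   accelerate   # Trainer in recent transformers releases depends on it
#   peft         # only if the LoRA adapter sketch above is enabled
# ------------------------------------------------------------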