Andro0s's picture
Create app.py
a7bd5fa verified
raw
history blame
1.83 kB
# ===============================
# AmorCoder AI - Space de Producción
# ===============================
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from peft import PeftModel
import torch
# -------------------------------
# 1️⃣ Load the base model and apply the LoRA adapter
# -------------------------------
MODEL_NAME = "codellama/CodeLlama-7b-hf"
LORA_PATH = "./lora_codellama"  # folder containing the fine-tuned LoRA weights

print("Cargando modelo...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="auto",          # let accelerate shard/place layers on available devices
    torch_dtype=torch.float16,  # half precision to fit a 7B model in GPU memory
)
print("Aplicando pesos LoRA...")
model = PeftModel.from_pretrained(model, LORA_PATH)

# Text-generation pipeline over the adapted model.
# NOTE: do NOT pass `device=0` here — the model was loaded with
# device_map="auto" (accelerate-dispatched), and transformers raises a
# ValueError if an explicit device is also given. The pipeline reuses the
# model's existing device placement.
# `do_sample=True` is required for `temperature` / `top_p` to take effect;
# without it generation is greedy and those settings are silently ignored.
codegen = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.2,
    top_p=0.95,
)
# -------------------------------
# 2️⃣ Función de generación de código
# -------------------------------
def generar_codigo(instruccion):
    """Generate code for a natural-language instruction.

    Wraps the instruction in the prompt template used at fine-tuning time,
    runs the global ``codegen`` pipeline, and returns only the text that
    follows the code marker. If the marker is absent from the model output,
    the whole generated text is returned stripped.
    """
    prompt = f"# Instrucci贸n:\n{instruccion}\n\n# C贸digo:\n"
    texto_completo = codegen(prompt)[0]["generated_text"]
    partes = texto_completo.split("# C贸digo:")
    # More than one part means the marker was found at least once.
    if len(partes) > 1:
        return partes[1].strip()
    return texto_completo.strip()
# -------------------------------
# 3️⃣ Interfaz Gradio
# -------------------------------
# Build the input/output widgets separately, then wire them into the app.
cuadro_entrada = gr.Textbox(lines=4, placeholder="Escribe tu instrucci贸n aqu铆...")
cuadro_salida = gr.Textbox(lines=20, placeholder="C贸digo generado...")

iface = gr.Interface(
    fn=generar_codigo,
    inputs=cuadro_entrada,
    outputs=cuadro_salida,
    title="馃挋 AmorCoder AI - Programaci贸n Inteligente",
    description="Genera c贸digo real y completo usando CodeLlama 7B adaptado a tu estilo con LoRA.",
)
iface.launch()