| import os |
| import gradio as gr |
| import torch |
| from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM |
| from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler |
|
|
| |
| |
| |
|
|
| |
# --- Configuration, overridable through environment variables ---
_env = os.environ.get
MODEL_TEXT_NAME = _env("MODEL_NAME", "gpt2")                        # language model id
MODEL_IMG_ID = _env("MODEL_ID", "runwayml/stable-diffusion-v1-5")   # diffusion model id
HF_TOKEN = _env("HF_TOKEN", None)                                   # optional HF token for gated repos
|
|
| |
def load_text_generator(model_name=MODEL_TEXT_NAME):
    """Build a text-generation pipeline for *model_name*.

    First tries to load the tokenizer and model explicitly; if anything in
    that path fails, falls back to letting ``pipeline`` resolve the model
    name on its own. Uses GPU device 0 when CUDA is available, CPU (-1)
    otherwise.
    """
    try:
        tok = AutoTokenizer.from_pretrained(model_name)
        lm = AutoModelForCausalLM.from_pretrained(model_name)
        dev = 0 if torch.cuda.is_available() else -1
        return pipeline("text-generation", model=lm, tokenizer=tok, device=dev)
    except Exception as exc:
        # Best-effort fallback: hand the raw model name to the pipeline
        # factory and let it do all the resolution itself.
        print("Error cargando modelo de texto, intentando pipeline directo:", exc)
        dev = 0 if torch.cuda.is_available() else -1
        return pipeline("text-generation", model=model_name, device=dev)
|
|
| |
def load_image_pipeline(model_id=MODEL_IMG_ID, token=HF_TOKEN):
    """Load the Stable Diffusion pipeline with a DPM-Solver++ scheduler.

    Args:
        model_id: Hub id of the diffusion model.
        token: Optional Hugging Face token for gated repos; ``None`` means
            anonymous access (equivalent to omitting the argument).

    Returns:
        The ready pipeline, or ``None`` when loading fails (e.g. not enough
        memory or a missing token) so the UI can degrade gracefully.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    try:
        # use_auth_token=None behaves like not passing it, so the original
        # token/no-token branch duplication is unnecessary; this also makes
        # the scheduler and pipeline loads handle the token consistently.
        scheduler = DPMSolverMultistepScheduler.from_pretrained(
            model_id, subfolder="scheduler", use_auth_token=token
        )
        # fp16 halves VRAM use on GPU; CPU inference requires fp32.
        pipe = StableDiffusionPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            use_auth_token=token,
        )
        pipe.scheduler = scheduler
        if device == "cuda":
            pipe = pipe.to(device)
            # Trades a little speed for a large VRAM saving.
            pipe.enable_attention_slicing()
        return pipe
    except Exception as e:
        # Mojibake fixed in the original message ("驴falta" -> "¿falta").
        print(f"Error cargando modelo de imagen (¿falta GPU o token?): {e}")
        return None
|
|
| print("Iniciando secuencia de arranque de HECTRON...") |
| generador_texto = load_text_generator() |
| pipe_imagen = load_image_pipeline() |
| print("Sistemas cargados.") |
|
|
| |
| |
| |
|
|
def generar_texto(prompt, max_length=150, temperature=0.7, top_k=50, top_p=0.95, num_return_sequences=1):
    """Run the text-generation pipeline and return the generated text.

    Parameters mirror the UI sliders; numeric values arrive from Gradio as
    floats, hence the explicit int()/float() casts before reaching the
    pipeline. Returns a single string; multiple sequences are joined with a
    horizontal-rule separator. An empty/blank prompt returns a usage hint.
    """
    if not prompt or not prompt.strip():
        # Mojibake fixed in the original hint ("v谩lido" -> "válido").
        return "Inicia la consola de HECTRON con un prompt válido."
    salida = generador_texto(
        prompt,
        max_length=int(max_length),
        temperature=float(temperature),
        top_k=int(top_k),
        top_p=float(top_p),
        num_return_sequences=int(num_return_sequences),
        do_sample=True,  # sampling must be on for temperature/top-k/top-p to apply
    )
    textos = [s["generated_text"] for s in salida]
    return textos[0] if len(textos) == 1 else "\n\n---\n\n".join(textos)
|
|
def generar_imagen(prompt, negative_prompt, steps, guidance_scale, width, height, seed):
    """Render an image with the Stable Diffusion pipeline.

    Returns the first generated image, or ``None`` when the pipeline is
    unavailable or the prompt is empty. ``seed`` is a free-text field from
    the UI; non-numeric values are ignored and leave the generator unseeded
    (preserving the original behavior). Width/height arrive as strings from
    the dropdowns, hence the int() casts.
    """
    if pipe_imagen is None:
        # Image model failed to load at startup; the UI shows a warning.
        return None
    if not prompt or not prompt.strip():
        return None

    # Seed on the device that will run inference so results are reproducible.
    generator = torch.Generator(device="cuda") if torch.cuda.is_available() else torch.Generator()
    if seed is not None and seed != "":
        try:
            generator = generator.manual_seed(int(seed))
        except (TypeError, ValueError):
            # Narrowed from a bare ``except``: only a non-numeric seed is
            # ignored; any other failure should surface instead of being
            # silently swallowed.
            pass

    image = pipe_imagen(
        prompt=prompt,
        negative_prompt=negative_prompt or None,  # "" would otherwise be encoded as a real prompt
        num_inference_steps=int(steps),
        guidance_scale=float(guidance_scale),
        height=int(height),
        width=int(width),
        generator=generator,
    ).images[0]
    return image
|
|
| |
| |
| |
|
|
# UI definition. All user-visible Spanish strings below had UTF-8/GBK
# mojibake in the original ("M贸dulo" -> "Módulo", "pesta帽as" -> "pestañas",
# "鈿狅笍" -> "⚠️", etc.); they are restored here. Layout and wiring are
# otherwise unchanged.
with gr.Blocks(title="HECTRON - Sistema Unificado", theme=gr.themes.Monochrome()) as demo:
    # NOTE(review): the heading emoji was half-lost in the encoding
    # round-trip ("馃"); 🤖 is an educated guess — confirm with the author.
    gr.Markdown("# 🤖 HECTRON: Terminal de Control Unificada")
    gr.Markdown("Ecosistema integrado. Selecciona el módulo operativo en las pestañas inferiores.")

    # --- Text tab: prompt + sampling controls wired to generar_texto ---
    with gr.Tab("Módulo de Lenguaje (Texto)"):
        with gr.Row():
            with gr.Column(scale=3):
                prompt_text = gr.Textbox(lines=6, label="Directiva del Sistema", placeholder="Ej. Ejecuta MODO GUARDIÁN 22:00...", value="HECTRON activado. Analiza el siguiente concepto: ")
                btn_text = gr.Button("Ejecutar Secuencia de Texto")
            with gr.Column(scale=1):
                max_len = gr.Slider(minimum=20, maximum=1024, value=150, step=10, label="Límite de Tokens")
                temp = gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.05, label="Temperatura")
                topk = gr.Slider(minimum=0, maximum=200, value=50, step=1, label="Top-k")
                topp = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.01, label="Top-p")
                n_seq = gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Secuencias")
        output_text = gr.Textbox(lines=12, label="Respuesta de HECTRON")

        btn_text.click(fn=generar_texto, inputs=[prompt_text, max_len, temp, topk, topp, n_seq], outputs=output_text)

    # --- Image tab: prompt + render controls wired to generar_imagen ---
    with gr.Tab("Módulo de Síntesis Visual (Imagen)"):
        # Static warning when the image pipeline failed to load at startup.
        if pipe_imagen is None:
            gr.Markdown("⚠️ **Advertencia:** El modelo de imagen no pudo cargarse. Revisa la memoria de tu sistema o tu token de Hugging Face.")
        with gr.Row():
            with gr.Column(scale=3):
                prompt_img = gr.Textbox(lines=4, label="Directiva Visual (Prompt)", value="A futuristic AI core named Hectron, cyberpunk style, glowing neon lights, highly detailed anime art style")
                neg_img = gr.Textbox(lines=2, label="Filtro Negativo", value="lowres, bad anatomy, text, error, blurry, deformed")
                btn_img = gr.Button("Renderizar Imagen")
            with gr.Column(scale=1):
                steps = gr.Slider(minimum=1, maximum=100, value=30, step=1, label="Pasos de Renderizado")
                guidance = gr.Slider(minimum=1.0, maximum=20.0, value=7.5, step=0.1, label="Fidelidad (Guidance)")
                # Choices are strings; generar_imagen casts them with int().
                w_img = gr.Dropdown(choices=["512","640","768"], value="512", label="Ancho (px)")
                h_img = gr.Dropdown(choices=["512","640","768"], value="512", label="Alto (px)")
                seed_img = gr.Textbox(lines=1, label="Semilla (Seed)")

        output_img = gr.Image(label="Renderizado de HECTRON")

        btn_img.click(fn=generar_imagen, inputs=[prompt_img, neg_img, steps, guidance, w_img, h_img, seed_img], outputs=output_img)
|
|
if __name__ == "__main__":
    # Bind on all interfaces and honor a PORT override (e.g. hosted Spaces).
    port = int(os.environ.get("PORT", 7860))
    demo.launch(server_name="0.0.0.0", server_port=port)
| |