import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from diffusers import StableDiffusionPipeline

# Free model catalog, grouped by modality
MODELS = {
    "text": {
        "microsoft/DialoGPT-medium": "Conversational chat",
        "gpt2": "Text generation",
        "distilgpt2": "Optimized GPT-2",
        "EleutherAI/gpt-neo-125M": "Small GPT-Neo"
    },
    "image": {
        "runwayml/stable-diffusion-v1-5": "Stable Diffusion v1.5",
        "CompVis/stable-diffusion-v1-4": "Stable Diffusion v1.4"
    }
}

# Cache of loaded models so each one is loaded at most once per process
model_cache = {}

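# Optional helper, not wired into the UI: evict every cached model and ask
# CUDA to release its memory. A minimal sketch over the model_cache above.
def clear_model_cache():
    """Drop all cached models and free GPU memory if available."""
    import gc  # local import keeps the sketch self-contained

    model_cache.clear()
    gc.collect()  # release Python references before emptying the CUDA cache
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
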
def load_text_model(model_name):
    """Load a text model and its tokenizer, caching the pair."""
    if model_name not in model_cache:
        print(f"Loading text model: {model_name}")
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)

        # DialoGPT ships without a pad token; reuse EOS for padding
        if "dialogpt" in model_name.lower():
            tokenizer.pad_token = tokenizer.eos_token
            model.config.pad_token_id = model.config.eos_token_id

        model_cache[model_name] = {
            "tokenizer": tokenizer,
            "model": model,
            "type": "text"
        }

    return model_cache[model_name]

def load_image_model(model_name):
    """Load a Stable Diffusion pipeline, caching it."""
    if model_name not in model_cache:
        print(f"Loading image model: {model_name}")
        # fp16 halves GPU memory use; fall back to fp32 on CPU
        pipe = StableDiffusionPipeline.from_pretrained(
            model_name,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
        )

        if torch.cuda.is_available():
            pipe = pipe.to("cuda")

        model_cache[model_name] = {
            "pipeline": pipe,
            "type": "image"
        }

    return model_cache[model_name]

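# Optional VRAM saver, not called by load_image_model above; a sketch using
# a standard diffusers API on StableDiffusionPipeline.
def optimize_image_pipeline(pipe):
    """Trade a little speed for a smaller peak memory footprint."""
    pipe.enable_attention_slicing()
    return pipe
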
def generate_text(prompt, model_name, max_length=100):
    """Generate text with the selected model."""
    try:
        model_data = load_text_model(model_name)
        tokenizer = model_data["tokenizer"]
        model = model_data["model"]

        inputs = tokenizer.encode(prompt, return_tensors="pt")

        with torch.no_grad():
            outputs = model.generate(
                inputs,
                max_length=max_length,
                num_return_sequences=1,
                temperature=0.7,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id
            )

        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # DialoGPT echoes the prompt; strip it so only the reply remains
        if "dialogpt" in model_name.lower():
            response = response.replace(prompt, "").strip()

        return response

    except Exception as e:
        return f"Error generating text: {str(e)}"

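# Variant sketch, not wired into the UI: generate()'s max_length counts the
# prompt tokens too, so a long prompt can leave no room for output. The
# standard max_new_tokens argument bounds only the generated portion.
def generate_text_bounded(prompt, model_name, max_new_tokens=100):
    """Like generate_text, but budgets only newly generated tokens."""
    data = load_text_model(model_name)
    tokenizer, model = data["tokenizer"], data["model"]
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            inputs,
            max_new_tokens=max_new_tokens,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
    # decode only the tokens appended after the prompt
    return tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
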
def generate_image(prompt, model_name, num_inference_steps=20):
    """Generate an image with the selected model."""
    try:
        model_data = load_image_model(model_name)
        pipeline = model_data["pipeline"]

        image = pipeline(
            prompt,
            num_inference_steps=num_inference_steps,
            guidance_scale=7.5
        ).images[0]

        return image

    except Exception as e:
        # A gr.Image output cannot display an error string; raise gr.Error
        # so Gradio shows the message in the UI instead
        raise gr.Error(f"Error generating image: {str(e)}")

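# Reproducibility sketch, not exposed in the UI: diffusers pipelines accept
# a torch.Generator, so a fixed seed yields the same image for the same
# prompt and settings.
def generate_image_seeded(prompt, model_name, seed=42, num_inference_steps=20):
    """Deterministic variant of generate_image."""
    pipe = load_image_model(model_name)["pipeline"]
    device = "cuda" if torch.cuda.is_available() else "cpu"
    generator = torch.Generator(device).manual_seed(seed)
    return pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=7.5,
        generator=generator
    ).images[0]
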
def chat_with_model(message, history, model_name):
    """Chat handler for DialoGPT."""
    try:
        model_data = load_text_model(model_name)
        tokenizer = model_data["tokenizer"]
        model = model_data["model"]

        # Rebuild the conversation as a plain-text transcript
        conversation = ""
        for user_msg, bot_msg in history:
            conversation += f"User: {user_msg}\n"
            if bot_msg:
                conversation += f"Assistant: {bot_msg}\n"

        conversation += f"User: {message}\nAssistant:"

        inputs = tokenizer.encode(conversation, return_tensors="pt", truncation=True, max_length=512)

        with torch.no_grad():
            outputs = model.generate(
                inputs,
                max_length=inputs.shape[1] + 50,
                temperature=0.7,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id
            )

        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Keep only the text after the final "Assistant:" marker
        response = response.split("Assistant:")[-1].strip()

    except Exception as e:
        response = f"Chat error: {str(e)}"

    # A gr.Chatbot needs the full list of (user, bot) pairs, and returning
    # an empty string first clears the input textbox
    return "", history + [(message, response)]

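# DialoGPT was trained with dialogue turns joined by the EOS token rather
# than "User:"/"Assistant:" prefixes, so the token-level format below tends
# to match the model better. A minimal single-turn sketch, not wired into
# the UI:
def dialogpt_reply(message, model_name="microsoft/DialoGPT-medium"):
    data = load_text_model(model_name)
    tokenizer, model = data["tokenizer"], data["model"]
    # append EOS to mark the end of the user's turn
    input_ids = tokenizer.encode(message + tokenizer.eos_token, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            max_length=input_ids.shape[1] + 50,
            pad_token_id=tokenizer.eos_token_id
        )
    # decode only the tokens generated after the user's turn
    return tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
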
# --- Gradio UI ---
with gr.Blocks(title="Free AI Models", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 Free AI Models")
    gr.Markdown("### Generate text and images without quota limits")

    with gr.Tabs():
        # Text generation tab
        with gr.TabItem("📝 Text Generation"):
            with gr.Row():
                with gr.Column():
                    text_model = gr.Dropdown(
                        choices=list(MODELS["text"].keys()),
                        value="microsoft/DialoGPT-medium",
                        label="Text Model"
                    )
                    text_prompt = gr.Textbox(
                        label="Prompt",
                        placeholder="Type your prompt here...",
                        lines=3
                    )
                    max_length = gr.Slider(
                        minimum=50,
                        maximum=200,
                        value=100,
                        step=10,
                        label="Maximum length"
                    )
                    text_btn = gr.Button("Generate Text", variant="primary")

                with gr.Column():
                    text_output = gr.Textbox(
                        label="Result",
                        lines=10,
                        interactive=False
                    )

            text_btn.click(
                generate_text,
                inputs=[text_prompt, text_model, max_length],
                outputs=text_output
            )

        # Chat tab
        with gr.TabItem("💬 Chat"):
            with gr.Row():
                with gr.Column():
                    chat_model = gr.Dropdown(
                        choices=["microsoft/DialoGPT-medium"],
                        value="microsoft/DialoGPT-medium",
                        label="Chat Model"
                    )

                with gr.Column():
                    chatbot = gr.Chatbot(
                        label="Chat",
                        height=400
                    )
                    chat_input = gr.Textbox(
                        label="Message",
                        placeholder="Type your message...",
                        lines=2
                    )
                    chat_btn = gr.Button("Send", variant="primary")

            # .click() has no clear_input argument in Gradio; the handler
            # clears the textbox itself by returning "" as its first output
            chat_btn.click(
                chat_with_model,
                inputs=[chat_input, chatbot, chat_model],
                outputs=[chat_input, chatbot]
            )

            chat_input.submit(
                chat_with_model,
                inputs=[chat_input, chatbot, chat_model],
                outputs=[chat_input, chatbot]
            )

        # Image generation tab
        with gr.TabItem("🎨 Image Generation"):
            with gr.Row():
                with gr.Column():
                    image_model = gr.Dropdown(
                        choices=list(MODELS["image"].keys()),
                        value="runwayml/stable-diffusion-v1-5",
                        label="Image Model"
                    )
                    image_prompt = gr.Textbox(
                        label="Image Prompt",
                        placeholder="Describe the image you want to generate...",
                        lines=3
                    )
                    steps = gr.Slider(
                        minimum=10,
                        maximum=50,
                        value=20,
                        step=5,
                        label="Inference steps"
                    )
                    image_btn = gr.Button("Generate Image", variant="primary")

                with gr.Column():
                    image_output = gr.Image(
                        label="Generated Image",
                        type="pil"
                    )

            image_btn.click(
                generate_image,
                inputs=[image_prompt, image_model, steps],
                outputs=image_output
            )

if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )
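
# Note: generation here is compute-heavy, so serializing requests with
# Gradio's built-in queue, e.g. demo.queue().launch(...), is usually
# worthwhile; left out above to keep the original launch call unchanged.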