Add 8 high-quality models compatible with CPU - Add FLUX.1-dev and FLUX.1-schnell models - Add SDXL Lightning variants and Realistic Vision - Add model selector dropdown - Add automatic configurations for each model - Update README with model descriptions
Browse files
README.md
CHANGED
|
@@ -9,4 +9,47 @@ app_file: app.py
|
|
| 9 |
pinned: false
|
| 10 |
---
|
| 11 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 9 |
pinned: false
|
| 10 |
---
|
| 11 |
|
| 12 |
+
# 🎨 Text-to-Image Gradio Template
|
| 13 |
+
|
| 14 |
+
Un generador de imágenes con IA con múltiples modelos de alta calidad optimizados para CPU.
|
| 15 |
+
|
| 16 |
+
## 🚀 Modelos Disponibles
|
| 17 |
+
|
| 18 |
+
### Modelos Rápidos (1-4 pasos)
|
| 19 |
+
- **SDXL Turbo** - Generación ultra rápida (1 paso)
|
| 20 |
+
- **SD Turbo** - Generación rápida (1 paso)
|
| 21 |
+
- **SDXL Lightning** - Generación rápida de alta calidad (4 pasos)
|
| 22 |
+
- **SDXL Lightning 4Step** - Versión optimizada de 4 pasos
|
| 23 |
+
|
| 24 |
+
### Modelos FLUX (Alta Calidad)
|
| 25 |
+
- **FLUX.1-dev** - Modelo de desarrollo de alta calidad
|
| 26 |
+
- **FLUX.1-schnell** - Modelo rápido de alta calidad
|
| 27 |
+
|
| 28 |
+
### Modelos Estándar
|
| 29 |
+
- **SDXL Base** - Modelo base de Stable Diffusion XL
|
| 30 |
+
- **Realistic Vision** - Especializado en retratos realistas
|
| 31 |
+
|
| 32 |
+
## 🎯 Uso
|
| 33 |
+
|
| 34 |
+
1. Selecciona un modelo del dropdown en "Advanced Settings"
|
| 35 |
+
2. Escribe tu prompt en el campo de texto
|
| 36 |
+
3. Ajusta los parámetros si es necesario
|
| 37 |
+
4. Haz clic en "Run"
|
| 38 |
+
5. ¡Disfruta tu imagen generada!
|
| 39 |
+
|
| 40 |
+
## ⚡ Optimizaciones
|
| 41 |
+
|
| 42 |
+
- **CPU Optimizado**: Todos los modelos funcionan perfectamente en CPU básico
|
| 43 |
+
- **Configuraciones automáticas**: Cada modelo usa sus parámetros óptimos
|
| 44 |
+
- **Carga bajo demanda**: Los modelos se cargan solo cuando se necesitan
|
| 45 |
+
- **Configuraciones específicas**: Cada modelo tiene sus propios parámetros de guidance y steps
|
| 46 |
+
|
| 47 |
+
## 🔧 Configuraciones por Modelo
|
| 48 |
+
|
| 49 |
+
- **Turbo/Lightning**: guidance_scale=0.0, steps=1-4
|
| 50 |
+
- **FLUX**: guidance_scale=7.5, steps=20
|
| 51 |
+
- **Realistic Vision**: guidance_scale=7.5, steps=25
|
| 52 |
+
|
| 53 |
+
---
|
| 54 |
+
|
| 55 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
CHANGED
|
@@ -7,19 +7,38 @@ from diffusers import DiffusionPipeline
|
|
| 7 |
import torch
|
| 8 |
|
| 9 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
if torch.cuda.is_available():
|
| 13 |
torch_dtype = torch.float16
|
| 14 |
else:
|
| 15 |
torch_dtype = torch.float32
|
| 16 |
|
| 17 |
-
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
|
| 18 |
-
pipe = pipe.to(device)
|
| 19 |
-
|
| 20 |
MAX_SEED = np.iinfo(np.int32).max
|
| 21 |
MAX_IMAGE_SIZE = 1024
|
| 22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
|
| 24 |
# @spaces.GPU #[uncomment to use ZeroGPU]
|
| 25 |
def infer(
|
|
@@ -31,13 +50,32 @@ def infer(
|
|
| 31 |
height,
|
| 32 |
guidance_scale,
|
| 33 |
num_inference_steps,
|
|
|
|
| 34 |
progress=gr.Progress(track_tqdm=True),
|
| 35 |
):
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
if randomize_seed:
|
| 37 |
seed = random.randint(0, MAX_SEED)
|
| 38 |
|
| 39 |
generator = torch.Generator().manual_seed(seed)
|
| 40 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
image = pipe(
|
| 42 |
prompt=prompt,
|
| 43 |
negative_prompt=negative_prompt,
|
|
@@ -55,6 +93,8 @@ examples = [
|
|
| 55 |
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
|
| 56 |
"An astronaut riding a green horse",
|
| 57 |
"A delicious ceviche cheesecake slice",
|
|
|
|
|
|
|
| 58 |
]
|
| 59 |
|
| 60 |
css = """
|
|
@@ -82,6 +122,13 @@ with gr.Blocks(css=css) as demo:
|
|
| 82 |
result = gr.Image(label="Result", show_label=False)
|
| 83 |
|
| 84 |
with gr.Accordion("Advanced Settings", open=False):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 85 |
negative_prompt = gr.Text(
|
| 86 |
label="Negative prompt",
|
| 87 |
max_lines=1,
|
|
@@ -146,6 +193,7 @@ with gr.Blocks(css=css) as demo:
|
|
| 146 |
height,
|
| 147 |
guidance_scale,
|
| 148 |
num_inference_steps,
|
|
|
|
| 149 |
],
|
| 150 |
outputs=[result, seed],
|
| 151 |
)
|
|
|
|
| 7 |
import torch
|
| 8 |
|
| 9 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 10 |
+
|
| 11 |
+
# Modelos disponibles de alta calidad
|
| 12 |
+
MODELS = {
|
| 13 |
+
"SDXL Turbo (stabilityai/sdxl-turbo)": "stabilityai/sdxl-turbo",
|
| 14 |
+
"FLUX.1-dev (black-forest-labs/FLUX.1-dev)": "black-forest-labs/FLUX.1-dev",
|
| 15 |
+
"FLUX.1-schnell (black-forest-labs/FLUX.1-schnell)": "black-forest-labs/FLUX.1-schnell",
|
| 16 |
+
"SDXL Lightning (ByteDance/SDXL-Lightning)": "ByteDance/SDXL-Lightning",
|
| 17 |
+
"SDXL Lightning 4Step (ByteDance/SDXL-Lightning-4Step)": "ByteDance/SDXL-Lightning-4Step",
|
| 18 |
+
"SD Turbo (stabilityai/sd-turbo)": "stabilityai/sd-turbo",
|
| 19 |
+
"SDXL Base (stabilityai/stable-diffusion-xl-base-1.0)": "stabilityai/stable-diffusion-xl-base-1.0",
|
| 20 |
+
"Realistic Vision (SG161222/Realistic_Vision_V5.1_noVAE)": "SG161222/Realistic_Vision_V5.1_noVAE"
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
# Estado del pipeline
|
| 24 |
+
pipe = None
|
| 25 |
+
current_model_id = None
|
| 26 |
|
| 27 |
if torch.cuda.is_available():
|
| 28 |
torch_dtype = torch.float16
|
| 29 |
else:
|
| 30 |
torch_dtype = torch.float32
|
| 31 |
|
|
|
|
|
|
|
|
|
|
| 32 |
MAX_SEED = np.iinfo(np.int32).max
|
| 33 |
MAX_IMAGE_SIZE = 1024
|
| 34 |
|
| 35 |
+
# Función para cargar el modelo
|
| 36 |
+
def load_model(model_id):
|
| 37 |
+
global pipe, current_model_id
|
| 38 |
+
if pipe is None or model_id != current_model_id:
|
| 39 |
+
pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch_dtype)
|
| 40 |
+
pipe = pipe.to(device)
|
| 41 |
+
current_model_id = model_id
|
| 42 |
|
| 43 |
# @spaces.GPU #[uncomment to use ZeroGPU]
|
| 44 |
def infer(
|
|
|
|
| 50 |
height,
|
| 51 |
guidance_scale,
|
| 52 |
num_inference_steps,
|
| 53 |
+
model_name,
|
| 54 |
progress=gr.Progress(track_tqdm=True),
|
| 55 |
):
|
| 56 |
+
# Cargar el modelo seleccionado
|
| 57 |
+
model_id = MODELS[model_name]
|
| 58 |
+
load_model(model_id)
|
| 59 |
+
|
| 60 |
if randomize_seed:
|
| 61 |
seed = random.randint(0, MAX_SEED)
|
| 62 |
|
| 63 |
generator = torch.Generator().manual_seed(seed)
|
| 64 |
|
| 65 |
+
# Configuraciones específicas según el modelo
|
| 66 |
+
if "turbo" in model_id.lower() or "lightning" in model_id.lower():
|
| 67 |
+
# Modelos rápidos
|
| 68 |
+
guidance_scale = 0.0
|
| 69 |
+
num_inference_steps = 1 if "turbo" in model_id.lower() else 4
|
| 70 |
+
elif "flux" in model_id.lower():
|
| 71 |
+
# Modelos FLUX
|
| 72 |
+
guidance_scale = 7.5
|
| 73 |
+
num_inference_steps = 20
|
| 74 |
+
elif "realistic" in model_id.lower():
|
| 75 |
+
# Realistic Vision
|
| 76 |
+
guidance_scale = 7.5
|
| 77 |
+
num_inference_steps = 25
|
| 78 |
+
|
| 79 |
image = pipe(
|
| 80 |
prompt=prompt,
|
| 81 |
negative_prompt=negative_prompt,
|
|
|
|
| 93 |
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
|
| 94 |
"An astronaut riding a green horse",
|
| 95 |
"A delicious ceviche cheesecake slice",
|
| 96 |
+
"Futuristic AI assistant in a glowing galaxy, neon lights, sci-fi style, cinematic",
|
| 97 |
+
"Portrait of a beautiful woman, realistic, high quality, detailed",
|
| 98 |
]
|
| 99 |
|
| 100 |
css = """
|
|
|
|
| 122 |
result = gr.Image(label="Result", show_label=False)
|
| 123 |
|
| 124 |
with gr.Accordion("Advanced Settings", open=False):
|
| 125 |
+
model_selector = gr.Dropdown(
|
| 126 |
+
choices=list(MODELS.keys()),
|
| 127 |
+
value=list(MODELS.keys())[0],
|
| 128 |
+
label="Model",
|
| 129 |
+
info="Select a high-quality model"
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
negative_prompt = gr.Text(
|
| 133 |
label="Negative prompt",
|
| 134 |
max_lines=1,
|
|
|
|
| 193 |
height,
|
| 194 |
guidance_scale,
|
| 195 |
num_inference_steps,
|
| 196 |
+
model_selector,
|
| 197 |
],
|
| 198 |
outputs=[result, seed],
|
| 199 |
)
|