Malaji71 committed on
Commit
f7da536
·
verified ·
1 Parent(s): 6d7510d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -57
app.py CHANGED
@@ -1,11 +1,10 @@
1
- # app.py — FLUX.1 via Hugging Face InferenceClient (CPU-compatible)
2
  import gradio as gr
3
  import os
4
  import time
5
  import logging
6
  from typing import Optional, Tuple
7
  from PIL import Image
8
- import io
9
  from agent import ImprovedSemanticAgent
10
  from huggingface_hub import InferenceClient
11
 
@@ -14,71 +13,51 @@ logging.basicConfig(level=logging.INFO)
14
  logger = logging.getLogger(__name__)
15
 
16
  # ================================================================
17
- # NUEVO GENERADOR USANDO Hugging Face InferenceClient
18
  # ================================================================
19
 
20
- class HuggingFaceFluxGenerator:
21
  def __init__(self):
22
- # HF_TOKEN se toma automáticamente si está en los secrets como HF_TOKEN
23
- # Pero tú usas "PS", así que lo leemos explícitamente
24
- hf_token = os.getenv("PS")
25
  if not hf_token:
26
- raise ValueError("❌ Secret 'PS' (HF_TOKEN) no encontrado en el entorno.")
27
  self.client = InferenceClient(
28
- provider="together", # FLUX.1-dev disponible en Together
29
  api_key=hf_token
30
  )
31
 
32
- def generate_image(
33
- self,
34
- prompt: str,
35
- width: int = 1024,
36
- height: int = 1024,
37
- image_input: Optional[str] = None,
38
- progress_callback=None
39
- ) -> Tuple[Optional[str], str]:
40
  try:
41
  if progress_callback:
42
- progress_callback(0.5, desc="🚀 Generando con FLUX.1 vía HF Inference...")
43
-
44
- if image_input is None:
45
- # Text-to-image
46
- image = self.client.text_to_image(
47
- prompt=prompt,
48
- model="black-forest-labs/FLUX.1-dev",
49
- width=width,
50
- height=height
51
- )
52
- else:
53
- # Image-to-image (primero abrimos la imagen local)
54
- init_image = Image.open(image_input).convert("RGB")
55
- image = self.client.image_to_image(
56
- prompt=prompt,
57
- image=init_image,
58
- model="black-forest-labs/FLUX.1-dev",
59
- strength=0.75
60
- )
61
-
62
- output_path = f"/tmp/flux_hf_{int(time.time())}.png"
63
  image.save(output_path)
64
-
65
  if progress_callback:
66
  progress_callback(1.0, desc="✅ ¡Imagen generada!")
67
-
68
- return output_path, "✅ Imagen generada con FLUX.1-dev vía Hugging Face Inference"
69
-
70
  except Exception as e:
71
- return None, f"❌ Error en HF Inference: {str(e)}"
72
 
73
 
74
  # ================================================================
75
- # INTERFAZ GRADIO (mismo flujo semántico)
76
  # ================================================================
77
 
78
  def create_interface():
79
- # Inicializar generador y agente
80
  try:
81
- generator = HuggingFaceFluxGenerator()
82
  except Exception as e:
83
  generator = None
84
  logger.error(f"Error al iniciar generador: {e}")
@@ -88,7 +67,6 @@ def create_interface():
88
  def generate_wrapper(
89
  prompt: str,
90
  aspect_ratio: str,
91
- input_image,
92
  enable_semantic_enhancement: bool,
93
  enhancement_category: str,
94
  progress=gr.Progress()
@@ -111,7 +89,7 @@ def create_interface():
111
  except Exception as e:
112
  enhancement_info = f"⚠️ Error en enhancement: {str(e)}"
113
 
114
- # Resolución por aspect ratio
115
  aspect_ratios = {
116
  "1:1": (1024, 1024),
117
  "16:9": (1344, 768),
@@ -128,7 +106,6 @@ def create_interface():
128
  prompt=final_prompt,
129
  width=width,
130
  height=height,
131
- image_input=input_image,
132
  progress_callback=progress
133
  )
134
 
@@ -139,8 +116,8 @@ def create_interface():
139
  try:
140
  if not prompt.strip():
141
  return "⚠️ Prompt vacío"
142
- # Extraer el prompt mejorado del status si es posible, o usar el original
143
- enhanced = prompt # en este flujo, el mejorado ya se usó
144
  success = agent.db.store_cache_result(prompt, enhanced, category, 0.95, "user_curated")
145
  return "✅ Prompt guardado como ejemplo de alta calidad." if success else "❌ Error al guardar."
146
  except Exception as e:
@@ -160,14 +137,14 @@ def create_interface():
160
  h1, h2, h3 { color: #ffffff !important; font-weight: 300 !important; }
161
  """
162
 
163
- with gr.Blocks(css=custom_css, title="FLUX.1 + SEMANTIC AI", theme=gr.themes.Base()) as interface:
164
  gr.HTML("""
165
  <div style="text-align: center; padding: 20px; background: #000000;">
166
  <h1 style="color: #ffffff; font-size: 2.5em; font-weight: 300; margin: 0; letter-spacing: 2px;">
167
- FLUX.1 + SEMANTIC AI
168
  </h1>
169
  <p style="color: #cccccc; font-size: 1.1em; margin: 10px 0 0 0;">
170
- Generación con FLUX.1 vía Hugging Face Inference + búsqueda semántica en 100k prompts
171
  </p>
172
  </div>
173
  """)
@@ -190,8 +167,7 @@ def create_interface():
190
  choices=["1:1", "16:9", "9:16", "4:3", "3:4", "21:9", "9:21"],
191
  value="1:1"
192
  )
193
- input_image = gr.Image(label="🖼️ Imagen (img2img)", type="filepath")
194
- generate_btn = gr.Button("🧠 GENERAR CON FLUX.1", variant="primary")
195
 
196
  with gr.Column():
197
  output_image = gr.Image(label="Imagen generada", type="filepath", height=500)
@@ -203,7 +179,7 @@ def create_interface():
203
  example_btn.click(get_semantic_example_wrapper, [enhancement_category, prompt_input], example_output)
204
  generate_btn.click(
205
  generate_wrapper,
206
- [prompt_input, aspect_ratio, input_image, enable_semantic_enhancement, enhancement_category],
207
  [output_image, status_output]
208
  )
209
  save_btn.click(save_high_quality_prompt, [prompt_input, enhancement_category, status_output], [save_status])
 
1
+ # app.py — MODELO NUEVO: SDXL via HF Inference (gratuito para HF Pro)
2
  import gradio as gr
3
  import os
4
  import time
5
  import logging
6
  from typing import Optional, Tuple
7
  from PIL import Image
 
8
  from agent import ImprovedSemanticAgent
9
  from huggingface_hub import InferenceClient
10
 
 
13
  logger = logging.getLogger(__name__)
14
 
15
  # ================================================================
16
+ # GENERADOR CON SDXL EN HF INFERENCE (GRATUITO PARA HF PRO)
17
  # ================================================================
18
 
19
class SDXLHFGenerator:
    """Text-to-image generator backed by SDXL via the HF Inference API.

    Reads the Hugging Face API token from the environment variable ``PS``
    (the Space secret that holds the HF token) and generates images with
    ``stabilityai/stable-diffusion-xl-base-1.0``.
    """

    def __init__(self):
        # The Space stores the HF token under the secret name "PS".
        hf_token = os.getenv("PS")
        if not hf_token:
            raise ValueError("❌ Secret 'PS' (tu HF_TOKEN) no encontrado en el entorno.")
        self.client = InferenceClient(
            provider="hf-inference",
            api_key=hf_token
        )

    def generate_image(
        self,
        prompt: str,
        width: int = 1024,
        height: int = 1024,
        progress_callback=None,
    ) -> Tuple[Optional[str], str]:
        """Generate one image from *prompt* and save it as a PNG in /tmp.

        Args:
            prompt: Text prompt for SDXL.
            width: Output width in pixels.
            height: Output height in pixels.
            progress_callback: Optional ``(fraction, desc=...)`` reporter
                (e.g. a ``gr.Progress`` instance); called at 0.5 and 1.0.

        Returns:
            ``(path, status)`` where *path* is the saved PNG file path, or
            ``(None, error_message)`` on failure — errors are folded into
            the status string instead of raising so the UI stays responsive.
        """
        try:
            if progress_callback:
                progress_callback(0.5, desc="🎨 Generando con SDXL (vía HF Inference)...")

            image = self.client.text_to_image(
                prompt=prompt,
                model="stabilityai/stable-diffusion-xl-base-1.0",
                width=width,
                height=height
            )

            # time.time_ns() yields a unique filename even for back-to-back
            # calls; int(time.time()) collides within the same second and
            # silently overwrites the previous image.
            output_path = f"/tmp/sdxl_hf_{time.time_ns()}.png"
            image.save(output_path)

            if progress_callback:
                progress_callback(1.0, desc="✅ ¡Imagen generada!")

            return output_path, "✅ Imagen generada con SDXL vía Hugging Face Inference (gratuito para HF Pro)"

        except Exception as e:
            return None, f"❌ Error en generación: {str(e)}"
51
 
52
 
53
  # ================================================================
54
+ # INTERFAZ GRADIO
55
  # ================================================================
56
 
57
  def create_interface():
58
+ # Inicializar
59
  try:
60
+ generator = SDXLHFGenerator()
61
  except Exception as e:
62
  generator = None
63
  logger.error(f"Error al iniciar generador: {e}")
 
67
  def generate_wrapper(
68
  prompt: str,
69
  aspect_ratio: str,
 
70
  enable_semantic_enhancement: bool,
71
  enhancement_category: str,
72
  progress=gr.Progress()
 
89
  except Exception as e:
90
  enhancement_info = f"⚠️ Error en enhancement: {str(e)}"
91
 
92
+ # Resolución
93
  aspect_ratios = {
94
  "1:1": (1024, 1024),
95
  "16:9": (1344, 768),
 
106
  prompt=final_prompt,
107
  width=width,
108
  height=height,
 
109
  progress_callback=progress
110
  )
111
 
 
116
  try:
117
  if not prompt.strip():
118
  return "⚠️ Prompt vacío"
119
+ # Usa el prompt original o el mejorado (en este flujo, ya está mejorado)
120
+ enhanced = prompt
121
  success = agent.db.store_cache_result(prompt, enhanced, category, 0.95, "user_curated")
122
  return "✅ Prompt guardado como ejemplo de alta calidad." if success else "❌ Error al guardar."
123
  except Exception as e:
 
137
  h1, h2, h3 { color: #ffffff !important; font-weight: 300 !important; }
138
  """
139
 
140
+ with gr.Blocks(css=custom_css, title="SDXL + SEMANTIC AI (HF Inference)", theme=gr.themes.Base()) as interface:
141
  gr.HTML("""
142
  <div style="text-align: center; padding: 20px; background: #000000;">
143
  <h1 style="color: #ffffff; font-size: 2.5em; font-weight: 300; margin: 0; letter-spacing: 2px;">
144
+ SDXL + SEMANTIC AI (HF Inference)
145
  </h1>
146
  <p style="color: #cccccc; font-size: 1.1em; margin: 10px 0 0 0;">
147
+ Testing de prompts con generación gratuita (HF Pro)
148
  </p>
149
  </div>
150
  """)
 
167
  choices=["1:1", "16:9", "9:16", "4:3", "3:4", "21:9", "9:21"],
168
  value="1:1"
169
  )
170
+ generate_btn = gr.Button("🧠 GENERAR CON SDXL", variant="primary")
 
171
 
172
  with gr.Column():
173
  output_image = gr.Image(label="Imagen generada", type="filepath", height=500)
 
179
  example_btn.click(get_semantic_example_wrapper, [enhancement_category, prompt_input], example_output)
180
  generate_btn.click(
181
  generate_wrapper,
182
+ [prompt_input, aspect_ratio, enable_semantic_enhancement, enhancement_category],
183
  [output_image, status_output]
184
  )
185
  save_btn.click(save_high_quality_prompt, [prompt_input, enhancement_category, status_output], [save_status])