Spaces: vcollos / (Runtime error)

vcollos committed (verified)
Commit 933ac9a · 1 Parent(s): 373241b

Update app.py

Files changed (1):
  app.py (+48 -9)
app.py CHANGED
@@ -23,14 +23,22 @@ hf_token = os.getenv("HF_TOKEN")
 
 # Inicializa o modelo base FLUX.1-dev
 base_model = "black-forest-labs/FLUX.1-dev"
+
+# Verifica se CUDA está disponível
+device = "cuda" if torch.cuda.is_available() else "cpu"
+dtype = torch.float16 if device == "cuda" else torch.float32
+
+print(f"✅ Dispositivo detectado: {device}")
+print(f"✅ Usando precisão: {dtype}")
+
 pipe = DiffusionPipeline.from_pretrained(
     base_model,
-    torch_dtype=torch.float16,
+    torch_dtype=dtype,
     use_safetensors=True
 )
 
-# Move o modelo para GPU
-pipe.to("cuda")
+# Move o modelo para o dispositivo disponível
+pipe.to(device)
 
 # Definição dos LoRA e Trigger Words
 lora_models = {
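The hunk above replaces the hard-coded CUDA setup with runtime device and dtype detection. A minimal standalone sketch of that pattern, assuming only torch and diffusers are installed and that the gated FLUX.1-dev checkpoint is accessible (e.g. through the HF_TOKEN the Space already reads from its environment):

    import torch
    from diffusers import DiffusionPipeline

    # float16 only when a GPU is present; CPUs generally run float32.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32

    pipe = DiffusionPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev",  # same base model as the Space
        torch_dtype=dtype,
        use_safetensors=True,
    )
    pipe.to(device)  # move all pipeline components to the detected device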
@@ -106,7 +114,9 @@ def translate_text(text, source_lang="pt", target_lang="en"):
 def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_option, lora_scale_1, lora_scale_2, cross_attention_scale, auto_translate, progress=gr.Progress(track_tqdm=True)):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-    generator = torch.Generator(device="cuda").manual_seed(seed)
+
+    # Cria o gerador no dispositivo correto (cuda ou cpu)
+    generator = torch.Generator(device=device).manual_seed(seed)
 
     original_prompt = prompt  # Guarda o prompt original para metadados
 
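This hunk only moves the torch.Generator onto whatever device was detected at startup; a generator has to live on the same device as the tensors it seeds. A small self-contained sketch of why the seeded generator keeps results reproducible (the example tensors are illustrative, not from app.py):

    import torch

    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Two generators seeded identically on the same device yield the same
    # random stream, which is what makes the "Seed" slider reproducible.
    g1 = torch.Generator(device=device).manual_seed(556215326)
    g2 = torch.Generator(device=device).manual_seed(556215326)

    a = torch.randn(4, generator=g1, device=device)
    b = torch.randn(4, generator=g2, device=device)
    assert torch.equal(a, b)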
 
@@ -150,8 +160,20 @@ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora
     if lora_option == "Ambos" and cross_attention_scale != 1.0:
         cross_attention_kwargs = {"scale": cross_attention_scale}
 
-    # Gera a imagem com precisão de 16 bits
-    with torch.autocast("cuda"):
+    # Gera a imagem com o dispositivo e precisão adequados
+    if device == "cuda":
+        with torch.autocast("cuda"):
+            image = pipe(
+                prompt=prompt,
+                num_inference_steps=steps,
+                guidance_scale=cfg_scale,
+                width=width,
+                height=height,
+                generator=generator,
+                cross_attention_kwargs=cross_attention_kwargs
+            ).images[0]
+    else:
+        # Em CPU não é necessário usar autocast
         image = pipe(
             prompt=prompt,
             num_inference_steps=steps,
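The hunk above keeps torch.autocast("cuda") only on the GPU path and calls the pipeline directly on CPU, where the weights are already float32. A hedged sketch of the same branching with a plain matmul standing in for the pipeline call (the tensor is illustrative):

    import torch

    device = "cuda" if torch.cuda.is_available() else "cpu"
    x = torch.randn(8, 8, device=device)

    if device == "cuda":
        # Autocast runs eligible ops (e.g. matmul) in float16 on GPU.
        with torch.autocast("cuda"):
            y = x @ x
    else:
        # On CPU everything stays float32, so no autocast context is needed.
        y = x @ x

    print(y.dtype)  # float16 under CUDA autocast, float32 on CPU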
@@ -206,7 +228,19 @@ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora
 # Interface Gradio
 gr_theme = os.getenv("THEME")
 with gr.Blocks(theme=gr_theme) as app:
-    gr.Markdown("# Paula & Vivi Image Generator")
+    device_info = "GPU" if torch.cuda.is_available() else "CPU"
+    gr.Markdown(f"# Paula & Vivi Image Generator (Rodando em {device_info})")
+
+    if device == "cpu":
+        gr.Markdown("""
+        ⚠️ **Aviso: Executando em CPU**
+
+        Este aplicativo está rodando em CPU, o que significa que a geração de imagens será significativamente mais lenta.
+        Considere usar um ambiente com GPU para melhor performance.
+        """)
+    else:
+        gpu_info = torch.cuda.get_device_name(0)
+        gr.Markdown(f"✅ **GPU Detectada**: {gpu_info}")
 
     with gr.Row():
         with gr.Column(scale=2):
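The UI hunk adds a device banner that is decided once when the Blocks layout is built. A minimal sketch of the same conditional-Markdown pattern outside the Space (title and warning text paraphrased in English; demo is a placeholder name):

    import gradio as gr
    import torch

    device = "cuda" if torch.cuda.is_available() else "cpu"

    with gr.Blocks() as demo:
        device_info = "GPU" if device == "cuda" else "CPU"
        gr.Markdown(f"# Image Generator (running on {device_info})")

        if device == "cpu":
            # Rendered once when the Blocks is built; it does not update later.
            gr.Markdown("⚠️ Running on CPU: image generation will be much slower.")
        else:
            gr.Markdown(f"✅ GPU detected: {torch.cuda.get_device_name(0)}")

    if __name__ == "__main__":
        demo.launch()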
@@ -215,9 +249,14 @@ with gr.Blocks(theme=gr_theme) as app:
 
     with gr.Accordion("Configurações Básicas", open=True):
         cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
-        steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=32)
+        steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=32 if device == "cuda" else 20)
         width = gr.Slider(label="Width", minimum=256, maximum=1024, step=64, value=768)
-        height = gr.Slider(label="Height", minimum=256, maximum=1024, step=64, value=1024)
+        height = gr.Slider(label="Height", minimum=256, maximum=1024, step=64, value=1024)
+
+        # Definir valores padrão menores se estiver em CPU
+        if device == "cpu":
+            width.value = 512
+            height.value = 512
         randomize_seed = gr.Checkbox(False, label="Randomize seed")
         seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=556215326)
 
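In this last hunk the commit lowers the Steps default with a conditional expression but adjusts Width/Height by assigning .value after construction. An alternative sketch that picks all three defaults in the constructors instead (the 20/512 CPU values are the commit's own; this is an alternative phrasing for illustration, not what app.py does):

    import gradio as gr
    import torch

    device = "cuda" if torch.cuda.is_available() else "cpu"

    with gr.Blocks() as demo:
        # Smaller defaults on CPU keep generation times tolerable.
        steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1,
                          value=32 if device == "cuda" else 20)
        width = gr.Slider(label="Width", minimum=256, maximum=1024, step=64,
                          value=768 if device == "cuda" else 512)
        height = gr.Slider(label="Height", minimum=256, maximum=1024, step=64,
                           value=1024 if device == "cuda" else 512)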
 
 