Update app.py
app.py
CHANGED
@@ -1,4 +1,3 @@
-import spaces  # For the free Zero GPU tier
 import torch
 import gradio as gr
 from diffusers import FluxPipeline
@@ -24,20 +23,29 @@ def load_lora(repo_id, style):
     except Exception as e:
         return f"❌ LoRA error: {str(e)}"
 
-@spaces.GPU(duration=120)  # Allocate a GPU for 120s
 def generate(prompt, negative_prompt, width=1024, height=1024, steps=4, seed=-1, lora_scale=0.8):
     try:
-
-
-
+        print(f"🚀 Starting generation: {prompt[:50]}...")
+
+        # Automatic device detection
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+        dtype = torch.bfloat16 if device == "cuda" else torch.float32
+
+        print(f"📱 Device: {device}, dtype: {dtype}")
+
+        # Load the pipeline
+        pipe = FluxPipeline.from_pretrained(model_id, torch_dtype=dtype)
+        pipe.to(device)
 
         # Load the LoRA if available
         if lora_repo and lora_path:
+            print(f"🎨 Applying LoRA: {lora_path}")
             pipe.load_lora_weights(lora_path)
             pipe.fuse_lora(lora_scale=lora_scale)
 
-        generator = torch.Generator(
+        generator = torch.Generator(device).manual_seed(seed if seed != -1 else random.randint(0, 2**32))
 
+        print(f"⚙️ Generating {width}x{height}, {steps} steps...")
         image = pipe(
             prompt,
             negative_prompt=negative_prompt,
@@ -48,12 +56,16 @@ def generate(prompt, negative_prompt, width=1024, height=1024, steps=4, seed=-1,
             generator=generator
         ).images[0]
 
+        print("✅ Image generated successfully!")
+
         # VRAM cleanup
         del pipe
-
+        if device == "cuda":
+            torch.cuda.empty_cache()
+
         return image
     except Exception as e:
-        print(f"Generation error: {e}")
+        print(f"❌ Generation error: {e}")
         import traceback
         traceback.print_exc()
         return None
|