Update app.py
app.py CHANGED
@@ -1,97 +1,97 @@
-import os
-import sys
-import subprocess
-
-# Install dependencies at startup (ignore errors if already installed)
-def install_deps():
-    deps = [
-        "torch==2.5.1",
-        "torchaudio==2.5.1",
-        "torchvision==0.20.1",
-        "diffusers==0.32.2",
-        "transformers==4.50.3",
-        "accelerate==1.1.1",
-        "huggingface-hub==0.26.1",
-        "safetensors==0.4.6",
-        "pillow==10.4.0",
-        "peft==0.13.2"
-    ]
-    for dep in deps:
-        subprocess.check_call([sys.executable, "-m", "pip", "install", "--quiet", dep])
-
-install_deps()  # Run once
-
 import torch
 import gradio as gr
 from diffusers import FluxPipeline
-
-
+from huggingface_hub import hf_hub_download
+import random
+
+# Load the Flux.1-schnell model
 model_id = "black-forest-labs/FLUX.1-schnell"
-pipe = FluxPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16)
-pipe.enable_model_cpu_offload()
-pipe.enable_sequential_cpu_offload()  # Optimize VRAM for the free Space tier
 
 # Global variables for the LoRA
 lora_repo = None
-
+lora_path = None
 
 def load_lora(repo_id):
-    global
+    global lora_repo, lora_path
     try:
         lora_path = hf_hub_download(repo_id=repo_id, filename="flux-lora.safetensors")
-        pipe.load_lora_weights(lora_path)
-        pipe.fuse_lora()
         lora_repo = repo_id
-        return f"LoRA chargé : {repo_id}"
+        return f"✅ LoRA chargé : {repo_id}"
     except Exception as e:
-        return f"Erreur LoRA : {str(e)}"
+        return f"❌ Erreur LoRA : {str(e)}"
 
-@spaces.GPU(duration=120)
 def generate(prompt, negative_prompt, width=1024, height=1024, steps=4, seed=-1, lora_scale=0.8):
-
-
-
-        pipe.
-
-
-
-
-
-
-
-
-
-
-
-
-
+    try:
+        # Load the pipeline
+        pipe = FluxPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16)
+        pipe.enable_model_cpu_offload()
+
+        # Load the LoRA if one is available
+        if lora_repo and lora_path:
+            pipe.load_lora_weights(lora_path)
+            pipe.fuse_lora(lora_scale=lora_scale)
+
+        generator = torch.Generator("cuda").manual_seed(seed if seed != -1 else random.randint(0, 2**32))
+
+        image = pipe(
+            prompt,
+            negative_prompt=negative_prompt,
+            height=height,
+            width=width,
+            num_inference_steps=steps,
+            guidance_scale=0.0,
+            generator=generator
+        ).images[0]
+
+        # Free up VRAM
+        del pipe
+        torch.cuda.empty_cache()
+        return image
+    except Exception as e:
+        return None
 
-# Gradio interface
-with gr.Blocks(title="Flux Schnell
-    gr.Markdown("# Flux.1 Schnell + LoRA
+# Gradio interface
+with gr.Blocks(title="Flux Schnell + LoRA", theme=gr.themes.Soft()) as demo:
+    gr.Markdown("# 🎨 Flux.1 Schnell + LoRA\nGénérateur rapide (4 steps) avec support LoRA personnalisé")
 
     with gr.Row():
         with gr.Column(scale=1):
-            lora_input = gr.Textbox(
-
-
+            lora_input = gr.Textbox(
+                label="Repo HuggingFace LoRA",
+                placeholder="ex: XLabs-AI/flux-lora-collection",
+                value=""
+            )
+            load_btn = gr.Button("Charger LoRA", variant="secondary")
+            status = gr.Textbox(label="Status", interactive=False)
 
         with gr.Column(scale=4):
-            prompt = gr.Textbox(
-
+            prompt = gr.Textbox(
+                label="Prompt",
+                placeholder="une belle image artistique, haute qualité, détaillée",
+                lines=3,
+                value="beautiful artistic portrait, high quality, detailed"
+            )
+            neg_prompt = gr.Textbox(
+                label="Prompt négatif",
+                value="blurry, deformed, ugly, lowres, text, watermark"
+            )
 
     with gr.Row():
-        steps = gr.Slider(1, 20, value=4, label="Steps
+        steps = gr.Slider(1, 20, value=4, label="Steps", step=1)
         width = gr.Slider(512, 2048, value=1024, step=128, label="Largeur")
         height = gr.Slider(512, 2048, value=1024, step=128, label="Hauteur")
-        lora_scale_slider = gr.Slider(0, 2, value=0.8, label="Force LoRA")
-        seed = gr.Number(value=-1, label="Seed (-1=random)")
+        lora_scale_slider = gr.Slider(0, 2, value=0.8, step=0.1, label="Force LoRA")
+        seed = gr.Number(value=-1, label="Seed (-1=random)", precision=0)
 
-    generate_btn = gr.Button("Générer Image
-    output = gr.Image(label="Résultat")
+    generate_btn = gr.Button("🚀 Générer Image", variant="primary", size="lg")
+    output = gr.Image(label="Résultat", type="pil")
 
     load_btn.click(load_lora, inputs=lora_input, outputs=status)
-    generate_btn.click(
+    generate_btn.click(
+        generate,
+        inputs=[prompt, neg_prompt, width, height, steps, seed, lora_scale_slider],
+        outputs=output
+    )
 
 if __name__ == "__main__":
-    demo.launch(
+    demo.launch()
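One note on the committed generate(): it builds a fresh FluxPipeline on every click, so each generation re-reads all of the model's weight files, and torch.Generator("cuda") raises on a Space without CUDA. Below is a minimal sketch of both points, assuming the same torch/diffusers stack; get_pipe and make_generator are hypothetical helpers, not part of this commit.

import random
import torch
from diffusers import FluxPipeline

_pipe = None  # hypothetical module-level cache, not in the committed app.py

def get_pipe():
    # Build the pipeline once and reuse it across clicks instead of
    # reloading every weight file on each generation.
    global _pipe
    if _pipe is None:
        _pipe = FluxPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
        )
        _pipe.enable_model_cpu_offload()
    return _pipe

def make_generator(seed: int) -> torch.Generator:
    # torch.Generator("cuda") raises on CPU-only hardware, so pick the
    # device dynamically; seed == -1 keeps the "random seed" convention above.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if seed == -1:
        seed = random.randint(0, 2**32 - 1)
    return torch.Generator(device).manual_seed(seed)

Keeping the pipeline resident trades VRAM headroom for latency; the commit's del pipe / torch.cuda.empty_cache() pattern makes the opposite trade, keeping the Space's memory footprint minimal between requests.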
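Separately, the except Exception: return None in generate() turns every failure (no GPU, incompatible LoRA, out-of-memory) into a silently blank image. A sketch of the same handler surfaced through Gradio's own error mechanism; run_pipeline is a hypothetical stand-in for the body of the try: block.

import gradio as gr

def generate_reporting(prompt, *args):
    # Same shape as the committed generate(), but failures reach the user.
    try:
        return run_pipeline(prompt, *args)  # hypothetical: the pipe(...) call above
    except Exception as e:
        # gr.Error is raised, not returned; Gradio renders the message in the UI
        # instead of leaving the Image component empty.
        raise gr.Error(f"Generation failed: {e}")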
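Finally, load_lora() hardcodes filename="flux-lora.safetensors", while many Hub LoRA repos (the placeholder's XLabs-AI/flux-lora-collection among them) may store their weights under other names, so the download can fail. A sketch, assuming huggingface_hub is available, that discovers the weight file instead; download_lora is a hypothetical replacement, not the committed function.

from huggingface_hub import hf_hub_download, list_repo_files

def download_lora(repo_id: str) -> str:
    # List the repo and take the first .safetensors file rather than
    # assuming a fixed "flux-lora.safetensors" name.
    candidates = [f for f in list_repo_files(repo_id) if f.endswith(".safetensors")]
    if not candidates:
        raise FileNotFoundError(f"No .safetensors file found in {repo_id}")
    return hf_hub_download(repo_id=repo_id, filename=candidates[0])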