Leteint committed on
Commit
c8bec5c
·
verified ·
1 Parent(s): a78d192

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +77 -0
app.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import gradio as gr
from diffusers import FluxPipeline
from huggingface_hub import hf_hub_download
import os
import random

# Load the FLUX.1-schnell model (Apache 2.0 licensed, 4-step distilled).
model_id = "black-forest-labs/FLUX.1-schnell"
pipe = FluxPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16)
# BUG FIX: the original enabled BOTH enable_model_cpu_offload() and
# enable_sequential_cpu_offload(). Diffusers documents these as mutually
# exclusive offload strategies; installing both sets of accelerate hooks on
# the same pipeline is unsupported. Sequential offload is the most
# VRAM-frugal of the two (suited to a free Space), so keep only that one.
pipe.enable_sequential_cpu_offload()

# Global LoRA state, shared with the Gradio callbacks defined below.
lora_repo = None       # repo id of the currently loaded LoRA, or None
lora_strength = 0.8    # default LoRA strength
18
def load_lora(repo_id):
    """Load (and fuse) LoRA weights from a Hugging Face repo into `pipe`.

    Args:
        repo_id: Hugging Face repo id containing FLUX-compatible LoRA weights.

    Returns:
        A human-readable status string (success or error, in French to match
        the rest of the UI).
    """
    global pipe, lora_repo
    try:
        # BUG FIX: if a previous LoRA was fused in, undo it first so that
        # repeated loads do not stack on top of each other — fuse_lora()
        # merges the adapter into the base weights permanently.
        if lora_repo is not None:
            pipe.unfuse_lora()
        try:
            # BUG FIX: let diffusers resolve the weight file inside the repo
            # instead of assuming a hard-coded "flux-lora.safetensors" name,
            # which most LoRA repos do not use.
            pipe.load_lora_weights(repo_id)
        except Exception:
            # Fall back to the original hard-coded file name for repos where
            # auto-resolution fails.
            lora_path = hf_hub_download(repo_id=repo_id, filename="flux-lora.safetensors")
            pipe.load_lora_weights(lora_path)
        pipe.fuse_lora()
        lora_repo = repo_id
        return f"LoRA chargé : {repo_id}"
    except Exception as e:
        return f"Erreur LoRA : {str(e)}"
28
+
29
def generate(prompt, negative_prompt, width=1024, height=1024, steps=4, seed=-1, lora_scale=0.8):
    """Generate one image with the module-level FLUX pipeline.

    Args:
        prompt: Text prompt.
        negative_prompt: Negative prompt (NOTE(review): with guidance_scale=0
            this is presumably ignored by the distilled schnell model — confirm
            against the installed diffusers version, older FluxPipeline
            releases did not accept this argument at all).
        width, height: Output resolution in pixels (Gradio sliders pass floats).
        steps: Number of inference steps (4 is the schnell sweet spot).
        seed: RNG seed; -1 picks a random seed.
        lora_scale: Strength applied to loaded LoRA layers.

    Returns:
        A PIL.Image with the generated result.
    """
    global pipe, lora_repo, lora_strength
    # BUG FIX: FluxPipeline has no `set_lora_scale` method, so the original
    # raised AttributeError on every call. The supported way to scale LoRA
    # layers at inference time is joint_attention_kwargs={"scale": ...}.
    attention_kwargs = {"scale": lora_scale} if lora_repo else None

    # BUG FIX: gr.Number/gr.Slider deliver floats; torch.Generator.manual_seed
    # and the pipeline's size/step arguments require ints. Also use 2**32 - 1
    # so the random seed stays within the conventional 32-bit range.
    seed = int(seed)
    actual_seed = seed if seed != -1 else random.randint(0, 2**32 - 1)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    generator = torch.Generator(device=device).manual_seed(actual_seed)

    image = pipe(
        prompt,
        height=int(height), width=int(width),
        num_inference_steps=int(steps),
        guidance_scale=0.0,  # schnell is guidance-distilled: CFG disabled
        max_sequence_length=512,
        generator=generator,
        negative_prompt=negative_prompt,
        joint_attention_kwargs=attention_kwargs,
    ).images[0]
    return image
48
+
49
# Gradio interface wiring: inputs on the left, prompt/result on the right.
# BUG FIX: gr.themes.Dark() does not exist among Gradio's built-in themes
# (Base/Default/Soft/Glass/Monochrome/...), so the original raised
# AttributeError while building the Blocks. Use the valid Base theme; dark
# mode in Gradio is toggled client-side, not via a theme class.
with gr.Blocks(title="Flux Schnell NSFW LoRA Space", theme=gr.themes.Base()) as demo:
    gr.Markdown("# Flux.1 Schnell + LoRA NSFW\nGénérateur rapide (4 steps) pour images explicites. Ajoutez LoRA NSFW de HF/Civitai.")

    with gr.Row():
        with gr.Column(scale=1):
            # LoRA loading controls (repo id -> load_lora -> status text).
            lora_input = gr.Textbox(label="Repo HF LoRA (ex: XLabs-AI/flux-lora-collection ou NSFW)", value="")
            load_btn = gr.Button("Charger LoRA")
            status = gr.Textbox(label="Status LoRA")

        with gr.Column(scale=4):
            prompt = gr.Textbox(label="Prompt NSFW", placeholder="Fille nue réaliste, pose sexy, éclairage studio, haut détail, 8k", lines=3, value="beautiful naked woman, detailed anatomy, erotic pose, realistic skin, soft lighting")
            neg_prompt = gr.Textbox(label="Negative", value="blurry, deformed, ugly, lowres, text, watermark")

            with gr.Row():
                # Generation parameters; order here must match the `inputs=`
                # list passed to generate_btn.click below.
                steps = gr.Slider(1, 20, value=4, label="Steps (4=rapide)")
                width = gr.Slider(512, 2048, value=1024, step=128, label="Largeur")
                height = gr.Slider(512, 2048, value=1024, step=128, label="Hauteur")
                lora_scale_slider = gr.Slider(0, 2, value=0.8, label="Force LoRA")
                seed = gr.Number(value=-1, label="Seed (-1=random)")

            generate_btn = gr.Button("Générer Image NSFW", variant="primary")
            output = gr.Image(label="Résultat")

    # Event wiring: the click handlers call the module-level functions above.
    load_btn.click(load_lora, inputs=lora_input, outputs=status)
    generate_btn.click(generate, inputs=[prompt, neg_prompt, width, height, steps, seed, lora_scale_slider], outputs=output)
75
+
76
# Script entry point: start the Gradio server.
if __name__ == "__main__":
    # Bind all interfaces on port 7860 — the standard Hugging Face Spaces port.
    demo.launch(server_name="0.0.0.0", server_port=7860)