Nad54 commited on
Commit
3aded6c
·
verified ·
1 Parent(s): 41a18bb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -9
app.py CHANGED
@@ -9,7 +9,7 @@ from PIL import Image
9
  import gradio as gr
10
 
11
  from huggingface_hub import hf_hub_download
12
- from transformers import AutoModelForImageSegmentation
13
  from torchvision import transforms
14
  from pipeline import InstantCharacterFluxPipeline
15
 
@@ -33,6 +33,31 @@ ghibli_style_lora_path = hf_hub_download("InstantX/FLUX.1-dev-LoRA-Ghibli", "ghi
33
  onepiece_style_lora_path = os.path.join(os.path.dirname(__file__), "onepiece_flux_v2.safetensors")
34
  ONEPIECE_TRIGGER = "onepiece style"
35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  # --------------------------------------------
37
  # Init pipeline
38
  # --------------------------------------------
@@ -82,6 +107,22 @@ def remove_bkg(subject_image):
82
  cropped = pad_to_square(obj, 255)
83
  return Image.fromarray(cropped.astype(np.uint8))
84
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
  # --------------------------------------------
86
  # Generation logic
87
  # --------------------------------------------
@@ -104,7 +145,7 @@ def create_image(input_image, prompt, scale, guidance_scale, num_inference_steps
104
  generator = torch.manual_seed(seed)
105
  common_args = dict(
106
  prompt=prompt,
107
- negative_prompt=negative_prompt, # <-- ajouté
108
  num_inference_steps=num_inference_steps,
109
  guidance_scale=guidance_scale,
110
  width=1024, height=1024,
@@ -122,14 +163,20 @@ def create_image(input_image, prompt, scale, guidance_scale, num_inference_steps
122
  # --------------------------------------------
123
  # UI definition (Gradio 5)
124
  # --------------------------------------------
125
- def generate_fn(image, prompt, scale, style, guidance, steps, seed, randomize, negative_prompt):
 
 
 
 
 
 
126
  seed = randomize_seed(seed, randomize)
127
  return create_image(image, prompt, scale, guidance, steps, seed, style, negative_prompt)
128
 
129
  title = "🎨 InstantCharacter + One Piece LoRA"
130
  description = (
131
- "Upload your photo, describe your scene, choose **One Piece style** "
132
- "and generate yourself as an anime character!"
133
  )
134
 
135
  demo = gr.Interface(
@@ -144,15 +191,16 @@ demo = gr.Interface(
144
  gr.Slider(5, 50, value=28, step=1, label="Inference Steps"),
145
  gr.Slider(-1000000, 1000000, value=123456, step=1, label="Seed"),
146
  gr.Checkbox(value=True, label="Randomize Seed"),
147
- gr.Textbox(label="Negative Prompt", placeholder="e.g. photorealistic, realistic skin, pores, hdr") # <-- ajouté
 
148
  ],
149
  outputs=gr.Gallery(label="Generated Image"),
150
  title=title,
151
  description=description,
152
  examples=[
153
- ["./assets/girl.jpg", f"A girl playing guitar, {ONEPIECE_TRIGGER}", 0.9, "One Piece style", 3.5, 28, 123, False, ""],
154
- ["./assets/boy.jpg", f"A boy riding a bike, {ONEPIECE_TRIGGER}", 0.9, "One Piece style", 3.5, 28, 123, False, ""]
155
- ]
156
  )
157
 
158
  demo.launch()
 
9
  import gradio as gr
10
 
11
  from huggingface_hub import hf_hub_download
12
+ from transformers import AutoModelForImageSegmentation, CLIPProcessor, CLIPModel
13
  from torchvision import transforms
14
  from pipeline import InstantCharacterFluxPipeline
15
 
 
33
  onepiece_style_lora_path = os.path.join(os.path.dirname(__file__), "onepiece_flux_v2.safetensors")
34
  ONEPIECE_TRIGGER = "onepiece style"
35
 
36
# ---------- Gender-aware prompt templates ----------
# Two fixed prompt templates, one per detected gender (see detect_gender).
# NOTE: the male template previously was a copy-paste duplicate of the female
# one ("female pirate character", "One Piece heroine"), which made the
# gender detection a no-op — fixed to an actual male-character description.
MALE_PROMPT = (
    "Upper-body anime portrait of a male pirate character inspired by One Piece, "
    "confident and charismatic expression, bold yet relaxed pose, playful or determined smirk, "
    "expressive eyes with anime-style lighting, slightly windswept hair, keeping the distinctive facial features and hairstyle of the person, "
    "detailed anime rendering of their face, natural matte skin tone, lips matching the skin color (no pink or gloss), "
    "wearing optional stylish pirate clothing inspired by One Piece (open shirt, captain's coat, belts, jackets, or scarves), "
    "with optional pirate accessories such as earrings, necklace, or hat, only if it fits the character’s style, "
    "well-framed composition showing the full head and shoulders clearly, centered and balanced, cinematic warm lighting, "
    "high-quality cel-shaded coloring and detailed linework, One Piece-style background (ship deck or ocean sky), "
    "designed to look strong, charismatic and iconic like a real One Piece hero character, no frame, no text."
)

FEMALE_PROMPT = (
    "Upper-body anime portrait of a female pirate character inspired by One Piece, "
    "confident and charismatic expression, elegant yet powerful pose, playful or determined smirk, "
    "expressive eyes with anime-style lighting, slightly windswept hair, keeping the distinctive facial features and hairstyle of the person, "
    "detailed anime rendering of their face, natural matte skin tone, lips matching the skin color (no pink or gloss), "
    "wearing optional stylish pirate clothing inspired by One Piece (open shirt, corset, belts, jackets, jewelry, or scarves), "
    "with optional pirate accessories such as earrings, necklace, or hat, only if it fits the character’s style, "
    "well-framed composition showing the full head and shoulders clearly, centered and balanced, cinematic warm lighting, "
    "high-quality cel-shaded coloring and detailed linework, One Piece-style background (ship deck or ocean sky), "
    "designed to look beautiful, elegant and iconic like a real One Piece heroine character, no frame, no text."
)
61
  # --------------------------------------------
62
  # Init pipeline
63
  # --------------------------------------------
 
107
  cropped = pad_to_square(obj, 255)
108
  return Image.fromarray(cropped.astype(np.uint8))
109
 
110
# --------------------------------------------
# Simple gender detector (CLIP zero-shot)
# --------------------------------------------
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
clip_model.eval()


@torch.no_grad()
def detect_gender(img_pil: Image.Image) -> str:
    """Classify the portrait in *img_pil* as "male" or "female".

    Zero-shot CLIP classification: the image is scored against two fixed
    captions and the caption with the higher image-text logit wins.
    """
    candidate_captions = [
        "a portrait photo of a man",
        "a portrait photo of a woman",
    ]
    batch = clip_processor(
        text=candidate_captions,
        images=img_pil.convert("RGB"),
        return_tensors="pt",
        padding=True,
    ).to(device)
    # logits_per_image has shape (1, 2); drop the batch dim before argmax.
    image_logits = clip_model(**batch).logits_per_image.squeeze(0)
    winner = int(image_logits.argmax().item())
    return ("male", "female")[winner]
126
  # --------------------------------------------
127
  # Generation logic
128
  # --------------------------------------------
 
145
  generator = torch.manual_seed(seed)
146
  common_args = dict(
147
  prompt=prompt,
148
+ negative_prompt=negative_prompt,
149
  num_inference_steps=num_inference_steps,
150
  guidance_scale=guidance_scale,
151
  width=1024, height=1024,
 
163
  # --------------------------------------------
164
  # UI definition (Gradio 5)
165
  # --------------------------------------------
166
def generate_fn(image, prompt, scale, style, guidance, steps, seed, randomize, negative_prompt, auto_prompt):
    """Gradio entry point: resolve the prompt and seed, then run generation."""
    if auto_prompt and image is not None:
        # Auto-prompt mode: discard the user prompt and use the CLIP-selected,
        # gender-aware template instead.
        template = FEMALE_PROMPT if detect_gender(image) == "female" else MALE_PROMPT
        # Append the One Piece trigger word to reinforce the LoRA style.
        prompt = f"{template}, {ONEPIECE_TRIGGER}"
    seed = randomize_seed(seed, randomize)
    return create_image(image, prompt, scale, guidance, steps, seed, style, negative_prompt)
175
 
176
  title = "🎨 InstantCharacter + One Piece LoRA"
177
  description = (
178
+ "Upload your photo, describe your scene, or tick **Auto One Piece Prompt** to auto-pick a gender-aware template. "
179
+ "Choose **One Piece style** to apply the LoRA."
180
  )
181
 
182
  demo = gr.Interface(
 
191
  gr.Slider(5, 50, value=28, step=1, label="Inference Steps"),
192
  gr.Slider(-1000000, 1000000, value=123456, step=1, label="Seed"),
193
  gr.Checkbox(value=True, label="Randomize Seed"),
194
+ gr.Textbox(label="Negative Prompt", placeholder="e.g. photorealistic, realistic skin, pores, hdr"),
195
+ gr.Checkbox(value=True, label="Auto One Piece Prompt (gender-aware)"), # <--- nouvel interrupteur
196
  ],
197
  outputs=gr.Gallery(label="Generated Image"),
198
  title=title,
199
  description=description,
200
  examples=[
201
+ ["./assets/girl.jpg", f"A girl playing guitar, {ONEPIECE_TRIGGER}", 0.9, "One Piece style", 3.5, 28, 123, False, "", True],
202
+ ["./assets/boy.jpg", f"A boy riding a bike, {ONEPIECE_TRIGGER}", 0.9, "One Piece style", 3.5, 28, 123, False, "", True],
203
+ ],
204
  )
205
 
206
  demo.launch()