Opera8 committed on
Commit 137477d · verified · 1 Parent(s): 4d205e6

Update app.py

Files changed (1)
1. app.py +600 -215
app.py CHANGED
@@ -4,88 +4,57 @@ import numpy as np
 import spaces
 import torch
 import random
-from PIL import Image
 from typing import Iterable
 from gradio.themes import Soft
 from gradio.themes.utils import colors, fonts, sizes
 
-colors.orange_red = colors.Color(
-    name="orange_red",
-    c50="#FFF0E5",
-    c100="#FFE0CC",
-    c200="#FFC299",
-    c300="#FFA366",
-    c400="#FF8533",
-    c500="#FF4500",
-    c600="#E63E00",
-    c700="#CC3700",
-    c800="#B33000",
-    c900="#992900",
-    c950="#802200",
 )
 
-class OrangeRedTheme(Soft):
-    def __init__(
-        self,
-        *,
-        primary_hue: colors.Color | str = colors.gray,
-        secondary_hue: colors.Color | str = colors.orange_red,
-        neutral_hue: colors.Color | str = colors.slate,
-        text_size: sizes.Size | str = sizes.text_lg,
-        font: fonts.Font | str | Iterable[fonts.Font | str] = (
-            fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
-        ),
-        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
-            fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
-        ),
-    ):
-        super().__init__(
-            primary_hue=primary_hue,
-            secondary_hue=secondary_hue,
-            neutral_hue=neutral_hue,
-            text_size=text_size,
-            font=font,
-            font_mono=font_mono,
-        )
-        super().set(
-            background_fill_primary="*primary_50",
-            background_fill_primary_dark="*primary_900",
-            body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
-            body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
-            button_primary_text_color="white",
-            button_primary_text_color_hover="white",
-            button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
-            button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
-            button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_700)",
-            button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_600)",
-            button_secondary_text_color="black",
-            button_secondary_text_color_hover="white",
-            button_secondary_background_fill="linear-gradient(90deg, *primary_300, *primary_300)",
-            button_secondary_background_fill_hover="linear-gradient(90deg, *primary_400, *primary_400)",
-            button_secondary_background_fill_dark="linear-gradient(90deg, *primary_500, *primary_600)",
-            button_secondary_background_fill_hover_dark="linear-gradient(90deg, *primary_500, *primary_500)",
-            slider_color="*secondary_500",
-            slider_color_dark="*secondary_600",
-            block_title_text_weight="600",
-            block_border_width="3px",
-            block_shadow="*shadow_drop_lg",
-            button_primary_shadow="*shadow_drop_lg",
-            button_large_padding="11px",
-            color_accent_soft="*primary_100",
-            block_label_background_fill="*primary_200",
-        )
 
-orange_red_theme = OrangeRedTheme()
 
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-dtype = torch.bfloat16
 
 from diffusers import FlowMatchEulerDiscreteScheduler
 from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
 from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
 from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
 
-print("Loading Qwen Image Edit Pipeline...")
 
 pipe = QwenImageEditPlusPipeline.from_pretrained(
     "Qwen/Qwen-Image-Edit-2509",
     transformer=QwenImageTransformer2DModel.from_pretrained(
@@ -97,52 +66,74 @@ pipe = QwenImageEditPlusPipeline.from_pretrained(
     torch_dtype=dtype
 ).to(device)
 
-print("Loading and Fusing Lightning LoRA...")
-pipe.load_lora_weights("lightx2v/Qwen-Image-Lightning",
-                       weight_name="Qwen-Image-Lightning-4steps-V2.0-bf16.safetensors",
-                       adapter_name="lightning")
-pipe.fuse_lora(adapter_names=["lightning"], lora_scale=1.0)
-
-print("Loading Task Adapters...")
-
-pipe.load_lora_weights("tarn59/apply_texture_qwen_image_edit_2509",
-                       weight_name="apply_texture_v2_qwen_image_edit_2509.safetensors",
-                       adapter_name="texture")
 
-pipe.load_lora_weights("ostris/qwen_image_edit_inpainting",
-                       weight_name="qwen_image_edit_inpainting.safetensors",
-                       adapter_name="fusion")
 
-pipe.load_lora_weights("ostris/qwen_image_edit_2509_shirt_design",
-                       weight_name="qwen_image_edit_2509_shirt_design.safetensors",
-                       adapter_name="shirt_design")
 
-pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Fusion",
-                       weight_name="溶图.safetensors",
-                       adapter_name="fusion-x")
 
-pipe.load_lora_weights("oumoumad/Qwen-Edit-2509-Material-transfer",
-                       weight_name="material-transfer_000004769.safetensors",
-                       adapter_name="material-transfer")
 
-pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Light-Migration",
-                       weight_name="参考色调.safetensors",
-                       adapter_name="light-migration")
 
-try:
-    pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
-    print("Flash Attention 3 Processor set successfully.")
-except Exception as e:
-    print(f"Could not set FA3 processor (likely hardware mismatch): {e}. using default attention.")
 
-MAX_SEED = np.iinfo(np.int32).max
 
 def update_dimensions_on_upload(image):
     if image is None:
         return 1024, 1024
-
     original_width, original_height = image.size
-
     if original_width > original_height:
         new_width = 1024
         aspect_ratio = original_height / original_width
@@ -151,166 +142,560 @@ def update_dimensions_on_upload(image):
         new_height = 1024
         aspect_ratio = original_width / original_height
         new_width = int(new_height * aspect_ratio)
-
-    # Ensure dimensions are multiples of 16
-    new_width = (new_width // 16) * 16
-    new_height = (new_height // 16) * 16
-
     return new_width, new_height
 
 @spaces.GPU(duration=30)
 def infer(
-    image_1,
-    image_2,
     prompt,
-    lora_adapter,
     seed,
     randomize_seed,
     guidance_scale,
     steps,
     progress=gr.Progress(track_tqdm=True)
 ):
-    if image_1 is None or image_2 is None:
-        raise gr.Error("Please upload both images for Fusion/Texture/FaceSwap tasks.")
-
-    if not prompt:
-        if lora_adapter == "Cloth-Design-Fuse":
-            prompt = "Put this design on their shirt."
-        elif lora_adapter == "Texture Edit":
-            prompt = "Apply texture to object."
-        elif lora_adapter == "Fuse-Objects":
-            prompt = "Fuse object into background."
-        elif lora_adapter == "Super-Fusion":
-            prompt = "Blend the product into the background, correct its perspective and lighting, and make it naturally integrated with the scene."
-        elif lora_adapter == "Material-Transfer":
-            prompt = "change materials of image1 to match the reference in image2"
-        elif lora_adapter == "Light-Migration":
-            prompt = "Refer to the color tone, remove the original lighting from Image 1, and relight Image 1 based on the lighting and color tone of Image 2."
-
-    adapters_map = {
-        "Texture Edit": "texture",
-        "Fuse-Objects": "fusion",
-        "Cloth-Design-Fuse": "shirt_design",
-        "Super-Fusion": "fusion-x",
-        "Material-Transfer": "material-transfer",
-        "Light-Migration": "light-migration",
-    }
-
-    active_adapter = adapters_map.get(lora_adapter)
-
-    if active_adapter:
-        pipe.set_adapters([active_adapter], adapter_weights=[1.0])
-    else:
-        pipe.set_adapters([], adapter_weights=[])
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator(device=device).manual_seed(seed)
-    negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
 
-    img1_pil = image_1.convert("RGB")
-    img2_pil = image_2.convert("RGB")
 
-    width, height = update_dimensions_on_upload(img1_pil)
 
-    result = pipe(
-        image=[img1_pil, img2_pil],
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        height=height,
-        width=width,
-        num_inference_steps=steps,
-        generator=generator,
-        true_cfg_scale=guidance_scale,
-    ).images[0]
 
-    return result, seed
 
 @spaces.GPU(duration=30)
-def infer_example(image_1, image_2, prompt, lora_adapter):
-    if image_1 is None or image_2 is None:
-        return None, 0
-    result, seed = infer(
-        image_1.convert("RGB"),
-        image_2.convert("RGB"),
-        prompt,
-        lora_adapter,
-        0,
-        True,
-        1.0,
-        4
-    )
-    return result, seed
 
-css="""
 #col-container {
     margin: 0 auto;
-    max-width: 1100px;
 }
-#main-title h1 {font-size: 2.1em !important;}
 """
 
 with gr.Blocks() as demo:
     with gr.Column(elem_id="col-container"):
-        gr.Markdown("# **Qwen-Image-Edit-2509-LoRAs-Fast-Fusion**", elem_id="main-title")
-        gr.Markdown("Perform diverse image edits using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2509) adapters for the [Qwen-Image-Edit](https://huggingface.co/Qwen/Qwen-Image-Edit-2509) model.")
-        with gr.Row(equal_height=True):
-
-            with gr.Column(scale=1):
-                with gr.Row():
-                    image_1 = gr.Image(label="Base Image", type="pil", height=290)
-                    image_2 = gr.Image(label="Reference Image", type="pil", height=290)
 
                 prompt = gr.Text(
-                    label="Edit Prompt",
                     show_label=True,
-                    placeholder="e.g., Apply wood texture to the mug...",
                 )
 
-                run_button = gr.Button("Edit Image", variant="primary")
-
-                with gr.Accordion("Advanced Settings", open=False, visible=False):
-                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
-                    randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
-                    guidance_scale = gr.Slider(label="True Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
-                    steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)
 
-            with gr.Column(scale=1):
-                output_image = gr.Image(label="Output Image", interactive=False, format="png", height=350)
 
         with gr.Row():
             lora_adapter = gr.Dropdown(
-                label="Choose Editing Style",
-                choices=["Texture Edit", "Cloth-Design-Fuse", "Fuse-Objects", "Super-Fusion", "Light-Migration", "Material-Transfer"],
-                value="Texture Edit",
-            )
-
         gr.Examples(
             examples=[
-                ["examples/M1.jpg", "examples/M2.jpg", "Refer to the color tone, remove the original lighting from Image 1, and relight Image 1 based on the lighting and color tone of Image 2.", "Light-Migration"],
-                ["examples/Cloth2.jpg", "examples/Design2.png", "Put this design on their shirt.", "Cloth-Design-Fuse"],
-                ["examples/Cup1.png", "examples/Wood1.png", "Apply wood texture to mug.", "Texture Edit"],
-                ["examples/Cloth1.jpg", "examples/Design1.png", "Put this design on their shirt.", "Cloth-Design-Fuse"],
-                ["examples/F3.jpg", "examples/F4.jpg", "Replace her glasses with the new glasses from image 1.", "Super-Fusion"],
-                ["examples/Chair.jpg", "examples/Material.jpg", "Change materials of image1 to match the reference in image2.", "Material-Transfer"],
-                ["examples/F1.jpg", "examples/F2.jpg", "Put the small bottle on the table.", "Super-Fusion"],
-                ["examples/Mug1.jpg", "examples/Texture1.jpg", "Apply the design from image 2 to the mug.", "Texture Edit"],
-                ["examples/Cat1.jpg", "examples/Glass1.webp", "A cat wearing glasses in image 2.", "Fuse-Objects"],
             ],
-            inputs=[image_1, image_2, prompt, lora_adapter],
-            outputs=[output_image, seed],
             fn=infer_example,
             cache_examples=False,
-            label="Examples"
         )
 
     run_button.click(
         fn=infer,
-        inputs=[image_1, image_2, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps],
-        outputs=[output_image, seed]
     )
-
 if __name__ == "__main__":
-    demo.queue(max_size=50).launch(css=css, theme=orange_red_theme, mcp_server=True, ssr_mode=False, show_error=True)
 
 import spaces
 import torch
 import random
+from PIL import Image, ImageFilter
 from typing import Iterable
 from gradio.themes import Soft
 from gradio.themes.utils import colors, fonts, sizes
+from deep_translator import GoogleTranslator
+from transformers import pipeline
 
+# --- Theme definition ---
+colors.steel_blue = colors.Color(
+    name="steel_blue",
+    c50="#EBF3F8",
+    c100="#D3E5F0",
+    c200="#A8CCE1",
+    c300="#7DB3D2",
+    c400="#529AC3",
+    c500="#4682B4",
+    c600="#3E72A0",
+    c700="#36638C",
+    c800="#2E5378",
+    c900="#264364",
+    c950="#1E3450",
 )
 
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
+# --- Load the NSFW (unsafe-content) detection model ---
+print("Loading Safety Checker...")
+safety_classifier = pipeline("image-classification", model="Falconsai/nsfw_image_detection", device=-1)
 
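+# The image-classification pipeline returns a list of {"label", "score"} dicts;
+# an image is flagged only when the "nsfw" label clears a 0.75 confidence threshold.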
+def is_image_nsfw(image):
+    if image is None:
+        return False
+    try:
+        results = safety_classifier(image)
+        for result in results:
+            if result['label'] == 'nsfw' and result['score'] > 0.75:
+                return True
+        return False
+    except Exception as e:
+        print(f"Safety check error: {e}")
+        return False
 
+# --- Load the main model ---
 from diffusers import FlowMatchEulerDiscreteScheduler
 from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
 from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
 from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
 
+dtype = torch.bfloat16
+
+print("Loading pipeline...")
 pipe = QwenImageEditPlusPipeline.from_pretrained(
     "Qwen/Qwen-Image-Edit-2509",
     transformer=QwenImageTransformer2DModel.from_pretrained(
 
     torch_dtype=dtype
 ).to(device)
 
+# Load the LoRA adapters
+pipe.load_lora_weights("autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime", weight_name="Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors", adapter_name="anime")
+pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multiple-angles", weight_name="镜头转换.safetensors", adapter_name="multiple-angles")
+pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Light_restoration", weight_name="移除光影.safetensors", adapter_name="light-restoration")
+pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Relight", weight_name="Qwen-Edit-Relight.safetensors", adapter_name="relight")
+pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multi-Angle-Lighting", weight_name="多角度灯光-251116.safetensors", adapter_name="multi-angle-lighting")
+pipe.load_lora_weights("tlennon-ie/qwen-edit-skin", weight_name="qwen-edit-skin_1.1_000002750.safetensors", adapter_name="edit-skin")
+pipe.load_lora_weights("lovis93/next-scene-qwen-image-lora-2509", weight_name="next-scene_lora-v2-3000.safetensors", adapter_name="next-scene")
+pipe.load_lora_weights("vafipas663/Qwen-Edit-2509-Upscale-LoRA", weight_name="qwen-edit-enhance_64-v3_000001000.safetensors", adapter_name="upscale-image")
 
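+# Unlike the previous version, the FA3 attention processor is set here without a
+# try/except fallback, so this assumes a GPU with Flash Attention 3 support.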
+pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
+MAX_SEED = np.iinfo(np.int32).max
 
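+# Maps the Persian dropdown labels shown in the UI to the adapter names registered above.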
+LORA_MAPPING = {
+    "تبدیل عکس به انیمه": "anime",
+    "تغییر زاویه دید": "multiple-angles",
+    "اصلاح نور و سایه": "light-restoration",
+    "نورپردازی مجدد (Relight)": "relight",
+    "نورپردازی چند زاویه‌ای": "multi-angle-lighting",
+    "روتوش پوست": "edit-skin",
+    "صحنه بعدی (سینمایی)": "next-scene",
+    "افزایش کیفیت (Upscale)": "upscale-image"
+}
 
+ASPECT_RATIOS_LIST = [
+    "خودکار (پیش‌فرض)",
+    "۱:۱ (مربع - 1024x1024)",
+    "۱۶:۹ (افقی - 1344x768)",
+    "۹:۱۶ (عمودی - 768x1344)",
+    "شخصی‌سازی (Custom)"
+]
 
+ASPECT_RATIOS_MAP = {
+    "خودکار (پیش‌فرض)": "Auto",
+    "۱:۱ (مربع - 1024x1024)": (1024, 1024),
+    "۱۶:۹ (افقی - 1344x768)": (1344, 768),
+    "۹:۱۶ (عمودی - 768x1344)": (768, 1344),
+    "شخصی‌سازی (Custom)": "Custom"
+}
 
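+# Keyword blocklist, applied to the prompt after it has been translated to English.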
+BANNED_WORDS = [
+    "nude", "naked", "sex", "porn", "undressed", "nsfw", "erotic", "xxx",
+    "breast", "nipple", "genital", "vagina", "penis", "ass", "butt", "sexual",
+    "lingerie", "bikini", "swimwear", "underwear", "fetish", "topless",
+    "exhibitionism", "hentai", "ecchi", "18+"
+]
 
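+# Note: the space-padded containment check below only catches banned words that are
+# delimited by spaces; a banned word followed by punctuation would slip through.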
+def check_text_safety(text):
+    text_lower = text.lower()
+    for word in BANNED_WORDS:
+        if f" {word} " in f" {text_lower} ":
+            return False
+    return True
 
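+# GoogleTranslator with source='auto' detects the prompt's language (here usually
+# Persian) and returns English; on any failure the raw text is passed through.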
+def translate_prompt(text):
+    if not text:
+        return ""
+    try:
+        translated = GoogleTranslator(source='auto', target='en').translate(text)
+        return translated
+    except Exception as e:
+        print(f"Translation Error: {e}")
+        return text
 
 def update_dimensions_on_upload(image):
     if image is None:
         return 1024, 1024
     original_width, original_height = image.size
     if original_width > original_height:
         new_width = 1024
         aspect_ratio = original_height / original_width
 
         new_height = 1024
         aspect_ratio = original_width / original_height
         new_width = int(new_height * aspect_ratio)
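+    # Snap to multiples of 8 (the previous version used 16), presumably to keep the
+    # output size aligned with the model's latent downsampling factor.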
+    new_width = (new_width // 8) * 8
+    new_height = (new_height // 8) * 8
     return new_width, new_height
 
+def get_error_html(message):
+    return f"""
+    <div style="background-color: #fee2e2; border: 1px solid #ef4444; color: #b91c1c; padding: 12px; border-radius: 8px; text-align: center; margin-bottom: 10px; font-weight: bold; display: flex; align-items: center; justify-content: center; gap: 8px;">
+        <span style="font-size: 1.2em;">⛔</span>
+        {message}
+    </div>
+    """
+
+def get_success_html(message):
+    return f"""
+    <div style="background-color: #dcfce7; border: 1px solid #22c55e; color: #15803d; padding: 12px; border-radius: 8px; text-align: center; margin-bottom: 10px; font-weight: bold; display: flex; align-items: center; justify-content: center; gap: 8px;">
+        <span style="font-size: 1.2em;">✅</span>
+        {message}
+    </div>
+    """
+
 @spaces.GPU(duration=30)
 def infer(
+    input_image,
     prompt,
+    lora_adapter_persian,
     seed,
     randomize_seed,
     guidance_scale,
     steps,
+    aspect_ratio_selection,
+    custom_width,
+    custom_height,
     progress=gr.Progress(track_tqdm=True)
 ):
+    if input_image is None:
+        return None, seed, get_error_html("لطفاً ابتدا یک تصویر بارگذاری کنید.")
+
+    if is_image_nsfw(input_image):
+        return None, seed, get_error_html("تصویر ورودی دارای محتوای نامناسب است و پردازش نمی‌شود.")
 
+    english_prompt = translate_prompt(prompt)
+    if not check_text_safety(english_prompt):
+        return None, seed, get_error_html("متن درخواست شامل کلمات غیرمجاز یا غیراخلاقی است.")
+
+    adapter_internal_name = LORA_MAPPING.get(lora_adapter_persian)
+    if adapter_internal_name:
+        pipe.set_adapters([adapter_internal_name], adapter_weights=[1.0])
+
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator(device=device).manual_seed(seed)
+
+    safety_negative = "nsfw, nude, naked, porn, sexual, xxx, breast, nipple, genital, vagina, penis, ass, lingerie, bikini, swimwear, underwear, fetish, topless, gore, violence, blood"
+    base_negative = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
+    final_negative_prompt = f"{safety_negative}, {base_negative}"
 
+    original_image = input_image.convert("RGB")
+
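+    # Resolve the output size: "Custom" uses the sliders (snapped to /8), a preset
+    # maps to a fixed (width, height) tuple, and "Auto" derives it from the upload.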
+    selection_value = ASPECT_RATIOS_MAP.get(aspect_ratio_selection)
+    if selection_value == "Custom":
+        width = (int(custom_width) // 8) * 8
+        height = (int(custom_height) // 8) * 8
+    elif selection_value == "Auto" or selection_value is None:
+        width, height = update_dimensions_on_upload(original_image)
+    else:
+        width, height = selection_value
+
+    try:
+        result = pipe(
+            image=original_image,
+            prompt=english_prompt,
+            negative_prompt=final_negative_prompt,
+            height=height,
+            width=width,
+            num_inference_steps=steps,
+            generator=generator,
+            true_cfg_scale=guidance_scale,
+        ).images[0]
 
+        if is_image_nsfw(result):
+            return None, seed, get_error_html("تصویر تولید شده حاوی محتوای نامناسب بود و حذف شد.")
+
+        return result, seed, get_success_html("تصویر با موفقیت ویرایش شد.")
 
+    except Exception as e:
+        return None, seed, get_error_html(f"خطا در پردازش: {str(e)}")
 
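+# Wrapper used by gr.Examples below; it pins fast defaults (random seed, CFG 1.0,
+# 4 steps, automatic sizing) so example clicks run quickly.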
 @spaces.GPU(duration=30)
+def infer_example(input_image, prompt, lora_adapter):
+    res, s, status = infer(input_image, prompt, lora_adapter, 0, True, 1.0, 4, "خودکار (پیش‌فرض)", 1024, 1024)
+    return res, s, status
+
+# --- JavaScript (revised and more robust) ---
+js_code = """
+<script>
+// Force light mode
+function forceLightMode() {
+    const body = document.querySelector('body');
+    if (body) {
+        body.classList.remove('dark');
+        body.style.backgroundColor = '#f5f7fa';
+        body.style.color = '#333333';
+    }
+    document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
+}
+
+// Close the quota modal
+function closeQuotaModal() {
+    const modal = document.getElementById('custom-quota-modal');
+    if (modal) modal.style.display = 'none';
+
+    // Remove the error toasts from the page so they are not shown again
+    document.querySelectorAll('.toast-wrap').forEach(el => {
+        if (el.innerText.includes('GPU quota')) el.remove();
+    });
+}
+
+// Aggressive observer that catches the quota error and swaps in the custom modal
+const observer = new MutationObserver((mutations) => {
+    let quotaErrorFound = false;
+
+    // Scan every element added to the DOM
+    mutations.forEach((mutation) => {
+        mutation.addedNodes.forEach((node) => {
+            if (node.nodeType === 1) { // Element node
+                const text = node.innerText || node.textContent;
+                // Check for quota-related messages
+                if (text && (text.includes('exceeded your GPU quota') || text.includes('GPU quota'))) {
+                    // Hide the original element
+                    node.style.display = 'none';
+                    node.style.visibility = 'hidden';
+                    node.style.opacity = '0';
+                    quotaErrorFound = true;
+                }
+            }
+        });
+    });
+
+    // Also check elements that already exist, to be safe
+    document.querySelectorAll('.toast-wrap, .error').forEach(el => {
+        if (el.innerText.includes('GPU quota')) {
+            el.style.display = 'none';
+            quotaErrorFound = true;
+        }
+    });
+
+    if (quotaErrorFound) {
+        const modal = document.getElementById('custom-quota-modal');
+        if (modal && modal.style.display !== 'flex') {
+            modal.style.display = 'flex';
+        }
+    }
+});
+
+document.addEventListener('DOMContentLoaded', () => {
+    forceLightMode();
+    setInterval(forceLightMode, 1000);
+
+    // Start observing
+    observer.observe(document.body, {
+        childList: true,
+        subtree: true,
+        characterData: true
+    });
+});
+</script>
+"""
+
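+# The download handler below posts a DOWNLOAD_REQUEST message to the parent window;
+# this assumes the Space is embedded in a host page that listens for that message.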
+# Download JavaScript
+js_dl = """
+async (image) => {
+    if (!image) { alert("لطفاً ابتدا تصویر را تولید کنید."); return; }
+    let fileUrl = image.url;
+    if (fileUrl && !fileUrl.startsWith('http')) {
+        fileUrl = window.location.origin + fileUrl;
+    } else if (!fileUrl && image.path) {
+        fileUrl = window.location.origin + "/file=" + image.path;
+    }
+    console.log("Sending download request for:", fileUrl);
+    window.parent.postMessage({ type: 'DOWNLOAD_REQUEST', url: fileUrl }, '*');
+}
+"""
+
+# --- HTML and CSS setup ---
+html_code = """
+<style>
+@import url('https://fonts.googleapis.com/css2?family=Vazirmatn:wght@300;400;500;700&display=swap');
+
+/* Global settings */
+:root, .dark, body, .gradio-container {
+    --body-background-fill: #f5f7fa !important;
+    --body-text-color: #1f2937 !important;
+    --background-fill-primary: #ffffff !important;
+    --background-fill-secondary: #f3f4f6 !important;
+    --border-color-primary: #e5e7eb !important;
+    --block-background-fill: #ffffff !important;
+    --block-label-text-color: #374151 !important;
+    --block-title-text-color: #111827 !important;
+    --input-background-fill: #ffffff !important;
+    color-scheme: light !important;
+}
+
+body {
+    font-family: 'Vazirmatn', sans-serif !important;
+    background-color: #f5f7fa !important;
+    margin: 0;
+    padding: 10px;
+}
+
+/* Custom modal styling */
+#custom-quota-modal {
+    position: fixed;
+    top: 0;
+    left: 0;
+    width: 100vw;
+    height: 100vh;
+    background-color: rgba(0, 0, 0, 0.85); /* darker, for stronger focus */
+    backdrop-filter: blur(8px);
+    z-index: 2147483647; /* highest possible layer */
+    display: none;
+    justify-content: center;
+    align-items: center;
+    direction: rtl;
+}
+
+.quota-modal-content {
+    background-color: white;
+    padding: 40px;
+    border-radius: 24px;
+    width: 90%;
+    max-width: 450px;
+    text-align: center;
+    box-shadow: 0 20px 60px rgba(0,0,0,0.4);
+    animation: slideIn 0.4s cubic-bezier(0.175, 0.885, 0.32, 1.275);
+    border: 1px solid rgba(255,255,255,0.2);
+}
+
+.quota-icon {
+    font-size: 72px;
+    margin-bottom: 25px;
+    display: block;
+    animation: float 3s ease-in-out infinite;
+}
+
+.quota-title {
+    font-size: 1.6em;
+    font-weight: 900;
+    color: #e11d48;
+    margin-bottom: 15px;
+}
+
+.quota-text {
+    font-size: 1.15em;
+    color: #374151;
+    line-height: 1.8;
+    margin-bottom: 30px;
+}
+
+.quota-btn {
+    background: linear-gradient(135deg, #e11d48 0%, #be123c 100%);
+    color: white;
+    border: none;
+    padding: 14px 35px;
+    border-radius: 14px;
+    font-weight: bold;
+    font-family: 'Vazirmatn', sans-serif;
+    font-size: 1.1em;
+    cursor: pointer;
+    box-shadow: 0 4px 15px rgba(225, 29, 72, 0.4);
+    transition: transform 0.2s;
+    width: 100%;
+}
+
+.quota-btn:hover {
+    transform: scale(1.02);
+}
+
+@keyframes slideIn {
+    from { opacity: 0; transform: translateY(30px) scale(0.9); }
+    to { opacity: 1; transform: translateY(0) scale(1); }
+}
+
+@keyframes float {
+    0% { transform: translateY(0px); }
+    50% { transform: translateY(-10px); }
+    100% { transform: translateY(0px); }
+}
+
+/* Hide Gradio's default error toasts */
+.toast-wrap, .toast-error, .error {
+    display: none !important;
+    opacity: 0 !important;
+    visibility: hidden !important;
+    pointer-events: none !important;
+}
+
+/* Other styles */
 #col-container {
     margin: 0 auto;
+    max-width: 980px;
+    direction: rtl;
+    text-align: right;
+    padding: 30px;
+    background: #ffffff !important;
+    border-radius: 24px;
+    box-shadow: 0 10px 40px -10px rgba(0,0,0,0.08);
+    border: 1px solid rgba(255,255,255,0.8);
+}
+
+#main-title h1 {
+    font-size: 2.4em !important;
+    text-align: center;
+    color: #1a202c !important;
+    margin-bottom: 15px;
+    font-weight: 800;
+    background: -webkit-linear-gradient(45deg, #2563eb, #1e40af);
+    -webkit-background-clip: text;
+    -webkit-text-fill-color: transparent;
+}
+
+#main-description {
+    text-align: center;
+    font-size: 1.15em;
+    color: #4b5563 !important;
+    margin-bottom: 40px;
+    line-height: 1.6;
+}
+
+.gr-input-label, span.label-wrap, label span {
+    font-weight: 700 !important;
+    color: #374151 !important;
+    font-size: 0.95em !important;
+    margin-bottom: 8px !important;
 }
+
+textarea, input[type="text"] {
+    border: 2px solid #e2e8f0 !important;
+    border-radius: 12px !important;
+    background-color: #ffffff !important;
+    color: #111827 !important;
+    padding: 12px !important;
+    font-family: 'Vazirmatn', sans-serif !important;
+}
+
+textarea:focus, input[type="text"]:focus {
+    border-color: #3b82f6 !important;
+    box-shadow: 0 0 0 4px rgba(59, 130, 246, 0.1) !important;
+    outline: none;
+}
+
+.gr-dropdown {
+    background: #ffffff !important;
+    border-radius: 12px !important;
+}
+
+.primary-btn, button.primary {
+    background: linear-gradient(135deg, #10b981 0%, #059669 100%) !important;
+    border: none !important;
+    color: white !important;
+    font-weight: 700 !important;
+    font-size: 1.1em !important;
+    padding: 14px 28px !important;
+    border-radius: 14px !important;
+    box-shadow: 0 4px 15px rgba(16, 185, 129, 0.3) !important;
+    transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1) !important;
+    cursor: pointer !important;
+    width: 100%;
+    margin-top: 15px;
+}
+
+.primary-btn:hover, button.primary:hover {
+    transform: translateY(-2px);
+    box-shadow: 0 8px 25px rgba(16, 185, 129, 0.45) !important;
+}
+
+#download-btn {
+    background: linear-gradient(135deg, #3b82f6 0%, #2563eb 100%) !important;
+    box-shadow: 0 4px 15px rgba(59, 130, 246, 0.3) !important;
+}
+#download-btn:hover {
+    box-shadow: 0 8px 25px rgba(59, 130, 246, 0.45) !important;
+}
+
+.gradio-container .prose table,
+.gradio-container table {
+    background-color: #ffffff !important;
+    color: #111827 !important;
+    border: 1px solid #e5e7eb !important;
+    border-radius: 12px !important;
+    overflow: hidden !important;
+    width: 100% !important;
+    margin-top: 20px !important;
+}
+
+.gradio-container thead th {
+    background-color: #f3f4f6 !important;
+    color: #374151 !important;
+    font-weight: 700 !important;
+    border-bottom: 2px solid #e5e7eb !important;
+    padding: 12px !important;
+    text-align: right !important;
+}
+
+.gradio-container tbody tr {
+    background-color: #ffffff !important;
+    border-bottom: 1px solid #f3f4f6 !important;
+}
+
+.gradio-container tbody tr:hover {
+    background-color: #f9fafb !important;
+}
+
+.gradio-container tbody td {
+    background-color: #ffffff !important;
+    color: #374151 !important;
+    padding: 10px !important;
+}
+
+footer { display: none !important; }
+.flagging { display: none !important; }
+
+@media (prefers-color-scheme: dark) {
+    body, .gradio-container, .prose, table, tr, td, th {
+        background-color: #ffffff !important;
+        color: #333333 !important;
+    }
+}
+</style>
+"""
+
+modal_html = """
+<div id="custom-quota-modal">
+    <div class="quota-modal-content">
+        <span class="quota-icon">✈️</span>
+        <div class="quota-title">محدودیت استفاده</div>
+        <div class="quota-text">
+            سهمیه استفاده از گرافیک پر شده است.
+            <br><br>
+            برای ادامه، لطفاً <b>حالت هواپیما (Airplane Mode)</b> گوشی خود را یکبار روشن و خاموش کنید تا IP شما تغییر کند و سپس مجدد تلاش کنید.
+        </div>
+        <button class="quota-btn" onclick="closeQuotaModal()">متوجه شدم</button>
+    </div>
+</div>
 """
 
 with gr.Blocks() as demo:
+    # Inject the CSS, the modal markup, and the scripts, in that order
+    gr.HTML(html_code + modal_html + js_code)
+
     with gr.Column(elem_id="col-container"):
+        gr.Markdown("# **ویرایشگر هوشمند آلفا**", elem_id="main-title")
+        gr.Markdown(
+            "با هوش مصنوعی آلفا تصاویر تونو به مدل های مختلف ویرایش کنید.",
+            elem_id="main-description"
+        )
 
+        with gr.Row(equal_height=True):
+            with gr.Column():
+                input_image = gr.Image(label="بارگذاری تصویر", type="pil", height=320)
+
                 prompt = gr.Text(
+                    label="دستور ویرایش (به فارسی)",
                     show_label=True,
+                    placeholder="مثال: تصویر را به سبک انیمه تبدیل کن...",
+                    rtl=True,
+                    lines=3
                 )
 
+                status_box = gr.HTML(label="وضعیت")
+
+                run_button = gr.Button(" شروع پردازش و ساخت تصویر", variant="primary", elem_classes="primary-btn")
 
+            with gr.Column():
+                output_image = gr.Image(label="تصویر نهایی", interactive=False, format="png", height=380)
+
+                download_button = gr.Button("📥 دانلود و ذخیره تصویر", variant="secondary", elem_id="download-btn", elem_classes="primary-btn")
 
         with gr.Row():
             lora_adapter = gr.Dropdown(
+                label="انتخاب سبک ویرایش (LoRA)",
+                choices=list(LORA_MAPPING.keys()),
+                value="تبدیل عکس به انیمه"
+            )
+
+        with gr.Accordion("تنظیمات پیشرفته", open=False, visible=True):
+            aspect_ratio_selection = gr.Dropdown(
+                label="ابعاد تصویر خروجی",
+                choices=ASPECT_RATIOS_LIST,
+                value="خودکار (پیش‌فرض)",
+                interactive=True
+            )
+
+            with gr.Row(visible=False) as custom_dims_row:
+                custom_width = gr.Slider(
+                    label="عرض دلخواه (Width)",
+                    minimum=256, maximum=2048, step=8, value=1024
+                )
+                custom_height = gr.Slider(
+                    label="ارتفاع دلخواه (Height)",
+                    minimum=256, maximum=2048, step=8, value=1024
+                )
+
+            seed = gr.Slider(label="دانه تصادفی (Seed)", minimum=0, maximum=MAX_SEED, step=1, value=0)
+            randomize_seed = gr.Checkbox(label="استفاده از Seed تصادفی", value=True)
+            guidance_scale = gr.Slider(label="میزان وفاداری به متن (Guidance Scale)", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
+            steps = gr.Slider(label="تعداد مراحل پردازش (Steps)", minimum=1, maximum=50, step=1, value=4)
+
+        def toggle_row(choice):
+            if choice == "شخصی‌سازی (Custom)":
+                return gr.update(visible=True)
+            return gr.update(visible=False)
+
+        aspect_ratio_selection.change(
+            fn=toggle_row,
+            inputs=aspect_ratio_selection,
+            outputs=custom_dims_row
+        )
+
         gr.Examples(
             examples=[
+                ["examples/1.jpg", "تبدیل به انیمه کن.", "تبدیل عکس به انیمه"],
+                ["examples/5.jpg", "سایه‌ها را حذف کن و نورپردازی نرم به تصویر بده.", "اصلاح نور و سایه"],
+                ["examples/4.jpg", "از فیلتر ساعت طلایی با پخش نور ملایم استفاده کن.", "نورپردازی مجدد (Relight)"],
+                ["examples/2.jpeg", "دوربین را ۴۵ درجه به سمت چپ بچرخان.", "تغییر زاویه دید"],
+                ["examples/7.jpg", "منبع نور را از سمت راست عقب قرار بده.", "نورپردازی چند زاویه‌ای"],
+                ["examples/10.jpeg", "کیفیت تصویر را افزایش بده (Upscale).", "افزایش کیفیت (Upscale)"],
+                ["examples/7.jpg", "منبع نور را از پایین بتابان.", "نورپردازی چند زاویه‌ای"],
+                ["examples/2.jpeg", "زاویه دوربین را به نمای بالا گوشه راست تغییر بده.", "تغییر زاویه دید"],
+                ["examples/9.jpg", "دوربین کمی به جلو حرکت می‌کند در حالی که نور خورشید از میان ابرها می‌تابد و درخششی نرم اطراف شبح شخصیت در مه ایجاد می‌کند. سبک سینمایی واقعی.", "صحنه بعدی (سینمایی)"],
+                ["examples/8.jpg", "جزئیات پوست سوژه را برجسته‌تر و طبیعی‌تر کن.", "روتوش پوست"],
+                ["examples/6.jpg", "دوربین را به نمای پایین به بالا تغییر بده.", "تغییر زاویه دید"],
             ],
+            inputs=[input_image, prompt, lora_adapter],
+            outputs=[output_image, seed, status_box],
             fn=infer_example,
             cache_examples=False,
+            label="نمونه‌ها (برای تست کلیک کنید)"
         )
 
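+    # api_name="predict" exposes this handler as a named endpoint of the Gradio API,
+    # so external clients (e.g. gradio_client) can call it directly.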
     run_button.click(
         fn=infer,
+        inputs=[input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps, aspect_ratio_selection, custom_width, custom_height],
+        outputs=[output_image, seed, status_box],
+        api_name="predict"
     )
+
+    download_button.click(
+        fn=None,
+        inputs=[output_image],
+        outputs=None,
+        js=js_dl
+    )
+
 if __name__ == "__main__":
+    demo.queue(max_size=30).launch(show_error=True)