tyndreus committed on
Commit
c4ea070
·
verified ·
1 Parent(s): 5ff3fe3

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -273
app.py DELETED
@@ -1,273 +0,0 @@
1
- import os
2
- import gradio as gr
3
- import numpy as np
4
- import spaces
5
- import torch
6
- import random
7
- from PIL import Image
8
- from typing import Iterable
9
- from gradio.themes import Soft
10
- from gradio.themes.utils import colors, fonts, sizes
11
- import uuid
12
- from datetime import datetime
13
- from huggingface_hub import HfApi
14
-
15
# --- SETTINGS ---
# Dataset repo where uploaded input images are logged
INPUT_DATASET_ID = "tyndreus/image-edit-logs"
# Dataset repo where generated output images are logged (make sure it exists)
OUTPUT_DATASET_ID = "tyndreus/output"
# ---------------
21
-
22
# Register a custom "steel blue" palette on gradio's color namespace so the
# theme below can reference it like a built-in hue (c50 lightest .. c950 darkest).
colors.steel_blue = colors.Color(
    name="steel_blue",
    c50="#EBF3F8",
    c100="#D3E5F0",
    c200="#A8CCE1",
    c300="#7DB3D2",
    c400="#529AC3",
    c500="#4682B4",
    c600="#3E72A0",
    c700="#36638C",
    c800="#2E5378",
    c900="#264364",
    c950="#1E3450",
)
36
-
37
class SteelBlueTheme(Soft):
    """Gradio ``Soft`` theme variant built around the custom steel_blue palette.

    Configures base hues and fonts through the parent constructor, then
    overrides individual theme CSS variables (gradient backgrounds, button
    styling for light/dark modes, slider colors, block chrome).
    """

    def __init__(
        self,
        *,
        primary_hue: colors.Color | str = colors.gray,
        secondary_hue: colors.Color | str = colors.steel_blue,
        neutral_hue: colors.Color | str = colors.slate,
        text_size: sizes.Size | str = sizes.text_lg,
        font: fonts.Font | str | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
        ),
        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
        ),
    ):
        # Base theme configuration: hues, text size, font stacks.
        super().__init__(
            primary_hue=primary_hue,
            secondary_hue=secondary_hue,
            neutral_hue=neutral_hue,
            text_size=text_size,
            font=font,
            font_mono=font_mono,
        )
        # Fine-grained overrides; "*name" tokens refer to theme color scales.
        super().set(
            background_fill_primary="*primary_50",
            background_fill_primary_dark="*primary_900",
            body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
            body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
            button_primary_text_color="white",
            button_primary_text_color_hover="white",
            button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
            button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
            button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_800)",
            button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_500)",
            button_secondary_text_color="black",
            button_secondary_text_color_hover="white",
            button_secondary_background_fill="linear-gradient(90deg, *primary_300, *primary_300)",
            button_secondary_background_fill_hover="linear-gradient(90deg, *primary_400, *primary_400)",
            button_secondary_background_fill_dark="linear-gradient(90deg, *primary_500, *primary_600)",
            button_secondary_background_fill_hover_dark="linear-gradient(90deg, *primary_500, *primary_500)",
            slider_color="*secondary_500",
            slider_color_dark="*secondary_600",
            block_title_text_weight="600",
            block_border_width="3px",
            block_shadow="*shadow_drop_lg",
            button_primary_shadow="*shadow_drop_lg",
            button_large_padding="11px",
            color_accent_soft="*primary_100",
            block_label_background_fill="*primary_200",
        )
87
-
88
# Single theme instance reused by the Blocks UI below.
steel_blue_theme = SteelBlueTheme()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Project-local pipeline components (shipped with this Space, not on PyPI).
from diffusers import FlowMatchEulerDiscreteScheduler
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3

dtype = torch.bfloat16
# NOTE(review): this rebinds `device` from a torch.device to a plain string;
# both forms are accepted by .to() and torch.Generator, so the assignment
# above is effectively dead — consider removing one of the two.
device = "cuda" if torch.cuda.is_available() else "cpu"
99
-
100
# Qwen-Image-Edit-2509 pipeline with a swapped-in "Rapid AIO" transformer
# (distilled variant allowing few-step inference).
pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    transformer=QwenImageTransformer2DModel.from_pretrained(
        "linoyts/Qwen-Image-Edit-Rapid-AIO",
        subfolder='transformer',
        torch_dtype=dtype,
        device_map='cuda'
    ),
    torch_dtype=dtype
).to(device)

# Register every editing-style LoRA once at startup; infer() switches between
# them per request via pipe.set_adapters(). Adapter names here must match the
# dropdown-to-adapter mapping used in infer().
pipe.load_lora_weights("autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime", weight_name="Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors", adapter_name="anime")
pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multiple-angles", weight_name="镜头转换.safetensors", adapter_name="multiple-angles")
pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Light_restoration", weight_name="移除光影.safetensors", adapter_name="light-restoration")
pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Relight", weight_name="Qwen-Edit-Relight.safetensors", adapter_name="relight")
pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multi-Angle-Lighting", weight_name="多角度灯光-251116.safetensors", adapter_name="multi-angle-lighting")
pipe.load_lora_weights("tlennon-ie/qwen-edit-skin", weight_name="qwen-edit-skin_1.1_000002750.safetensors", adapter_name="edit-skin")
pipe.load_lora_weights("lovis93/next-scene-qwen-image-lora-2509", weight_name="next-scene_lora-v2-3000.safetensors", adapter_name="next-scene")
pipe.load_lora_weights("vafipas663/Qwen-Edit-2509-Upscale-LoRA", weight_name="qwen-edit-enhance_64-v3_000001000.safetensors", adapter_name="upscale-image")

# Use the FlashAttention-3 double-stream attention processor for speed.
pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
# Upper bound for user-facing seed values (fits in int32).
MAX_SEED = np.iinfo(np.int32).max
122
-
123
def update_dimensions_on_upload(image):
    """Compute the output (width, height) for an uploaded image.

    Scales the image so its longer side becomes 1024 px, preserves the
    aspect ratio, and rounds both sides down to a multiple of 8 (required
    by the diffusion pipeline). Returns the 1024x1024 default when no
    image is given.

    Args:
        image: PIL.Image-like object exposing a ``.size`` (width, height)
            tuple, or None.

    Returns:
        Tuple ``(width, height)`` of ints, each a positive multiple of 8.
    """
    if image is None:
        return 1024, 1024
    original_width, original_height = image.size
    if original_width > original_height:
        new_width = 1024
        aspect_ratio = original_height / original_width
        new_height = int(new_width * aspect_ratio)
    else:
        new_height = 1024
        aspect_ratio = original_width / original_height
        new_width = int(new_height * aspect_ratio)
    # Snap to the pipeline's required multiple of 8; clamp so an extreme
    # aspect ratio can never collapse a side to 0 (the original code could
    # return 0 for e.g. a 1024x4 input, which would crash the pipeline).
    new_width = max(8, (new_width // 8) * 8)
    new_height = max(8, (new_height // 8) * 8)
    return new_width, new_height
137
-
138
# --- SHARED HELPER: UPLOAD AN IMAGE TO THE HUB ---
def upload_image_to_hub(image, dataset_id, folder_prefix="images"):
    """Upload a PIL image into a Hugging Face dataset repo (best-effort).

    Saves the image to a uniquely named temp file, pushes it into the
    ``{folder_prefix}/`` folder of ``dataset_id``, then removes the temp
    file. All failures are logged and swallowed so logging can never break
    the main inference flow.

    Args:
        image: PIL.Image to persist.
        dataset_id: Target dataset repo id, e.g. "user/name".
        folder_prefix: Subfolder (and filename prefix) inside the repo.

    Returns:
        None. Errors are printed, never raised.
    """
    try:
        # Skip logging entirely when no token is configured.
        hf_token = os.environ.get("HF_TOKEN")
        if not hf_token:
            print("Fail")
            return

        api = HfApi(token=hf_token)

        # Timestamp + short uuid keeps filenames unique and sortable.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        unique_id = str(uuid.uuid4())[:8]
        filename = f"{folder_prefix}_{timestamp}_{unique_id}.png"

        # Write to a temp file first; upload_file takes a path or file object.
        # BUG FIX: the original used a garbled literal path instead of the
        # computed `filename`, so every upload overwrote the same file.
        temp_path = f"/tmp/{filename}"
        image.save(temp_path)

        # Push into the dataset repo under the requested folder.
        api.upload_file(
            path_or_fileobj=temp_path,
            path_in_repo=f"{folder_prefix}/{filename}",
            repo_id=dataset_id,
            repo_type="dataset"
        )

        # Clean up the local temp file.
        os.remove(temp_path)
        print("Success")

    except Exception as e:
        print(f"Yükleme hatası ({dataset_id}): {e}")
# -------------------------------------------
173
-
174
@spaces.GPU(duration=30)
def infer(
    input_image,
    prompt,
    lora_adapter,
    seed,
    randomize_seed,
    guidance_scale,
    steps,
    progress=gr.Progress(track_tqdm=True)
):
    """Run one image-editing pass with the selected LoRA style.

    Args:
        input_image: PIL.Image uploaded by the user (required).
        prompt: Text instruction describing the edit.
        lora_adapter: Dropdown label selecting which registered LoRA to use.
        seed: Seed value; ignored when ``randomize_seed`` is True.
        randomize_seed: When True, draw a fresh seed in [0, MAX_SEED].
        guidance_scale: Passed through as ``true_cfg_scale``.
        steps: Number of inference steps.
        progress: Gradio progress tracker (tracks tqdm output).

    Returns:
        Tuple of (edited PIL.Image, int seed actually used).

    Raises:
        gr.Error: If no input image was provided.
    """
    if input_image is None:
        raise gr.Error("Please upload an image to edit.")

    # 1. Log the input image (best-effort; failures are swallowed by helper).
    upload_image_to_hub(input_image, INPUT_DATASET_ID, folder_prefix="inputs")

    # UI label -> adapter name registered via pipe.load_lora_weights().
    # Dict dispatch replaces the original 8-branch if/elif chain.
    adapter_names = {
        "Photo-to-Anime": "anime",
        "Multiple-Angles": "multiple-angles",
        "Light-Restoration": "light-restoration",
        "Relight": "relight",
        "Multi-Angle-Lighting": "multi-angle-lighting",
        "Edit-Skin": "edit-skin",
        "Next-Scene": "next-scene",
        "Upscale-Image": "upscale-image",
    }
    adapter = adapter_names.get(lora_adapter)
    if adapter is not None:
        pipe.set_adapters([adapter], adapter_weights=[1.0])

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator(device=device).manual_seed(seed)
    negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"

    # Fit the working size to the upload (longer side 1024, multiple of 8).
    original_image = input_image.convert("RGB")
    width, height = update_dimensions_on_upload(original_image)

    result = pipe(
        image=original_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=height,
        width=width,
        num_inference_steps=steps,
        generator=generator,
        true_cfg_scale=guidance_scale,
    ).images[0]

    # 2. Log the generated output to the separate output dataset.
    upload_image_to_hub(result, OUTPUT_DATASET_ID, folder_prefix="generated")

    return result, seed
224
-
225
@spaces.GPU(duration=30)
def infer_example(input_image, prompt, lora_adapter):
    """Example-gallery entry point: run infer() with fast fixed settings.

    Always randomizes the seed and uses CFG scale 1.0 with 4 steps.
    """
    rgb_image = input_image.convert("RGB")
    cfg_scale = 1.0
    num_steps = 4
    edited, used_seed = infer(rgb_image, prompt, lora_adapter, 0, True, cfg_scale, num_steps)
    return edited, used_seed
232
-
233
# Page-level CSS: center the layout, cap its width, and enlarge the title.
css="""
#col-container {
    margin: 0 auto;
    max-width: 960px;
}
#main-title h1 {font-size: 2.1em !important;}
"""
240
-
241
# Build the Gradio UI. Component variables declared here are wired to infer()
# via run_button.click below.
with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# **RAINBO PRO 3D IMAGE EDIT**", elem_id="main-title")
        # BUG FIX: the original markdown text was garbled ("Test) adapters");
        # restored to a plain sentence.
        gr.Markdown("Test adapters for the [Qwen-Image-Edit](https://huggingface.co/Qwen/Qwen-Image-Edit-2509) model.")

        with gr.Row(equal_height=True):
            with gr.Column():
                input_image = gr.Image(label="Upload Image", type="pil", height=290)
                prompt = gr.Text(label="Edit Prompt", show_label=True, placeholder="e.g., transform into anime..")
                run_button = gr.Button("Edit Image", variant="primary")

            with gr.Column():
                output_image = gr.Image(label="Output Image", interactive=False, format="png", height=350)
                with gr.Row():
                    lora_adapter = gr.Dropdown(
                        label="Choose Editing Style",
                        choices=["Photo-to-Anime", "Multiple-Angles", "Light-Restoration", "Multi-Angle-Lighting", "Upscale-Image", "Relight", "Next-Scene", "Edit-Skin"],
                        value="Photo-to-Anime"
                    )
                # Hidden advanced controls: defaults match the fast distilled
                # transformer (4 steps, CFG 1.0).
                with gr.Accordion("Advanced Settings", open=False, visible=False):
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                    randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                    guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
                    steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)

        run_button.click(
            fn=infer,
            inputs=[input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps],
            outputs=[output_image, seed]
        )

if __name__ == "__main__":
    demo.queue(max_size=30).launch(mcp_server=True, ssr_mode=False, show_error=True)