tyndreus committed
Commit 227892c · verified · 1 Parent(s): 9b7cc5c

Update app.py

Files changed (1):
  1. app.py +70 -77
app.py CHANGED
@@ -8,6 +8,14 @@ from PIL import Image
  from typing import Iterable
  from gradio.themes import Soft
  from gradio.themes.utils import colors, fonts, sizes

  colors.steel_blue = colors.Color(
      name="steel_blue",
@@ -79,17 +87,7 @@ steel_blue_theme = SteelBlueTheme()

  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

- print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
- print("torch.__version__ =", torch.__version__)
- print("torch.version.cuda =", torch.version.cuda)
- print("cuda available:", torch.cuda.is_available())
- print("cuda device count:", torch.cuda.device_count())
- if torch.cuda.is_available():
-     print("current device:", torch.cuda.current_device())
-     print("device name:", torch.cuda.get_device_name(torch.cuda.current_device()))
-
- print("Using device:", device)
-
  from diffusers import FlowMatchEulerDiscreteScheduler
  from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
  from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
@@ -101,7 +99,7 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
  pipe = QwenImageEditPlusPipeline.from_pretrained(
      "Qwen/Qwen-Image-Edit-2509",
      transformer=QwenImageTransformer2DModel.from_pretrained(
-         "linoyts/Qwen-Image-Edit-Rapid-AIO", # [transformer weights extracted from: Phr00t/Qwen-Image-Edit-Rapid-AIO]
          subfolder='transformer',
          torch_dtype=dtype,
          device_map='cuda'
@@ -109,41 +107,21 @@ pipe = QwenImageEditPlusPipeline.from_pretrained(
      torch_dtype=dtype
  ).to(device)

- pipe.load_lora_weights("autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
-     weight_name="Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
-     adapter_name="anime")
- pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multiple-angles",
-     weight_name="镜头转换.safetensors",
-     adapter_name="multiple-angles")
- pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Light_restoration",
-     weight_name="移除光影.safetensors",
-     adapter_name="light-restoration")
- pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Relight",
-     weight_name="Qwen-Edit-Relight.safetensors",
-     adapter_name="relight")
- pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multi-Angle-Lighting",
-     weight_name="多角度灯光-251116.safetensors",
-     adapter_name="multi-angle-lighting")
- pipe.load_lora_weights("tlennon-ie/qwen-edit-skin",
-     weight_name="qwen-edit-skin_1.1_000002750.safetensors",
-     adapter_name="edit-skin")
- pipe.load_lora_weights("lovis93/next-scene-qwen-image-lora-2509",
-     weight_name="next-scene_lora-v2-3000.safetensors",
-     adapter_name="next-scene")
- pipe.load_lora_weights("vafipas663/Qwen-Edit-2509-Upscale-LoRA",
-     weight_name="qwen-edit-enhance_64-v3_000001000.safetensors",
-     adapter_name="upscale-image")
-

  pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
  MAX_SEED = np.iinfo(np.int32).max

  def update_dimensions_on_upload(image):
-     if image is None:
-         return 1024, 1024
-
      original_width, original_height = image.size
-
      if original_width > original_height:
          new_width = 1024
          aspect_ratio = original_height / original_width
@@ -152,13 +130,46 @@ def update_dimensions_on_upload(image):
          new_height = 1024
          aspect_ratio = original_width / original_height
          new_width = int(new_height * aspect_ratio)
-
-     # Ensure dimensions are multiples of 8
      new_width = (new_width // 8) * 8
      new_height = (new_height // 8) * 8
-
      return new_width, new_height

  @spaces.GPU(duration=30)
  def infer(
      input_image,
@@ -173,32 +184,25 @@ def infer(
      if input_image is None:
          raise gr.Error("Please upload an image to edit.")

-     if lora_adapter == "Photo-to-Anime":
-         pipe.set_adapters(["anime"], adapter_weights=[1.0])
-     elif lora_adapter == "Multiple-Angles":
-         pipe.set_adapters(["multiple-angles"], adapter_weights=[1.0])
-     elif lora_adapter == "Light-Restoration":
-         pipe.set_adapters(["light-restoration"], adapter_weights=[1.0])
-     elif lora_adapter == "Relight":
-         pipe.set_adapters(["relight"], adapter_weights=[1.0])
-     elif lora_adapter == "Multi-Angle-Lighting":
-         pipe.set_adapters(["multi-angle-lighting"], adapter_weights=[1.0])
-     elif lora_adapter == "Edit-Skin":
-         pipe.set_adapters(["edit-skin"], adapter_weights=[1.0])
-     elif lora_adapter == "Next-Scene":
-         pipe.set_adapters(["next-scene"], adapter_weights=[1.0])
-     elif lora_adapter == "Upscale-Image":
-         pipe.set_adapters(["upscale-image"], adapter_weights=[1.0])

-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)

      generator = torch.Generator(device=device).manual_seed(seed)
      negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"

      original_image = input_image.convert("RGB")
-
-     # Use the new function to update dimensions
      width, height = update_dimensions_on_upload(original_image)

      result = pipe(
@@ -222,7 +226,6 @@ def infer_example(input_image, prompt, lora_adapter):
      result, seed = infer(input_pil, prompt, lora_adapter, 0, True, guidance_scale, steps)
      return result, seed

-
  css="""
  #col-container {
      margin: 0 auto;
@@ -233,25 +236,17 @@ css="""

  with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
      with gr.Column(elem_id="col-container"):
-         # THE TITLE WAS CHANGED HERE
          gr.Markdown("# **RAINBO PRO 3D IMAGE EDIT**", elem_id="main-title")
-         gr.Markdown("Perform diverse image edits using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2509) adapters for the [Qwen-Image-Edit](https://huggingface.co/Qwen/Qwen-Image-Edit-2509) model.")

          with gr.Row(equal_height=True):
              with gr.Column():
                  input_image = gr.Image(label="Upload Image", type="pil", height=290)
-
-                 prompt = gr.Text(
-                     label="Edit Prompt",
-                     show_label=True,
-                     placeholder="e.g., transform into anime..",
-                 )
-
                  run_button = gr.Button("Edit Image", variant="primary")

              with gr.Column():
                  output_image = gr.Image(label="Output Image", interactive=False, format="png", height=350)
-
                  with gr.Row():
                      lora_adapter = gr.Dropdown(
                          label="Choose Editing Style",
@@ -263,8 +258,6 @@ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
                      randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                      guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
                      steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)
-
-     # THE EXAMPLES SECTION WAS COMPLETELY REMOVED

      run_button.click(
          fn=infer,
 
  from typing import Iterable
  from gradio.themes import Soft
  from gradio.themes.utils import colors, fonts, sizes
+ import uuid
+ from datetime import datetime
+ from huggingface_hub import HfApi  # ADDED: used to upload to the Hub
+
+ # --- SETTINGS ---
+ # REPLACE THIS WITH THE NAME OF THE PRIVATE DATASET YOU CREATED
+ DATASET_ID = "tyndreus/image-edit-logs"
+ # ---------------

  colors.steel_blue = colors.Color(
      name="steel_blue",
 
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

+ # --- MODEL LOADING SECTION ---
  from diffusers import FlowMatchEulerDiscreteScheduler
  from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
  from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
 
  pipe = QwenImageEditPlusPipeline.from_pretrained(
      "Qwen/Qwen-Image-Edit-2509",
      transformer=QwenImageTransformer2DModel.from_pretrained(
+         "linoyts/Qwen-Image-Edit-Rapid-AIO",
          subfolder='transformer',
          torch_dtype=dtype,
          device_map='cuda'

      torch_dtype=dtype
  ).to(device)
 
+ pipe.load_lora_weights("autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime", weight_name="Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors", adapter_name="anime")
+ pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multiple-angles", weight_name="镜头转换.safetensors", adapter_name="multiple-angles")
+ pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Light_restoration", weight_name="移除光影.safetensors", adapter_name="light-restoration")
+ pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Relight", weight_name="Qwen-Edit-Relight.safetensors", adapter_name="relight")
+ pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multi-Angle-Lighting", weight_name="多角度灯光-251116.safetensors", adapter_name="multi-angle-lighting")
+ pipe.load_lora_weights("tlennon-ie/qwen-edit-skin", weight_name="qwen-edit-skin_1.1_000002750.safetensors", adapter_name="edit-skin")
+ pipe.load_lora_weights("lovis93/next-scene-qwen-image-lora-2509", weight_name="next-scene_lora-v2-3000.safetensors", adapter_name="next-scene")
+ pipe.load_lora_weights("vafipas663/Qwen-Edit-2509-Upscale-LoRA", weight_name="qwen-edit-enhance_64-v3_000001000.safetensors", adapter_name="upscale-image")

  pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
  MAX_SEED = np.iinfo(np.int32).max
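
Each load_lora_weights call above registers one LoRA on the transformer under its adapter_name; at request time, infer() activates exactly one of them with set_adapters. A minimal sketch of performing the same switch outside the Gradio handler, assuming the PEFT-backed diffusers LoRA API already used in this file and that get_active_adapters is available in the installed diffusers version:

    # Sketch only: activate a single registered adapter at full strength.
    pipe.set_adapters(["relight"], adapter_weights=[1.0])
    # get_active_adapters is assumed to exist in recent diffusers releases.
    print(pipe.get_active_adapters())  # expected: ["relight"]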

  def update_dimensions_on_upload(image):
+     if image is None: return 1024, 1024
      original_width, original_height = image.size
      if original_width > original_height:
          new_width = 1024
          aspect_ratio = original_height / original_width

          new_height = 1024
          aspect_ratio = original_width / original_height
          new_width = int(new_height * aspect_ratio)
      new_width = (new_width // 8) * 8
      new_height = (new_height // 8) * 8
      return new_width, new_height
 
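The helper above keeps the aspect ratio, caps the longer side at 1024, and rounds both sides down to multiples of 8. A worked example with a hypothetical 1920x1080 landscape upload (illustrative only, not part of the app):

    # Illustrative arithmetic for update_dimensions_on_upload on a 1920x1080 image:
    new_width = 1024
    new_height = int(1024 * (1080 / 1920))        # 1024 * 0.5625 = 576
    new_width, new_height = (new_width // 8) * 8, (new_height // 8) * 8
    print(new_width, new_height)                  # 1024 576
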
+ # --- IMAGE LOGGING FUNCTION (PRIVATE DATASET) ---
+ def save_user_upload(image):
+     try:
+         # Token check
+         hf_token = os.environ.get("HF_TOKEN")
+         if not hf_token:
+             print("Error: HF_TOKEN not found. Please add it in the Space settings.")
+             return
+
+         api = HfApi(token=hf_token)
+
+         # Build the file name
+         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+         unique_id = str(uuid.uuid4())[:8]
+         filename = f"upload_{timestamp}_{unique_id}.png"
+
+         # Save temporarily to disk
+         temp_path = f"/tmp/{filename}"
+         image.save(temp_path)
+
+         # Upload to the private dataset
+         api.upload_file(
+             path_or_fileobj=temp_path,
+             path_in_repo=f"user_uploads/{filename}",  # creates a folder inside the dataset
+             repo_id=DATASET_ID,
+             repo_type="dataset"
+         )
+
+         # Delete the temporary file
+         os.remove(temp_path)
+         print(f"Image uploaded to the private dataset: {filename}")
+
+     except Exception as e:
+         print(f"Error while uploading the image: {e}")
+ # ---------------------------------
+
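
The logger above assumes two things that live outside the code: an HF_TOKEN secret with write access configured in the Space settings, and a private dataset repo whose name matches DATASET_ID. A one-time setup sketch (run locally with your own account; the repo name below is simply the value already used for DATASET_ID):

    # One-time setup sketch: create the private dataset that save_user_upload writes to.
    from huggingface_hub import create_repo
    create_repo(
        "tyndreus/image-edit-logs",  # must match DATASET_ID in app.py
        repo_type="dataset",
        private=True,
        exist_ok=True,               # no error if the dataset already exists
    )

Because the whole body is wrapped in try/except and only prints on failure, a missing token or repo never blocks inference; the upload is simply skipped.
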
  @spaces.GPU(duration=30)
  def infer(
      input_image,

      if input_image is None:
          raise gr.Error("Please upload an image to edit.")

+     # --- HERE THE IMAGE IS SENT TO THE DATASET ---
+     save_user_upload(input_image)
+     # ---------------------------------
+
+     if lora_adapter == "COLOR": pipe.set_adapters(["anime"], adapter_weights=[1.0])
+     elif lora_adapter == "ANGLE": pipe.set_adapters(["multiple-angles"], adapter_weights=[1.0])
+     elif lora_adapter == "Light-Restoration": pipe.set_adapters(["light-restoration"], adapter_weights=[1.0])
+     elif lora_adapter == "Relight": pipe.set_adapters(["relight"], adapter_weights=[1.0])
+     elif lora_adapter == "Multi-Angle-Lighting": pipe.set_adapters(["multi-angle-lighting"], adapter_weights=[1.0])
+     elif lora_adapter == "Edit-Skin": pipe.set_adapters(["edit-skin"], adapter_weights=[1.0])
+     elif lora_adapter == "Next-Scene": pipe.set_adapters(["next-scene"], adapter_weights=[1.0])
+     elif lora_adapter == "Upscale-Image": pipe.set_adapters(["upscale-image"], adapter_weights=[1.0])

+     if randomize_seed: seed = random.randint(0, MAX_SEED)
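
The if/elif ladder maps each dropdown label to exactly one registered adapter at weight 1.0. An equivalent, more compact alternative (a sketch, assuming the dropdown choices are exactly the strings compared above) is a dictionary lookup:

    # Sketch: same adapter switch as the chain above, expressed as a lookup table.
    ADAPTER_BY_LABEL = {
        "COLOR": "anime",
        "ANGLE": "multiple-angles",
        "Light-Restoration": "light-restoration",
        "Relight": "relight",
        "Multi-Angle-Lighting": "multi-angle-lighting",
        "Edit-Skin": "edit-skin",
        "Next-Scene": "next-scene",
        "Upscale-Image": "upscale-image",
    }
    adapter = ADAPTER_BY_LABEL.get(lora_adapter)
    if adapter is not None:
        pipe.set_adapters([adapter], adapter_weights=[1.0])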
 
      generator = torch.Generator(device=device).manual_seed(seed)
      negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"

      original_image = input_image.convert("RGB")
      width, height = update_dimensions_on_upload(original_image)

      result = pipe(
 
      result, seed = infer(input_pil, prompt, lora_adapter, 0, True, guidance_scale, steps)
      return result, seed

  css="""
  #col-container {
      margin: 0 auto;
 
  with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
      with gr.Column(elem_id="col-container"):
          gr.Markdown("# **RAINBO PRO 3D IMAGE EDIT**", elem_id="main-title")
+         gr.Markdown("Rainbo Pro 3D color adjustment and upscaler program.")

          with gr.Row(equal_height=True):
              with gr.Column():
                  input_image = gr.Image(label="Upload Image", type="pil", height=290)
+                 prompt = gr.Text(label="Edit Prompt", show_label=True, placeholder="e.g., transform into anime...")
                  run_button = gr.Button("Edit Image", variant="primary")

              with gr.Column():
                  output_image = gr.Image(label="Output Image", interactive=False, format="png", height=350)
                  with gr.Row():
                      lora_adapter = gr.Dropdown(
                          label="Choose Editing Style",

                      randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                      guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
                      steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)

      run_button.click(
          fn=infer,