theanhntp committed on
Commit
b65a763
·
verified ·
1 Parent(s): b7b594d

Update setup.py

Browse files
Files changed (1) hide show
  1. setup.py +61 -48
setup.py CHANGED
@@ -15,14 +15,19 @@ from huggingface_hub import snapshot_download
15
  # torch.backends.cuda.enable_mem_efficient_sdp(False)
16
  # torch.backends.cuda.enable_flash_sdp(False)
17
 
 
 
 
 
 
 
 
 
18
  base_model_path='booksforcharlie/stable-diffusion-inpainting'
19
  allow_tf32=True
20
  mixed_precision='bf16'
21
  resume_path='zhengchong/CatVTON'
22
- tmp_folder = "/rs"
23
-
24
- repo_path = snapshot_download(repo_id=resume_path)
25
-
26
 
27
  automasker = AutoMasker(
28
  densepose_ckpt=os.path.join(repo_path, "DensePose"),
@@ -125,53 +130,61 @@ def inference(
125
  new_result_image.paste(result_image, (condition_width + 5, 0))
126
  return new_result_image
127
 
128
- person_path = '/content/data/CatVTON__00009_.png'
129
  mask_path = None
130
- cloth_path = "/content/data/Coolmate_Áo thun Smile Together.webp"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
131
  cloth_type = "upper"
132
  image_size = (1024, 768)
133
  num_inference_steps = 50
134
  guidance_scale = 2.5
135
  seed = 42
136
- show_type = "result only"
137
-
138
-
139
- person_image = Image.open(person_path).convert("RGB")
140
- mask_image = Image.open(mask_path).convert("L") if mask_path else None
141
- cloth_image = Image.open(cloth_path).convert("RGB")
142
-
143
- vton_img = inference(person_image,
144
- mask_image,
145
- cloth_image,
146
- cloth_type,
147
- image_size,
148
- num_inference_steps,
149
- guidance_scale,
150
- seed,
151
- show_type)
152
-
153
- import atexit, requests, subprocess, time, re, os
154
- from random import randint
155
- from threading import Timer
156
- from queue import Queue
157
def cloudflared(port, metrics_port, output_queue):
    """Start a cloudflared quick tunnel to http://127.0.0.1:<port> and put the
    public trycloudflare.com URL on *output_queue*.

    The tunnel subprocess is registered with atexit so it is terminated when
    the interpreter exits. The assigned URL is discovered by polling the local
    metrics endpoint up to 10 times, 3 seconds apart.

    Raises:
        Exception: if no tunnel URL could be read after all attempts.
    """
    proc = subprocess.Popen(
        ['/workspace/cloudflared-linux-amd64', 'tunnel',
         '--url', f'http://127.0.0.1:{port}',
         '--metrics', f'127.0.0.1:{metrics_port}'],
        stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
    atexit.register(proc.terminate)

    # BUG FIX: the original pattern was a non-raw string with invalid "\/"
    # escapes (SyntaxWarning on modern CPython) and unescaped "." wildcards
    # around "trycloudflare.com"; use a raw string with escaped dots.
    url_pattern = re.compile(r"(?P<url>https?://[^\s]+\.trycloudflare\.com)")

    tunnel_url = None
    for _ in range(10):
        time.sleep(3)
        try:
            metrics = requests.get(
                f'http://127.0.0.1:{metrics_port}/metrics').text
        except requests.RequestException:
            # Metrics endpoint not up yet; retry instead of bare `except:`,
            # which also swallowed KeyboardInterrupt/SystemExit.
            continue
        match = url_pattern.search(metrics)
        if match:
            tunnel_url = match.group("url")
            break
    if not tunnel_url:
        raise Exception("Can't connect to Cloudflare Edge")
    output_queue.put(tunnel_url)
170
-
171
- output_queue, metrics_port = Queue(), randint(8100, 9000)
172
- thread = Timer(2, cloudflared, args=(8188, metrics_port, output_queue))
173
- thread.start()
174
- thread.join()
175
- tunnel_url = output_queue.get()
176
- os.environ['webui_url'] = tunnel_url
177
- print(tunnel_url)
 
15
  # torch.backends.cuda.enable_mem_efficient_sdp(False)
16
  # torch.backends.cuda.enable_flash_sdp(False)
17
 
18
def get_files(folder_path, extensions=('py', 'png', 'jpeg')):
    """Return names of files in *folder_path* whose extension is in *extensions*.

    Matching is case-insensitive on both sides; *extensions* may be a single
    extension string or an iterable of extension strings (without the dot).

    BUG FIXES vs. original:
    - default was a mutable list, shared across calls;
    - a single-string `extensions` argument was wrapped in a list but never
      lowercased, so e.g. `get_files(d, "JPEG")` could never match.

    Args:
        folder_path: directory to list (non-recursive).
        extensions: extension string or iterable of extension strings.

    Returns:
        list[str]: matching entry names as reported by os.listdir.
    """
    if isinstance(extensions, str):
        extensions = [extensions]
    wanted = {ex.lower() for ex in extensions}
    # Compare on the text after the final dot, lowercased.
    return [name for name in os.listdir(folder_path)
            if name.split('.')[-1].lower() in wanted]
25
+
26
  base_model_path='booksforcharlie/stable-diffusion-inpainting'
27
  allow_tf32=True
28
  mixed_precision='bf16'
29
  resume_path='zhengchong/CatVTON'
30
+ tmp_folder = "/workspace/rs"
 
 
 
31
 
32
  automasker = AutoMasker(
33
  densepose_ckpt=os.path.join(repo_path, "DensePose"),
 
130
  new_result_image.paste(result_image, (condition_width + 5, 0))
131
  return new_result_image
132
 
133
+ person_path = '/workspace/data/person'
134
  mask_path = None
135
+ cloth_path = '/workspace/data/cloth'
136
+ result_path = '/workspace/data/result'
137
+
138
+
139
+ if not os.path.isfile(person_path):
140
+ os.makedirs(person_path, exist_ok=True)
141
+ person_files = get_files(person_path, extensions=['png', 'jpeg', 'jpg', 'webp'])
142
+
143
+ if mask_path:
144
+ os.makedirs(mask_path, exist_ok=True)
145
+ mask_files = [os.path.join(mask_path, f'{os.path.splitext(pf)[0]}.png') for pf in person_files]
146
+ else:
147
+ mask_files = [mask_path] * len(person_files)
148
+ person_files = [os.path.join(person_path, pf) for pf in person_files] if person_files else []
149
+ else:
150
+ person_files = [person_path]
151
+ mask_files = [mask_path] * len(person_files)
152
+
153
+
154
+ if not os.path.isfile(cloth_path):
155
+ os.makedirs(cloth_path, exist_ok=True)
156
+ cloth_files = get_files(cloth_path, extensions=['png', 'jpeg', 'jpg', 'webp'])
157
+ cloth_files = [os.path.join(cloth_path, cf) for cf in cloth_files] if cloth_files else []
158
+ else:
159
+ cloth_files = [cloth_path]
160
+
161
+
162
+ if not os.path.isdir(result_path):
163
+ os.makedirs(result_path, exist_ok=True)
164
+
165
+ repo_path = snapshot_download(repo_id=resume_path)
166
+
167
  cloth_type = "upper"
168
  image_size = (1024, 768)
169
  num_inference_steps = 50
170
  guidance_scale = 2.5
171
  seed = 42
172
+ show_type = "all"
173
+
174
+
175
# Run try-on inference for every (person, cloth) pair and save each result
# under result_path with a timestamped filename.
for person_file, mask_file in zip(person_files, mask_files):
    for cloth_file in cloth_files:
        person_instance = Image.open(person_file).convert("RGB")
        # Mask is optional; None lets the pipeline auto-generate one.
        mask_instance = Image.open(mask_file).convert("L") if mask_file else None
        cloth_instance = Image.open(cloth_file).convert("RGB")

        vton_img = inference(person_instance,
                             mask_instance,
                             cloth_instance,
                             cloth_type,
                             image_size,
                             num_inference_steps,
                             guidance_scale,
                             seed,
                             show_type)
        # BUG FIX: original format "%Y%m%d%M%S" omitted the hour (%H), so
        # results from different hours of the same day could collide.
        vton_img.save(os.path.join(
            result_path, f'{datetime.now().strftime("%Y%m%d%H%M%S")}.jpg'))