plutosss committed on
Commit
71bfff1
·
verified ·
1 Parent(s): 687bed1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -274
app.py CHANGED
@@ -1,11 +1,4 @@
1
- # 部署 teed、depth-anything
2
- # 腐蚀算法
3
- # 读取图片
4
- # 输出图片
5
- # 使用 depth-anything + teed 生成外轮廓
6
- # 使用 teed + 腐蚀算法 生成内边缘
7
  from PIL import Image
8
-
9
  import cv2
10
  import numpy as np
11
  import os
@@ -20,240 +13,106 @@ from depthAnything.depth_anything.dpt import DepthAnything
20
  from depthAnything.depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
21
  import shutil
22
 
23
-
24
  def multiply_blend(image1, image2):
25
- # 将图片转换为浮点数,方便计算
26
- # Ensure image2 has the same shape as image1
27
  image2 = np.stack((image2,) * 3, axis=-1)
28
- # Perform the blending
29
  multiplied = np.multiply(image1 / 255.0, image2 / 255.0) * 255.0
30
  return multiplied.astype(np.uint8)
31
 
32
- # Example usage
33
-
34
-
35
- image1 = np.random.randint(0, 256, (717, 790, 3), dtype=np.uint8)
36
- image2 = np.random.randint(0, 256, (717, 790), dtype=np.uint8)
37
-
38
- result = multiply_blend(image1, image2)
39
- print(result.shape) # Should be (717, 790, 3)
40
-
41
  def screen_blend(image1, image2):
42
- # 将图片转换为浮点数,方便计算
43
  image1 = image1.astype(float)
44
  image2 = image2.astype(float)
45
-
46
- # 执行滤色操作
47
  screened = 1 - (1 - image1 / 255) * (1 - image2 / 255) * 255
48
-
49
- # 将结果转换回uint8
50
  result = np.clip(screened, 0, 255).astype('uint8')
51
  return result
52
 
53
- def erosion(img, kernel_size = 3, iterations = 1, dilate = False):
54
-
55
- # 灰度化
56
  if len(img.shape) == 3:
57
  img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
58
-
59
- # # 二值化
60
- # _, img = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)
61
-
62
- # 腐蚀
63
  kernel = np.ones((kernel_size, kernel_size), np.uint8)
64
  if dilate:
65
  img = cv2.dilate(img, kernel, iterations=iterations)
66
  else:
67
  img = cv2.erode(img, kernel, iterations=iterations)
68
-
69
  return img
70
 
71
- def erosion_img_from_path(img_path, output_dir = './output/erosion_img', kernel_size = 3, iterations = 1, dilate = False):
72
- # 读取图片
73
- if os.path.isfile(img_path):
74
- name, extension = os.path.splitext(img_path)
75
- if extension:
76
- if extension.lower() == 'txt':
77
- with open(img_path, 'r',encoding= 'utf-8') as f:
78
- filenames = f.read().splitlines()
79
- elif extension.lower() in ['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp','tif']:
80
- filenames = [img_path]
81
- else:
82
- filenames = os.listdir(img_path)
83
- filenames = [os.path.join(img_path, filename) for filename in filenames if not filename.startswith('.') and filename.lower().endswith(('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp','tif'))]
84
- filenames.sort()
85
-
86
- os.makedirs(output_dir, exist_ok=True)
87
-
88
- for filename in tqdm(filenames):
89
- img = cv2.imread(filename)
90
- img = erosion(img, kernel_size, iterations, dilate)
91
- cv2.imwrite(os.path.join(output_dir, os.path.basename(filename)), img)
92
-
93
 
94
- def copy_file(src, dest):
95
- # 移动文件
96
- source = src
97
- destination = dest
98
- try:
99
- shutil.copy(source, destination)
100
- except IOError as e:
101
- print("Unable to copy file. %s" % e)
102
 
 
 
 
103
 
104
- def guassian_blur_path(img_path, output_dir = './output/guassian_blur', kernel_size = 3, sigmaX = 0):
105
- # 读取图片
106
- if os.path.isfile(img_path):
107
- name, extension = os.path.splitext(img_path)
108
- if extension:
109
- if extension.lower() == 'txt':
110
- with open(img_path, 'r',encoding= 'utf-8') as f:
111
- filenames = f.read().splitlines()
112
- elif extension.lower() in ['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp','tif']:
113
- filenames = [img_path]
114
- else:
115
- filenames = os.listdir(img_path)
116
- filenames = [os.path.join(img_path, filename) for filename in filenames if not filename.startswith('.') and filename.lower().endswith(('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp','tif'))]
117
- filenames.sort()
118
-
119
- os.makedirs(output_dir, exist_ok=True)
120
-
121
- for filename in tqdm(filenames):
122
- img = cv2.imread(filename)
123
- img = cv2.GaussianBlur(img, (kernel_size,kernel_size), sigmaX)
124
- cv2.imwrite(os.path.join(output_dir, os.path.basename(filename)), img)
125
 
126
- def depth_anything(img_path = './input', outdir = './output/depth_anything', encoder = 'vitl', pred_only = True, grayscale = True):
127
- # parser = argparse.ArgumentParser()
128
- # parser.add_argument('--img-path', type=str)
129
- # parser.add_argument('--outdir', type=str, default='./vis_depth')
130
- # parser.add_argument('--encoder', type=str, default='vitl', choices=['vits', 'vitb', 'vitl'])
131
-
132
- # parser.add_argument('--pred-only', dest='pred_only', action='store_true', help='only display the prediction')
133
- # parser.add_argument('--grayscale', dest='grayscale', action='store_true', help='do not apply colorful palette')
134
-
135
- # args = parser.parse_args()
136
-
137
- margin_width = 50
138
- caption_height = 60
139
-
140
- font = cv2.FONT_HERSHEY_SIMPLEX
141
- font_scale = 1
142
- font_thickness = 2
143
-
144
  DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
145
 
146
  model_configs = {
147
- 'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
148
- 'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
149
- 'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]}
150
  }
151
 
152
  depth_anything = DepthAnything(model_configs[encoder])
153
  depth_anything.load_state_dict(torch.load('./checkpoints/depth_anything_{}14.pth'.format(encoder)))
154
  depth_anything = depth_anything.to(DEVICE).eval()
155
-
156
- total_params = sum(param.numel() for param in depth_anything.parameters())
157
- print('Total parameters: {:.2f}M'.format(total_params / 1e6))
158
-
159
  transform = Compose([
160
- Resize(
161
- width=518,
162
- height=518,
163
- resize_target=False,
164
- keep_aspect_ratio=True,
165
- ensure_multiple_of=14,
166
- resize_method='lower_bound',
167
- image_interpolation_method=cv2.INTER_CUBIC,
168
- ),
169
  NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
170
  PrepareForNet(),
171
  ])
172
-
173
  if os.path.isfile(img_path):
174
- name, extension = os.path.splitext(img_path)
175
- if extension:
176
- if extension.lower() == 'txt':
177
- with open(img_path, 'r',encoding= 'utf-8') as f:
178
- filenames = f.read().splitlines()
179
- elif extension.lower() in ['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp','tif']:
180
- filenames = [img_path]
181
- else:
182
- filenames = os.listdir(img_path)
183
- filenames = [os.path.join(img_path, filename) for filename in filenames if not filename.startswith('.') and filename.lower().endswith(('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp','tif'))]
184
- filenames.sort()
185
-
186
- os.makedirs(outdir, exist_ok=True)
187
-
188
- for filename in tqdm(filenames):
189
- raw_image = cv2.imread(filename)
190
  image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB) / 255.0
191
-
192
  h, w = image.shape[:2]
193
-
194
  image = transform({'image': image})['image']
195
  image = torch.from_numpy(image).unsqueeze(0).to(DEVICE)
196
-
197
  with torch.no_grad():
198
  depth = depth_anything(image)
199
-
200
  depth = F.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
201
  depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
202
-
203
  depth = depth.cpu().numpy().astype(np.uint8)
204
-
205
  if grayscale:
206
  depth = np.repeat(depth[..., np.newaxis], 3, axis=-1)
207
  else:
208
  depth = cv2.applyColorMap(depth, cv2.COLORMAP_INFERNO)
209
-
210
- filename = os.path.basename(filename)
211
-
212
- if pred_only:
213
- cv2.imwrite(os.path.join(outdir, filename[:filename.rfind('.')] + '_depth.png'), depth)
214
- else:
215
- split_region = np.ones((raw_image.shape[0], margin_width, 3), dtype=np.uint8) * 255
216
- combined_results = cv2.hconcat([raw_image, split_region, depth])
217
-
218
- caption_space = np.ones((caption_height, combined_results.shape[1], 3), dtype=np.uint8) * 255
219
- captions = ['Raw image', 'Depth Anything']
220
- segment_width = w + margin_width
221
-
222
- for i, caption in enumerate(captions):
223
- # Calculate text size
224
- text_size = cv2.getTextSize(caption, font, font_scale, font_thickness)[0]
225
-
226
- # Calculate x-coordinate to center the text
227
- text_x = int((segment_width * i) + (w - text_size[0]) / 2)
228
-
229
- # Add text caption
230
- cv2.putText(caption_space, caption, (text_x, 40), font, font_scale, (0, 0, 0), font_thickness)
231
-
232
- final_result = cv2.vconcat([caption_space, combined_results])
233
-
234
- cv2.imwrite(os.path.join(outdir, filename[:filename.rfind('.')] + '_img_depth.png'), final_result)
235
 
236
- def teed_imgs(img_path='./input', outdir='./output/teed_imgs', gaussianBlur=[0, 3, 0]):
237
- os.makedirs(outdir, exist_ok=True)
238
- os.makedirs('teed_tmp', exist_ok=True)
239
-
240
- # 处理单个上传的图片
241
- if os.path.isfile(img_path):
242
- img = cv2.imread(img_path)
243
- if gaussianBlur[0] != 0:
244
- img = cv2.GaussianBlur(img, (gaussianBlur[1], gaussianBlur[1]), gaussianBlur[2])
245
- cv2.imwrite(os.path.join('teed_tmp', 'temp_image.png'), img)
246
- else:
247
- cv2.imwrite(os.path.join('teed_tmp', 'temp_image.png'), img)
248
 
249
- # 使用临时图像文件进行处理
250
- args, train_info = parse_args(is_testing=True, pl_opt_dir=outdir)
251
- args.input_val_dir = 'teed_tmp' # 使用临时目录
252
- teed.main(args, train_info)
253
 
254
- shutil.rmtree('teed_tmp')
 
 
 
 
 
 
 
 
 
255
 
256
- def merge_2_images(img1, img2, mode, erosion_para = [[0,0],[0,0]], dilate = [0,0]): #将 img1 合并至 img2,调整大小与 img2 相同
257
  img1 = cv2.imread(img1)
258
  img2 = cv2.imread(img2)
259
  img1 = cv2.resize(img1, (img2.shape[1], img2.shape[0]))
@@ -265,97 +124,22 @@ def merge_2_images(img1, img2, mode, erosion_para = [[0,0],[0,0]], dilate = [0,0
265
  return multiply_blend(img1, img2)
266
  elif mode == 'screen':
267
  return screen_blend(img1, img2)
268
-
269
- def merge_images_in_2_folder(folder1, folder2, outdir, suffix_need_remove = None, suffix_floder = 0 , mode = 'multiply', erosion_para = [[0,0],[0,0]], dilate = [0,0]): #将 folder1 和 folder2 中的图片合并,可选是否移除某文件夹后缀,可选腐蚀参数[kernel_size,iterations]
270
  os.makedirs(outdir, exist_ok=True)
271
- name_extension_pairs_folder1 = [os.path.splitext(filename) for filename in os.listdir(folder1) if filename.endswith(('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp','tif'))]
272
  filenames_noext_folder1, extensions_folder1 = zip(*name_extension_pairs_folder1)
273
- name_extension_pairs_folder2 = [os.path.splitext(filename) for filename in os.listdir(folder2) if filename.endswith(('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp','tif'))]
274
  filenames_noext_folder2, extensions_folder2 = zip(*name_extension_pairs_folder2)
275
- if suffix_need_remove:
276
- if suffix_floder == 0:
277
- filenames_raw = list(filenames_noext_folder1).copy()
278
- filenames_noext_folder1 = [filename[:-len(suffix_need_remove)] + filename[-len(suffix_need_remove):].replace(suffix_need_remove, '') for filename in filenames_noext_folder1]
279
- if suffix_floder == 1:
280
- filenames_raw = list(filenames_noext_folder2).copy()
281
- filenames_noext_folder2 = [filename[:-len(suffix_need_remove)] + filename[-len(suffix_need_remove):].replace(suffix_need_remove, '') for filename in filenames_noext_folder2]
282
 
283
  for index, filename in enumerate(filenames_noext_folder1):
284
  if filename in filenames_noext_folder2:
285
- print(filename)
286
- if suffix_need_remove:
287
- if suffix_floder == 0:
288
- img1 = os.path.join(folder1, filenames_raw[index] + extensions_folder1[index])
289
- img2 = os.path.join(folder2, filename + extensions_folder2[filenames_noext_folder2.index(filename)])
290
- if suffix_floder == 1:
291
- img1 = os.path.join(folder1, filename + extensions_folder1[index])
292
- img2 = os.path.join(folder2, filenames_raw[filenames_noext_folder2.index(filename)] + extensions_folder2[filenames_noext_folder2.index(filename)])
293
- else:
294
- img1 = os.path.join(folder1, filename + extensions_folder1[index])
295
- img2 = os.path.join(folder2, filename + extensions_folder2[filenames_noext_folder2.index(filename)])
296
  result = merge_2_images(img1, img2, mode, erosion_para, dilate)
297
  cv2.imwrite(os.path.join(outdir, filename + extensions_folder1[index]), result)
298
 
299
-
300
- def invert_image(image):
301
- # 将图片从BGR转为灰度图
302
- gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
303
- # 对灰度图进行反转
304
- inverted_image = cv2.bitwise_not(gray_image)
305
- # 将反转后的灰度图转换回BGR格式
306
- inverted_image_bgr = cv2.cvtColor(inverted_image, cv2.COLOR_GRAY2BGR)
307
- return inverted_image_bgr
308
-
309
- def process_images(input_folder='./output/merged_imgs'):
310
- output_folder = os.path.join(os.path.dirname(input_folder), 'output_invert')
311
- os.makedirs(output_folder, exist_ok=True)
312
-
313
- # 获取输入文件夹中的所有图片文件
314
- image_files = [f for f in os.listdir(input_folder) if
315
- f.lower().endswith(('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp', 'tif'))]
316
-
317
- for image_file in tqdm(image_files):
318
- image_path = os.path.join(input_folder, image_file)
319
- try:
320
- # 使用PIL库读取图像
321
- with Image.open(image_path) as img:
322
- image = np.array(img.convert('RGB'))[:, :, ::-1].copy()
323
- if image is not None:
324
- # 翻转图片
325
- inverted_image = invert_image(image)
326
- # 保存翻转后的图片到输出文件夹
327
- output_path = os.path.join(output_folder, image_file)
328
- cv2.imwrite(output_path, inverted_image)
329
- else:
330
- raise ValueError(f"Failed to read image: {image_file}")
331
- except Exception as e:
332
- print(f"Error processing file {image_file}: {e}")
333
-
334
- def process_line(img_path='./input', outdir='./output'):
335
- # 处理深度图像
336
- depth_anything(img_path, os.path.join(outdir, "depth_anything"))
337
-
338
- # 处理 teed_imgs
339
- teed_imgs(img_path, os.path.join(outdir, "teed_imgs"), [1, 7, 2])
340
-
341
- # 处理深度图的 teed_imgs
342
- teed_imgs(os.path.join(outdir, "depth_anything"), os.path.join(outdir, "dp_teed_imgs"), [0, 7, 2])
343
-
344
- # 合并图像
345
- merge_images_in_2_folder(
346
- os.path.join(outdir, "teed_imgs"),
347
- os.path.join(outdir, "dp_teed_imgs"),
348
- os.path.join(outdir, "merged_imgs"),
349
- '_depth',
350
- 1,
351
- 'multiply',
352
- [[2, 0], [2, 1]],
353
- [1, 0]
354
- )
355
-
356
-
357
  if __name__ == '__main__':
358
- # Gradio 接口代码
359
  import gradio as gr
360
 
361
  def gradio_process_line(img):
@@ -364,10 +148,9 @@ if __name__ == '__main__':
364
 
365
  process_line(img_path, './output')
366
 
367
- output_image_path = './output/teed_imgs/processed_image.png' # 更新为实际输出路径
368
  return Image.open(output_image_path)
369
 
370
- # 定义 Gradio 接口
371
  iface = gr.Interface(
372
  fn=gradio_process_line,
373
  inputs=gr.Image(type="pil"),
@@ -376,9 +159,4 @@ if __name__ == '__main__':
376
  description="Upload an image to process it with depth estimation and edge detection."
377
  )
378
 
379
- # 启动 Gradio 应用
380
  iface.launch()
381
-
382
-
383
-
384
-
 
 
 
 
 
 
 
1
  from PIL import Image
 
2
  import cv2
3
  import numpy as np
4
  import os
 
13
  from depthAnything.depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
14
  import shutil
15
 
 
16
def multiply_blend(image1, image2):
    """Multiply-blend a 3-channel image with a single-channel mask.

    Args:
        image1: H x W x 3 uint8 image.
        image2: H x W single-channel uint8 image; broadcast to 3 channels
            so the shapes match before blending.

    Returns:
        The blended H x W x 3 uint8 image.
    """
    # Expand the single-channel mask to three channels to match image1.
    mask = np.stack((image2,) * 3, axis=-1)
    # Multiply blend in normalized [0, 1] space, then scale back to 0-255.
    blended = (image1 / 255.0) * (mask / 255.0) * 255.0
    return blended.astype(np.uint8)
22
 
 
 
 
 
 
 
 
 
 
23
def screen_blend(image1, image2):
    """Screen-blend two images; the result is at least as bright as either input.

    Bug fix: the original computed ``1 - (1 - a/255) * (1 - b/255) * 255``,
    which scales only one complement factor by 255 — after clipping to
    [0, 255] the output was essentially black. The whole complement product
    must be rescaled: ``(1 - (1 - a/255) * (1 - b/255)) * 255``.

    Args:
        image1: uint8 array.
        image2: uint8 array with the same shape as image1.

    Returns:
        uint8 array of the screen blend.
    """
    # Work in float to avoid uint8 overflow/truncation during the blend.
    image1 = image1.astype(float)
    image2 = image2.astype(float)
    screened = (1 - (1 - image1 / 255) * (1 - image2 / 255)) * 255
    result = np.clip(screened, 0, 255).astype('uint8')
    return result
29
 
30
def erosion(img, kernel_size=3, iterations=1, dilate=False):
    """Morphologically erode (or dilate) an image after grayscale conversion.

    Args:
        img: BGR or single-channel image array.
        kernel_size: side length of the square structuring element.
        iterations: number of erosion/dilation passes.
        dilate: when True, dilate instead of erode.

    Returns:
        The processed single-channel image.
    """
    # Collapse a 3-channel image to grayscale first.
    if img.ndim == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    # Pick the morphological operation once, then apply it.
    operation = cv2.dilate if dilate else cv2.erode
    return operation(img, kernel, iterations=iterations)
39
 
40
def teed_imgs(img_path='./input', outdir='./output/teed_imgs', gaussianBlur=[0, 3, 0]):
    """Run TEED edge detection on an image file or a folder of images.

    Images are staged into a temporary directory (optionally Gaussian-blurred),
    TEED is run over that directory, and the temp directory is removed.

    Args:
        img_path: path to a single image file or to a directory of images.
        outdir: directory where TEED writes its edge maps.
        gaussianBlur: [enabled, kernel_size, sigmaX]; when enabled != 0 a
            Gaussian blur is applied before edge detection.
    """
    os.makedirs(outdir, exist_ok=True)
    os.makedirs('teed_tmp', exist_ok=True)

    if os.path.isfile(img_path):
        # Single image: stage it (optionally blurred) under a fixed temp name.
        img = cv2.imread(img_path)
        if gaussianBlur[0] != 0:
            img = cv2.GaussianBlur(img, (gaussianBlur[1], gaussianBlur[1]), gaussianBlur[2])
        cv2.imwrite(os.path.join('teed_tmp', 'temp_image.png'), img)
    else:
        # Bug fix: the previous else-branch wrote an undefined `img`
        # (NameError for directory input). Stage every image in the folder,
        # keeping original basenames so downstream name matching
        # (e.g. the '_depth' suffix) still works.
        for name in sorted(os.listdir(img_path)):
            if name.startswith('.') or not name.lower().endswith(
                    ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp', 'tif')):
                continue
            img = cv2.imread(os.path.join(img_path, name))
            if gaussianBlur[0] != 0:
                img = cv2.GaussianBlur(img, (gaussianBlur[1], gaussianBlur[1]), gaussianBlur[2])
            cv2.imwrite(os.path.join('teed_tmp', name), img)

    # Run TEED over the staged temp directory.
    args, train_info = parse_args(is_testing=True, pl_opt_dir=outdir)
    args.input_val_dir = 'teed_tmp'
    teed.main(args, train_info)

    shutil.rmtree('teed_tmp')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
 
58
def depth_anything(img_path='./input', outdir='./output/depth_anything', encoder='vitl', pred_only=True, grayscale=True):
    """Estimate a depth map for a single image file with Depth-Anything.

    Writes '<name>_depth.png' into `outdir`. Directory inputs are ignored by
    this version (only the `os.path.isfile` branch exists).

    Args:
        img_path: path to one image file.
        outdir: output directory for the depth map.
        encoder: backbone variant, one of 'vits' / 'vitb' / 'vitl'.
        pred_only: kept for backward compatibility; only the prediction is saved.
        grayscale: save a 3-channel grayscale depth map instead of a colormap.
    """
    DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

    model_configs = {
        'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
        'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
        'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]}
    }

    # Use `model` rather than re-using the function's own name, which the
    # original shadowed with this local variable.
    model = DepthAnything(model_configs[encoder])
    model.load_state_dict(torch.load('./checkpoints/depth_anything_{}14.pth'.format(encoder)))
    model = model.to(DEVICE).eval()

    transform = Compose([
        Resize(width=518, height=518, resize_target=False, keep_aspect_ratio=True,
               ensure_multiple_of=14, resize_method='lower_bound',
               image_interpolation_method=cv2.INTER_CUBIC),
        NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        PrepareForNet(),
    ])

    if os.path.isfile(img_path):
        # Bug fix: this commit dropped the makedirs call, so cv2.imwrite
        # silently failed when the output directory did not already exist.
        os.makedirs(outdir, exist_ok=True)

        raw_image = cv2.imread(img_path)
        image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB) / 255.0
        h, w = image.shape[:2]

        image = transform({'image': image})['image']
        image = torch.from_numpy(image).unsqueeze(0).to(DEVICE)

        with torch.no_grad():
            depth = model(image)

        # Resize the prediction back to the input resolution, then
        # min-max normalize to the 0-255 range.
        depth = F.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
        depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
        depth = depth.cpu().numpy().astype(np.uint8)

        if grayscale:
            depth = np.repeat(depth[..., np.newaxis], 3, axis=-1)
        else:
            depth = cv2.applyColorMap(depth, cv2.COLORMAP_INFERNO)

        filename = os.path.basename(img_path)
        cv2.imwrite(os.path.join(outdir, filename[:filename.rfind('.')] + '_depth.png'), depth)
 
 
 
 
 
 
 
 
 
 
98
 
99
def process_line(img_path='./input', outdir='./output'):
    """Full line-art pipeline: depth estimation -> TEED edges -> merged result.

    Args:
        img_path: input image file (or directory) to process.
        outdir: root output directory; intermediate and final results are
            written into subdirectories beneath it.
    """
    # Name every intermediate directory once up front.
    depth_dir = os.path.join(outdir, "depth_anything")
    teed_dir = os.path.join(outdir, "teed_imgs")
    dp_teed_dir = os.path.join(outdir, "dp_teed_imgs")
    merged_dir = os.path.join(outdir, "merged_imgs")

    # Depth map of the input, then edges of the input and of the depth map.
    depth_anything(img_path, depth_dir)
    teed_imgs(img_path, teed_dir, [1, 7, 2])
    teed_imgs(depth_dir, dp_teed_dir, [0, 7, 2])

    # Multiply-merge the two edge sets; depth-derived names carry '_depth'.
    merge_images_in_2_folder(
        teed_dir,
        dp_teed_dir,
        merged_dir,
        '_depth',
        1,
        'multiply',
        [[2, 0], [2, 1]],
        [1, 0]
    )
114
 
115
+ def merge_2_images(img1, img2, mode, erosion_para=[[0, 0], [0, 0]], dilate=[0, 0]):
116
  img1 = cv2.imread(img1)
117
  img2 = cv2.imread(img2)
118
  img1 = cv2.resize(img1, (img2.shape[1], img2.shape[0]))
 
124
  return multiply_blend(img1, img2)
125
  elif mode == 'screen':
126
  return screen_blend(img1, img2)
127
+
128
def merge_images_in_2_folder(folder1, folder2, outdir, suffix_need_remove=None, suffix_floder=0, mode='multiply', erosion_para=[[0, 0], [0, 0]], dilate=[0, 0]):
    """Merge same-named images from two folders into `outdir`.

    Bug fix: this version accepted `suffix_need_remove`/`suffix_floder` but
    never used them, so suffixed files (e.g. '_depth' outputs passed by
    process_line) never matched and nothing was merged. The suffix is now
    stripped before matching, while the on-disk names are kept for reading.

    Args:
        folder1, folder2: source folders; files are matched by basename.
        suffix_need_remove: optional filename suffix to strip before matching.
        suffix_floder: which folder carries the suffix (0 = folder1, 1 = folder2).
        mode: blend mode forwarded to merge_2_images ('multiply' or 'screen').
        erosion_para: per-image [kernel_size, iterations] pairs, forwarded.
        dilate: per-image dilate flags, forwarded.
    """
    os.makedirs(outdir, exist_ok=True)
    exts = ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp', 'tif')

    pairs1 = [os.path.splitext(f) for f in os.listdir(folder1) if f.endswith(exts)]
    pairs2 = [os.path.splitext(f) for f in os.listdir(folder2) if f.endswith(exts)]
    # Robustness: zip(*[]) would raise on an empty folder; nothing to merge.
    if not pairs1 or not pairs2:
        return
    names1, exts1 = zip(*pairs1)
    names2, exts2 = zip(*pairs2)

    # Names actually on disk (before any suffix stripping), used for reading.
    raw1, raw2 = list(names1), list(names2)
    names1, names2 = list(names1), list(names2)
    if suffix_need_remove:
        if suffix_floder == 0:
            names1 = [n[:-len(suffix_need_remove)] if n.endswith(suffix_need_remove) else n for n in names1]
        elif suffix_floder == 1:
            names2 = [n[:-len(suffix_need_remove)] if n.endswith(suffix_need_remove) else n for n in names2]

    for i, name in enumerate(names1):
        if name in names2:
            j = names2.index(name)
            img1 = os.path.join(folder1, raw1[i] + exts1[i])
            img2 = os.path.join(folder2, raw2[j] + exts2[j])
            result = merge_2_images(img1, img2, mode, erosion_para, dilate)
            cv2.imwrite(os.path.join(outdir, name + exts1[i]), result)
141
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
  if __name__ == '__main__':
 
143
  import gradio as gr
144
 
145
  def gradio_process_line(img):
 
148
 
149
  process_line(img_path, './output')
150
 
151
+ output_image_path = './output/merged_imgs/temp_input.png' # 更新为实际输出路径
152
  return Image.open(output_image_path)
153
 
 
154
  iface = gr.Interface(
155
  fn=gradio_process_line,
156
  inputs=gr.Image(type="pil"),
 
159
  description="Upload an image to process it with depth estimation and edge detection."
160
  )
161
 
 
162
  iface.launch()