plutosss commited on
Commit
6186433
·
verified ·
1 Parent(s): 9560b5c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +381 -57
app.py CHANGED
@@ -1,20 +1,158 @@
 
 
 
 
 
 
1
  from PIL import Image
 
2
  import cv2
 
3
  import numpy as np
 
 
4
  import torch
5
  import torch.nn.functional as F
6
  from torchvision.transforms import Compose
7
- import shutil
8
- import os
9
-
 
10
 
11
  from depthAnything.depth_anything.dpt import DepthAnything
12
  from depthAnything.depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
13
- from TEED.main import parse_args, main
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
- # 深度处理函数
16
- def depth_anything_image(image, encoder='vitl', pred_only=True, grayscale=True):
17
  DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
 
18
  model_configs = {
19
  'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
20
  'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
@@ -22,77 +160,263 @@ def depth_anything_image(image, encoder='vitl', pred_only=True, grayscale=True):
22
  }
23
 
24
  depth_anything = DepthAnything(model_configs[encoder])
25
- depth_anything.load_state_dict(torch.load(f'./checkpoints/depth_anything_{encoder}14.pth'))
26
  depth_anything = depth_anything.to(DEVICE).eval()
27
 
 
 
 
28
  transform = Compose([
29
- Resize(width=518, height=518, resize_target=False, keep_aspect_ratio=True,
30
- ensure_multiple_of=14, resize_method='lower_bound', image_interpolation_method=cv2.INTER_CUBIC),
 
 
 
 
 
 
 
31
  NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
32
  PrepareForNet(),
33
  ])
34
 
35
- raw_image = np.array(image.convert('RGB'))[:, :, ::-1].copy() # RGB to BGR
36
- h, w = raw_image.shape[:2]
37
- image_tensor = transform({'image': raw_image / 255.0})['image']
38
- image_tensor = torch.from_numpy(image_tensor).unsqueeze(0).to(DEVICE)
 
 
 
 
 
 
 
 
 
 
39
 
40
- with torch.no_grad():
41
- depth = depth_anything(image_tensor)
42
 
43
- depth = F.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
44
- depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
45
- depth = depth.cpu().numpy().astype(np.uint8)
46
 
47
- return np.repeat(depth[..., np.newaxis], 3, axis=-1) if grayscale else cv2.applyColorMap(depth, cv2.COLORMAP_INFERNO)
48
 
49
- # TEED 图像处理函数
50
- def teed_process_image(image):
51
- os.makedirs('./output/teed_imgs', exist_ok=True)
52
- os.makedirs('./teed_tmp', exist_ok=True)
53
 
54
- temp_image_path = './teed_tmp/temp_image.png'
55
- cv2.imwrite(temp_image_path, np.array(image))
56
 
57
- args, train_info = parse_args(is_testing=True, pl_opt_dir='./output/teed_imgs')
58
- args.input_val_dir = './teed_tmp'
59
- args.output_dir = './output/teed_imgs'
60
 
61
- checkpoint_path = './TEED/checkpoints/BIPED/5/5_model.pth'
62
- if not os.path.exists(checkpoint_path):
63
- raise FileNotFoundError(f"Checkpoint file not found: {checkpoint_path}")
64
-
65
- args.checkpoint_data = checkpoint_path # 确保使用正确的路径
66
 
67
- main(args, train_info)
 
 
 
68
 
69
- shutil.rmtree('./teed_tmp')
70
- return cv2.imread(os.path.join('./output/teed_imgs', 'processed_image.png'))
71
 
 
 
 
 
 
72
 
 
 
 
73
 
74
- # 处理单个图像
75
- def process_single_image(image):
76
- depth_result = depth_anything_image(image, 'vitl')
77
- teed_result = teed_process_image(image)
78
- merged_result = multiply_blend(depth_result, teed_result)
79
- return merged_result
80
 
81
- # Gradio 界面处理函数
82
- def gradio_process_line(img):
83
- processed_image = process_single_image(img)
84
- return Image.fromarray(processed_image)
85
 
86
- # Gradio 界面
87
- import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
88
 
89
- iface = gr.Interface(
90
- fn=gradio_process_line,
91
- inputs=gr.Image(type="pil"),
92
- outputs=gr.Image(type="pil"),
93
- title="Image Processing with Depth Anything and TEED",
94
- description="Upload an image to process it with depth estimation and edge detection."
95
- )
96
 
97
- # 启动 Gradio 应用
98
- iface.launch()
 
1
+ # 部署 teed、depth-anything
2
+ # 腐蚀算法
3
+ # 读取图片
4
+ # 输出图片
5
+ # 使用 depth-anything + teed 生成外轮廓
6
+ # 使用 teed + 腐蚀算法 生成内边缘
7
  from PIL import Image
8
+
9
  import cv2
10
+ import cv2_ext
11
  import numpy as np
12
+ import gradio as gr
13
+ import os
14
  import torch
15
  import torch.nn.functional as F
16
  from torchvision.transforms import Compose
17
+ from tqdm import tqdm
18
+ import TEED.main as teed
19
+ from TEED.main import parse_args
20
+ import logging
21
 
22
  from depthAnything.depth_anything.dpt import DepthAnything
23
  from depthAnything.depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
24
+ import shutil
25
+
26
def multiply_blend(image1, image2):
    """Multiply-blend a 3-channel image with a single-channel mask.

    ``image1`` is expected to be an (H, W, 3) uint8 image; ``image2`` is a
    2-D array that is replicated across three channels before blending.
    Returns the blended result as uint8.
    """
    # Replicate the single-channel input so both operands are (H, W, 3).
    mask = np.repeat(image2[..., np.newaxis], 3, axis=-1)
    # Classic "multiply" blend: normalise to [0, 1], multiply, rescale.
    blended = (image1 / 255.0) * (mask / 255.0) * 255.0
    return blended.astype(np.uint8)
33
+
34
# Example usage — kept as a guarded demo so importing this module no longer
# executes it.  Previously these statements ran unconditionally at import
# time, allocating random images and printing on every import (an
# import-time side effect).
if __name__ == "__main__":
    image1 = np.random.randint(0, 256, (717, 790, 3), dtype=np.uint8)
    image2 = np.random.randint(0, 256, (717, 790), dtype=np.uint8)

    result = multiply_blend(image1, image2)
    print(result.shape)  # Should be (717, 790, 3)
42
+
43
+
44
def screen_blend(image1, image2):
    """Screen-blend two same-shaped images and return a uint8 result.

    Screen blending inverts both inputs, multiplies them, and inverts the
    product: ``result = (1 - (1 - a)(1 - b)) * 255`` on [0, 1]-normalised
    values, which lightens the image (the dual of multiply blending).
    """
    # Convert to float for the arithmetic.
    image1 = image1.astype(float)
    image2 = image2.astype(float)

    # BUG FIX: the original expression was
    #   1 - (1 - image1/255) * (1 - image2/255) * 255
    # which computes 1 - product*255 (almost always negative, so the clip
    # below collapsed the output to ~0) instead of (1 - product) * 255.
    screened = (1 - (1 - image1 / 255) * (1 - image2 / 255)) * 255

    # Clamp and convert back to uint8.
    result = np.clip(screened, 0, 255).astype('uint8')
    return result
55
+
56
+
57
def erosion(img, kernel_size=3, iterations=1, dilate=False):
    """Erode (or dilate) an image after reducing it to a single channel.

    A colour (3-D) input is converted to grayscale first; the morphological
    operation is then applied with a square all-ones kernel.  Set
    ``dilate=True`` to dilate instead of erode.  Returns the 2-D result.
    """
    # Collapse colour inputs to grayscale.
    if img.ndim == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Square structuring element of ones.
    kernel = np.ones((kernel_size, kernel_size), np.uint8)

    # Pick the morphological operation and apply it.
    op = cv2.dilate if dilate else cv2.erode
    return op(img, kernel, iterations=iterations)
73
+
74
+
75
def erosion_img_from_path(img_path, output_dir='./output/erosion_img', kernel_size=3, iterations=1, dilate=False):
    """Apply erosion (or dilation) to every image found at *img_path*.

    *img_path* may be a single image file, a ``.txt`` file listing image
    paths (one per line), or a directory of images.  Results are written to
    *output_dir* under the original basenames.
    """
    image_exts = ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp', '.tif')
    filenames = []
    if os.path.isfile(img_path):
        _, extension = os.path.splitext(img_path)
        # BUG FIX: os.path.splitext keeps the leading dot, so the old
        # comparison `extension.lower() == 'txt'` never matched, and a bare
        # '.tif' input (missing from the old list) left `filenames` unbound,
        # raising NameError below.
        if extension.lower() == '.txt':
            with open(img_path, 'r', encoding='utf-8') as f:
                filenames = f.read().splitlines()
        elif extension.lower() in image_exts:
            filenames = [img_path]
    else:
        filenames = os.listdir(img_path)
        filenames = [os.path.join(img_path, filename) for filename in filenames if
                     not filename.startswith('.') and filename.lower().endswith(image_exts)]
        filenames.sort()

    os.makedirs(output_dir, exist_ok=True)

    for filename in tqdm(filenames):
        img = cv2.imread(filename)
        img = erosion(img, kernel_size, iterations, dilate)
        cv2.imwrite(os.path.join(output_dir, os.path.basename(filename)), img)
98
+
99
+
100
def copy_file(src, dest):
    """Copy *src* to *dest*, printing a warning instead of raising on I/O errors.

    Best-effort semantics: a failed copy is reported to stdout so a batch
    job can keep going.
    """
    try:
        shutil.copy(src, dest)
    except IOError as e:
        # Deliberately swallow the error after reporting it.
        print("Unable to copy file. %s" % e)
108
+
109
+
110
def guassian_blur_path(img_path, output_dir='./output/guassian_blur', kernel_size=3, sigmaX=0):
    """Gaussian-blur every image found at *img_path* into *output_dir*.

    *img_path* may be a single image file, a ``.txt`` file listing image
    paths (one per line), or a directory of images.  ``kernel_size`` must be
    odd (OpenCV requirement).  The function name keeps the original
    'guassian' spelling for caller compatibility.
    """
    image_exts = ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp', '.tif')
    filenames = []
    if os.path.isfile(img_path):
        _, extension = os.path.splitext(img_path)
        # BUG FIX: os.path.splitext keeps the leading dot, so the old
        # comparison with 'txt' never matched, and a bare '.tif' input left
        # `filenames` unbound (NameError).
        if extension.lower() == '.txt':
            with open(img_path, 'r', encoding='utf-8') as f:
                filenames = f.read().splitlines()
        elif extension.lower() in image_exts:
            filenames = [img_path]
    else:
        filenames = os.listdir(img_path)
        filenames = [os.path.join(img_path, filename) for filename in filenames if
                     not filename.startswith('.') and filename.lower().endswith(image_exts)]
        filenames.sort()

    os.makedirs(output_dir, exist_ok=True)

    for filename in tqdm(filenames):
        img = cv2.imread(filename)
        img = cv2.GaussianBlur(img, (kernel_size, kernel_size), sigmaX)
        cv2.imwrite(os.path.join(output_dir, os.path.basename(filename)), img)
133
+
134
+
135
def depth_anything(img_path='./input', outdir='./output/depth_anything', encoder='vitl', pred_only=True,
                   grayscale=True):
    """Run Depth Anything monocular depth estimation over one or more images.

    img_path:  a single image file, a ``.txt`` file listing image paths, or
               a directory of images.
    outdir:    directory where depth maps are written.
    encoder:   backbone variant ('vits' | 'vitb' | 'vitl'); the matching
               checkpoint must exist under ./checkpoints/.
    pred_only: write only the depth map; otherwise write a side-by-side
               panel of the raw image and the depth map with captions.
    grayscale: emit a 3-channel grayscale depth map instead of an INFERNO
               colormap.
    """
    # Layout constants for the optional side-by-side visualisation.
    margin_width = 50
    caption_height = 60

    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 1
    font_thickness = 2

    DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

    model_configs = {
        'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
        'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
        # NOTE(review): the 'vits' entry was not visible in the reviewed diff;
        # these are the standard Depth Anything ViT-S values — confirm against
        # the original file.
        'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
    }

    depth_anything = DepthAnything(model_configs[encoder])
    depth_anything.load_state_dict(torch.load('./checkpoints/depth_anything_{}14.pth'.format(encoder)))
    depth_anything = depth_anything.to(DEVICE).eval()

    total_params = sum(param.numel() for param in depth_anything.parameters())
    print('Total parameters: {:.2f}M'.format(total_params / 1e6))

    # Resize to a multiple of 14 (ViT patch size), normalise with ImageNet
    # statistics, and pack for the network.
    transform = Compose([
        Resize(
            width=518,
            height=518,
            resize_target=False,
            keep_aspect_ratio=True,
            ensure_multiple_of=14,
            resize_method='lower_bound',
            image_interpolation_method=cv2.INTER_CUBIC,
        ),
        NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        PrepareForNet(),
    ])

    image_exts = ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp', '.tif')
    filenames = []
    if os.path.isfile(img_path):
        _, extension = os.path.splitext(img_path)
        # BUG FIX: os.path.splitext keeps the leading dot, so the old
        # comparison with 'txt' never matched, and a bare '.tif' input left
        # `filenames` unbound (NameError).
        if extension.lower() == '.txt':
            with open(img_path, 'r', encoding='utf-8') as f:
                filenames = f.read().splitlines()
        elif extension.lower() in image_exts:
            filenames = [img_path]
    else:
        filenames = os.listdir(img_path)
        filenames = [os.path.join(img_path, filename) for filename in filenames if
                     not filename.startswith('.') and filename.lower().endswith(image_exts)]
        filenames.sort()

    os.makedirs(outdir, exist_ok=True)

    for filename in tqdm(filenames):
        raw_image = cv2.imread(filename)
        image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB) / 255.0

        h, w = image.shape[:2]

        image = transform({'image': image})['image']
        image = torch.from_numpy(image).unsqueeze(0).to(DEVICE)

        with torch.no_grad():
            depth = depth_anything(image)

        # Upsample the prediction back to the original resolution and
        # stretch it to the full 0-255 range.
        depth = F.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
        depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0

        depth = depth.cpu().numpy().astype(np.uint8)

        if grayscale:
            depth = np.repeat(depth[..., np.newaxis], 3, axis=-1)
        else:
            depth = cv2.applyColorMap(depth, cv2.COLORMAP_INFERNO)

        filename = os.path.basename(filename)

        if pred_only:
            cv2.imwrite(os.path.join(outdir, filename[:filename.rfind('.')] + '_depth.png'), depth)
        else:
            # Compose [raw | white spacer | depth] with a caption strip on top.
            split_region = np.ones((raw_image.shape[0], margin_width, 3), dtype=np.uint8) * 255
            combined_results = cv2.hconcat([raw_image, split_region, depth])

            caption_space = np.ones((caption_height, combined_results.shape[1], 3), dtype=np.uint8) * 255
            captions = ['Raw image', 'Depth Anything']
            segment_width = w + margin_width

            for i, caption in enumerate(captions):
                # Calculate text size
                text_size = cv2.getTextSize(caption, font, font_scale, font_thickness)[0]

                # Calculate x-coordinate to center the text
                text_x = int((segment_width * i) + (w - text_size[0]) / 2)

                # Add text caption
                cv2.putText(caption_space, caption, (text_x, 40), font, font_scale, (0, 0, 0), font_thickness)

            final_result = cv2.vconcat([caption_space, combined_results])

            cv2.imwrite(os.path.join(outdir, filename[:filename.rfind('.')] + '_img_depth.png'), final_result)
247
+
248
+
249
def teed_imgs(img_path='./input', outdir='./output/teed_imgs', gaussianBlur=[0, 3, 0]):
    """Run TEED edge detection over images, staging inputs in ./teed_tmp.

    img_path:     a single image file, a ``.txt`` file listing image paths,
                  or a directory of images.
    outdir:       directory the TEED results are written to (passed to
                  parse_args as pl_opt_dir).
    gaussianBlur: [enabled_flag, kernel_size, sigmaX] — when the flag is
                  non-zero, inputs are blurred before edge detection.
    NOTE(review): inputs are copied into ./teed_tmp before teed.main runs,
    so TEED presumably reads from that directory via its parsed defaults —
    confirm in TEED.main / parse_args.
    """
    args, train_info = parse_args(is_testing=True, pl_opt_dir=outdir)
    os.makedirs('teed_tmp', exist_ok=True)

    image_exts = ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp', '.tif')
    filenames = []
    if os.path.isfile(img_path):
        _, extension = os.path.splitext(img_path)
        # BUG FIX: os.path.splitext keeps the leading dot, so the old
        # comparison with 'txt' never matched, and a bare '.tif' input left
        # `filenames` unbound (NameError).
        if extension.lower() == '.txt':
            with open(img_path, 'r', encoding='utf-8') as f:
                filenames = f.read().splitlines()
        elif extension.lower() in image_exts:
            filenames = [img_path]
    else:
        filenames = os.listdir(img_path)
        filenames = [os.path.join(img_path, filename) for filename in filenames if
                     not filename.startswith('.') and filename.lower().endswith(image_exts)]
        filenames.sort()

    for filename in tqdm(filenames):
        if gaussianBlur[0] != 0:
            # Pre-blur the input before staging it for TEED.
            img = cv2.imread(filename)
            img = cv2.GaussianBlur(img, (gaussianBlur[1], gaussianBlur[1]), gaussianBlur[2])
            cv2.imwrite(os.path.join('teed_tmp', os.path.basename(filename)), img)
        else:
            copy_file(filename, 'teed_tmp')

    teed.main(args, train_info)
    # Clean up the staging directory.
    shutil.rmtree('teed_tmp')
275
+
276
+
277
def merge_2_images(img1, img2, mode, erosion_para=[[0, 0], [0, 0]], dilate=[0, 0]):  # Merge img1 into img2; img1 is resized to img2's dimensions first
    # img1/img2 are file paths.  erosion_para[i] = [kernel_size, iterations]
    # for the corresponding image (iterations == 0 skips the step); dilate[i]
    # selects dilation instead of erosion.  mode is 'multiply' or 'screen'.
    # NOTE(review): the mutable default arguments are never mutated here, so
    # sharing them across calls is currently harmless.
    img1 = cv2.imread(img1)
    img2 = cv2.imread(img2)
    img1 = cv2.resize(img1, (img2.shape[1], img2.shape[0]))
    if erosion_para[0][1] != 0:
        img1 = erosion(img1, erosion_para[0][0], erosion_para[0][1], dilate[0])
    if erosion_para[1][1] != 0:
        img2 = erosion(img2, erosion_para[1][0], erosion_para[1][1], dilate[1])
    # NOTE(review): multiply_blend stacks img2 to 3 channels, i.e. it expects
    # the 2-D grayscale produced by erosion; calling with 'multiply' while
    # skipping img2's erosion would feed it a 3-D array — confirm callers
    # always erode img2 in multiply mode.
    if mode == 'multiply':
        return multiply_blend(img1, img2)
    elif mode == 'screen':
        return screen_blend(img1, img2)
    # NOTE(review): any other mode falls through and returns None — confirm
    # callers only pass 'multiply' or 'screen'.
289
+
290
+
291
def merge_images_in_2_folder(folder1, folder2, outdir, suffix_need_remove=None, suffix_floder=0, mode='multiply',
                             erosion_para=[[0, 0], [0, 0]],
                             dilate=[0, 0]):  # Merge images from folder1 and folder2; optionally strip a suffix from one folder's names; erosion params are [kernel_size, iterations]
    # Pairs images by filename stem (after optional suffix removal) and
    # merges each matched pair with merge_2_images, writing the result under
    # the folder1 stem + extension into outdir.
    # suffix_need_remove: suffix (e.g. '_depth') stripped before matching;
    # suffix_floder: which folder (0 or 1) carries that suffix.
    os.makedirs(outdir, exist_ok=True)
    # Split every image filename into (stem, extension) pairs per folder.
    name_extension_pairs_folder1 = [os.path.splitext(filename) for filename in os.listdir(folder1) if filename.endswith(
        ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp', 'tif'))]
    filenames_noext_folder1, extensions_folder1 = zip(*name_extension_pairs_folder1)
    name_extension_pairs_folder2 = [os.path.splitext(filename) for filename in os.listdir(folder2) if filename.endswith(
        ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp', 'tif'))]
    filenames_noext_folder2, extensions_folder2 = zip(*name_extension_pairs_folder2)
    if suffix_need_remove:
        if suffix_floder == 0:
            # Keep the original stems so the real file can still be located
            # by index after matching on the stripped names.
            filenames_raw = list(filenames_noext_folder1).copy()
            filenames_noext_folder1 = [
                filename[:-len(suffix_need_remove)] + filename[-len(suffix_need_remove):].replace(suffix_need_remove,
                                                                                                  '') for filename in
                filenames_noext_folder1]
        if suffix_floder == 1:
            filenames_raw = list(filenames_noext_folder2).copy()
            filenames_noext_folder2 = [
                filename[:-len(suffix_need_remove)] + filename[-len(suffix_need_remove):].replace(suffix_need_remove,
                                                                                                  '') for filename in
                filenames_noext_folder2]

    for index, filename in enumerate(filenames_noext_folder1):
        if filename in filenames_noext_folder2:
            print(filename)
            if suffix_need_remove:
                if suffix_floder == 0:
                    # folder1 names carry the suffix: open the raw (suffixed)
                    # file from folder1, the stripped-name file from folder2.
                    img1 = os.path.join(folder1, filenames_raw[index] + extensions_folder1[index])
                    img2 = os.path.join(folder2, filename + extensions_folder2[filenames_noext_folder2.index(filename)])
                if suffix_floder == 1:
                    # folder2 names carry the suffix: the reverse lookup.
                    img1 = os.path.join(folder1, filename + extensions_folder1[index])
                    img2 = os.path.join(folder2,
                                        filenames_raw[filenames_noext_folder2.index(filename)] + extensions_folder2[
                                            filenames_noext_folder2.index(filename)])
            else:
                img1 = os.path.join(folder1, filename + extensions_folder1[index])
                img2 = os.path.join(folder2, filename + extensions_folder2[filenames_noext_folder2.index(filename)])
            result = merge_2_images(img1, img2, mode, erosion_para, dilate)
            cv2.imwrite(os.path.join(outdir, filename + extensions_folder1[index]), result)
332
+
333
+
334
def invert_image(image):
    """Return a BGR image whose grayscale intensities are inverted.

    The input is collapsed to grayscale, bitwise-inverted (255 - value),
    and expanded back to three channels so downstream BGR consumers work.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    inverted = cv2.bitwise_not(gray)
    return cv2.cvtColor(inverted, cv2.COLOR_GRAY2BGR)
342
+
343
def process_images(input_folder='./output/merged_imgs'):
    """Invert every image in *input_folder* into a sibling 'output_invert' dir.

    Failures on individual files are reported and skipped so the batch
    continues.
    """
    output_folder = os.path.join(os.path.dirname(input_folder), 'output_invert')
    os.makedirs(output_folder, exist_ok=True)

    # Collect the image files in the input folder.
    exts = ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp', 'tif')
    image_files = [f for f in os.listdir(input_folder) if f.lower().endswith(exts)]

    for image_file in tqdm(image_files):
        image_path = os.path.join(input_folder, image_file)
        try:
            # Read via PIL, then convert RGB -> BGR for OpenCV.
            with Image.open(image_path) as img:
                image = np.array(img.convert('RGB'))[:, :, ::-1].copy()
                if image is not None:
                    # Invert and save to the output folder.
                    inverted = invert_image(image)
                    cv2.imwrite(os.path.join(output_folder, image_file), inverted)
                else:
                    raise ValueError(f"Failed to read image: {image_file}")
        except Exception as e:
            print(f"Error processing file {image_file}: {e}")
367
def process_line(input_files, output_folder):
    # Gradio handler: run the full depth + edge pipeline on the uploaded
    # files.  Returns (list_of_result_image_paths, error_message); the error
    # message is "" on success so the UI error box stays blank.
    try:
        # Create the output folder (if it does not exist).
        os.makedirs(output_folder, exist_ok=True)

        # Paths of the processed images to hand back to the gallery.
        processed_images = []

        # Iterate over every uploaded file.
        for img_path in input_files:
            img_path = img_path.name  # get the file path
            # NOTE(review): with gr.File(type="filepath") the items may be
            # plain path strings, which have no .name attribute — confirm the
            # installed Gradio version returns file objects here.

            # Pipeline: depth map, edges of the raw image, edges of the
            # depth map, multiply-merge the two edge sets, then invert.
            depth_anything(img_path, os.path.join(output_folder, "depth_anything"))
            teed_imgs(img_path, os.path.join(output_folder, "teed_imgs"), [1, 7, 2])
            teed_imgs(os.path.join(output_folder, "depth_anything"), os.path.join(output_folder, "dp_teed_imgs"), [0, 7, 2])
            merge_images_in_2_folder(os.path.join(output_folder, "teed_imgs"),
                                     os.path.join(output_folder, "dp_teed_imgs"),
                                     os.path.join(output_folder, "merged_imgs"),
                                     '_depth', 1, 'multiply', [[2, 0], [2, 1]], [1, 0])
            process_images(os.path.join(output_folder, "merged_imgs"))

        # Collect the merged result images for display.
        processed_images = [os.path.join(output_folder, "merged_imgs", f)
                            for f in os.listdir(os.path.join(output_folder, "merged_imgs"))
                            if f.lower().endswith(('.jpg', '.jpeg', '.png'))]

        return processed_images, ""  # processed image paths and an empty error message
    except Exception as e:
        return [], f"发生错误: {str(e)}"  # no images; error text shown in the UI
397
+
398
+
399
def launch_interface():
    """Build and launch the Gradio UI for the depth + edge pipeline."""
    with gr.Blocks() as demo:
        # Let the user select multiple input images.
        input_files = gr.File(label="选择输入图片", file_count="multiple", type="filepath")

        # Output folder path entered manually by the user.
        output_folder = gr.Textbox(label="输出文件夹路径", placeholder="请输入输出文件夹路径", lines=1)

        submit_button = gr.Button("开始处理")

        # Gallery showing the processed images.
        output_gallery = gr.Gallery(label="处理后的图片", show_label=True, height='auto')

        # Error message box (hidden by default).
        error_text = gr.Textbox(label="错误信息", interactive=False, visible=False)

        # Clicking the button runs process_line over the inputs.
        submit_button.click(process_line, inputs=[input_files, output_folder], outputs=[output_gallery, error_text])

    # share=True exposes a public Gradio link.
    demo.launch(share=True)
419
 
 
 
 
 
 
 
 
420
 
421
if __name__ == "__main__":
    # Start the Gradio app when run as a script.
    launch_interface()