yyang181 committed
Commit c2a5690 · 1 Parent(s): d01f62c
Files changed (4):
  1. .gitignore +2 -0
  2. app.py +526 -138
  3. inference/data/video_reader.py +4 -0
  4. requirements.txt +9 -1
.gitignore CHANGED
@@ -9,6 +9,8 @@ wandb/
 pretrain/
 Pytorch-Correlation-extension/
 result
+src/
+DINOv2FeatureV6_LocalAtten_s2_154000.pth
 
 # Byte-compiled / optimized / DLL files
 __pycache__/
app.py CHANGED
@@ -1,147 +1,535 @@
+# app.py (aligned to main.py logic; keeps debug hooks; Gradio-safe DataLoader)
+# Inputs: (1) Black-and-white video (mp4/webm/avi) (2) Reference image (RGB)
+# Output: Colored video (mp4)
+#
+# Model checkpoint is HARD-CODED as required:
+# https://github.com/yyang181/colormnet/releases/download/v0.1/DINOv2FeatureV6_LocalAtten_s2_154000.pth
+
+import os
+import sys
+import shutil
+import subprocess
+import uuid
+import urllib.request
+import warnings
+from os import path
+
+warnings.filterwarnings("ignore", message="The detected CUDA version .* minor version mismatch")
+warnings.filterwarnings("ignore", message="There are no g\\+\\+ version bounds defined for CUDA version.*")
+warnings.filterwarnings("ignore", category=UserWarning, module="torch.utils.cpp_extension")
+os.environ.setdefault("TORCH_COMPILE_DISABLE", "1")
+os.environ.setdefault("MAX_JOBS", "1")
+
 import gradio as gr
+import spaces  # ZeroGPU decorator
 import numpy as np
-import random
-from diffusers import DiffusionPipeline
+from PIL import Image
+import cv2
+import traceback
+
 import torch
-import spaces
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
-if torch.cuda.is_available():
-    torch.cuda.max_memory_allocated(device=device)
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
-    pipe.enable_xformers_memory_efficient_attention()
-    pipe = pipe.to(device)
-else:
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
-    pipe = pipe.to(device)
-
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
-
-def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
-
-    image = pipe(
-        prompt = prompt,
-        negative_prompt = negative_prompt,
-        guidance_scale = guidance_scale,
-        num_inference_steps = num_inference_steps,
-        width = width,
-        height = height,
-        generator = generator
-    ).images[0]
-
-    return image
-
-examples = [
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
-    "A delicious ceviche cheesecake slice",
-]
-
-css="""
-#col-container {
-    margin: 0 auto;
-    max-width: 520px;
-}
+import torch.nn.functional as F
+from torch.utils.data import DataLoader
+
+# ---- Project imports ----
+from inference.data.test_datasets import DAVISTestDataset_221128_TransColorization_batch
+from inference.data.mask_mapper import MaskMapper
+from model.network import ColorMNet
+from inference.inference_core import InferenceCore
+from dataset.range_transform import inv_lll2rgb_trans
+from skimage import color
+
+# ----------------- CONFIG -----------------
+CHECKPOINT_URL = "https://github.com/yyang181/colormnet/releases/download/v0.1/DINOv2FeatureV6_LocalAtten_s2_154000.pth"
+CHECKPOINT_LOCAL = "DINOv2FeatureV6_LocalAtten_s2_154000.pth"
+
+TITLE = "ColorMNet — ZeroGPU (CUDA-only) Video Colorization with Reference Image"
+DESC = """
+Upload a **black-and-white video** and a **reference image**, then click "Start colorization".
+This Space runs **only on ZeroGPU (CUDA)**; if no GPU is allocated, an error is raised.
+The checkpoint link is fixed (edit `CHECKPOINT_URL` to change it).
+**Dataset layout:**
+- Extracted frames -> `./colormnet_run_<UUID>/input_video/<video stem, no extension>/00000.png...`
+- Reference image -> `./colormnet_run_<UUID>/input_ref/<video stem, no extension>/ref.png`
 """
 
-if torch.cuda.is_available():
-    power_device = "GPU"
-else:
-    power_device = "CPU"
-
-with gr.Blocks(css=css) as demo:
-
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(f"""
-        # Text-to-Image Gradio Template
-        Currently running on {power_device}.
-        """)
-
-        with gr.Row():
-
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-
-            run_button = gr.Button("Run", scale=0)
-
-        result = gr.Image(label="Result", show_label=False)
-
-        with gr.Accordion("Advanced Settings", open=False):
-
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=False,
-            )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
-
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-            with gr.Row():
-
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=512,
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=512,
-                )
-
-            with gr.Row():
-
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=10.0,
-                    step=0.1,
-                    value=0.0,
-                )
-
-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=1,
-                    maximum=12,
-                    step=1,
-                    value=2,
-                )
-
-        gr.Examples(
-            examples = examples,
-            inputs = [prompt]
+torch.set_grad_enabled(False)
+
+# ----------------- DEBUG (kept) -----------------
+def _enable_runtime_debug():
+    os.environ["CUDA_LAUNCH_BLOCKING"] = "1"  # synchronous execution, accurate error locations
+    os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1"  # show C++ stack traces
+    os.environ["PYTORCH_JIT"] = "0"  # disable the JIT
+    try:
+        torch.autograd.set_detect_anomaly(True)  # catch invalid ops/grads
+    except Exception:
+        pass
+
+# ----------------- PATH/DIR UTILS -----------------
+def ensure_clean_dir(d: str):
+    if path.exists(d):
+        if path.isdir(d):
+            return
+        else:
+            os.remove(d)
+    os.makedirs(d, exist_ok=True)
+
+# ----------------- MISC UTILS -----------------
+def ensure_checkpoint():
+    if not path.exists(CHECKPOINT_LOCAL):
+        print(f"[INFO] Downloading checkpoint from: {CHECKPOINT_URL}")
+        urllib.request.urlretrieve(CHECKPOINT_URL, CHECKPOINT_LOCAL)
+        print("[INFO] Checkpoint downloaded:", CHECKPOINT_LOCAL)
+
+def detach_to_cpu(x: torch.Tensor) -> torch.Tensor:
+    return x.detach().cpu()
+
+def tensor_to_np_float(image: torch.Tensor) -> np.ndarray:
+    image_np = image.numpy().astype("float32")
+    return image_np
+
+def lab2rgb_transform_PIL(mask: torch.Tensor) -> np.ndarray:
+    mask_d = detach_to_cpu(mask)
+    mask_d = inv_lll2rgb_trans(mask_d)
+    im = tensor_to_np_float(mask_d)
+    if len(im.shape) == 3:
+        im = im.transpose((1, 2, 0))
+    else:
+        im = im[:, :, None]
+    im = color.lab2rgb(im)
+    return im.clip(0, 1)
+
+# ---------- extract frames: dataset-root/<video_stem>/00000.png ----------
+def video_to_dataset_root(video_path: str, dataset_root: str):
+    """
+    Extract frames from a single video into dataset_root/<video_stem>/00000.png...
+    Returns: (subdir_path, video_stem, width, height, fps, frame_count)
+    """
+    ensure_clean_dir(dataset_root)
+    basename = path.basename(video_path)
+    stem, _ = path.splitext(basename)
+    subdir = path.join(dataset_root, stem)
+    ensure_clean_dir(subdir)
+
+    cap = cv2.VideoCapture(video_path)
+    assert cap.isOpened(), f"Cannot open video: {video_path}"
+
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    if not fps or fps <= 0:
+        fps = 25.0
+
+    idx = 0
+    w = h = None
+
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            break
+        if frame is None:
+            continue
+
+        h, w = frame.shape[:2]
+        out_path = path.join(subdir, f"{idx:05d}.png")
+
+        parent = path.dirname(out_path)
+        if not path.isdir(parent):
+            if path.exists(parent):
+                os.remove(parent)
+            os.makedirs(parent, exist_ok=True)
+
+        ok = cv2.imwrite(out_path, frame)
+        if not ok:
+            raise RuntimeError(f"Failed to write extracted frame: {out_path}")
+        idx += 1
+
+    cap.release()
+    if idx == 0:
+        raise RuntimeError("Input video has no readable frames.")
+
+    return subdir, stem, w, h, fps, idx
+
+# ---------- place ref image into ref_root/<video_stem>/ref.png ----------
+def ref_to_dataset_root(ref_image_path: str, ref_root: str, video_stem: str):
+    ensure_clean_dir(ref_root)
+    subdir = path.join(ref_root, video_stem)
+    ensure_clean_dir(subdir)
+
+    img = Image.open(ref_image_path).convert("RGB")
+    out_path = path.join(subdir, "ref.png")
+    img.save(out_path)
+    return subdir
+
+def encode_frames_to_video(frames_dir: str, out_path: str, fps: float):
+    frames = sorted([f for f in os.listdir(frames_dir) if f.lower().endswith(".png")])
+    assert len(frames) > 0, "No frames to encode."
+
+    first = cv2.imread(path.join(frames_dir, frames[0]))
+    h, w = first.shape[:2]
+
+    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+    vw = cv2.VideoWriter(out_path, fourcc, fps, (w, h))
+    for f in frames:
+        img = cv2.imread(path.join(frames_dir, f))
+        vw.write(img)
+    vw.release()
+
+# ----------------- MAIN PIPELINE (CUDA-only) -----------------
+def run_pipeline_cuda(bw_video_path: str, ref_image_path: str, user_config: dict, debug_shapes: bool) -> str:
+    if not torch.cuda.is_available():
+        raise RuntimeError("No GPU detected. This Space supports ZeroGPU (CUDA) only.")
+
+    if debug_shapes:
+        _enable_runtime_debug()
+
+    ensure_checkpoint()
+
+    DEVICE = torch.device("cuda")
+
+    # Workspace in CWD
+    base_run_dir = path.join(os.getcwd(), f"colormnet_run_{uuid.uuid4().hex}")
+    input_video_root = path.join(base_run_dir, "input_video")
+    input_ref_root = path.join(base_run_dir, "input_ref")
+    output_dir = path.join(base_run_dir, "result")
+
+    for p in (base_run_dir, input_video_root, input_ref_root, output_dir):
+        ensure_clean_dir(p)
+
+    # 1) Extract frames
+    vid_subdir, vid_stem, w, h, fps, n_frames = video_to_dataset_root(bw_video_path, input_video_root)
+    assert n_frames > 0, "Input video has no frames."
+
+    # 2) Reference image
+    _ = ref_to_dataset_root(ref_image_path, input_ref_root, vid_stem)
+
+    # 3) Config (fields match main.py; values merged in from the UI)
+    default_config = {
+        "FirstFrameIsNotExemplar": False,
+        "d16_batch_path": "input",  # parity only
+        "ref_path": "ref",  # parity only
+        "output": "result",  # parity only
+        "generic_path": None,
+        "dataset": "D16_batch",
+        "split": "val",
+        "save_all": True,
+        "benchmark": False,
+        "disable_long_term": False,
+        "max_mid_term_frames": 10,
+        "min_mid_term_frames": 5,
+        "max_long_term_elements": 10000,
+        "num_prototypes": 128,
+        "top_k": 30,
+        "mem_every": 5,
+        "deep_update_every": -1,
+        "save_scores": False,
+        "flip": False,
+        "size": -1,
+    }
+    config = {**default_config, **(user_config or {})}
+    config["enable_long_term"] = not config["disable_long_term"]
+
+    # 4) Build the dataset (select only this video's reader)
+    meta_dataset = DAVISTestDataset_221128_TransColorization_batch(
+        input_video_root, imset=input_ref_root, size=config["size"]
+    )
+    meta_list = meta_dataset.get_datasets()
+
+    target_reader = None
+    for vr in meta_list:
+        if getattr(vr, "vid_name", None) == vid_stem:
+            target_reader = vr
+            break
+    if target_reader is None:
+        if len(meta_list) == 1:
+            target_reader = meta_list[0]
+        else:
+            raise RuntimeError(f"Target video subdirectory not found in dataset: {vid_stem}; available={ [getattr(v, 'vid_name', '?') for v in meta_list] }")
+
+    # Output path rules (same as main.py)
+    is_youtube = str(config["dataset"]).startswith("Y")
+    is_davis = str(config["dataset"]).startswith("D")
+    is_lv = str(config["dataset"]).startswith("LV")
+
+    app_output_root = output_dir
+    if is_youtube or config["save_scores"]:
+        out_path = path.join(app_output_root, "Annotations")
+    else:
+        out_path = app_output_root
+
+    # 5) Model (keep the app's URL-based checkpoint loading)
+    network = ColorMNet(config, CHECKPOINT_LOCAL).to(DEVICE).eval()
+    model_weights = torch.load(CHECKPOINT_LOCAL, map_location="cuda")
+    network.load_weights(model_weights, init_as_zero_if_needed=True)
+
+    total_process_time = 0.0
+    total_frames = 0
+
+    # 6) Inference (frame by frame; internals aligned with main.py; debug prints kept)
+    vid_reader = target_reader
+    # Subprocesses are not allowed in Gradio/Spaces: num_workers=0 (otherwise a daemonic-processes error is raised)
+    loader = DataLoader(vid_reader, batch_size=1, shuffle=False, num_workers=0, pin_memory=True)
+    vid_name = vid_reader.vid_name
+    vid_length = len(loader)
+
+    # Long-term memory trigger: verbatim from main.py (no division-by-zero guard)
+    config['enable_long_term_count_usage'] = (
+        config['enable_long_term'] and
+        (vid_length
+         / (config['max_mid_term_frames'] - config['min_mid_term_frames'])
+         * config['num_prototypes'])
+        >= config['max_long_term_elements']
+    )
+
+    mapper = MaskMapper()
+    processor = InferenceCore(network, config=config)
+    first_mask_loaded = False
+
+    for ti, data in enumerate(loader):
+        try:
+            with torch.cuda.amp.autocast(enabled=not config["benchmark"]):
+                rgb = data['rgb'].cuda()[0]
+                msk = data.get('mask')
+                if not config['FirstFrameIsNotExemplar']:
+                    msk = msk[:, 1:3, :, :] if msk is not None else None
+
+                info = data['info']
+                frame = info['frame'][0]
+                shape = info['shape']
+                need_resize = info['need_resize'][0]
+
+                if debug_shapes:
+                    print(f"[Loop] frame={ti} rgb={tuple(rgb.shape)} "
+                          f"msk={None if msk is None else tuple(msk.shape)}", flush=True)
+
+                # timing identical to main.py
+                start = torch.cuda.Event(enable_timing=True)
+                end = torch.cuda.Event(enable_timing=True)
+                start.record()
+
+                if not first_mask_loaded:
+                    if msk is not None:
+                        first_mask_loaded = True
+                    else:
+                        continue
+
+                if config['flip']:
+                    rgb = torch.flip(rgb, dims=[-1])
+                    msk = torch.flip(msk, dims=[-1]) if msk is not None else None
+
+                if msk is not None:
+                    msk = torch.Tensor(msk[0]).cuda()
+                    if need_resize:
+                        msk = vid_reader.resize_mask(msk.unsqueeze(0))[0]
+                    processor.set_all_labels(list(range(1, 3)))
+                    labels = range(1, 3)
+                else:
+                    labels = None
+
+                if config['FirstFrameIsNotExemplar']:
+                    prob = processor.step_AnyExemplar(
+                        rgb,
+                        msk[:1, :, :].repeat(3, 1, 1) if msk is not None else None,
+                        msk[1:3, :, :] if msk is not None else None,
+                        labels,
+                        end=(ti == vid_length - 1)
+                    )
+                else:
+                    prob = processor.step(rgb, msk, labels, end=(ti == vid_length - 1))
+
+                if need_resize:
+                    prob = F.interpolate(prob.unsqueeze(1), shape, mode='bilinear', align_corners=False)[:, 0]
+
+                end.record()
+                torch.cuda.synchronize()
+                total_process_time += (start.elapsed_time(end) / 1000.0)
+                total_frames += 1
+
+                if config['flip']:
+                    prob = torch.flip(prob, dims=[-1])
+
+                if debug_shapes:
+                    try:
+                        print(f"[Loop] prob={tuple(prob.shape)}", flush=True)
+                    except Exception:
+                        pass
+
+                if config['save_scores']:
+                    prob = (prob.detach().cpu().numpy() * 255).astype(np.uint8)
+
+                if config['save_all'] or info['save'][0]:
+                    this_out_path = path.join(out_path, vid_name)
+                    os.makedirs(this_out_path, exist_ok=True)
+
+                    out_mask_final = lab2rgb_transform_PIL(torch.cat([rgb[:1, :, :], prob], dim=0))
+                    out_mask_final = (out_mask_final * 255).astype(np.uint8)
+                    Image.fromarray(out_mask_final).save(os.path.join(this_out_path, frame[:-4] + '.png'))
+
+        except Exception as _e:
+            # keep the full traceback for easier debugging
+            raise RuntimeError("FRAME_ERROR:\n" + traceback.format_exc())
+
+    if total_process_time > 0:
+        print(f'Total processing time: {total_process_time}')
+        print(f'Total processed frames: {total_frames}')
+        print(f'FPS: {total_frames / total_process_time}')
+        print(f'Max allocated memory (MB): {torch.cuda.max_memory_allocated() / (2**20)}')
+
+    # 7) Encode mp4 (find the frame directory via main.py's out_path rules)
+    frames_dir = path.join(out_path, vid_stem if path.isdir(path.join(out_path, vid_stem)) else vid_name)
+    if not path.isdir(frames_dir):
+        subs = [d for d in os.listdir(out_path) if path.isdir(path.join(out_path, d))]
+        if len(subs) == 1:
+            frames_dir = path.join(out_path, subs[0])
+        else:
+            frames_dir = path.join(output_dir, vid_stem)
+
+    colored_mp4 = path.join(base_run_dir, "colored_output.mp4")
+    encode_frames_to_video(frames_dir, colored_mp4, fps=fps)
+
+    # 8) Move the output video to CWD
+    final_mp4 = path.join(os.getcwd(), "result.mp4")
+    shutil.move(colored_mp4, final_mp4)
+    shutil.rmtree(base_run_dir, ignore_errors=True)
+
+    return final_mp4
+
+# ----------------- GRADIO HANDLERS -----------------
+@spaces.GPU(duration=1200)
+def gradio_infer(
+    debug_shapes,  # debug switch (kept)
+    bw_video, ref_image,
+    first_not_exemplar, dataset, split, save_all, benchmark,
+    disable_long_term, max_mid, min_mid, max_long,
+    num_proto, top_k, mem_every, deep_update,
+    save_scores, flip, size
+):
+    if not torch.cuda.is_available():
+        return None, "ZeroGPU did not allocate a GPU; please retry (or check that the Space hardware is ZeroGPU)."
+
+    if bw_video is None:
+        return None, "Please upload a black-and-white video."
+    if ref_image is None:
+        return None, "Please upload a reference image."
+
+    # Video path
+    if isinstance(bw_video, dict) and "name" in bw_video:
+        bw_video_path = bw_video["name"]
+    elif isinstance(bw_video, str):
+        bw_video_path = bw_video
+    else:
+        return None, "Cannot read the video input."
+
+    # Ref path
+    if isinstance(ref_image, Image.Image):
+        tmp_ref_path = path.join(os.getcwd(), f"ref_{uuid.uuid4().hex}.png")
+        ref_image.save(tmp_ref_path)
+        ref_path = tmp_ref_path
+    elif isinstance(ref_image, str):
+        ref_path = ref_image
+    else:
+        return None, "Cannot read the reference image input."
+
+    default_config = {
+        "FirstFrameIsNotExemplar": True,
+        "dataset": "D16_batch",
+        "split": "val",
+        "save_all": True,
+        "benchmark": False,
+        "disable_long_term": False,
+        "max_mid_term_frames": 10,
+        "min_mid_term_frames": 5,
+        "max_long_term_elements": 10000,
+        "num_prototypes": 128,
+        "top_k": 30,
+        "mem_every": 5,
+        "deep_update_every": -1,
+        "save_scores": False,
+        "flip": False,
+        "size": -1,
+    }
+
+    user_config = {
+        "FirstFrameIsNotExemplar": bool(first_not_exemplar) if first_not_exemplar is not None else default_config["FirstFrameIsNotExemplar"],
+        "dataset": str(dataset) if dataset else default_config["dataset"],
+        "split": str(split) if split else default_config["split"],
+        "save_all": bool(save_all) if save_all is not None else default_config["save_all"],
+        "benchmark": bool(benchmark) if benchmark is not None else default_config["benchmark"],
+        "disable_long_term": bool(disable_long_term) if disable_long_term is not None else default_config["disable_long_term"],
+        "max_mid_term_frames": int(max_mid) if max_mid is not None else default_config["max_mid_term_frames"],
+        "min_mid_term_frames": int(min_mid) if min_mid is not None else default_config["min_mid_term_frames"],
+        "max_long_term_elements": int(max_long) if max_long is not None else default_config["max_long_term_elements"],
+        "num_prototypes": int(num_proto) if num_proto is not None else default_config["num_prototypes"],
+        "top_k": int(top_k) if top_k is not None else default_config["top_k"],
+        "mem_every": int(mem_every) if mem_every is not None else default_config["mem_every"],
+        "deep_update_every": int(deep_update) if deep_update is not None else default_config["deep_update_every"],
+        "save_scores": bool(save_scores) if save_scores is not None else default_config["save_scores"],
+        "flip": bool(flip) if flip is not None else default_config["flip"],
+        "size": int(size) if size is not None else default_config["size"],
+    }
+
+    try:
+        out_mp4 = run_pipeline_cuda(
+            bw_video_path, ref_path, user_config, debug_shapes=bool(debug_shapes)
         )
+        return out_mp4, "Done ✅"
+    except subprocess.CalledProcessError as e:
+        return None, f"Runtime error:\n{e}"
+    except Exception as e:
+        return None, f"{e}"
+
+# ----------------- UI -----------------
+with gr.Blocks() as demo:
+    gr.Markdown(f"# {TITLE}")
+    gr.Markdown(DESC)
+
+    debug_shapes = gr.Checkbox(label="Debug logging (print shapes and full tracebacks)", value=False)
+
+    with gr.Row():
+        inp_video = gr.Video(label="Black-and-white video (mp4/webm/avi)", interactive=True)
+        inp_ref = gr.Image(label="Reference image (RGB)", type="pil")
+
+    with gr.Accordion("Advanced settings (aligned with main.py)", open=False):
+        with gr.Row():
+            first_not_exemplar = gr.Checkbox(label="FirstFrameIsNotExemplar", value=False)
+            dataset = gr.Textbox(label="dataset", value="D16_batch")
+            split = gr.Textbox(label="split", value="val")
+            save_all = gr.Checkbox(label="save_all", value=True)
+            benchmark = gr.Checkbox(label="benchmark", value=False)
+        with gr.Row():
+            disable_long_term = gr.Checkbox(label="disable_long_term", value=False)
+            max_mid = gr.Number(label="max_mid_term_frames", value=10, precision=0)
+            min_mid = gr.Number(label="min_mid_term_frames", value=5, precision=0)
+            max_long = gr.Number(label="max_long_term_elements", value=10000, precision=0)
+            num_proto = gr.Number(label="num_prototypes", value=128, precision=0)
+        with gr.Row():
+            top_k = gr.Number(label="top_k", value=30, precision=0)
+            mem_every = gr.Number(label="mem_every", value=5, precision=0)
+            deep_update = gr.Number(label="deep_update_every", value=-1, precision=0)
+            save_scores = gr.Checkbox(label="save_scores", value=False)
+            flip = gr.Checkbox(label="flip", value=False)
+            size = gr.Number(label="size", value=-1, precision=0)
 
-    run_button.click(
-        fn = infer,
-        inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-        outputs = [result]
+    run_btn = gr.Button("Start colorization (ZeroGPU inference)")
+    with gr.Row():
+        out_video = gr.Video(label="Output video (colorized result)")
+        status = gr.Textbox(label="Status / debug output", interactive=False, lines=12)
+
+    run_btn.click(
+        fn=gradio_infer,
+        inputs=[
+            debug_shapes,
+            inp_video, inp_ref,
+            first_not_exemplar, dataset, split, save_all, benchmark,
+            disable_long_term, max_mid, min_mid, max_long,
+            num_proto, top_k, mem_every, deep_update,
+            save_scores, flip, size
+        ],
+        outputs=[out_video, status]
     )
 
-demo.queue().launch()
+if __name__ == "__main__":
+    try:
+        ensure_checkpoint()
+    except Exception as e:
+        print(f"[WARN] Checkpoint pre-download failed (will retry on first inference): {e}")
+
+    demo.queue(max_size=32).launch(server_name="0.0.0.0", server_port=7860)
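
The heart of the new app.py is a disk-staged round-trip: `video_to_dataset_root` decodes the upload into numbered PNGs, the model recolors them, and `encode_frames_to_video` re-encodes the result with OpenCV's `mp4v` writer. A minimal sketch of that decode/encode loop under the same fps fallback, with hypothetical `src`/`dst` paths and the colorization step left as a pass-through:

```python
import cv2

def roundtrip(src: str, dst: str) -> int:
    """Decode src frame by frame and re-encode to dst, mirroring
    video_to_dataset_root + encode_frames_to_video without the PNG stage."""
    cap = cv2.VideoCapture(src)
    assert cap.isOpened(), f"Cannot open video: {src}"
    fps = cap.get(cv2.CAP_PROP_FPS)
    if not fps or fps <= 0:
        fps = 25.0  # same fallback as video_to_dataset_root
    writer, n = None, 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if writer is None:  # lazily sized from the first decoded frame
            h, w = frame.shape[:2]
            writer = cv2.VideoWriter(dst, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
        writer.write(frame)  # the real pipeline colorizes the frame here
        n += 1
    cap.release()
    if writer is not None:
        writer.release()
    return n
```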
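The long-term memory switch carried over from main.py reduces to simple arithmetic once the defaults are substituted; a quick check of where `enable_long_term_count_usage` flips on, using the values from `default_config`:

```python
max_mid, min_mid = 10, 5          # max/min_mid_term_frames defaults
num_prototypes, max_long = 128, 10000

def long_term_on(vid_length: int) -> bool:
    # mirrors the trigger in run_pipeline_cuda (no division-by-zero guard)
    return vid_length / (max_mid - min_mid) * num_prototypes >= max_long

assert not long_term_on(390)  # 390 / 5 * 128 = 9984.0  -> off
assert long_term_on(391)      # 391 / 5 * 128 = 10009.6 -> on
```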
inference/data/video_reader.py CHANGED
@@ -82,6 +82,10 @@ class VideoReader_221128_TransColorization(Dataset):
         load_mask = self.use_all_mask or (gt_path == self.first_gt_path)
         if load_mask and path.exists(gt_path):
             mask = Image.open(gt_path).convert('RGB')
+
+            # Resize with PIL first so the mask matches img's size
+            mask = mask.resize((img.shape[2], img.shape[1]), Image.BILINEAR)
+
             mask = self.im_transform(mask)
             mask_ab = mask[1:3,:,:]
             data['mask'] = mask_ab
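
The one-line fix above hinges on argument order: `PIL.Image.resize` expects `(width, height)`, while the transformed frame tensor `img` is laid out `(C, H, W)`, hence the `(img.shape[2], img.shape[1])` swap. A self-contained illustration with made-up sizes:

```python
import torch
from PIL import Image

img = torch.zeros(3, 480, 854)         # (C, H, W), as after im_transform
mask = Image.new("RGB", (1920, 1080))  # PIL sizes are (W, H)
mask = mask.resize((img.shape[2], img.shape[1]), Image.BILINEAR)
assert mask.size == (854, 480)         # mask now matches the frame
```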
requirements.txt CHANGED
@@ -82,6 +82,7 @@ tb-nightly
 tensorboard
 tensorboard-data-server
 -e git+https://github.com/cheind/py-thin-plate-spline.git@f6995795397118b7d0ac01aecd3f39ffbfad9dee#egg=thinplate
+# -e git+https://github.com/ClementPinard/Pytorch-Correlation-extension.git#egg=spatial_correlation_sampler
 tifffile
 tomli
 tqdm
@@ -91,4 +92,11 @@ urllib3
 wandb
 Werkzeug
 yapf
-zipp
+zipp
+gradio
+torch
+opencv-python
+numpy
+pillow
+scikit-image
+spaces  # <<< key: provides the @spaces.GPU decorator
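
The new `spaces` pin exists for the `@spaces.GPU` decorator used in app.py: on ZeroGPU hardware it attaches a GPU for the duration of the decorated call, and off Spaces it should degrade to a no-op. A minimal sketch of the pattern, with a hypothetical `double` function:

```python
import spaces
import torch

@spaces.GPU(duration=120)  # request a GPU for up to 120 s per call
def double(x: torch.Tensor) -> torch.Tensor:
    # on ZeroGPU, CUDA is only guaranteed inside the decorated call
    return (x.cuda() * 2).cpu()
```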