revert
app.py CHANGED
@@ -24,7 +24,7 @@ from models.SpaTrackV2.models.vggt4track.models.vggt_moe import VGGT4Track
 from models.SpaTrackV2.models.vggt4track.utils.load_fn import preprocess_image
 from models.SpaTrackV2.models.predictor import Predictor
 from models.SpaTrackV2.models.utils import get_points_on_a_grid
-
+
 
 from diffusers.utils import export_to_video, load_image
 
@@ -194,145 +194,6 @@ def render_from_pointcloud(rgb_frames, depth_frames, intrinsics, original_extrinsics
     return {'rendered': output_path, 'motion_signal': motion_signal_path, 'mask': mask_path}
 
 
-def visualize_tracks_with_selective_history(
-    video: np.ndarray,               # (T, H, W, C) uint8
-    tracks: np.ndarray,              # (T, N, 2) float - 2D track coordinates
-    track_depths: np.ndarray,        # (T, N) float - depth at each track point
-    visibility: np.ndarray = None,   # (T, N) float
-    output_path: str = None,
-    fps: int = 24,
-    depth_threshold: float = None,   # Threshold to separate foreground/background
-    bg_history_length: int = -1,     # History length for background (-1 = infinite)
-    fg_history_length: int = 0,      # History length for foreground (0 = no history)
-    linewidth: int = 2,
-    point_radius: int = 4,
-):
-    """
-    Visualize tracked points with selective history trails.
-
-    Background points (depth > threshold): Show history trails
-    Foreground points (depth <= threshold): No history trails (only current position)
-
-    Args:
-        video: Input video frames (T, H, W, C)
-        tracks: 2D track positions (T, N, 2) - (x, y) coordinates
-        track_depths: Depth values at each track point (T, N)
-        visibility: Visibility mask for each track point (T, N)
-        output_path: Path to save the output video
-        fps: Output video frame rate
-        depth_threshold: Depth threshold to separate FG/BG. If None, uses median.
-        bg_history_length: History length for background tracks (-1 = infinite)
-        fg_history_length: History length for foreground tracks (0 = no history)
-        linewidth: Width of the history trail lines
-        point_radius: Radius of the current position marker
-
-    Returns:
-        Annotated video frames (T, H, W, C)
-    """
-    T, H, W, C = video.shape
-    N = tracks.shape[1]
-
-    # Compute depth threshold if not provided
-    # Use median depth at frame 0 as the threshold
-    if depth_threshold is None:
-        valid_depths = track_depths[0][track_depths[0] > 0]
-        if len(valid_depths) > 0:
-            depth_threshold = np.median(valid_depths)
-        else:
-            depth_threshold = 1.0
-
-    # Classify points as foreground or background based on initial depth
-    # Foreground = closer (smaller depth), Background = farther (larger depth)
-    initial_depths = track_depths[0]
-    is_background = initial_depths > depth_threshold
-
-    # Create color palette - rainbow based on y-coordinate
-    color_map = cm.get_cmap("gist_rainbow")
-    y_min, y_max = tracks[0, :, 1].min(), tracks[0, :, 1].max()
-    if y_max - y_min < 1e-6:
-        y_max = y_min + 1
-
-    colors = np.zeros((N, 3), dtype=np.uint8)
-    for i in range(N):
-        norm_y = (tracks[0, i, 1] - y_min) / (y_max - y_min)
-        color = np.array(color_map(norm_y)[:3]) * 255
-        colors[i] = color.astype(np.uint8)
-
-    # Process each frame
-    res_video = video.copy()
-
-    for t in range(T):
-        frame = res_video[t].copy()
-
-        # Draw history trails
-        # Background points: draw full history
-        # Foreground points: draw limited or no history
-        for i in range(N):
-            # Determine history length based on foreground/background
-            if is_background[i]:
-                history_len = bg_history_length
-            else:
-                history_len = fg_history_length
-
-            # Calculate start frame for history
-            if history_len < 0:     # Infinite history
-                start_frame = 0
-            elif history_len == 0:  # No history
-                start_frame = t
-            else:
-                start_frame = max(0, t - history_len)
-
-            # Draw history trail (lines connecting past positions)
-            if start_frame < t:
-                for j in range(start_frame, t):
-                    # Check visibility
-                    if visibility is not None:
-                        if visibility[j, i] < 0.5 or visibility[j + 1, i] < 0.5:
-                            continue
-
-                    pt1 = (int(tracks[j, i, 0]), int(tracks[j, i, 1]))
-                    pt2 = (int(tracks[j + 1, i, 0]), int(tracks[j + 1, i, 1]))
-
-                    # Skip invalid coordinates
-                    if pt1[0] <= 0 or pt1[1] <= 0 or pt2[0] <= 0 or pt2[1] <= 0:
-                        continue
-                    if pt1[0] >= W or pt1[1] >= H or pt2[0] >= W or pt2[1] >= H:
-                        continue
-
-                    # Draw with fading alpha (older = more transparent)
-                    alpha = (j - start_frame + 1) / (t - start_frame + 1)
-                    color = colors[i].tolist()
-                    cv2.line(frame, pt1, pt2, color, linewidth, cv2.LINE_AA)
-
-        # Draw current positions (dots)
-        for i in range(N):
-            if visibility is not None and visibility[t, i] < 0.5:
-                continue
-
-            coord = (int(tracks[t, i, 0]), int(tracks[t, i, 1]))
-
-            # Skip invalid coordinates
-            if coord[0] <= 0 or coord[1] <= 0:
-                continue
-            if coord[0] >= W or coord[1] >= H:
-                continue
-
-            color = colors[i].tolist()
-            cv2.circle(frame, coord, point_radius, color, -1)
-
-        res_video[t] = frame
-
-    # Save video if output path provided
-    if output_path:
-        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-        out = cv2.VideoWriter(output_path, fourcc, fps, (W, H))
-        for frame in res_video:
-            out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
-        out.release()
-
-    return res_video
-
-
 @spaces.GPU
 def run_spatial_tracker(video_tensor: torch.Tensor):
     """
@@ -397,8 +258,6 @@ def run_spatial_tracker(video_tensor: torch.Tensor):
     point_map = T.Resize((new_h, new_w))(point_map)
     conf_depth = T.Resize((new_h, new_w))(conf_depth)
     intrs_out[:, :2, :] = intrs_out[:, :2, :] * scale
-    # Scale 2D track coordinates
-    track2d_pred[..., :2] = track2d_pred[..., :2] * scale
 
     return {
         'video_out': video_out.cpu(),
@@ -406,10 +265,6 @@ def run_spatial_tracker(video_tensor: torch.Tensor):
         'conf_depth': conf_depth.cpu(),
         'intrs_out': intrs_out.cpu(),
         'c2w_traj': c2w_traj.cpu(),
-        'track2d_pred': track2d_pred.cpu(),  # 2D track positions (T, N, 2)
-        'track3d_pred': track3d_pred.cpu(),  # 3D track positions (T, N, 3)
-        'vis_pred': vis_pred.cpu(),          # Visibility mask (T, N)
-        'conf_pred': conf_pred.cpu(),        # Confidence scores (T, N)
     }
 
 
@@ -480,7 +335,7 @@ def run_wan_ttm_generation(prompt, tweak_index, tstrong_index, first_frame_path,
 
 def process_video(video_path, camera_movement, generate_ttm=True, progress=gr.Progress()):
     if video_path is None:
-        return None, None, None, None,
+        return None, None, None, None, "❌ Please upload a video first"
 
     progress(0, desc="Initializing...")
     temp_dir = create_user_temp_dir()
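Gradio maps a handler's return values positionally onto its `outputs` list, so every return path must yield one value per output component; with `track_viz_output` gone, the restored early return supplies exactly five. A minimal sketch of the pattern (the stub is illustrative, not the real `process_video`):

```python
def process_video_stub(video_path):
    # Five values, one per output component:
    # (render, motion signal, mask, first frame, status message).
    if video_path is None:
        return None, None, None, None, "❌ Please upload a video first"
    return "render.mp4", "motion.mp4", "mask.mp4", "frame.png", "✅ done"
```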
@@ -514,33 +369,6 @@ def process_video(video_path, camera_movement, generate_ttm=True, progress=gr.Progress()):
         new_exts = generate_camera_trajectory(len(
             rgb_frames), camera_movement, tracking_results['intrs_out'].numpy(), scene_scale)
 
-        progress(0.7, desc="Visualizing tracks...")
-        # Get track data for visualization
-        track2d = tracking_results['track2d_pred'].numpy()  # (T, N, 2+)
-        track3d = tracking_results['track3d_pred'].numpy()  # (T, N, 3)
-        vis_pred = tracking_results['vis_pred'].numpy()  # (T, N)
-
-        # Get depth at each track point (use Z coordinate from 3D tracks)
-        track_depths = track3d[..., 2]  # (T, N) - depth is the Z coordinate
-
-        # Create track visualization with selective history:
-        # - Background points (farther): show history trails
-        # - Foreground points (closer): no history trails
-        track_viz_path = os.path.join(out_dir, "track_visualization.mp4")
-        visualize_tracks_with_selective_history(
-            video=rgb_frames.copy(),
-            tracks=track2d[..., :2],  # Use only x, y coordinates
-            track_depths=track_depths,
-            visibility=vis_pred,
-            output_path=track_viz_path,
-            fps=OUTPUT_FPS,
-            depth_threshold=None,  # Auto-compute based on median depth
-            bg_history_length=-1,  # Infinite history for background
-            fg_history_length=0,   # No history for foreground
-            linewidth=2,
-            point_radius=4
-        )
-
         progress(0.8, desc="Rendering viewpoint...")
         output_video_path = os.path.join(out_dir, "rendered_video.mp4")
         render_results = render_from_pointcloud(rgb_frames, depth_frames, tracking_results['intrs_out'].numpy(),
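The removed call above leaned on a simple foreground/background split: with `depth_threshold=None`, the median of the valid frame-0 track depths became the threshold, and deeper points were treated as background (and given infinite trails). A self-contained sketch of that classification, with made-up depths:

```python
import numpy as np

track_depths0 = np.array([0.8, 1.2, 3.5, 4.1, 0.0])  # frame-0 depths (0 = invalid)
valid = track_depths0[track_depths0 > 0]
threshold = np.median(valid) if len(valid) > 0 else 1.0  # 2.35 here
is_background = track_depths0 > threshold
print(is_background)  # [False False  True  True False]
```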
@@ -553,13 +381,11 @@ def process_video(video_path, camera_movement, generate_ttm=True, progress=gr.Progress()):
             rgb_frames[0], cv2.COLOR_RGB2BGR))
 
         status_msg = f"✅ 3D results ready! You can now use the prompt below to generate a high-quality TTM video."
-        return render_results['rendered'], render_results['motion_signal'], render_results['mask'], first_frame_path,
+        return render_results['rendered'], render_results['motion_signal'], render_results['mask'], first_frame_path, status_msg
 
     except Exception as e:
         logger.error(f"Error: {e}")
-
-        traceback.print_exc()
-        return None, None, None, None, None, f"❌ Error: {str(e)}"
+        return None, None, None, None, f"❌ Error: {str(e)}"
 
 
 # --- GRADIO INTERFACE ---
@@ -585,7 +411,6 @@ with gr.Blocks(theme=gr.themes.Soft(), title="🎬 TTM Wan Video Generator") as
                 "🚀 1. Run Spatial Tracker", variant="primary")
 
             output_video = gr.Video(label="Point Cloud Render (Draft)")
-            track_viz_output = gr.Video(label="Track Visualization (BG history, FG no history)")
             status_text = gr.Markdown("Ready...")
 
         with gr.Column(scale=1):
@@ -622,17 +447,15 @@ with gr.Blocks(theme=gr.themes.Soft(), title="🎬 TTM Wan Video Generator") as
             motion_signal_output,
             mask_output,
             first_frame_output,
-            track_viz_output,
             status_text
         ]
     ).then(
-        fn=lambda a, b, c, d, e
+        fn=lambda a, b, c, d, e: (b, c, d),
         inputs=[
            output_video,
            motion_signal_output,
            mask_output,
            first_frame_output,
-           track_viz_output,
            status_text
         ],
         outputs=[motion_signal_file, mask_file, first_frame_file]
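Context for the last hunk: the reverted commit appears to have left the `.then()` lambda without a body (`fn=lambda a, b, c, d, e`), which would not even parse, and it wired a sixth component (`track_viz_output`) through the chain. The revert restores the five-argument lambda that forwards the motion signal, mask, and first frame into the download components. A runnable sketch of the restored wiring, with a stub standing in for the real click handler:

```python
import gradio as gr

def run_tracker_stub(video_path):
    # Stands in for the real handler; returns the five displayed outputs:
    # (render, motion signal, mask, first frame, status).
    return video_path, video_path, video_path, None, "✅ done"

with gr.Blocks() as demo:
    video_in = gr.Video(label="Input Video")
    output_video = gr.Video(label="Point Cloud Render (Draft)")
    motion_signal_output = gr.Video(label="Motion Signal")
    mask_output = gr.Video(label="Mask")
    first_frame_output = gr.Image(label="First Frame")
    status_text = gr.Markdown("Ready...")
    motion_signal_file = gr.File(label="Motion Signal File")
    mask_file = gr.File(label="Mask File")
    first_frame_file = gr.File(label="First Frame File")
    run_btn = gr.Button("🚀 1. Run Spatial Tracker", variant="primary")

    run_btn.click(
        fn=run_tracker_stub,
        inputs=[video_in],
        outputs=[output_video, motion_signal_output, mask_output,
                 first_frame_output, status_text],
    ).then(
        # The lambda's arity must match len(inputs); only b, c, d are forwarded.
        fn=lambda a, b, c, d, e: (b, c, d),
        inputs=[output_video, motion_signal_output, mask_output,
                first_frame_output, status_text],
        outputs=[motion_signal_file, mask_file, first_frame_file],
    )
```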