File size: 9,076 Bytes
434b0b0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Organization  : Tongyi Lab, Alibaba
# @Author        : Lingteng Qiu
# @Email         : 220019047@link.cuhk.edu.cn
# @Time          : 2025-08-31 10:02:15
# @Function      : Test app video input inference without Gradio UI

"""
Test app.py inference with video input (no Gradio UI).

Extracts reference frames from input video and runs the same inference
logic as the Gradio "Generate" button. Auto-downloads prior models and
LHM++ weights if not present.

Usage:
    python scripts/test/test_app_video.py
    # Default input_video: ./assets/example_videos/yuliang.mp4

    python scripts/test/test_app_video.py --input_video ./assets/example_videos/woman.mp4

    # Override model path (skip AutoModelQuery)
    python scripts/test/test_app_video.py --input_video ./video.mp4 --model_path ./exps/checkpoints/LHMPP-Released-v0.1
"""

import argparse
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

import imageio.v3 as iio
import numpy as np
import torch
from accelerate import Accelerator

torch._dynamo.config.disable = True

from app import get_motion_video_fps, prior_model_check
from scripts.download_motion_video import motion_video_check
from core.utils.model_card import MODEL_CONFIG
from core.utils.model_download_utils import AutoModelQuery

# Encoding settings passed to imageio/ffmpeg when writing the output video.
DEFAULT_VIDEO_CODEC = "libx264"  # H.264 for broad player compatibility
DEFAULT_PIXEL_FORMAT = "yuv420p"  # required by most browsers/players for H.264
DEFAULT_VIDEO_BITRATE = "10M"
MACRO_BLOCK_SIZE = 16  # ffmpeg resizes frames to multiples of this; 16 is the H.264 default


def _resolve_video_path(video) -> str:
    """Resolve video path from Gradio Video component value (str or dict)."""
    if isinstance(video, dict):
        return video.get("path") or video.get("name") or ""
    return video if isinstance(video, str) else ""


def main() -> None:
    """Run the app.py "Generate" pipeline on a video without the Gradio UI.

    Pipeline: parse CLI args, resolve the input and motion videos
    (auto-downloading assets when missing), build the LHM++ model,
    extract reference frames, load motion sequences, estimate the
    subject's pose/shape, run inference, and encode the result to MP4.
    """
    parser = argparse.ArgumentParser(
        description="Test app.py inference with video input (no Gradio UI)"
    )
    parser.add_argument(
        "--input_video",
        type=str,
        default="./assets/example_videos/yuliang.mp4",
        help="Path to input human video (source for reference frames)",
    )
    parser.add_argument(
        "--model_name",
        type=str,
        default="LHMPP-700M",
        choices=["LHMPP-700M", "LHMPPS-700M"],  # LHMPP-700MC coming soon
        help="Model to use",
    )
    parser.add_argument(
        "--model_path",
        type=str,
        default=None,
        help="Override model path (skip AutoModelQuery, use local checkpoint)",
    )
    parser.add_argument(
        "--motion_video",
        type=str,
        default="./motion_video/BasketBall_II/BasketBall_II.mp4",
        help="Path to motion video. Default: BasketBall_II",
    )
    parser.add_argument(
        "--ref_view",
        type=int,
        default=8,
        help="Number of reference frames to extract from input video",
    )
    parser.add_argument(
        "--motion_size",
        type=int,
        default=120,
        help="Number of motion frames to render",
    )
    parser.add_argument(
        "--render_fps",
        type=int,
        default=30,
        help="Output video FPS (fallback)",
    )
    parser.add_argument(
        "--visualized_center",
        action="store_true",
        help="Center crop output with padding",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="debug/video_test",
        help="Output directory",
    )
    args = parser.parse_args()

    # argparse always yields a str here, but _resolve_video_path also
    # tolerates the dict shape a Gradio Video component would produce.
    input_video = _resolve_video_path(args.input_video)
    if not input_video or not os.path.isfile(input_video):
        raise FileNotFoundError(
            f"Input video not found: {args.input_video}. "
            "Provide a video containing a full-body human."
        )

    # Downloads the bundled motion videos into ./motion_video if absent.
    motion_video_check(save_dir=".")

    # Default motion video
    motion_video = args.motion_video
    if not motion_video or not os.path.isfile(motion_video):
        # Fall back to the first motion_video/<name>/<name>.mp4 found
        # (sorted, so the choice is deterministic).
        motion_dir = "./motion_video"
        if os.path.isdir(motion_dir):
            candidates = []
            for name in sorted(os.listdir(motion_dir)):
                subdir = os.path.join(motion_dir, name)
                if not os.path.isdir(subdir):
                    continue
                mp4 = os.path.join(subdir, name + ".mp4")
                if os.path.isfile(mp4):
                    candidates.append(mp4)
            if candidates:
                motion_video = candidates[0]
                print(f"[*] Using default motion video: {motion_video}")
        if not motion_video or not os.path.isfile(motion_video):
            raise FileNotFoundError(
                f"Motion video not found: {args.motion_video}. "
                "Specify --motion_video or ensure motion_video/<name>/<name>.mp4 exists."
            )

    # Env (same as app.py)
    # NOTE(review): these env vars appear to be consumed at import time by
    # the modules imported just below, hence the deferred imports — confirm.
    os.environ.update(
        {
            "APP_ENABLED": "1",
            "APP_MODEL_NAME": args.model_name,
            "APP_TYPE": "infer.human_lrm_a4o",
            "NUMBA_THREADING_LAYER": "omp",
        }
    )

    # Deferred project imports: must happen after os.environ.update above.
    from core.datasets.data_utils import SrcImagePipeline
    from core.utils.app_utils import get_motion_information, prepare_input_and_output
    from engine.pose_estimation.pose_estimator import PoseEstimator
    from scripts.inference.app_inference import (
        build_app_model,
        inference_results,
        parse_app_configs,
    )
    from scripts.inference.utils import easy_memory_manager

    # Build model_cards (same as app.py): AutoModelQuery or --model_path override
    prior_model_check(save_dir="./pretrained_models")
    model_config = MODEL_CONFIG[args.model_name]
    if args.model_path:
        model_path = args.model_path
    else:
        auto_query = AutoModelQuery(save_dir="./pretrained_models")
        model_path = auto_query.query(args.model_name)
    model_cards = {
        args.model_name: {
            "model_path": model_path,
            "model_config": model_config,
        }
    }

    # Preprocessing pipeline for the extracted reference frames.
    processing_list = [
        dict(
            name="PadRatioWithScale",
            target_ratio=5 / 3,
            tgt_max_size_list=[840],
            val=True,
        ),
    ]
    dataset_pipeline = SrcImagePipeline(*processing_list)
    accelerator = Accelerator()
    cfg, _ = parse_app_configs(model_cards)

    print("[1/6] Loading model...")
    lhmpp = build_app_model(cfg)
    lhmpp.to("cuda")
    # NOTE(review): estimator is constructed on CPU and then its device
    # attribute is flipped to "cuda" — presumably to defer GPU allocation
    # until easy_memory_manager moves it; confirm against PoseEstimator.
    pose_estimator = PoseEstimator(
        "./pretrained_models/human_model_files/", device="cpu"
    )
    pose_estimator.device = "cuda"

    output_dir = os.path.abspath(args.output_dir)
    os.makedirs(output_dir, exist_ok=True)

    # Minimal stand-in for a TemporaryDirectory-like object: downstream
    # code only reads the .name attribute.
    class WorkingDir:
        name = output_dir

    working_dir = WorkingDir()

    print(f"[2/6] Extracting reference frames from input video: {input_video}")
    imgs, save_sample_imgs, motion_path, dump_image_dir, dump_video_path = (
        prepare_input_and_output(
            image=None,
            video=input_video,
            ref_view=args.ref_view,
            video_params=motion_video,
            working_dir=working_dir,
            dataset_pipeline=dataset_pipeline,
            cfg=cfg,
        )
    )
    print(f"  Extracted {len(imgs)} reference frames, saved to {save_sample_imgs}")

    print("[3/6] Loading motion sequences...")
    motion_name, motion_seqs = get_motion_information(
        motion_path, cfg, motion_size=args.motion_size
    )
    video_size = len(motion_seqs["motion_seqs"])
    print(f"  Motion: {motion_name}, frames: {video_size}")

    print("[4/6] Running pose estimation...")
    device = "cuda"
    dtype = torch.float32
    # Pose/shape is estimated from the first reference frame only.
    with torch.no_grad():
        with easy_memory_manager(pose_estimator, device="cuda"):
            shape_pose = pose_estimator(imgs[0])
    if not shape_pose.is_full_body:
        raise ValueError(f"Input video invalid: {shape_pose.msg}")

    print("[5/6] Running inference...")
    # Normalize uint8 frames to [0, 1] and convert HWC -> CHW for torch.
    img_np = np.stack(imgs) / 255.0
    ref_imgs_tensor = torch.from_numpy(img_np).permute(0, 3, 1, 2).float().to(device)
    # Inject the estimated body shape (betas) into the motion's SMPL-X params;
    # copy() so the cached motion_seqs dict is not mutated.
    smplx_params = motion_seqs["smplx_params"].copy()
    smplx_params["betas"] = torch.tensor(
        shape_pose.beta, dtype=dtype, device=device
    ).unsqueeze(0)

    rgbs = inference_results(
        lhmpp,
        ref_imgs_tensor,
        smplx_params,
        motion_seqs,
        video_size=video_size,
        visualized_center=args.visualized_center,
        device=device,
    )

    # Get FPS (same as app.py: prefer samurai_visualize.mp4 in motion dir)
    motion_dir = os.path.dirname(motion_video)
    samurai_path = os.path.join(motion_dir, "samurai_visualize.mp4")
    video_for_fps = samurai_path if os.path.isfile(samurai_path) else motion_video
    render_fps = get_motion_video_fps(video_for_fps, default=args.render_fps)

    print(f"[6/6] Saving video ({render_fps} fps) to {dump_video_path}...")
    iio.imwrite(
        dump_video_path,
        rgbs,
        fps=render_fps,
        codec=DEFAULT_VIDEO_CODEC,
        pixelformat=DEFAULT_PIXEL_FORMAT,
        bitrate=DEFAULT_VIDEO_BITRATE,
        macro_block_size=MACRO_BLOCK_SIZE,
    )
    print(f"Done. Output: {dump_video_path}")


# Script entry point.
if __name__ == "__main__":
    main()