#!/usr/bin/env python
# coding: utf-8
# Cleaned and enhanced InternVideo2 6B evaluation script with structured logging
# Source:
import os
import sys
import subprocess
import logging
import json
import argparse
from pathlib import Path
import numpy as np
import cv2
import torch
from tqdm import tqdm
from huggingface_hub import hf_hub_download, HfApi, login
def setup_logging(log_level=logging.INFO, log_file=None):
handlers = []
fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
if log_file:
handlers.append(logging.FileHandler(log_file))
handlers.append(logging.StreamHandler(sys.stdout))
logging.basicConfig(level=log_level, format=fmt, handlers=handlers)
logging.info("Logging initialized.")
def run_command(cmd, cwd=None):
    """Run *cmd* through the shell and return its stripped stdout.

    Raises RuntimeError when the command exits with a non-zero status.
    """
    logging.debug(f"Running command: {cmd} (cwd={cwd})")
    proc = subprocess.run(cmd, shell=True, cwd=cwd, capture_output=True, text=True)
    if proc.returncode == 0:
        logging.debug(f"Command succeeded, output: {proc.stdout.strip()}")
        return proc.stdout.strip()
    logging.error(f"Command failed: {cmd}\nSTDOUT: {proc.stdout}\nSTDERR: {proc.stderr}")
    raise RuntimeError(f"Command '{cmd}' failed (exit code {proc.returncode})")
def download_checkpoint(repo_id: str, filename: str) -> str:
    """Fetch *filename* from the Hugging Face Hub repo *repo_id*.

    Returns the local cache path produced by ``hf_hub_download``.
    """
    # Fix: the original message logged a literal "(unknown)" placeholder
    # instead of the file actually being fetched.
    logging.info(f"Downloading {filename} from {repo_id}...")
    path = hf_hub_download(repo_id=repo_id, filename=filename)
    # Generic wording: this helper is not specific to vision checkpoints.
    logging.info(f"Downloaded checkpoint to {path}")
    return path
def load_config(config_path: str, vision_ckpt_path: str):
    """Load an InternVideo2 config file and point every checkpoint field at
    the downloaded vision weights."""
    from demo.config import Config, eval_dict_leaf
    logging.info(f"Loading config from {config_path}")
    cfg = eval_dict_leaf(Config.from_file(config_path))
    # All three fields must reference the same weights file.
    cfg.model.vision_ckpt_path = vision_ckpt_path
    cfg.model.vision_encoder.pretrained = vision_ckpt_path
    cfg.pretrained_path = vision_ckpt_path
    logging.debug(f"Config loaded: {cfg}")
    return cfg
def process_videos(
    json_path: str,
    model,
    config,
    output_prefix: str,
    num_frames_override: int = None
):
    """Slide a fixed-size frame window over every video listed in *json_path*,
    score each window against its phrase via text retrieval, and write two
    JSON files (predictions and per-window logits).

    The window size is ``num_frames_override`` when given, otherwise
    ``config.num_frames``. Returns ``(preds_file, logits_file)`` paths.
    """
    from demo.utils import retrieve_text, _frame_from_video
    logging.info(f"Reading evaluation data from {json_path}")
    data = json.loads(Path(json_path).read_text())
    # Window size: explicit override wins over the config default.
    num_frames = config.num_frames if num_frames_override is None else num_frames_override
    logging.info(f"Using window size: {num_frames} frames")
    preds = []
    logits = []
    for video_path, phrase, _ in data:
        logging.info("\n--- Starting new video ---")
        full_video = Path("photography-model") / video_path
        logging.info(f"Processing {full_video} with phrase '{phrase}'")
        capture = cv2.VideoCapture(str(full_video))
        frames = list(_frame_from_video(capture))
        # One score per window start position; the last window must still
        # have num_frames frames available.
        scores = []
        for start in tqdm(range(len(frames) - (num_frames - 1)), desc=Path(video_path).stem):
            window = frames[start : start + num_frames]
            _, probs = retrieve_text(window, [phrase], model=model, topk=1, config=config)
            scores.append(probs[0])
        # Predicted frame index is 1-based.
        best_idx = int(np.argmax(scores) + 1)
        preds.append(best_idx)
        logits.append(list(zip(map(float, scores), range(1, len(scores) + 1))))
        logging.info(f"Video result: predicted frame {best_idx}\n")
    preds_file = f"{output_prefix}-t{num_frames}.json"
    logits_file = f"{output_prefix}-logits-t{num_frames}.json"
    logging.info(f"Writing predictions to {preds_file}")
    Path(preds_file).write_text(json.dumps(preds, indent=2))
    logging.info(f"Writing logits to {logits_file}")
    Path(logits_file).write_text(json.dumps(logits, indent=2))
    return preds_file, logits_file
def upload_results(token: str, upload_files: list, repo_id: str):
    """Authenticate with the Hub and push each result file into *repo_id*
    (a dataset repo), keeping only the file's basename in the repo."""
    logging.info("Logging into Hugging Face Hub...")
    login(token)
    hub = HfApi()
    for local_path in upload_files:
        logging.info(f"Uploading {local_path} to {repo_id}")
        hub.upload_file(
            path_or_fileobj=local_path,
            path_in_repo=Path(local_path).name,
            repo_id=repo_id,
            repo_type="dataset",
        )
    logging.info("Upload complete.")
def main():
    """Entry point: prepare the IV2 repo, load the InternVideo2-6B model,
    run sliding-window retrieval over ACT75, and upload the result files."""
    parser = argparse.ArgumentParser(
        description="Evaluate InternVideo2 sliding-window retrieval."
    )
    parser.add_argument("--branch", type=str, default="main",
                        help="Branch to use for evaluation.")
    parser.add_argument("--num_frames", type=int, default=None,
                        help="Manually set the number of frames per window.")
    args = parser.parse_args()

    setup_logging()

    # Clone the IV2 repo on first run, then work from its multi_modality dir.
    iv2_path = Path('~/IV2').expanduser()
    if not iv2_path.exists():
        logging.info("Cloning IV2 repository...")
        run_command('git clone https://github.com/qingy1337/IV2.git ~/IV2')
    os.chdir(iv2_path / 'InternVideo2' / 'multi_modality')
    sys.path.append(os.getcwd())
    run_command(f'git checkout {args.branch}', cwd=os.getcwd())

    MODEL_NAME = '6B'
    vision_ckpt = download_checkpoint(
        repo_id="OpenGVLab/InternVideo2-Stage2_6B-224p-f4",
        filename="internvideo2-s2_6b-224p-f4.pt"
    )
    config = load_config('scripts/pretraining/stage2/6B/config.py', vision_ckpt)

    from demo.utils import setup_internvideo2
    model, tokenizer = setup_internvideo2(config)

    # Evaluation data lives in the photography-model repo.
    if not Path('photography-model').exists():
        run_command('git clone https://github.com/ruo2019/photography-model.git')

    prefix = f"ACT75-V5-InternVideo-{MODEL_NAME}"
    preds_file, logits_file = process_videos(
        'photography-model/data/ACT75.json',
        model, config, prefix,
        num_frames_override=args.num_frames
    )
    upload_results(
        os.getenv('HF_TOKEN', ''),
        [preds_file, logits_file],
        'qingy2024/InternVideo2-Data'
    )


if __name__ == '__main__':
    main()