# VidChain-exercise / VTimeLLM / vtimellm / inference_exercise_utils.py
# (uploaded via huggingface_hub by simplecloud, revision fca4fc0)
from dvc_eval import eval_dvc, eval_soda
import json
import argparse
import re
import difflib
import os
from torchvision.transforms import Compose, Resize, CenterCrop, Normalize
import torch
# Image preprocessing pipeline matching CLIP's expected input:
# resize/center-crop to 224x224 and normalize with CLIP's channel statistics.
try:
    from torchvision.transforms import InterpolationMode
    BICUBIC = InterpolationMode.BICUBIC
except ImportError:
    # Older torchvision versions lack InterpolationMode; fall back to PIL's
    # constant. Import PIL locally so it is only required on this path
    # (the original referenced `Image` without importing it -> NameError).
    from PIL import Image
    BICUBIC = Image.BICUBIC
transform = Compose([
    Resize(224, interpolation=BICUBIC),
    CenterCrop(224),
    # CLIP's published per-channel RGB mean / std.
    Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
# Check if model files exist
def check_model_files(config):
    """Verify that every model/checkpoint path in *config* exists on disk.

    Prints a short report and returns True when all files are present,
    False when any are missing.
    """
    required = (
        config.clip_path,
        config.pretrain_mm_mlp_adapter,
        config.stage2,
        config.stage3,
        config.stage4,
        config.stage5,
        config.model_base,
    )
    missing_files = [path for path in required if not os.path.exists(path)]
    if not missing_files:
        print("✓ All model files found")
        return True
    print("⚠ Missing model files:")
    for path in missing_files:
        print(f" - {path}")
    print("\nPlease download the required model checkpoints.")
    return False
# Video utility functions (frame extraction + CLIP feature encoding)
def extract_video_features(video_path, clip_model, video_loader, transform):
    """Encode a video's sampled frames with CLIP.

    Returns the half-precision feature tensor on success; on any failure
    the error is printed and None is returned (no exception escapes).
    """
    try:
        # The loader expects a dict-style record; frame count/rate is its choice.
        _, frames = video_loader.extract({'id': None, 'video': video_path})
        # Scale pixels to [0, 1], run the CLIP preprocessing pipeline, and
        # match the model's half-precision weights.
        frames = transform(frames / 255.0).to(torch.float16)
        # Inference only — no gradients needed.
        with torch.no_grad():
            return clip_model.encode_image(frames.to('cuda'))
    except Exception as err:
        print(f"Error processing video {video_path}: {err}")
        return None
def find_video_file(video_id, video_folder):
    """Locate a video on disk by id, trying common container extensions.

    Extensions are tried in a fixed order (mp4 first); returns the full
    path of the first existing file, or None when no candidate exists.
    """
    candidates = (
        os.path.join(video_folder, f"{video_id}.{ext}")
        for ext in ('mp4', 'mkv', 'webm', 'avi', 'mov')
    )
    return next((path for path in candidates if os.path.isfile(path)), None)
def load_dataset(data_path):
    """Load a JSON dataset from *data_path*.

    Returns the parsed object, or None (after printing the error) when the
    file is missing/unreadable or contains invalid JSON.
    """
    try:
        # Explicit encoding avoids locale-dependent decoding of the JSON file.
        with open(data_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (OSError, ValueError) as e:
        # OSError covers I/O failures; ValueError covers json.JSONDecodeError
        # (and UnicodeDecodeError). Narrower than the previous bare
        # `except Exception`, which also hid programming errors.
        print(f"✗ Error loading dataset: {e}")
        return None
## EVALUATE FUNCTIONS
def merge_similar_sentences(data):
    """Collapse consecutive near-duplicate captions into single events.

    Two neighbouring entries merge when their sentences are almost identical
    (SequenceMatcher ratio > 0.98) and their segments are contiguous (gap
    between the previous end and the next start within +/-1). Returns a new
    list of {"sentence", "timestamp"} dicts in the original order.
    """
    if not data:
        return data
    merged = []
    sentence, span = data[0]["sentence"], data[0]["timestamp"]
    for entry in data[1:]:
        nxt_sentence, nxt_span = entry["sentence"], entry["timestamp"]
        similar = difflib.SequenceMatcher(None, sentence, nxt_sentence).ratio() > 0.98
        contiguous = -1 <= nxt_span[0] - span[1] <= 1
        if similar and contiguous:
            # Extend the current event to also cover the next segment.
            span = [span[0], nxt_span[1]]
        else:
            merged.append({"sentence": sentence, "timestamp": span})
            sentence, span = nxt_sentence, nxt_span
    merged.append({"sentence": sentence, "timestamp": span})
    return merged
def evaluate(id, event, timestamps, answer, js):
    """Score predicted events/timestamps for one video against ground truth.

    Parameters:
        id: video id, used as the key into *js* (name kept for interface
            compatibility even though it shadows the builtin).
        event: list of predicted event sentences.
        timestamps: list of predicted [start, end] pairs, expressed as
            percentages of the video duration.
        answer: unused here; kept for interface compatibility.
        js: ground-truth annotations keyed by video id; each entry must
            provide a 'duration' (seconds).

    Returns:
        dict with 'soda_c', 'METEOR' and 'CIDEr' scaled to percentages.
    """
    pred = {id: [{'timestamp': timestamps[num], 'sentence': event[num]}
                 for num in range(len(event))]}

    # Drop exact-duplicate predictions, keeping the last occurrence.
    # BUG FIX: num_duplicates was previously never initialized, so any
    # duplicate prediction raised NameError at `num_duplicates += 1`.
    num_duplicates = 0
    refined_pred = []
    for num_pred, curr_pred in enumerate(pred[id]):
        if any(later == curr_pred for later in pred[id][num_pred + 1:]):
            num_duplicates += 1
        else:
            refined_pred.append(curr_pred)
    pred[id] = refined_pred

    # Restrict ground truth to the video being evaluated.
    gt_js = {k: v for k, v in js.items() if k in pred.keys()}

    for vid, items in list(pred.items()):
        items = merge_similar_sentences(items)
        duration = gt_js[vid]['duration']
        # Convert percentage timestamps to seconds; the end is shifted by one
        # percentile bucket so a segment covers its full final percentile.
        for item in items:
            item['timestamp'][0] = item['timestamp'][0] * duration / 100
            item['timestamp'][1] = (item['timestamp'][1] + 1) * duration / 100
        pred[vid] = items

    pred_result = {'results': pred}
    metrics = eval_soda(pred_result, [gt_js], print_matrix=False)
    metrics.update(eval_dvc(pred_result, [gt_js],
                            tious=[0.3, 0.5, 0.7],
                            distances=[],
                            max_proposals_per_video=1000,
                            verbose=False,
                            no_lang_eval=False))
    print(f"Found {len(pred)} logs")
    # Keep only the headline metrics, scaled to percentages.
    metrics = {k: v.item() * 100 for k, v in metrics.items() if k in ['soda_c', 'METEOR', 'CIDEr']}
    return metrics