File size: 5,069 Bytes
fca4fc0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 |
from dvc_eval import eval_dvc, eval_soda
import json
import argparse
import re
import difflib
import os
from torchvision.transforms import Compose, Resize, CenterCrop, Normalize
import torch
# Define image transforms (CLIP preprocessing: resize, center-crop, normalize).
try:
    from torchvision.transforms import InterpolationMode
    BICUBIC = InterpolationMode.BICUBIC
except ImportError:
    # Older torchvision has no InterpolationMode; fall back to PIL's constant.
    # Bug fix: `Image` was referenced here without ever being imported.
    from PIL import Image
    BICUBIC = Image.BICUBIC
transform = Compose([
    Resize(224, interpolation=BICUBIC),
    CenterCrop(224),
    # CLIP's published image normalization statistics.
    Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
# Check if model files exist
def check_model_files(config):
    """Verify that every model checkpoint path in *config* exists on disk.

    Prints a status message either way; returns True when all required
    files are present, False (after listing the missing paths) otherwise.
    """
    required = [
        config.clip_path,
        config.pretrain_mm_mlp_adapter,
        config.stage2,
        config.stage3,
        config.stage4,
        config.stage5,
        config.model_base,
    ]
    missing = [path for path in required if not os.path.exists(path)]
    if not missing:
        print("✓ All model files found")
        return True
    print("⚠ Missing model files:")
    for path in missing:
        print(f" - {path}")
    print("\nPlease download the required model checkpoints.")
    return False
# CLIP Utility Functions
# Video Utility Functions
# Utility functions for video processing
def extract_video_features(video_path, clip_model, video_loader, transform):
    """Load a video, preprocess its frames, and encode them with CLIP.

    Returns the encoded frame features on success, or None if loading,
    preprocessing, or encoding fails (the error is printed).
    """
    try:
        # Decode frames; the loader expects a dict-style record.
        _, frames = video_loader.extract({'id': None, 'video': video_path})
        # Scale pixel values to [0, 1], apply CLIP preprocessing, and
        # match the model's half-precision weights.
        frames = transform(frames / 255.0).to(torch.float16)
        # Encode without building an autograd graph.
        with torch.no_grad():
            return clip_model.encode_image(frames.to('cuda'))
    except Exception as e:
        print(f"Error processing video {video_path}: {e}")
        return None
def find_video_file(video_id, video_folder):
    """Return the path of an existing video file for *video_id*, or None.

    Probes a fixed list of common container extensions in priority order.
    """
    candidates = (
        os.path.join(video_folder, f"{video_id}.{ext}")
        for ext in ('mp4', 'mkv', 'webm', 'avi', 'mov')
    )
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    return None
def load_dataset(data_path):
    """Parse the JSON file at *data_path*.

    Returns the decoded object, or None (after printing the error) when
    the file cannot be read or parsed.
    """
    try:
        with open(data_path, 'r') as handle:
            return json.load(handle)
    except Exception as err:
        print(f"✗ Error loading dataset: {err}")
        return None
## EVALUATE FUNCTIONS
def merge_similar_sentences(data):
    """Collapse consecutive near-duplicate captions into single events.

    Two adjacent entries are merged when their sentences are almost
    identical (SequenceMatcher ratio > 0.98) and their timestamps are
    nearly contiguous (start-to-previous-end gap within [-1, 1]).  A
    merged event keeps the first sentence and spans both timestamps.
    """
    if not data:
        return data
    merged = []
    sentence = data[0]["sentence"]
    span = data[0]["timestamp"]
    for entry in data[1:]:
        nxt_sentence = entry["sentence"]
        nxt_span = entry["timestamp"]
        similar = difflib.SequenceMatcher(None, sentence, nxt_sentence).ratio() > 0.98
        contiguous = -1 <= nxt_span[0] - span[1] <= 1
        if similar and contiguous:
            # Extend the current event to cover the duplicate's end time.
            span = [span[0], nxt_span[1]]
        else:
            merged.append({"sentence": sentence, "timestamp": span})
            sentence, span = nxt_sentence, nxt_span
    # Flush the final pending event.
    merged.append({"sentence": sentence, "timestamp": span})
    return merged
def evaluate(id, event, timestamps, answer, js):
    """Score predicted dense-caption events against ground truth.

    Args:
        id: video id; key under which predictions are stored.
        event: list of predicted sentences.
        timestamps: list of [start, end] pairs, expressed as percentages
            of the video duration.
        answer: unused; kept for interface compatibility with callers.
        js: ground-truth dict mapping video ids to annotations; must hold
            a 'duration' entry for *id*.

    Returns:
        dict with 'soda_c', 'METEOR' and 'CIDEr' scores scaled to 0-100.
    """
    pred = {}
    pred[id] = []
    for num in range(len(event)):
        pred[id].append({
            'timestamp': timestamps[num],
            'sentence': event[num]
        })
    # Drop exact duplicate predictions (same sentence AND timestamp).
    # Bug fix: num_duplicates was incremented without ever being
    # initialized, raising NameError on the first duplicate found.
    num_duplicates = 0
    refined_pred = []
    for num_pred, curr_pred in enumerate(pred[id]):
        duplicate = False
        for curr_pred2 in pred[id][num_pred + 1:]:
            if curr_pred2 == curr_pred:
                num_duplicates += 1
                duplicate = True
        if not duplicate:
            refined_pred.append(curr_pred)
    pred[id] = refined_pred
    # Restrict ground truth to the videos we actually predicted on.
    gt_js = {k: v for k, v in js.items() if k in pred.keys()}
    for vid, items in list(pred.items()):  # renamed from `id`: was shadowing the parameter and builtin
        items = merge_similar_sentences(items)
        duration = gt_js[vid]['duration']
        # Convert percentage timestamps into seconds; the +1 on the end
        # boundary makes the predicted percentage bin inclusive.
        for item in items:
            item['timestamp'][0] = item['timestamp'][0] * duration / 100
            item['timestamp'][1] = (item['timestamp'][1] + 1) * duration / 100
        pred[vid] = items
    pred_result = {'results': pred}
    # SODA captures story-level ordering; eval_dvc adds tIoU-based
    # localization plus language metrics (METEOR / CIDEr).
    metrics = eval_soda(pred_result, [gt_js], print_matrix=False)
    metrics.update(eval_dvc(pred_result, [gt_js],
                            tious=[0.3, 0.5, 0.7],
                            distances=[],
                            max_proposals_per_video=1000,
                            verbose=False,
                            no_lang_eval=False))
    print(f"Found {len(pred)} logs")
    # Keep only the headline metrics, scaled to percentages.
    metrics = {k: v.item() * 100 for k, v in metrics.items() if k in ['soda_c', 'METEOR', 'CIDEr']}
    return metrics