| import torch |
| import os |
| import csv |
| from tqdm import tqdm |
| from imagebind import data |
| from imagebind.models import imagebind_model |
| from imagebind.models.imagebind_model import ModalityType |
| from pathlib import Path |
| import pandas as pd |
| import random |
| import torch.nn.functional as F |
|
|
| |
| device = "" |
|
|
| |
# Load the pretrained ImageBind (huge) checkpoint in inference mode on the
# configured device; abort the whole script if anything goes wrong, since
# nothing below can run without the model.
try:
    # Module.eval() and Module.to() both return the module itself, so the
    # chained form binds the same object the original three statements did.
    model = imagebind_model.imagebind_huge(pretrained=True).eval().to(device)
except Exception as load_err:
    print(f"Error loading the model: {load_err}")
    exit(1)
|
|
| |
# Input roots (placeholders — must be filled in before running):
#   audio_folder: flat directory of <file_id>.flac audio files
#   video_folder: one sub-directory per file_id containing extracted frames
audio_folder = ""
video_folder = ""
|
|
| |
# Metadata CSV (placeholder path — set before running). It must provide a
# 'file_id' column; every downstream pairing step iterates over it.
csv_file = ''
try:
    df = pd.read_csv(csv_file)
except Exception as read_err:
    print(f"Error reading CSV file {csv_file}: {read_err}")
    exit(1)
|
|
| |
# Destination for (file_id, similarity) rows; appended to incrementally so
# interrupted runs can resume (see processed_files below).
output_csv = 'output.csv'
# Log of missing audio files / video frames discovered during pairing.
error_log_file = 'test.log'
|
|
| |
# Resume support: collect the ids already written to the output CSV so a
# rerun skips pairs scored by a previous (possibly interrupted) run.
processed_files = set()
if os.path.exists(output_csv):
    try:
        # newline='' is the documented way to open CSV files for the csv module.
        with open(output_csv, mode='r', newline='') as file:
            reader = csv.reader(file)
            for row in reader:
                # csv.reader yields [] for blank lines; the original row[0]
                # would raise IndexError there.
                if row:
                    processed_files.add(row[0])
    except Exception as e:
        print(f"Error reading the output CSV file {output_csv}: {e}")
        exit(1)
|
|
| |
# Build parallel lists of matched (audio, video-frame) paths for every
# file_id in the metadata, skipping already-scored ids and logging missing
# inputs instead of crashing.
paired_audio_paths = []
paired_video_paths = []

with open(error_log_file, mode='a') as error_log:
    for file_id in df['file_id']:
        # Resume key fix: the output CSV stores the file_id (the frame's
        # parent directory name, see the writer loop below), so compare
        # against that. The original also compared the frame's own basename
        # ("000040"), which is identical for every pair and could only
        # produce false-positive skips.
        if str(file_id) in processed_files:
            continue

        audio_file = f"{file_id}.flac"
        # Hard-coded representative frame: each video directory is assumed
        # to contain a frame named 000040.jpg — TODO confirm against the
        # frame-extraction step. (Was a needless f-string with no fields.)
        video_file = "000040.jpg"

        video_path = os.path.join(video_folder, file_id, video_file)
        audio_path = os.path.join(audio_folder, audio_file)

        # Log missing inputs so the rest of the dataset is still processed.
        if not os.path.exists(video_path):
            # Message fix: video_path is a frame file, not a directory.
            error_log.write(f"Video file not found: {video_path}\n")
            continue

        if not os.path.exists(audio_path):
            error_log.write(f"Audio file not found: {audio_path}\n")
            continue

        paired_audio_paths.append(audio_path)
        paired_video_paths.append(video_path)

print(f"Successfully matched {len(paired_audio_paths)} audio-video pairs.")
|
|
| |
# Pairs are scored in fixed-size batches.
batch_size = 16
# Ceiling division. The original `// batch_size + 1` yielded a trailing
# empty batch whenever the pair count was an exact multiple of batch_size
# (and one empty batch for zero pairs), which the data loaders would then
# choke on.
num_batches = (len(paired_video_paths) + batch_size - 1) // batch_size
|
|
| |
# Score each batch of (video frame, audio) pairs with ImageBind and append
# per-pair cosine similarities to the output CSV as they are computed, so
# an interrupted run loses at most the current batch.
try:
    with open(output_csv, mode='a', newline='') as file:
        writer = csv.writer(file)

        # Inference only — disable autograd to save memory and time.
        with torch.no_grad():
            for i in tqdm(range(num_batches), desc="Processing Batches"):
                start_idx = i * batch_size
                # Clamp the final batch to the number of remaining pairs.
                end_idx = min((i + 1) * batch_size, len(paired_video_paths))

                video_batch_paths = paired_video_paths[start_idx:end_idx]
                audio_batch_paths = paired_audio_paths[start_idx:end_idx]

                try:
                    # Preprocess raw files into model-ready tensors on `device`.
                    video_batch = data.load_and_transform_vision_data(video_batch_paths, device)
                    audio_batch = data.load_and_transform_audio_data(audio_batch_paths, device)
                except RuntimeError as e:
                    # A bad file poisons only its batch, not the whole run.
                    print(f"Error loading video data in batch {i}: {e}")
                    continue
                except Exception as e:
                    print(f"Unexpected error in batch {i}: {e}")
                    continue

                try:
                    # One forward pass yields embeddings for both modalities.
                    inputs = {
                        ModalityType.VISION: video_batch,
                        ModalityType.AUDIO: audio_batch,
                    }

                    embeddings = model(inputs)

                    # Row-wise cosine similarity between matched pairs,
                    # scaled by 10 — presumably to spread the score range;
                    # confirm the factor with downstream consumers.
                    audio_embedding = embeddings[ModalityType.AUDIO]
                    video_embedding = embeddings[ModalityType.VISION]
                    batch_similarity = F.cosine_similarity(video_embedding, audio_embedding) * 10

                    # The frame's parent directory name is the file_id; one
                    # (file_id, score) row is written per pair.
                    for video_path, similarity in zip(video_batch_paths, batch_similarity.tolist()):
                        video_name = os.path.basename(os.path.dirname(video_path))
                        writer.writerow([video_name, similarity])
                except Exception as e:
                    print(f"Error processing batch {i}: {e}")
                    continue
except Exception as e:
    print(f"Error writing to the output CSV file {output_csv}: {e}")
    exit(1)
|
|
# Final summary for the operator.
print(f"Similarity scores have been saved to {output_csv}.")
print(f"Any missing files have been logged in {error_log_file}.")