import os
import json
import argparse
import time
import sys
from typing import Dict, Any

from tqdm import tqdm
import torch
import cv2
import tempfile
import torch.backends.cudnn as cudnn
import decord
decord.bridge.set_bridge('torch')
import torch.multiprocessing as mp

# --- Worker Initialization for Multiprocessing Pool ---
worker_chat = None
worker_cfg = None


def init_worker():
    """Load one AffectGPT instance per pool worker and seed its RNGs."""
    global worker_chat, worker_cfg
    setup_seeds(42)
    worker_chat, worker_cfg = load_affectgpt_model()
    print(f"[Worker PID: {os.getpid()}] Model loaded successfully.")


sys.path.append('AffectGPT')
from my_affectgpt.common.config import Config
from my_affectgpt.common.registry import registry
from my_affectgpt.conversation.conversation_video import Chat

# --- Configuration ---
LEVEL_DIRS = ["level1", "level2", "level3"]
GENERIC_RESULT_PATTERN = "_result.json"


def get_media_type(file_path: str) -> str:
    """Classify a file as 'video' or 'image' by its extension."""
    ext = os.path.splitext(file_path)[1].lower()
    if ext in ['.mp4', '.avi', '.mov', '.mkv', '.webm']:
        return 'video'
    elif ext in ['.jpg', '.jpeg', '.png', '.bmp', '.gif', '.webp']:
        return 'image'
    else:
        raise ValueError(f"Unsupported file format: {ext}")


def setup_seeds(seed=42):
    """Fix all RNG seeds and disable cuDNN autotuning for reproducibility."""
    import random
    random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    cudnn.benchmark = False
    cudnn.deterministic = True


def load_affectgpt_model():
    """Build AffectGPT from its config and checkpoint and wrap it in a Chat interface."""
    print("Loading AffectGPT model...")
    import config
    config.PATH_TO_LLM['Qwen25'] = 'Qwen25'
    config.PATH_TO_VISUAL['CLIP_VIT_LARGE'] = 'CLIP_VIT_LARGE'
    config.PATH_TO_AUDIO['HUBERT_LARGE'] = 'HUBERT_LARGE'

    cfg_path = "CFG_PATH"

    class Args:
        """Minimal stand-in for the argparse namespace that Config expects."""
        def __init__(self):
            self.cfg_path = cfg_path
            self.options = ["inference.test_epoch=60"]

    args = Args()
    cfg = Config(args)
    model_cfg = cfg.model_cfg
    device = 'cuda:7'
    ckpt_path = "CKPT_PATH"
    model_cfg.ckpt_3 = ckpt_path
    model_cls = registry.get_model_class(model_cfg.arch)
    model = model_cls.from_config(model_cfg)
    model = model.to(device).eval()
    chat = Chat(model, model_cfg, device=device)
    print("AffectGPT model loaded!")
    return chat, cfg


def create_complete_dataset(cfg):
    """Build a minimal dataset wrapper that preprocesses frames, audio, and images for one sample."""
    from my_affectgpt.processors import BaseProcessor

    class CompleteDataset:
        def __init__(self, cfg):
            # Fall back to base processors unless the inference config names specific ones.
            self.vis_processor = BaseProcessor()
            self.img_processor = BaseProcessor()
            self.n_frms = 8
            inference_cfg = cfg.inference_cfg
            vis_processor_cfg = inference_cfg.get("vis_processor")
            img_processor_cfg = inference_cfg.get("img_processor")
            if vis_processor_cfg is not None:
                self.vis_processor = registry.get_processor_class(
                    vis_processor_cfg.train.name).from_config(vis_processor_cfg.train)
            if img_processor_cfg is not None:
                self.img_processor = registry.get_processor_class(
                    img_processor_cfg.train.name).from_config(img_processor_cfg.train)
            self.n_frms = cfg.model_cfg.vis_processor.train.n_frms

        def read_frame_face_audio_text(self, video_path, face_npy, audio_path, image_path):
            """Load whichever modalities exist; missing ones stay None so downstream steps can skip them."""
            sample_data = {}

            frame, raw_frame = None, None
            if video_path is not None:
                from my_affectgpt.processors.video_processor import load_video
                raw_frame, msg = load_video(video_path=video_path, n_frms=self.n_frms,
                                            height=224, width=224, sampling="uniform",
                                            return_msg=True)
                frame = self.vis_processor.transform(raw_frame)
            sample_data['frame'] = frame
            sample_data['raw_frame'] = raw_frame
            sample_data['face'] = None
            sample_data['raw_face'] = None

            audio, raw_audio = None, None
            if audio_path is not None and os.path.exists(audio_path):
                from my_affectgpt.models.ImageBind.data import load_audio, transform_audio
                raw_audio = load_audio([audio_path], "cpu", clips_per_video=8)[0]
                audio = transform_audio(raw_audio, "cpu")
            sample_data['audio'] = audio
            sample_data['raw_audio'] = raw_audio

            image, raw_image = None, None
            if image_path is not None and os.path.exists(image_path):
                from PIL import Image as PILImage
                raw_image = PILImage.open(image_path).convert('RGB')
                image = self.img_processor(raw_image)
            sample_data['image'] = image
            sample_data['raw_image'] = raw_image

            return sample_data

        def get_prompt_for_multimodal(self, face_or_frame, subtitle, user_message):
            """Prepend subtitle context to the user message when available."""
            prompt = user_message
            if subtitle:
                prompt = f"Context: {subtitle}\n{prompt}"
            return prompt

    return CompleteDataset(cfg)


def process_single_sample(chat, cfg, media_full_path, prompt_text):
    try:
        media_type = get_media_type(media_full_path)
        if media_type == 'image':
            # Wrap a still image as a short 30-frame video so it can pass
            # through the same video pipeline as real clips.
            temp_video = tempfile.mktemp(suffix='.mp4')
            img = cv2.imread(media_full_path)
            height, width, layers = img.shape
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            video_writer = cv2.VideoWriter(temp_video, fourcc, 1.0, (width, height))
            for _ in range(30):
                video_writer.write(img)
            video_writer.release()
            video_path, audio_path, image_path = temp_video, None, None
        else:
            video_path, audio_path, image_path = media_full_path, None, None

        dataset_cls = create_complete_dataset(cfg)
        sample_data = dataset_cls.read_frame_face_audio_text(video_path, None, audio_path, image_path)

        # Run each modality through the model's per-modality postprocessing hooks.
        _, audio_llms = chat.postprocess_audio(sample_data)
        _, frame_llms = chat.postprocess_frame(sample_data)
        _, face_llms = chat.postprocess_face(sample_data)
        _, image_llms = chat.postprocess_image(sample_data)
        img_list = {'audio': audio_llms, 'frame': frame_llms, 'face': face_llms,
                    'image': image_llms, 'multi': None}
        user_message = prompt_text.replace("", "").replace("