import os

os.environ.setdefault("CUDA_VISIBLE_DEVICES", "0")
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")

import re
import json
import argparse
import time
import sys
import signal
import contextlib
from typing import List, Dict, Any

import numpy as np
import torch
import torch.backends.cudnn as cudnn
from tqdm import tqdm
from PIL import Image


class TimeoutError(Exception):
    """Raised when a model call exceeds its time budget (shadows the builtin name)."""
    pass


@contextlib.contextmanager
def timeout(seconds: int, error_message: str = "Function call timed out"):
    """Abort the wrapped block after `seconds` via SIGALRM (Unix only)."""
    def _handle_timeout(signum, frame):
        raise TimeoutError(error_message)

    signal.signal(signal.SIGALRM, _handle_timeout)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)


# Make the Emotion-LLaMA project importable (placeholder path, set by the user).
EMOTION_LLAMA_PATH = "PATH_TO_EMOTION_LLAMA_PROJECT"
if EMOTION_LLAMA_PATH not in sys.path:
    sys.path.append(EMOTION_LLAMA_PATH)

from minigpt4.common.config import Config
from minigpt4.common.registry import registry
from minigpt4.conversation.conversation import Conversation, Chat, SeparatorStyle
from minigpt4.datasets.builders import *  # noqa
from minigpt4.models import *  # noqa
from minigpt4.processors import *  # noqa
from minigpt4.runners import *  # noqa
from minigpt4.tasks import *  # noqa

# --- Configuration ---
LEVEL_DIRS = ["level1", "level2", "level3"]
GENERIC_RESULT_PATTERN = "_result.json"
RESULT_SUFFIX = "_emotionllama_result.json"

# Prompt-template fragments stripped before the prompt is sent to the model.
# NOTE: the angle-bracket placeholder names were lost from the original file;
# the tags below are assumed from the standard MiniGPT-4 / Emotion-LLaMA
# prompt format.
_TAGS = [
    r"\s*\[INST\]\s*",
    r"\[/INST\]",
    r"<Img>.*?</Img>",
    r"<video>.*?</video>",
    r"<feature>.*?</feature>",
    r"<ImageHere>",
    r"<VideoHere>",
    r"<image>",
    r"<audio>",
    r"<s>",
    r"</s>",
]
_TAGS_RE = re.compile("|".join(_TAGS), flags=re.IGNORECASE | re.DOTALL)


def clean_prompt_text(s: str) -> str:
    """Strip template tags and ensure the JSON-only answer instruction is present."""
    s = _TAGS_RE.sub("", s).strip()
    tail = '\nRespond ONLY with: {"emotion":"neutral|negative|positive"}'
    if "Respond ONLY with" not in s:
        s += tail
    return s


_JSON_RE = re.compile(
    r'\{\s*"emotion"\s*:\s*"(neutral|negative|positive)"\s*\}', re.IGNORECASE
)


def extract_emotion_json(text: str) -> str:
    """Normalize model output to a canonical {"emotion": ...} JSON string."""
    m = _JSON_RE.search(text)
    if m:
        return json.dumps({"emotion": m.group(1).lower()}, ensure_ascii=False)
    # Fallback: keyword match when the model did not emit well-formed JSON.
    low = text.lower()
    if "negative" in low:
        return json.dumps({"emotion": "negative"}, ensure_ascii=False)
    if "positive" in low:
        return json.dumps({"emotion": "positive"}, ensure_ascii=False)
    return json.dumps({"emotion": "neutral"}, ensure_ascii=False)


IMG_EXTS = {".jpg", ".jpeg", ".png", ".bmp", ".webp", ".gif"}
VID_EXTS = {".mp4", ".avi", ".mov", ".mkv", ".webm"}


def get_first_frame_pil(video_path: str) -> Image.Image:
    """Decode the first frame of a video and return it as an RGB PIL image."""
    import cv2

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise IOError(f"Cannot open video file: {video_path}")
    ret, frame = cap.read()
    cap.release()
    if not ret:
        raise IOError(f"Cannot read frame from video file: {video_path}")
    return Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))


def get_media_type(file_path: str) -> str:
    ext = os.path.splitext(file_path)[1].lower()
    if ext in VID_EXTS:
        return "video"
    if ext in IMG_EXTS:
        return "image"
    return "unknown"


@torch.inference_mode()
def process_single_sample(chat: Chat, media_full_path: str, prompt_text: str) -> str:
    try:
        # Fresh single-turn conversation state in the MiniGPT-v2 / Emotion-LLaMA format.
        chat_state = Conversation(
            system="",
            roles=("[INST] ", " [/INST]"),
            messages=[],
            offset=2,
            sep_style=SeparatorStyle.SINGLE,
            sep="",
        )
        img_list = []

        media_type = get_media_type(media_full_path)
        if media_type == "unknown":
            raise ValueError(f"Unsupported media type: {media_full_path}")

        if media_type == "video":
            # Use the first frame as a single-image proxy for the clip.
            pil_image = get_first_frame_pil(media_full_path)
        else:  # image
            pil_image = Image.open(media_full_path).convert("RGB")

        chat.upload_img(pil_image, chat_state, img_list)
        if len(img_list) > 0:
            chat.encode_img(img_list)

        # The original inline replace() chain stripped the same placeholder tags;
        # clean_prompt_text() covers that via _TAGS_RE.
        clean_prompt = clean_prompt_text(prompt_text)