import os import json import base64 import argparse import time import re import traceback from datetime import datetime from functools import partial from openai import AzureOpenAI, OpenAI from volcenginesdkarkruntime import Ark import concurrent.futures from tqdm import tqdm # New system prompt for the agent AGENT_SYSTEM_PROMPT = """ You are an intelligent AI assistant specialized in video question answering. Your task is to answer a multiple-choice question based on a video. You must use the `get_frames_by_id` tool to request specific frames to view. You will be told the total number of frames available in the video (e.g., "The video has 1250 frames, numbered 1 to 1250."). Your strategy should be efficient: 1. Based on the task query, think about which part of the video will be related, and then get the frames of this part. If the query’s description is fairly general and you can’t effectively infer the temporal regions where the target visual evidence might appear, you can first uniformly sample some frames for analysis to identify the time intervals where the target visual evidence is likely to appear. 2. Analyze the retrieved frames and the user's question. 3. If you don't have enough information, form a hypothesis about where the answer might be and use the tool again to request more specific frames from that segment. 4. Continue this process of reasoning and tool use until you are confident in your answer. Avoid requesting all frames at once. 5. Please make sure that you find the relevant visual cues and then answer the question instead of guessing the answer. 6. You can access 10 frames at most in each tool call. Please note that if you have insufficient visual information at the beginning, you can first sample more frames uniformly to understand the video (e.g., sampling 10 frames per tool call). You can then gradually refine the subsequent steps and adopt a coarse-to-fine strategy overall. For example, the question is "What is the main subject of the video?" You can first sample 10 frames uniformly from the video (e.g., frame 100, 200, ..., 1200). After analyzing these frames, you might notice that the main subject is a person in the middle of the screen (between frame 500 and 600). You can then sample more frames from this region (e.g., frame 500, 520, ..., 590) to get more detailed information. Finally, you can reason based on the visual cues you have gathered and provide the final answer. This process might be multi-turn. After your reasoning, provide the final answer in a JSON block. The JSON object must contain a single key "answer" with the value being one of 'A', 'B', 'C', or 'D'. Remember that You can access 10 frames at most in each tool call. Your output should follow this format exactly: ```json {"answer": "X"} ``` Do not include any other text after the JSON block. """ # Tool schema for the get_frames_by_id function GET_FRAMES_TOOL_SCHEMA = { "type": "function", "function": { "name": "get_frames_by_id", "description": "Retrieves specific video frames by their numerical IDs. Use this to get visual information from the video.", "parameters": { "type": "object", "properties": { "frame_ids": { "type": "array", "items": {"type": "integer"}, "description": "A list of frame numbers to retrieve. You can access 10 frames at most in each tool call.", }, }, "required": ["frame_ids"], }, }, } def parse_arguments(): """ Parse command line arguments for evaluation configuration. """ parser = argparse.ArgumentParser( description="Video QA Evaluation Framework with Agentic Frame Selection (Refactored)" ) parser.add_argument( "--target-model", "-tm", type=str, required=True, help="Model to be evaluated (e.g., gpt-4o)", ) parser.add_argument( "--frames-path", "-fp", type=str, required=True, help="Absolute path to the base directory for video frames.", ) parser.add_argument( "--data-file", "-df", type=str, required=True, help="Absolute path to the JSON evaluation dataset.", ) parser.add_argument( "--max-retry-times", "-mr", type=int, default=10, help="Maximum retries for API calls.", ) parser.add_argument( "--pool-processes", "-pp", type=int, default=20, help="Number of parallel processes.", ) parser.add_argument( "--base_url", type=str, required=True, help="Azure OpenAI endpoint URL." ) parser.add_argument( "--api_key", type=str, required=True, help="Azure OpenAI API key." ) return parser.parse_args() def save_json_file(data, output_file): """Saves data to a JSON file.""" with open(output_file, "w", encoding="utf-8") as f: json.dump(data, f, indent=4) def extract_json_from_response(response): """Extracts a JSON object from a model's response string.""" if not response: return None match = re.search(r"```json\s*(\{.*?\})\s*```", response, re.DOTALL) if match: try: return json.loads(match.group(1)) except (json.JSONDecodeError, IndexError): return None return None def calculate_metrics(results): """Calculates accuracy and other metrics from evaluation results.""" # Filter out potential error results before calculating valid_results = [r for r in results if "error" not in r] total_samples = len(valid_results) if total_samples == 0: return { "total_samples": 0, "answered_samples": 0, "correct_answers": 0, "accuracy": 0.0, } answered_samples = sum( 1 for x in valid_results if x.get("model_answer") is not None ) correct_answers = sum(1 for x in valid_results if x.get("is_correct")) accuracy = correct_answers / answered_samples if answered_samples > 0 else 0.0 return { "total_samples": total_samples, "answered_samples": answered_samples, "correct_answers": correct_answers, "accuracy": accuracy, } def call_single_model(client, messages, model, item_id, max_retry_times, tools=None): """Makes a single API call with retry logic and tool support.""" if "o4" in model: params = {"model": model, "messages": messages, "max_tokens": 65535} elif "Qwen" in model: params = { "model": model, "messages": messages, "max_tokens": 2048, "temperature": 0, } else: params = {"model": model, "messages": messages, "max_tokens": 32768} if tools: params["tools"] = tools params["tool_choice"] = "auto" retry_times = 0 while retry_times < max_retry_times: try: completion = client.chat.completions.create(**params) return completion.choices[0].message except Exception as e: retry_times += 1 print( f"API Error for item {item_id}: {str(e)}. Retrying ({retry_times}/{max_retry_times})..." ) if retry_times == max_retry_times: # Instead of writing to a file here, we'll let the worker return the error raise e # Reraise the exception to be caught by the worker's main try-except block time.sleep(5) def get_frames_by_id(frame_ids: list, all_frame_paths: list): """Tool implementation: Retrieves and formats frames based on a list of IDs.""" retrieved_frames = [] frame_map = { int(re.search(r"frame_(\d+)\.jpg", os.path.basename(p)).group(1)): p for p in all_frame_paths if re.search(r"frame_(\d+)\.jpg", os.path.basename(p)) } for fid in frame_ids: path = frame_map.get(fid) if path and os.path.exists(path): b64_image = encode_image(path) retrieved_frames.append( { "type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{b64_image}"}, } ) return retrieved_frames def evaluate_single_item_agentic( data_item, all_frame_paths, target_model, api_key, base_url, max_retry_times ): """Evaluates a single item using an agentic loop for dynamic frame selection.""" if "ark" in base_url: client = Ark( base_url=base_url, api_key=api_key, ) elif "aliyun" in base_url or "127.0.0.1" in base_url: client = OpenAI(api_key=api_key, base_url=base_url) else: client = AzureOpenAI( api_version="2023-05-15", api_key=api_key, azure_endpoint=base_url ) tools = [GET_FRAMES_TOOL_SCHEMA] available_functions = {"get_frames_by_id": get_frames_by_id} total_frames = len(all_frame_paths) minutes = data_item["video_info"]["duration_minutes"] seconds = int(minutes * 60) initial_prompt = ( f"The video has {total_frames} frames, numbered 1 to {total_frames}. This video is {seconds} seconds long. " f"Please answer the following question:\n{data_item['question']}" ) messages = [ {"role": "system", "content": AGENT_SYSTEM_PROMPT}, {"role": "user", "content": initial_prompt}, ] response_content = None max_tool_calls = 10 for i in range(max_tool_calls): response_message = call_single_model( client, messages, target_model, data_item["key"], max_retry_times, tools=tools, ) if response_message is None: return None messages.append(response_message.model_dump()) if response_message.tool_calls: for tool_call in response_message.tool_calls: function_name = tool_call.function.name function_to_call = available_functions.get(function_name) if function_to_call: function_args = json.loads(tool_call.function.arguments) retrieved_frames = function_to_call( **function_args, all_frame_paths=all_frame_paths ) tool_response_content = [ { "type": "text", "text": f"Here are the frames you requested (IDs: {function_args.get('frame_ids', [])}).", } ] tool_response_content.extend(retrieved_frames) messages.append( { "tool_call_id": tool_call.id, "role": "tool", "name": function_name, "content": json.dumps( { "status": "success", "retrieved_frame_count": len(retrieved_frames), } ), } ) messages.append({"role": "user", "content": tool_response_content}) else: response_content = response_message.content break if response_content is None and response_message and response_message.tool_calls: print( f"\nMax tool calls reached for item {data_item['key']}. Forcing a final answer." ) final_prompt = "You have reached the maximum number of tool calls. Please provide a final answer in the specified JSON format based on the information you have gathered so far." messages.append({"role": "user", "content": final_prompt}) final_response_message = call_single_model( client, messages, target_model, data_item["key"], max_retry_times, tools=None, ) if final_response_message: messages.append(final_response_message) response_content = final_response_message.content elif response_content is None and response_message: response_content = response_message.content is_correct = False model_answer_cleaned = None parsed_json = extract_json_from_response(response_content) if parsed_json and "answer" in parsed_json: model_answer_cleaned = str(parsed_json["answer"]).strip().upper() gold_answer = data_item["answer"].strip().upper() if model_answer_cleaned == gold_answer: is_correct = True return { **data_item, "agent_conversation": [ msg.model_dump() if hasattr(msg, "model_dump") else msg for msg in messages ], "model_reasoning_and_answer": response_content, "model_answer": model_answer_cleaned, "is_correct": is_correct, } def encode_image(image_path): """Encodes an image file to a base64 string.""" with open(image_path, "rb") as image_file: return base64.b64encode(image_file.read()).decode("utf-8") def process_single_data(data_item, args): """ Main processing function for a single video. This function is executed by each worker process. It is self-contained. """ item_key = data_item["key"] try: specific_frames_path = os.path.join(args.frames_path, item_key) if not os.path.isdir(specific_frames_path): raise FileNotFoundError(f"Frame directory not found for key '{item_key}'") all_frame_paths = sorted( [ os.path.join(specific_frames_path, f) for f in os.listdir(specific_frames_path) if f.endswith(".jpg") ], key=lambda x: int(re.search(r"frame_(\d+)\.jpg", x).group(1)), ) if not all_frame_paths: raise FileNotFoundError(f"No frames found for key '{item_key}'") # The core evaluation logic is called here result = evaluate_single_item_agentic( data_item, all_frame_paths, args.target_model, args.api_key, args.base_url, args.max_retry_times, ) return result except Exception as e: # If any error occurs, catch it and return an error dictionary. # This prevents the worker process from crashing and allows the main # process to log the error gracefully. print(f"\nCRITICAL ERROR on key {item_key}: {str(e)}") traceback.print_exc() return { "key": item_key, "uid": data_item.get("uid"), "error": str(e), "traceback": traceback.format_exc(), } def load_test_data(json_file): """Loads the evaluation data from a JSON file.""" try: with open(json_file, "r", encoding="utf-8") as f: return json.load(f) except FileNotFoundError: print(f"Error: Data file not found: {json_file}") exit(1) except json.JSONDecodeError: print(f"Error: Malformed JSON in {json_file}") exit(1) def main(): """Main function to orchestrate the evaluation framework.""" args = parse_arguments() print("--- Agentic Video QA Evaluation (Refactored) ---") print(f"Target Model: {args.target_model}") print(f"Frames Base Path: {args.frames_path}") print(f"Data File: {args.data_file}") model_name_safe = args.target_model.replace("/", "_") data_filename_base = os.path.splitext(os.path.basename(args.data_file))[0] output_prefix = f"{model_name_safe}_{data_filename_base}_agent_results" results_output_file = f"{output_prefix}.json" metrics_output_file = f"{output_prefix}_metrics.json" error_log_file = f"{output_prefix}_errors.log" with open(error_log_file, "a", encoding="utf-8") as f: f.write( f"\n=== Log Session Started at {datetime.now()} for {args.target_model} ===\n" ) all_test_data = load_test_data(args.data_file) completed_ids = set() existing_results = [] if os.path.exists(results_output_file): try: with open(results_output_file, "r", encoding="utf-8") as f: existing_results = json.load(f) if isinstance(existing_results, list): completed_ids = { item["uid"] for item in existing_results if "uid" in item } print( f"Found {len(completed_ids)} completed tasks in '{results_output_file}'. Resuming..." ) else: existing_results = [] except (json.JSONDecodeError, IOError) as e: print(f"Warning: Could not read results file: {e}. Starting fresh.") existing_results = [] tasks_to_process = [ item for item in all_test_data if item.get("uid") not in completed_ids ] if not tasks_to_process: print("All tasks are already completed. Calculating final metrics.") final_metrics = calculate_metrics(existing_results) save_json_file(final_metrics, metrics_output_file) print(f"\nFinal metrics saved to: {metrics_output_file}") print(json.dumps(final_metrics, indent=4)) return print( f"Total tasks: {len(all_test_data)}. Completed: {len(completed_ids)}. To process: {len(tasks_to_process)}." ) # This list will hold all results, both old and new. all_results = list(existing_results) # Using ProcessPoolExecutor for robust, modern multiprocessing. with concurrent.futures.ProcessPoolExecutor( max_workers=args.pool_processes ) as executor: # partial is used to pass the constant `args` to each call of process_single_data func = partial(process_single_data, args=args) # executor.map processes the tasks in parallel. # tqdm provides a progress bar. results_iterator = executor.map(func, tasks_to_process) for result in tqdm( results_iterator, total=len(tasks_to_process), desc="Processing Videos" ): if result: if "error" in result: # Log errors centrally with open(error_log_file, "a", encoding="utf-8") as f: f.write(f"Error on key {result.get('key', 'N/A')}:\n") f.write(f" Error: {result['error']}\n") f.write(f" Traceback: {result['traceback']}\n---\n") # Append every result (success or error) to the main list all_results.append(result) # Periodically save results for resilience if len(all_results) % 10 == 0: save_json_file(all_results, results_output_file) print("\n\nProcessing complete.") # Final save of all combined results save_json_file(all_results, results_output_file) print(f"Detailed results saved to: {results_output_file}") # Calculate and save final metrics final_metrics = calculate_metrics(all_results) save_json_file(final_metrics, metrics_output_file) print(f"\nMetrics saved to: {metrics_output_file}") print(json.dumps(final_metrics, indent=4)) if __name__ == "__main__": # To run this script, you'll need to install tqdm: # pip install tqdm main()