| | |
| | """ |
| | VBench Leaderboard Access and Model Recommendation Module |
| | |
| | This module provides functionality to: |
| | 1. Access the VBench leaderboard from Hugging Face |
| | 2. Parse and analyze model performance data |
| | 3. Recommend models based on user evaluation questions |
| | """ |
| |
|
| | import requests |
| | import pandas as pd |
| | import numpy as np |
| | from typing import Dict, List, Tuple, Optional, Any |
| | import json |
| | import re |
| | from dataclasses import dataclass |
| | from enum import Enum |
| | import logging |
| |
|
| | |
# Module-level logging: INFO and above to the root handler; the module-named
# logger is used by all classes/functions below.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
| |
|
| |
|
class VBenchDimension(Enum):
    """The sixteen VBench evaluation dimensions.

    Each member's value is a ``(key, display_name, description)`` triple:
    ``key`` is the machine-readable identifier used for column lookups and
    weighting, ``display_name`` is the human-readable label used in reports,
    and ``description`` summarizes what the dimension measures.
    """

    SUBJECT_CONSISTENCY = ("subject_consistency", "Subject Consistency",
                           "consistency of subject appearance throughout video")
    BACKGROUND_CONSISTENCY = ("background_consistency", "Background Consistency",
                              "consistency of background scene throughout video")
    TEMPORAL_FLICKERING = ("temporal_flickering", "Temporal Flickering",
                           "absence of flickering artifacts over time")
    MOTION_SMOOTHNESS = ("motion_smoothness", "Motion Smoothness",
                         "smoothness and naturalness of motion")
    DYNAMIC_DEGREE = ("dynamic_degree", "Dynamic Degree",
                      "level of motion and dynamic content")
    AESTHETIC_QUALITY = ("aesthetic_quality", "Aesthetic Quality",
                         "overall aesthetic appeal and visual quality")
    IMAGING_QUALITY = ("imaging_quality", "Imaging Quality",
                       "clarity and absence of distortions")
    OBJECT_CLASS = ("object_class", "Object Class",
                    "accuracy in generating specific object classes")
    MULTIPLE_OBJECTS = ("multiple_objects", "Multiple Objects",
                        "ability to generate multiple distinct objects")
    HUMAN_ACTION = ("human_action", "Human Action",
                    "accuracy of human action representation")
    COLOR = ("color", "Color",
             "accuracy of color generation matching prompts")
    SPATIAL_RELATIONSHIP = ("spatial_relationship", "Spatial Relationship",
                            "accuracy of spatial arrangements")
    SCENE = ("scene", "Scene",
             "accuracy of scene representation")
    APPEARANCE_STYLE = ("appearance_style", "Appearance Style",
                        "consistency of visual style")
    TEMPORAL_STYLE = ("temporal_style", "Temporal Style",
                      "consistency of temporal effects and camera motion")
    OVERALL_CONSISTENCY = ("overall_consistency", "Overall Consistency",
                           "overall alignment with input prompt")

    def __init__(self, dim_key: str, label: str, blurb: str):
        # Enum unpacks each member's value tuple into these parameters,
        # exposing the parts as convenient attributes on the member.
        self.key = dim_key
        self.display_name = label
        self.description = blurb
| |
|
| |
|
@dataclass
class ModelScore:
    """Scores for a single model on the VBench leaderboard.

    Fields:
        model_name: Display name of the model.
        dimension_scores: Per-dimension scores keyed by dimension key
            (e.g. "motion_smoothness").
        overall_score: Aggregate score across dimensions.
        metadata: Optional extra information about the model; ``None``
            when no metadata is available.
    """
    model_name: str
    dimension_scores: Dict[str, float]
    overall_score: float
    # Fix: annotated Optional — the default is None, so the previous
    # `Dict[str, Any] = None` annotation was inconsistent with the value.
    metadata: Optional[Dict[str, Any]] = None
| |
|
| |
|
class VBenchLeaderboard:
    """Interface to the VBench leaderboard hosted on Hugging Face.

    Fetches leaderboard tables from the ``Vchitect/VBench_Leaderboard``
    Gradio Space (falling back to deterministic synthetic data when the
    Space is unreachable), maps free-form user questions onto VBench
    evaluation dimensions, and ranks models by a query-weighted score.
    """

    def __init__(self, cache_duration: int = 18000):
        """
        Initialize VBench Leaderboard interface.

        Args:
            cache_duration: Duration in seconds for which fetched
                leaderboard data is reused before refetching.
        """
        self.cache_duration = cache_duration
        self.cached_data = None        # last successfully fetched DataFrame
        self.cache_timestamp = None    # time.time() of that fetch
        self._cached_type = None       # leaderboard_type the cache belongs to
        self.dimension_weights = self._initialize_dimension_weights()
        self.client = None             # lazily created gradio_client.Client

    def _initialize_dimension_weights(self) -> Dict[str, float]:
        """Return a uniform default weight of 1.0 for every VBench dimension."""
        return {dim.key: 1.0 for dim in VBenchDimension}

    def _get_client(self):
        """Create (on first use) and return the Gradio client for the Space."""
        if self.client is None:
            # Imported lazily so the module remains usable without
            # gradio_client installed, as long as only synthetic data is used.
            from gradio_client import Client
            self.client = Client("Vchitect/VBench_Leaderboard")
        return self.client

    def list_api_endpoints(self):
        """Print and return the API endpoints exposed by the Gradio Space."""
        client = self._get_client()
        api_info = client.view_api()
        print("Available API endpoints:")
        print("=" * 60)
        print(api_info)
        return api_info

    def fetch_leaderboard_data(self, force_refresh: bool = False, leaderboard_type: str = "baseline") -> pd.DataFrame:
        """
        Fetch leaderboard data from Hugging Face Space using Gradio Client.

        Results are cached for ``cache_duration`` seconds. The cache is tagged
        with the leaderboard type it was fetched for (fix: previously the
        cache ignored ``leaderboard_type``, so requesting a different table
        within the cache window silently returned the wrong data).

        Args:
            force_refresh: Force refresh even if cache is valid.
            leaderboard_type: Type of leaderboard to fetch
                ("baseline", "quality", "semantic", "i2v", "long").

        Returns:
            DataFrame containing model scores; synthetic data on any failure.
        """
        import time

        cache_valid = (
            not force_refresh
            and self.cached_data is not None
            and self.cache_timestamp is not None
            and self._cached_type == leaderboard_type
            and time.time() - self.cache_timestamp < self.cache_duration
        )
        if cache_valid:
            logger.info("Using cached leaderboard data")
            return self.cached_data

        try:
            logger.info(f"Fetching {leaderboard_type} leaderboard data from Hugging Face Space...")
            client = self._get_client()

            # Endpoint names exposed by the Space; unknown types fall back
            # to the baseline table.
            api_endpoints = {
                "baseline": "/get_baseline_df",
                "quality": "/get_baseline_df_quality",
                "semantic": "/get_baseline_df_2",
                "i2v": "/get_baseline_df_i2v",
                "long": "/get_baseline_df_long"
            }
            api_name = api_endpoints.get(leaderboard_type, "/get_baseline_df")

            result = client.predict(api_name=api_name)

            if result and isinstance(result, dict):
                # Gradio dataframe payloads look like
                # {"headers": [...], "data": [[...], ...]}.
                headers = result.get('headers', [])
                data = result.get('data', [])

                if headers and data:
                    df = pd.DataFrame(data, columns=headers)
                    self.cached_data = df
                    self.cache_timestamp = time.time()
                    self._cached_type = leaderboard_type
                    logger.info(f"Successfully fetched {len(df)} models from {leaderboard_type} leaderboard")
                    return df
                logger.warning("Gradio returned empty data, using synthetic data")
                return self._generate_synthetic_data()

            logger.warning("Gradio client returned unexpected format, using synthetic data")
            return self._generate_synthetic_data()

        except Exception as e:
            logger.error(f"Error fetching leaderboard: {e}, using synthetic data")
            return self._generate_synthetic_data()

    def _generate_synthetic_data(self) -> pd.DataFrame:
        """Generate deterministic synthetic leaderboard data for demonstration.

        Columns use the underscore dimension keys (e.g. "subject_consistency")
        plus "overall" and "model". A fixed RNG seed makes output reproducible.
        """
        models = [
            "Latte-1", "ModelScope", "VideoCrafter2", "VideoCrafter-0.9",
            "CogVideo", "Show-1", "Gen-2", "Pika", "AnimateDiff",
            "SVD", "LaVie", "VideoLDM", "MagicVideo", "Make-A-Video"
        ]

        np.random.seed(42)
        data = []

        for model in models:
            scores = {}

            for dim in VBenchDimension:
                # Random base score, with small biases so well-known models
                # plausibly lead on their signature dimensions.
                base_score = np.random.uniform(0.65, 0.95)
                if "Latte" in model and dim.key in ["aesthetic_quality", "imaging_quality"]:
                    base_score += 0.05
                elif "VideoCrafter2" in model and dim.key in ["motion_smoothness", "temporal_style"]:
                    base_score += 0.08
                elif "ModelScope" in model and dim.key in ["subject_consistency", "background_consistency"]:
                    base_score += 0.06

                scores[dim.key] = min(base_score, 1.0)

            scores["overall"] = np.mean(list(scores.values()))
            scores["model"] = model
            data.append(scores)

        return pd.DataFrame(data)

    def parse_user_query(self, query: str) -> Dict[str, float]:
        """
        Parse user query to identify relevant dimensions and weights.

        Each keyword hit on a dimension adds 1.0 to that dimension's weight;
        when no keyword matches, all dimensions get equal weight. Weights are
        normalized to sum to 1.0.

        Args:
            query: User's evaluation question.

        Returns:
            Dictionary of dimension keys and their normalized weights.
        """
        query_lower = query.lower()
        dimension_weights = {}

        # Keyword triggers for each dimension; matching is a simple
        # case-insensitive substring test.
        keyword_mapping = {
            "subject_consistency": ["subject", "character", "person", "object consistency", "subject consistent"],
            "background_consistency": ["background", "scene consistent", "environment", "backdrop"],
            "temporal_flickering": ["flicker", "artifact", "temporal artifact", "jitter"],
            "motion_smoothness": ["smooth", "motion", "movement", "fluid", "natural motion"],
            "dynamic_degree": ["dynamic", "action", "movement", "static", "motion level"],
            "aesthetic_quality": ["aesthetic", "beautiful", "quality", "visual appeal", "artistic"],
            "imaging_quality": ["clear", "sharp", "distortion", "noise", "blur", "quality"],
            "object_class": ["object", "class", "generate specific", "accurate object"],
            "multiple_objects": ["multiple", "several", "many objects", "two objects"],
            "human_action": ["human", "person", "action", "activity", "behavior"],
            "color": ["color", "hue", "saturation", "vibrant", "colorful"],
            "spatial_relationship": ["spatial", "position", "layout", "arrangement", "relative position"],
            "scene": ["scene", "environment", "setting", "location", "place"],
            "appearance_style": ["style", "artistic style", "visual style", "oil painting", "watercolor"],
            "temporal_style": ["camera", "temporal", "time", "camera motion", "zoom", "pan"],
            "overall_consistency": ["overall", "prompt", "alignment", "follow", "consistency"]
        }

        for dim_key, keywords in keyword_mapping.items():
            weight = 0.0
            for keyword in keywords:
                if keyword in query_lower:
                    weight += 1.0

            if weight > 0:
                dimension_weights[dim_key] = weight

        # Fall back to a uniform weighting when the query mentions nothing
        # we recognize, so every model can still be ranked.
        if not dimension_weights:
            logger.info("No specific dimensions identified, using all dimensions equally")
            for dim in VBenchDimension:
                dimension_weights[dim.key] = 1.0

        total_weight = sum(dimension_weights.values())
        if total_weight > 0:
            for key in dimension_weights:
                dimension_weights[key] /= total_weight

        return dimension_weights

    @staticmethod
    def _coerce_score(value) -> Optional[float]:
        """Convert a raw leaderboard cell into a float score.

        Percent strings like "85.3%" become 0.853; numeric strings and
        numbers pass through unchanged; ``None`` becomes 0.0 (matching the
        original behavior); unparseable values yield ``None``.
        """
        try:
            if isinstance(value, str):
                if value.endswith('%'):
                    return float(value.rstrip('%')) / 100.0
                return float(value)
            return float(value) if value is not None else 0.0
        except (ValueError, TypeError):
            return None

    def recommend_model(self,
                        query: str,
                        top_k: int = 3,
                        min_score_threshold: float = 0.0) -> List[Tuple[str, float, Dict]]:
        """
        Recommend models based on user query.

        Args:
            query: User's evaluation question.
            top_k: Number of top models to recommend.
            min_score_threshold: Minimum weighted score for inclusion.

        Returns:
            List of tuples (model_name, weighted_score, dimension_scores),
            sorted by weighted score, highest first.
        """
        df = self.fetch_leaderboard_data()

        dimension_weights = self.parse_user_query(query)
        logger.info(f"Identified dimension weights: {dimension_weights}")

        model_scores = []

        for _, row in df.iterrows():
            # The live table names models in a "Model Name (clickable)"
            # column; the synthetic table uses a plain "model" column.
            # str() guards against non-string cells (fix: .startswith would
            # raise on a non-string value).
            model_name = str(row.get("Model Name (clickable)", row.get("model", "Unknown")))
            if model_name.startswith("[") and "](" in model_name:
                # Strip the markdown link wrapper: "[name](url)" -> "name".
                model_name = model_name.split("]")[0][1:]

            weighted_score = 0.0
            dimension_scores = {}

            for dim_key, weight in dimension_weights.items():
                # The live leaderboard spells dimensions with spaces, the
                # synthetic table with underscores. Fix: previously only the
                # spaced spelling was tried, so synthetic data never yielded
                # any dimension scores.
                for col_name in (dim_key, dim_key.replace('_', ' ')):
                    if col_name in row:
                        score = self._coerce_score(row[col_name])
                        if score is not None:
                            weighted_score += score * weight
                            dimension_scores[dim_key] = score
                        break

            if weighted_score >= min_score_threshold:
                model_scores.append((model_name, weighted_score, dimension_scores))

        model_scores.sort(key=lambda x: x[1], reverse=True)
        return model_scores[:top_k]

    def generate_recommendation_report(self,
                                       query: str,
                                       recommendations: List[Tuple[str, float, Dict]]) -> str:
        """
        Generate a detailed recommendation report.

        Args:
            query: User's evaluation question.
            recommendations: List of recommended models as produced by
                :meth:`recommend_model`.

        Returns:
            Formatted recommendation report.
        """
        # One-time lookup table instead of scanning the enum per dimension.
        display_names = {dim.key: dim.display_name for dim in VBenchDimension}

        report = []
        report.append("=" * 60)
        report.append("VBench Model Recommendation Report")
        report.append("=" * 60)
        report.append(f"\nQuery: {query}\n")

        dimension_weights = self.parse_user_query(query)
        if dimension_weights:
            report.append("Relevant Evaluation Dimensions:")
            for dim_key, weight in sorted(dimension_weights.items(), key=lambda x: x[1], reverse=True):
                if dim_key in display_names:
                    report.append(f" • {display_names[dim_key]}: {weight:.2%} weight")

        report.append(f"\nTop {len(recommendations)} Recommended Models:")
        report.append("-" * 40)

        for i, (model_name, score, dim_scores) in enumerate(recommendations, 1):
            report.append(f"\n{i}. {model_name}")
            report.append(f" Overall Score: {score:.4f}")

            if dim_scores:
                report.append(" Dimension Scores:")
                for dim_key, dim_score in sorted(dim_scores.items(), key=lambda x: x[1], reverse=True):
                    if dim_key in display_names:
                        report.append(f" • {display_names[dim_key]}: {dim_score:.4f}")

        report.append("\n" + "=" * 60)
        return "\n".join(report)
| |
|
| |
|
def interactive_recommendation():
    """Run a console loop that recommends models for user-entered queries."""
    banner = "=" * 60
    print("\n" + banner)
    print("VBench Model Recommendation System")
    print(banner)
    print("\nThis system recommends video generation models based on your")
    print("evaluation requirements using the VBench leaderboard.\n")

    leaderboard = VBenchLeaderboard()

    while True:
        print("\nEnter your evaluation question (or 'quit' to exit):")
        question = input("> ").strip()

        # Any of the usual exit words ends the session.
        if question.lower() in ('quit', 'exit', 'q'):
            print("\nThank you for using the VBench Model Recommendation System!")
            break

        if not question:
            print("Please enter a valid question.")
            continue

        try:
            matches = leaderboard.recommend_model(question, top_k=5)
            if matches:
                print(leaderboard.generate_recommendation_report(question, matches))
            else:
                print("\nNo models found matching your criteria.")
        except Exception as exc:
            # Keep the loop alive on errors; log the traceback for debugging.
            print(f"\nError processing query: {exc}")
            logger.error(f"Error in recommendation: {exc}", exc_info=True)
| |
|
| |
|
def main():
    """Exercise the recommender against a fixed set of example queries."""
    example_queries = [
        "Which model is best for generating videos with consistent human actions?",
        "I need a model that excels at aesthetic quality and smooth motion",
        "What model should I use for generating multiple objects with accurate spatial relationships?",
        "Which model has the best overall consistency with prompts?",
        "I want to generate videos with beautiful artistic styles"
    ]

    leaderboard = VBenchLeaderboard()
    divider = "-" * 40

    for question in example_queries:
        print(f"\nQuery: {question}")
        print(divider)

        top_models = leaderboard.recommend_model(question, top_k=3)
        print(leaderboard.generate_recommendation_report(question, top_models))
        print("\n" + "=" * 80)
| |
|
| |
|
if __name__ == "__main__":
    import sys
    # "--interactive" as the first argument launches the console loop;
    # anything else runs the canned example queries.
    args = sys.argv[1:]
    if args and args[0] == "--interactive":
        interactive_recommendation()
    else:
        main()