#!/usr/bin/env python3
"""
Script to check if each query has at least 10 eval_results.json files
across all subfolders (numbered and supplementary) in VBench evaluation results.
"""

import json
import re
import sys
from pathlib import Path
from typing import Dict, List, Optional
from collections import defaultdict


queries_to_evaluate = [
    "How does the model perform in terms of aesthetics?",
    "How well does the model ensure that the subject maintains a consistent appearance throughout the video?",
    "How effectively does the model maintain a consistent background scene throughout the video?",
    "How well does the model produce smooth and natural motion that follows the physical laws of the real world?",
    "To what extent are distortions like over-exposure, noise, and blur present in the generated frames?",
    "How consistently does the visual style (e.g., oil painting, black and white, watercolor) align with the specified look throughout the video?",
    "How consistent are the time-based effects and camera motions throughout the video?",
    "How well does the generated video demonstrate overall consistency with the input prompt?",
    "How effectively does the model generate multiple distinct objects in a single scene?",
    "How accurately does the model generate specific object classes as described in the text prompt?",
    "To what extent does the video exhibit dynamic movement rather than being overly static?",
    "How accurately do human subjects in the video perform the actions described in the text prompt?",
    "How accurately do the colors of the generated objects match the specifications in the text prompt?",
    "How accurately does the spatial arrangement of objects reflect the positioning and relationships described in the text prompt?",
    "How accurately does the generated video represent the scene described in the text prompt?",
]
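
# These queries are assumed to correspond to the VBench evaluation dimensions;
# folder names found on disk are matched back to them via
# extract_query_from_folder_name below.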

def extract_query_from_folder_name(folder_name: str) -> Optional[str]:
    """
    Extract a query from a dimension folder name.
    Expected format: date-time-query_with_underscores
    Returns None if no query can be recognized.
    """
    # Look for pattern like "HH:MM:SS-query"
    match = re.search(r'\d{2}:\d{2}:\d{2}-(.+)', folder_name)
    if match:
        query = match.group(1).replace('_', ' ')
        if not query.endswith('?'):
            query += '?'
        return query
    
    # For folders without timestamp, try direct extraction
    # This handles cases where the folder name is just the query
    if '_' in folder_name or ' ' in folder_name:
        query = folder_name.replace('_', ' ')
        if not query.endswith('?'):
            query += '?'
        return query
    
    return None
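
# Illustrative behavior (folder names here are hypothetical but follow the
# patterns handled above):
#   "2024-03-01-14:30:22-How_does_the_model_perform_in_terms_of_aesthetics"
#       -> "How does the model perform in terms of aesthetics?"
#   "some_other_folder"  (no timestamp, but contains '_')
#       -> "some other folder?"   # fallback path also appends '?'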


def count_query_occurrences(model_path: str, min_required: int = 10, expected_queries: Optional[List[str]] = None) -> Dict[str, Dict]:
    """
    Count how many eval_results.json files exist for each query across all subfolders.
    Recursively searches all directories for eval_results.json files.
    
    Args:
        model_path: Path to the model folder (e.g., eval_vbench_results/modelscope)
        min_required: Minimum number of eval_results.json files required per query (default: 10)
        expected_queries: Optional list of queries that must appear; any that are
            never found on disk are reported as missing completely
    
    Returns:
        Dictionary with query statistics and missing queries
    """
    model_path = Path(model_path)
    
    if not model_path.exists():
        print(f"Error: Path {model_path} does not exist!")
        return {}
    
    # Track query occurrences: query -> list of (path, folder) where it exists
    query_occurrences = defaultdict(list)
    
    # Track all unique queries found
    all_queries = set()
    
    print(f"Scanning model folder: {model_path.name}")
    print("=" * 80)
    
    # Find all eval_results.json files recursively
    eval_files = list(model_path.rglob("eval_results.json"))
    print(f"Found {len(eval_files)} eval_results.json files")
    
    # Process each eval_results.json file
    for eval_file in eval_files:
        # Get the parent folder that contains this eval_results.json
        parent_folder = eval_file.parent
        
        # Try to extract query from the folder name
        # Check if it's in a videos subfolder
        if parent_folder.name == "videos":
            query_folder = parent_folder.parent
        else:
            query_folder = parent_folder
            
        # Extract query from folder name
        query = extract_query_from_folder_name(query_folder.name)
        
        if query:
            all_queries.add(query)
            
            # Validate the JSON file
            try:
                with open(eval_file, 'r') as f:
                    json.load(f)
                
                # Get relative path from model folder
                relative_path = eval_file.relative_to(model_path)
                
                # Record this occurrence with the relative path
                query_occurrences[query].append(str(relative_path))
                
            except (json.JSONDecodeError, OSError) as e:
                # Don't count unreadable or invalid JSON files
                print(f"  Invalid JSON in {eval_file}: {e}")
    
    # Analyze results
    results = {
        'all_queries': sorted(all_queries),
        'query_counts': {},
        'insufficient_queries': [],
        'missing_completely': [],
        'statistics': {
            'total_unique_queries': len(all_queries),
            'queries_with_sufficient_results': 0,
            'queries_with_insufficient_results': 0,
            'queries_missing_completely': 0
        }
    }
    
    # If expected queries provided, check for completely missing ones
    if expected_queries:
        for query in expected_queries:
            if query not in all_queries:
                all_queries.add(query)
                results['missing_completely'].append(query)
                results['statistics']['queries_missing_completely'] += 1
                results['query_counts'][query] = {
                    'count': 0,
                    'locations': []
                }
    
    # Check each query
    for query in all_queries:
        if query not in results['missing_completely']:  # Skip if already marked as missing
            count = len(query_occurrences[query])
            results['query_counts'][query] = {
                'count': count,
                'locations': query_occurrences[query]
            }
            
            if count == 0:
                results['missing_completely'].append(query)
                results['statistics']['queries_missing_completely'] += 1
            elif count < min_required:
                results['insufficient_queries'].append({
                    'query': query,
                    'count': count,
                    'needed': min_required - count
                })
                results['statistics']['queries_with_insufficient_results'] += 1
            else:
                results['statistics']['queries_with_sufficient_results'] += 1
    
    # Update total unique queries count
    results['statistics']['total_unique_queries'] = len(all_queries)
    
    return results
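
# Sketch of the returned dictionary (values are illustrative):
# {
#     'all_queries': ['How does the model perform in terms of aesthetics?', ...],
#     'query_counts': {<query>: {'count': 7,
#                                'locations': ['round1/.../eval_results.json', ...]}},
#     'insufficient_queries': [{'query': <query>, 'count': 7, 'needed': 3}],
#     'missing_completely': [<query>, ...],
#     'statistics': {'total_unique_queries': 15,
#                    'queries_with_sufficient_results': 10,
#                    'queries_with_insufficient_results': 3,
#                    'queries_missing_completely': 2}
# }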


def save_insufficient_queries(results: Dict, output_file: str, min_required: int = 10):
    """
    Save queries with insufficient eval_results.json files to a text file.
    A query that appears fewer than min_required times is written once for
    each additional evaluation still needed.
    """
    with open(output_file, 'w') as f:
        # Write insufficient queries (repeated based on how many more are needed)
        for item in results.get('insufficient_queries', []):
            query = item['query']
            needed = item['needed']
            # Write the query 'needed' times
            for _ in range(needed):
                f.write(query + '\n')
        
        # Write completely missing queries min_required times
        for query in results.get('missing_completely', []):
            for _ in range(min_required):
                f.write(query + '\n')
    
    total_lines = sum(item['needed'] for item in results.get('insufficient_queries', [])) + \
                  len(results.get('missing_completely', [])) * min_required
    
    print(f"\nSaved {total_lines} query lines to {output_file}")
    print(f"(Queries needing multiple evaluations are repeated)")


def generate_completeness_report(results: Dict, min_required: int = 10) -> str:
    """Generate a detailed report of query completeness."""
    
    if not results:
        return "No results to report."
    
    report = []
    report.append("=" * 80)
    report.append("QUERY COMPLETENESS REPORT")
    report.append(f"Minimum required eval_results.json files per query: {min_required}")
    report.append("=" * 80)
    
    stats = results['statistics']
    report.append("\n📊 STATISTICS:")
    report.append(f"  Total unique queries found: {stats['total_unique_queries']}")
    report.append(f"  Queries with sufficient results (>={min_required}): {stats['queries_with_sufficient_results']}")
    report.append(f"  Queries with insufficient results (<{min_required}): {stats['queries_with_insufficient_results']}")
    report.append(f"  Queries missing completely: {stats['queries_missing_completely']}")
    
    # Report insufficient queries
    if results['insufficient_queries']:
        report.append("\n" + "-" * 80)
        report.append("⚠️  QUERIES WITH INSUFFICIENT RESULTS:")
        report.append("-" * 80)
        for item in results['insufficient_queries']:
            query = item['query']
            count = item['count']
            needed = item['needed']
            report.append(f"\n  Query: {query[:80]}...")
            report.append(f"    Current count: {count}/{min_required} (needs {needed} more)")
            locations = results['query_counts'][query]['locations']
            # Show only the top-level folder (round/subfolder) of each location
            location_names = [loc.split('/')[0] for loc in locations[:5]]
            report.append(f"    Found in: {', '.join(location_names)}")
            if len(locations) > 5:
                report.append(f"              ... and {len(locations) - 5} more")
    
    # Report completely missing queries
    if results['missing_completely']:
        report.append("\n" + "-" * 80)
        report.append("❌ QUERIES MISSING COMPLETELY:")
        report.append("-" * 80)
        for query in results['missing_completely'][:10]:  # Show first 10
            report.append(f"  • {query}")
        if len(results['missing_completely']) > 10:
            report.append(f"  ... and {len(results['missing_completely']) - 10} more")
    
    # Add query count distribution
    if results.get('query_counts'):
        report.append("\n" + "-" * 80)
        report.append("📈 QUERY COUNT DISTRIBUTION:")
        report.append("-" * 80)
        count_distribution = defaultdict(list)
        for query, data in results['query_counts'].items():
            count = data['count']
            count_distribution[count].append(query)
        
        for count in sorted(count_distribution.keys()):
            queries_at_count = count_distribution[count]
            report.append(f"  {count} eval_results.json: {len(queries_at_count)} queries")
            if count < min_required and len(queries_at_count) <= 3:
                for q in queries_at_count:
                    report.append(f"    - {q[:70]}...")
    
    # Summary
    total_missing = sum(item['needed'] for item in results.get('insufficient_queries', [])) + \
                   len(results.get('missing_completely', [])) * min_required
    
    report.append("\n" + "=" * 80)
    report.append(f"SUMMARY: Need {total_missing} more evaluations to reach {min_required} per query")
    report.append("=" * 80)
    
    return "\n".join(report)


def main():
    """Main function to run the script."""
    import argparse
    
    parser = argparse.ArgumentParser(
        description="Check if each query has at least N eval_results.json files across all subfolders"
    )
    parser.add_argument(
        "path",
        type=str,
        help="Path to the model folder (e.g., eval_vbench_results/modelscope)"
    )
    parser.add_argument(
        "--min-required",
        type=int,
        default=10,
        help="Minimum number of eval_results.json files required per query (default: 10)"
    )
    parser.add_argument(
        "--output",
        type=str,
        help="File name for the report (written inside the model folder)"
    )
    parser.add_argument(
        "--queries-output",
        type=str,
        default="queries_to_evaluate.txt",
        help="File name (written inside the model folder) for queries that still need evaluations, repeated as needed"
    )
    
    args = parser.parse_args()
    
    print(f"Checking query completeness in: {args.path}")
    print(f"Minimum required results per query: {args.min_required}")
    print("-" * 80)
    
    # Count query occurrences with expected queries
    results = count_query_occurrences(args.path, args.min_required, queries_to_evaluate)
    
    # Generate report
    report = generate_completeness_report(results, args.min_required)
    print("\n" + report)
    
    # Save report if requested
    if args.output:
        output_path = Path(args.path) / args.output
        with open(output_path, 'w') as f:
            f.write(report)
        print(f"\nReport saved to: {output_path}")
    
    # Save queries that need more evaluations
    if args.queries_output and (results.get('insufficient_queries') or results.get('missing_completely')):
        output_path = Path(args.path) / args.queries_output
        save_insufficient_queries(results, output_path, args.min_required)
    
    # Return exit code
    insufficient_count = len(results.get('insufficient_queries', [])) + len(results.get('missing_completely', []))
    return 0 if insufficient_count == 0 else 1


if __name__ == "__main__":
    exit(main())