#!/usr/bin/env python3
"""
OpenRouter API Pricing Analysis
Analyzes context windows, token pricing, and output/input ratios
"""
import json
import numpy as np
from pathlib import Path
from typing import Dict, List, Any
import statistics


def load_data(file_path: str) -> List[Dict[str, Any]]:
    """Load the models data from JSON file."""
    with open(file_path, 'r', encoding='utf-8') as f:
        return json.load(f)


def calculate_distribution_stats(values: List[float], name: str) -> Dict[str, Any]:
    """Calculate comprehensive distribution statistics.

    Returns a dict with count, min/max, mean/median, std dev, quartiles,
    IQR, and selected high percentiles. Returns {"error": ...} for empty
    input so callers can render a readable report instead of crashing.
    """
    if not values:
        return {"error": "No data"}

    values = sorted(values)
    n = len(values)

    stats = {
        "name": name,
        "count": n,
        "min": min(values),
        "max": max(values),
        "mean": statistics.mean(values),
        "median": statistics.median(values),
        # stdev requires at least two data points
        "std_dev": statistics.stdev(values) if n > 1 else 0,
    }

    # Quartiles (coerced to plain float so the dict stays cleanly
    # JSON-serializable regardless of numpy scalar types)
    stats["q1"] = float(np.percentile(values, 25))
    stats["q3"] = float(np.percentile(values, 75))
    stats["iqr"] = stats["q3"] - stats["q1"]

    # Additional percentiles
    stats["p10"] = float(np.percentile(values, 10))
    stats["p90"] = float(np.percentile(values, 90))
    stats["p95"] = float(np.percentile(values, 95))
    stats["p99"] = float(np.percentile(values, 99))

    return stats


def calculate_stratification(values: List[float], name: str) -> Dict[str, Any]:
    """Calculate stratification by creating bins/ranges.

    Uses 10 linear bins for narrow ranges (< 1) and 10 log-spaced bins for
    wide positive ranges. Returns {"error": ...} for empty input.
    """
    if not values:
        return {"error": "No data"}

    values_array = np.array(values)

    # Determine appropriate bins based on data range
    min_val = values_array.min()
    max_val = values_array.max()

    if max_val == min_val:
        # All values identical: np.histogram rejects non-increasing bin
        # edges, so report a single degenerate bin instead of crashing.
        return {
            "name": name,
            "bins": [{
                "range": f"{min_val:.6f} - {max_val:.6f}",
                "count": len(values),
                "percentage": 100.0,
            }],
        }

    # Create stratification bins
    if max_val - min_val < 1:
        # For very small ranges (like some pricing)
        bins = np.linspace(min_val, max_val, 11)
    else:
        # Use log scale for wide ranges
        if min_val > 0:
            # Clamp the lower edge away from zero before taking log10
            bins = np.logspace(np.log10(max(min_val, 0.0001)), np.log10(max_val), 11)
        else:
            bins = np.linspace(min_val, max_val, 11)

    hist, bin_edges = np.histogram(values_array, bins=bins)

    stratification = {
        "name": name,
        "bins": []
    }

    for i in range(len(hist)):
        stratification["bins"].append({
            "range": f"{bin_edges[i]:.6f} - {bin_edges[i+1]:.6f}",
            "count": int(hist[i]),
            "percentage": float(hist[i] / len(values) * 100)
        })

    return stratification


def analyze_context_windows(models: List[Dict[str, Any]]) -> tuple:
    """Analyze context window distribution.

    Returns (distribution_stats, stratification) over models with a
    positive context_length.
    """
    context_lengths = [
        m['context_length'] for m in models
        if m.get('context_length') and m['context_length'] > 0
    ]

    distribution = calculate_distribution_stats(context_lengths, "Context Windows")
    stratification = calculate_stratification(context_lengths, "Context Windows")

    return distribution, stratification


def analyze_input_pricing(models: List[Dict[str, Any]]) -> tuple:
    """Analyze input token pricing distribution (free models excluded)."""
    input_prices = [
        m['prompt_price_per_1m_tokens'] for m in models
        if m.get('prompt_price_per_1m_tokens') is not None
        and m['prompt_price_per_1m_tokens'] > 0
    ]

    distribution = calculate_distribution_stats(input_prices, "Input Token Pricing (per 1M tokens)")
    stratification = calculate_stratification(input_prices, "Input Token Pricing")

    return distribution, stratification


def analyze_output_pricing(models: List[Dict[str, Any]]) -> tuple:
    """Analyze output token pricing distribution (free models excluded)."""
    output_prices = [
        m['completion_price_per_1m_tokens'] for m in models
        if m.get('completion_price_per_1m_tokens') is not None
        and m['completion_price_per_1m_tokens'] > 0
    ]

    distribution = calculate_distribution_stats(output_prices, "Output Token Pricing (per 1M tokens)")
    stratification = calculate_stratification(output_prices, "Output Token Pricing")

    return distribution, stratification


def analyze_output_input_ratio(models: List[Dict[str, Any]]) -> tuple:
    """Analyze output/input token price ratio for paid models."""
    ratios = []
    for m in models:
        # `or 0` also maps an explicit None value to 0 — a plain
        # .get(key, 0) would return None here and `None > 0` raises
        # TypeError (the other analyzers guard with `is not None`).
        input_price = m.get('prompt_price_per_1m_tokens') or 0
        output_price = m.get('completion_price_per_1m_tokens') or 0

        # Calculate ratio only for non-free models
        if input_price > 0 and output_price > 0:
            ratio = output_price / input_price
            ratios.append(ratio)

    distribution = calculate_distribution_stats(ratios, "Output/Input Price Ratio")
    stratification = calculate_stratification(ratios, "Output/Input Price Ratio")

    return distribution, stratification


def format_distribution_report(dist: Dict[str, Any]) -> str:
    """Format distribution statistics as readable text."""
    if "error" in dist:
        return f"Error: {dist['error']}"

    report = f"""
{dist['name']}
{'=' * len(dist['name'])}

Sample Size: {dist['count']:,}

Central Tendency:
  Mean:   {dist['mean']:.6f}
  Median: {dist['median']:.6f}

Spread:
  Min:     {dist['min']:.6f}
  Max:     {dist['max']:.6f}
  Std Dev: {dist['std_dev']:.6f}
  IQR:     {dist['iqr']:.6f}

Quartiles:
  Q1 (25th percentile): {dist['q1']:.6f}
  Q2 (50th percentile): {dist['median']:.6f}
  Q3 (75th percentile): {dist['q3']:.6f}

Additional Percentiles:
  10th: {dist['p10']:.6f}
  90th: {dist['p90']:.6f}
  95th: {dist['p95']:.6f}
  99th: {dist['p99']:.6f}
"""
    return report


def format_stratification_report(strat: Dict[str, Any]) -> str:
    """Format stratification as readable text."""
    if "error" in strat:
        return f"Error: {strat['error']}"

    report = f"""
{strat['name']} - Stratification
{'=' * (len(strat['name']) + 18)}

Range Distribution:
"""
    for bin_data in strat['bins']:
        report += f"  {bin_data['range']:>40s}: {bin_data['count']:>6,} models ({bin_data['percentage']:>5.1f}%)\n"

    return report


def main():
    """Run all analyses and write per-metric text/JSON reports plus a summary."""
    # Load data
    data_file = Path(__file__).parent.parent / 'raw' / 'models.json'
    models = load_data(data_file)

    print(f"Loaded {len(models)} models\n")

    # Create output directory (script's own directory; exist_ok keeps reruns safe)
    output_dir = Path(__file__).parent
    output_dir.mkdir(exist_ok=True)

    # Analyze each metric
    analyses = {
        'context_windows': analyze_context_windows(models),
        'input_pricing': analyze_input_pricing(models),
        'output_pricing': analyze_output_pricing(models),
        'output_input_ratio': analyze_output_input_ratio(models)
    }

    # Generate reports for each analysis
    for metric_name, (distribution, stratification) in analyses.items():
        # Create individual report files
        dist_report = format_distribution_report(distribution)
        strat_report = format_stratification_report(stratification)

        # Write distribution report
        dist_file = output_dir / f"{metric_name}_distribution.txt"
        with open(dist_file, 'w') as f:
            f.write(dist_report)
        print(f"Created: {dist_file}")

        # Write stratification report
        strat_file = output_dir / f"{metric_name}_stratification.txt"
        with open(strat_file, 'w') as f:
            f.write(strat_report)
        print(f"Created: {strat_file}")

        # Also save as JSON for programmatic access
        json_file = output_dir / f"{metric_name}_stats.json"
        with open(json_file, 'w') as f:
            json.dump({
                'distribution': distribution,
                'stratification': stratification
            }, f, indent=2)
        print(f"Created: {json_file}")

    # Create comprehensive summary report
    summary_file = output_dir / "summary_report.txt"
    with open(summary_file, 'w') as f:
        f.write("OpenRouter API Pricing Analysis - Summary Report\n")
        f.write("=" * 60 + "\n\n")
        f.write(f"Dataset: {len(models)} models analyzed\n\n")

        for metric_name, (distribution, stratification) in analyses.items():
            f.write("\n" + "=" * 60 + "\n")
            f.write(format_distribution_report(distribution))
            f.write("\n")
            f.write(format_stratification_report(stratification))
            f.write("\n")

    print(f"\nCreated: {summary_file}")
    print("\nAnalysis complete!")


if __name__ == "__main__":
    main()