#!/usr/bin/env python3
"""
Main entry point for the Quantum Finance Feasibility Analysis Framework.
Usage:
python main.py # Run demo evaluation
python main.py --idea "Your idea here" # Evaluate specific idea
python main.py --batch # Run batch evaluation on test cases
"""
import argparse
import sys
from pathlib import Path
# Add src to path
sys.path.insert(0, str(Path(__file__).parent / "src"))
from quantum_finance_crew import QuantumFinanceCrew
from tools import (
QuantumResourceEstimator,
collect_experiment_data,
ClassicalPortfolioOptimizer
)
from utils import MetricsCollector, EvaluationMetrics
# Standard test cases for experiments
# Each case carries: 'id' (stable identifier used in batch reports),
# 'idea' (free-text prompt handed to the evaluation crew), and
# 'category' (problem family: optimization / pricing / ml / risk).
TEST_CASES = [
    {
        'id': 'portfolio_optimization',
        'idea': 'Quantum portfolio optimization using QAOA for a 100-asset portfolio with mean-variance objective',
        'category': 'optimization'
    },
    {
        'id': 'option_pricing',
        'idea': 'Quantum amplitude estimation for pricing European call options with 8-bit precision',
        'category': 'pricing'
    },
    {
        'id': 'fraud_detection',
        'idea': 'Quantum machine learning classifier for real-time credit card fraud detection',
        'category': 'ml'
    },
    {
        'id': 'risk_analysis',
        'idea': 'Quantum Monte Carlo for Value-at-Risk (VaR) calculation on a derivatives portfolio',
        'category': 'risk'
    },
    {
        'id': 'credit_scoring',
        'idea': 'Variational quantum classifier for credit scoring using customer financial data',
        'category': 'ml'
    }
]
def run_resource_estimation_demo():
    """Print quantum resource estimates for three example finance workloads.

    Covers QAOA portfolio optimization, amplitude-estimation option
    pricing, and Grover search — each rendered as a titled section.
    """
    banner = "=" * 60
    print("\n" + banner)
    print("QUANTUM RESOURCE ESTIMATION DEMO")
    print(banner)

    estimator = QuantumResourceEstimator('ibm_osprey')

    def _section(title, estimates):
        # Shared formatting: section title, rule, then key/value lines.
        print(f"\n{title}")
        print("-" * 50)
        for key, value in estimates.items():
            print(f" {key}: {value}")

    _section(
        "1. QAOA for 50-Asset Portfolio (p=3 layers):",
        estimator.estimate_qaoa_resources(num_assets=50, p_layers=3),
    )
    _section(
        "2. Amplitude Estimation for Option Pricing (8-bit precision):",
        estimator.estimate_amplitude_estimation_resources(
            precision_bits=8, num_qubits_oracle=20
        ),
    )
    _section(
        "3. Grover's Search for Pattern Matching (1M records):",
        estimator.estimate_grover_resources(search_space_size=1_000_000),
    )
def run_classical_benchmark_demo():
    """Run the classical Markowitz benchmark on a small synthetic dataset.

    Loads 15 assets of experiment data, optimizes the portfolio, prints the
    headline statistics, then reports a 100-trial timing benchmark.
    """
    banner = "=" * 60
    print("\n" + banner)
    print("CLASSICAL BENCHMARK DEMO")
    print(banner)

    print("\nCollecting financial data...")
    data = collect_experiment_data(n_assets=15)
    print(f"Loaded {data['n_observations']} days of data for {data['n_assets']} assets")

    print("\nRunning classical portfolio optimization:")
    print("-" * 50)
    # Pass raw numpy arrays (.values) rather than the pandas containers.
    optimizer = ClassicalPortfolioOptimizer(
        data['expected_returns'].values,
        data['covariance_matrix'].values
    )
    outcome = optimizer.optimize_markowitz()
    print(f" Expected Return: {outcome['expected_return']:.4f}")
    print(f" Volatility: {outcome['volatility']:.4f}")
    print(f" Sharpe Ratio: {outcome['sharpe_ratio']:.4f}")

    print("\nTiming benchmark (100 runs):")
    timing = optimizer.benchmark_timing(n_trials=100)
    print(f" Mean time: {timing['mean_time_ms']:.3f} ms")
    print(f" Std time: {timing['std_time_ms']:.3f} ms")
def run_crew_evaluation(idea: str, verbose: bool = True):
    """Evaluate a single quantum-finance idea with the full agent crew.

    Args:
        idea: Free-text description of the idea to analyze.
        verbose: Forwarded to the crew's evaluate call.

    Returns:
        Whatever QuantumFinanceCrew.evaluate produces for this idea.
    """
    divider = "=" * 60
    print("\n" + divider)
    print("QUANTUM FINANCE FEASIBILITY ANALYSIS")
    print(divider)
    print(f"\nIdea: {idea}")
    print("-" * 60)

    evaluation = QuantumFinanceCrew().evaluate(idea, verbose=verbose)

    print("\n" + divider)
    print("EVALUATION COMPLETE")
    print(divider)
    return evaluation
def run_batch_evaluation(verbose: bool = False):
    """Evaluate every entry in TEST_CASES, collecting per-run metrics.

    A failing case does not abort the batch: its result dict carries
    'case_id' and 'error' instead of the crew's output.

    Returns:
        List of result dicts, one per test case, in TEST_CASES order.
    """
    banner = "=" * 60
    print("\n" + banner)
    print("BATCH EVALUATION - ALL TEST CASES")
    print(banner)

    crew = QuantumFinanceCrew()
    metrics_collector = MetricsCollector()
    results = []
    total = len(TEST_CASES)

    for position, case in enumerate(TEST_CASES, start=1):
        print(f"\n[{position}/{total}] Evaluating: {case['id']}")
        print(f" {case['idea'][:60]}...")
        metrics = metrics_collector.start_evaluation(case['idea'])
        try:
            outcome = crew.evaluate(case['idea'], verbose=verbose)
        except Exception as exc:
            # Record the failure against the case id and keep going.
            print(f" ERROR: {exc}")
            results.append({'case_id': case['id'], 'error': str(exc)})
        else:
            metrics.total_agent_rounds = outcome.get('tasks_completed', 0)
            outcome['case_id'] = case['id']
            results.append(outcome)
        # Close out timing for this case whether it succeeded or not.
        metrics.complete()

    print("\n" + banner)
    print("BATCH EVALUATION SUMMARY")
    print(banner)
    for key, value in metrics_collector.get_summary_statistics().items():
        print(f" {key}: {value}")
    return results
def main():
    """Parse command-line arguments and dispatch to the requested mode.

    Modes (first match wins): --demo, --batch, --idea; with no flags,
    prints usage hints and falls back to running the demos.
    """
    parser = argparse.ArgumentParser(
        description='Quantum Finance Feasibility Analysis Framework'
    )
    parser.add_argument('--idea', type=str,
                        help='Specific quantum finance idea to evaluate')
    parser.add_argument('--batch', action='store_true',
                        help='Run batch evaluation on all test cases')
    parser.add_argument('--demo', action='store_true',
                        help='Run resource estimation and benchmark demos')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Enable verbose output')
    args = parser.parse_args()

    if args.demo:
        run_resource_estimation_demo()
        run_classical_benchmark_demo()
        return
    if args.batch:
        run_batch_evaluation(verbose=args.verbose)
        return
    if args.idea:
        run_crew_evaluation(args.idea, verbose=args.verbose)
        return

    # No mode selected: show usage hints, then run the demos anyway.
    print("Quantum Finance Feasibility Analysis Framework")
    print("=" * 50)
    print("\nUsage:")
    print(" python main.py --demo # Resource estimation demos")
    print(" python main.py --idea 'text' # Evaluate specific idea")
    print(" python main.py --batch # Batch evaluate test cases")
    print("\nRunning demo mode...")
    run_resource_estimation_demo()
    run_classical_benchmark_demo()
# Script entry point: delegate all CLI handling to main().
if __name__ == "__main__":
    main()