"""
Data Export Utility for AI Trading Trust Experiment

Run this locally after downloading experiment_data.db from HF Spaces.
"""
import sqlite3
from pathlib import Path

import pandas as pd
def export_data(db_path: str = "experiment_data.db", output_dir: str = "exports") -> None:
    """Export all experiment data to CSV files for analysis.

    Writes four files into *output_dir*: participants.csv, decisions.csv,
    summary_by_participant.csv, and ai_accuracy_analysis.csv.

    Args:
        db_path: Path to the SQLite database downloaded from HF Spaces.
        output_dir: Directory that receives the CSVs (created if missing).
    """
    out = Path(output_dir)
    # parents=True so a nested path like "exports/run1" also works.
    out.mkdir(parents=True, exist_ok=True)

    conn = sqlite3.connect(db_path)
    try:
        # Raw participant sessions.
        participants_df = pd.read_sql_query(
            """
            SELECT
                participant_id,
                session_start,
                session_end,
                final_portfolio_value,
                total_decisions,
                ai_reliance_score,
                completed
            FROM participants
            """,
            conn,
        )
        participants_df.to_csv(out / "participants.csv", index=False)
        print(f"Exported {len(participants_df)} participants")

        # Per-scenario decisions, including the AI settings shown to the
        # participant and the realized outcome.
        decisions_df = pd.read_sql_query(
            """
            SELECT
                participant_id,
                scenario_id,
                scenario_order,
                timestamp,
                ai_confidence_setting,
                ai_explanation_setting,
                ai_risk_setting,
                ai_advice_direction,
                ai_advice_accuracy,
                decision,
                decision_amount,
                confidence_in_decision,
                response_time_ms,
                scenario_outcome,
                profit_loss,
                portfolio_value_after,
                followed_ai
            FROM decisions
            """,
            conn,
        )
        decisions_df.to_csv(out / "decisions.csv", index=False)
        print(f"Exported {len(decisions_df)} decisions")

        # Per-participant summary statistics (reliance, confidence, timing).
        summary = decisions_df.groupby('participant_id').agg({
            'followed_ai': ['sum', 'count', 'mean'],
            'confidence_in_decision': 'mean',
            'response_time_ms': 'mean',
            'ai_confidence_setting': 'mean',
            'ai_explanation_setting': 'mean',
            'ai_risk_setting': 'mean',
        }).round(2)
        # Flatten the (column, statistic) MultiIndex into e.g. "followed_ai_mean".
        summary.columns = ['_'.join(col).strip() for col in summary.columns]
        summary.to_csv(out / "summary_by_participant.csv")
        print("Created summary statistics")

        # Cross-tab: how often advice was followed, by advice accuracy.
        accuracy_analysis = (
            decisions_df.groupby(['ai_advice_accuracy', 'followed_ai'])
            .size()
            .unstack(fill_value=0)
        )
        accuracy_analysis.to_csv(out / "ai_accuracy_analysis.csv")
        print("Created AI accuracy analysis")
    finally:
        # Always release the DB handle, even if a query fails.
        conn.close()

    print(f"\nAll files exported to {output_dir}/")
def quick_stats(db_path: str = "experiment_data.db") -> None:
    """Print quick statistics from the database.

    Args:
        db_path: Path to the SQLite database to summarize.
    """
    conn = sqlite3.connect(db_path)
    try:
        # Participant counts.
        sessions = pd.read_sql_query(
            "SELECT COUNT(*) as n, SUM(completed) as completed FROM participants",
            conn,
        )
        print("\n=== Quick Stats ===")
        print(f"Total sessions: {sessions['n'].iloc[0]}")
        print(f"Completed sessions: {sessions['completed'].iloc[0]}")

        # Aggregate decision behaviour.
        overall = pd.read_sql_query(
            """
            SELECT
                AVG(followed_ai) * 100 as ai_follow_rate,
                AVG(confidence_in_decision) as avg_confidence,
                AVG(response_time_ms) / 1000 as avg_response_sec
            FROM decisions
            """,
            conn,
        )
        # AVG() yields NULL (NaN after read) when the decisions table is
        # empty; skip the formatted stats instead of crashing on ":.1f".
        if pd.notna(overall['ai_follow_rate'].iloc[0]):
            print(f"\nAI Follow Rate: {overall['ai_follow_rate'].iloc[0]:.1f}%")
            print(f"Avg Confidence: {overall['avg_confidence'].iloc[0]:.1f}/100")
            print(f"Avg Response Time: {overall['avg_response_sec'].iloc[0]:.1f}s")
        else:
            print("\nNo decisions recorded yet")

        # Follow rate broken down by whether the AI's advice was accurate.
        by_accuracy = pd.read_sql_query(
            """
            SELECT
                ai_advice_accuracy,
                AVG(followed_ai) * 100 as follow_rate,
                COUNT(*) as n
            FROM decisions
            GROUP BY ai_advice_accuracy
            """,
            conn,
        )
        print("\nFollow rate by AI accuracy:")
        for _, row in by_accuracy.iterrows():
            print(f"  {row['ai_advice_accuracy']}: {row['follow_rate']:.1f}% (n={row['n']})")
    finally:
        # Always release the DB handle, even if a query fails.
        conn.close()
| if __name__ == "__main__": | |
| import sys | |
| if len(sys.argv) > 1 and sys.argv[1] == "stats": | |
| quick_stats() | |
| else: | |
| export_data() | |