Spaces:
Sleeping
Sleeping
Upload 3 files
Browse files- README.md +37 -7
- export_data.py +120 -0
- requirements.txt +1 -0
README.md
CHANGED
|
@@ -1,14 +1,44 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version:
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
license: mit
|
| 11 |
-
short_description: TestJoshGame
|
| 12 |
---
|
| 13 |
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: AI Trading Trust Experiment
|
| 3 |
+
emoji: 📈
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: green
|
| 6 |
sdk: gradio
|
| 7 |
+
sdk_version: 4.44.0
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
license: mit
|
|
|
|
| 11 |
---
|
| 12 |
|
| 13 |
+
# AI Trading Trust Experiment
|
| 14 |
+
|
| 15 |
+
A psychology research game studying trust in AI advice under varying conditions.
|
| 16 |
+
|
| 17 |
+
## About
|
| 18 |
+
|
| 19 |
+
This is a behavioral experiment disguised as a trading simulation. Participants:
|
| 20 |
+
|
| 21 |
+
1. Configure an AI advisor's characteristics (confidence, explanation depth, risk tolerance)
|
| 22 |
+
2. Receive AI-generated trading advice for fictional companies
|
| 23 |
+
3. Make trading decisions (follow or ignore the AI)
|
| 24 |
+
4. Rate their confidence in their decisions
|
| 25 |
+
|
| 26 |
+
## Data Collected
|
| 27 |
+
|
| 28 |
+
- Participant ID (anonymized)
|
| 29 |
+
- AI tuning preferences (slider positions)
|
| 30 |
+
- Trading decisions and response times
|
| 31 |
+
- Confidence ratings
|
| 32 |
+
- Portfolio outcomes
|
| 33 |
+
|
| 34 |
+
## For Researchers
|
| 35 |
+
|
| 36 |
+
Data is stored in SQLite (`experiment_data.db`). Download from the Space's Files tab.
|
| 37 |
+
|
| 38 |
+
Tables:
|
| 39 |
+
- `participants`: Session-level data
|
| 40 |
+
- `decisions`: Turn-by-turn data with all variables
|
| 41 |
+
|
| 42 |
+
## Fictional World
|
| 43 |
+
|
| 44 |
+
All companies and countries are entirely fictional to prevent participants from using external knowledge.
|
export_data.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Data Export Utility for AI Trading Trust Experiment
|
| 3 |
+
Run this locally after downloading experiment_data.db from HF Spaces
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sqlite3
|
| 7 |
+
import pandas as pd
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
|
| 10 |
+
def export_data(db_path: str = "experiment_data.db", output_dir: str = "exports"):
    """Export all experiment data to CSV files for analysis.

    Reads the `participants` and `decisions` tables from the SQLite
    database at *db_path* and writes four files into *output_dir*:
    participants.csv, decisions.csv, summary_by_participant.csv, and
    ai_accuracy_analysis.csv.

    Args:
        db_path: Path to the SQLite database downloaded from the Space.
        output_dir: Directory for the exported CSVs (created if missing).
    """
    out = Path(output_dir)
    # parents=True so a nested output path (e.g. "results/exports") works too.
    out.mkdir(parents=True, exist_ok=True)

    conn = sqlite3.connect(db_path)
    try:
        # Export participants (session-level rows).
        participants_df = pd.read_sql_query("""
            SELECT
                participant_id,
                session_start,
                session_end,
                final_portfolio_value,
                total_decisions,
                ai_reliance_score,
                completed
            FROM participants
        """, conn)
        participants_df.to_csv(out / "participants.csv", index=False)
        print(f"Exported {len(participants_df)} participants")

        # Export decisions (turn-by-turn rows with all experimental variables).
        decisions_df = pd.read_sql_query("""
            SELECT
                participant_id,
                scenario_id,
                scenario_order,
                timestamp,
                ai_confidence_setting,
                ai_explanation_setting,
                ai_risk_setting,
                ai_advice_direction,
                ai_advice_accuracy,
                decision,
                decision_amount,
                confidence_in_decision,
                response_time_ms,
                scenario_outcome,
                profit_loss,
                portfolio_value_after,
                followed_ai
            FROM decisions
        """, conn)
        decisions_df.to_csv(out / "decisions.csv", index=False)
        print(f"Exported {len(decisions_df)} decisions")

        # Per-participant summary statistics.
        summary = decisions_df.groupby('participant_id').agg({
            'followed_ai': ['sum', 'count', 'mean'],
            'confidence_in_decision': 'mean',
            'response_time_ms': 'mean',
            'ai_confidence_setting': 'mean',
            'ai_explanation_setting': 'mean',
            'ai_risk_setting': 'mean',
        }).round(2)
        # Flatten the MultiIndex columns produced by the multi-stat agg
        # (e.g. ('followed_ai', 'sum') -> 'followed_ai_sum').
        summary.columns = ['_'.join(col).strip() for col in summary.columns]
        summary.to_csv(out / "summary_by_participant.csv")
        print("Created summary statistics")

        # Cross-tab: decision counts by AI accuracy vs. whether the AI was followed.
        accuracy_analysis = decisions_df.groupby(
            ['ai_advice_accuracy', 'followed_ai']
        ).size().unstack(fill_value=0)
        accuracy_analysis.to_csv(out / "ai_accuracy_analysis.csv")
        print("Created AI accuracy analysis")
    finally:
        # Fix: close the connection even if a query or export raises
        # (the original leaked it on any exception).
        conn.close()

    print(f"\nAll files exported to {output_dir}/")
| 77 |
+
def quick_stats(db_path: str = "experiment_data.db"):
    """Print quick summary statistics from the experiment database.

    Args:
        db_path: Path to the SQLite database downloaded from the Space.
    """
    conn = sqlite3.connect(db_path)
    try:
        # Session counts (SUM(completed) tallies the completed flag).
        result = pd.read_sql_query(
            "SELECT COUNT(*) as n, SUM(completed) as completed FROM participants",
            conn,
        )
        print("\n=== Quick Stats ===")
        print(f"Total sessions: {result['n'].iloc[0]}")
        print(f"Completed sessions: {result['completed'].iloc[0]}")

        # Aggregate decision statistics across all participants.
        result = pd.read_sql_query("""
            SELECT
                AVG(followed_ai) * 100 as ai_follow_rate,
                AVG(confidence_in_decision) as avg_confidence,
                AVG(response_time_ms) / 1000 as avg_response_sec
            FROM decisions
        """, conn)
        print(f"\nAI Follow Rate: {result['ai_follow_rate'].iloc[0]:.1f}%")
        print(f"Avg Confidence: {result['avg_confidence'].iloc[0]:.1f}/100")
        print(f"Avg Response Time: {result['avg_response_sec'].iloc[0]:.1f}s")

        # Follow rate broken down by the AI advice's accuracy condition.
        result = pd.read_sql_query("""
            SELECT
                ai_advice_accuracy,
                AVG(followed_ai) * 100 as follow_rate,
                COUNT(*) as n
            FROM decisions
            GROUP BY ai_advice_accuracy
        """, conn)
        print("\nFollow rate by AI accuracy:")
        for _, row in result.iterrows():
            print(f"  {row['ai_advice_accuracy']}: {row['follow_rate']:.1f}% (n={row['n']})")
    finally:
        # Fix: close the connection even if a query raises
        # (the original leaked it on any exception).
        conn.close()
| 114 |
+
if __name__ == "__main__":
    # CLI dispatch: `python export_data.py stats` prints quick statistics,
    # any other invocation runs the full CSV export.
    import sys

    wants_stats = sys.argv[1:2] == ["stats"]
    if wants_stats:
        quick_stats()
    else:
        export_data()
|
requirements.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
gradio>=4.0.0
|