Update app.py
Browse files
app.py
CHANGED
|
@@ -2,11 +2,10 @@ import gradio as gr
|
|
| 2 |
from cear_model import CEARModel
|
| 3 |
import pandas as pd
|
| 4 |
|
| 5 |
-
#
|
| 6 |
cear_analyzer = CEARModel()
|
| 7 |
|
| 8 |
-
# --- Configuration: Default Values
|
| 9 |
-
# Users will see these rows filled in when the app loads
|
| 10 |
DEFAULT_DATA = [
|
| 11 |
["TikTok", 0],
|
| 12 |
["Instagram", 0],
|
|
@@ -17,85 +16,124 @@ DEFAULT_DATA = [
|
|
| 17 |
["LinkedIn", 0]
|
| 18 |
]
|
| 19 |
|
| 20 |
-
def
|
| 21 |
"""
|
| 22 |
-
|
| 23 |
"""
|
| 24 |
-
# 1.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
user_data_df = pd.DataFrame(input_table, columns=['platform_name', 'minutes_per_week'])
|
| 26 |
-
|
| 27 |
-
# Clean inputs: coerce non-numbers to 0
|
| 28 |
user_data_df['minutes_per_week'] = pd.to_numeric(user_data_df['minutes_per_week'], errors='coerce').fillna(0)
|
| 29 |
|
| 30 |
-
# 2.
|
| 31 |
raw_scores = cear_analyzer.calculate_scores(user_data_df)
|
| 32 |
-
|
| 33 |
-
# 3. Generate Analysis Summary
|
| 34 |
-
# We now integrate the survey scores into the interpretation
|
| 35 |
|
| 36 |
-
#
|
| 37 |
-
|
| 38 |
-
fomo_status = "High" if fomo_rating >= 4 else ("Low" if fomo_rating <= 2 else "Moderate")
|
| 39 |
|
|
|
|
| 40 |
summary = f"""
|
| 41 |
-
## π Analysis
|
| 42 |
-
|
| 43 |
-
### π§ Core Metrics
|
| 44 |
-
- **Cultural Connectedness Score (C-Score):** **{raw_scores['C_Score']:.2f}**
|
| 45 |
-
- **Algorithmic Risk Score (A-Risk):** **{raw_scores['A_Risk']:.2f}**
|
| 46 |
-
- **Platform Diversity Index (D-Index):** **{raw_scores['D_Index']:.2f}**
|
| 47 |
|
| 48 |
-
###
|
| 49 |
-
|
| 50 |
-
|
| 51 |
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 55 |
|
| 56 |
-
|
| 57 |
-
|
|
|
|
|
|
|
| 58 |
"""
|
| 59 |
|
| 60 |
-
# Return the summary text and the efficiency table
|
| 61 |
return summary, pd.DataFrame(raw_scores['Per_Platform_Efficiency'])
|
| 62 |
|
| 63 |
-
# ---
|
| 64 |
-
with gr.Blocks(title="CEAR Model") as demo:
|
| 65 |
gr.Markdown("# π§ Cultural Exposure & Algorithmic Risk (CEAR) Model")
|
| 66 |
-
gr.Markdown("
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 67 |
|
| 68 |
with gr.Row():
|
| 69 |
with gr.Column(scale=2):
|
| 70 |
-
# The Dataframe with pre-populated values
|
| 71 |
input_table = gr.Dataframe(
|
| 72 |
headers=['platform_name', 'minutes_per_week'],
|
| 73 |
-
value=DEFAULT_DATA,
|
| 74 |
datatype=["str", "number"],
|
| 75 |
row_count=7,
|
| 76 |
col_count=(2, 'fixed'),
|
| 77 |
-
label="Weekly Screen Time
|
| 78 |
)
|
| 79 |
|
| 80 |
with gr.Column(scale=1):
|
| 81 |
-
# New Survey Questions
|
| 82 |
gr.Markdown("### π§ Self-Reflection")
|
| 83 |
-
variety_slider = gr.Slider(1, 5, step=1, value=3, label="
|
| 84 |
-
fomo_slider = gr.Slider(1, 5, step=1, value=3, label="
|
| 85 |
|
| 86 |
-
analyze_btn = gr.Button("Analyze
|
| 87 |
|
| 88 |
with gr.Row():
|
| 89 |
-
output_text = gr.Markdown(label="
|
| 90 |
output_table = gr.Dataframe(label="Cultural Efficiency (Score per Minute)")
|
| 91 |
|
| 92 |
-
# Connect the inputs to the function
|
| 93 |
analyze_btn.click(
|
| 94 |
fn=analyze_user_data,
|
| 95 |
inputs=[input_table, variety_slider, fomo_slider],
|
| 96 |
outputs=[output_text, output_table]
|
| 97 |
)
|
| 98 |
|
| 99 |
-
# Launch with the required server binding
|
| 100 |
if __name__ == "__main__":
|
| 101 |
demo.launch(server_name="0.0.0.0", server_port=7860)
|
|
|
|
| 2 |
from cear_model import CEARModel
|
| 3 |
import pandas as pd
|
| 4 |
|
| 5 |
+
# Initialize Model
|
| 6 |
cear_analyzer = CEARModel()
|
| 7 |
|
| 8 |
+
# --- Configuration: Default Values ---
|
|
|
|
| 9 |
DEFAULT_DATA = [
|
| 10 |
["TikTok", 0],
|
| 11 |
["Instagram", 0],
|
|
|
|
| 16 |
["LinkedIn", 0]
|
| 17 |
]
|
| 18 |
|
| 19 |
+
def interpret_scores(c_score, a_risk, d_index):
    """
    Translate the three raw CEAR metrics into human-readable context.

    Args:
        c_score: Cultural Connectedness Score (higher = more plugged in).
        a_risk: Algorithmic Risk Score (higher = more algorithmic exposure).
        d_index: Platform Diversity Index (higher = time spread across more apps).

    Returns:
        dict: keys ``A_Status``/``A_Desc``, ``C_Status``/``C_Desc``,
        ``D_Status``/``D_Desc`` — a short status label plus a one-sentence
        explanation for each metric.

    NOTE(review): the leading characters in the label strings are copied
    verbatim from the source; they appear to be mojibake of emoji — confirm
    against the original file's encoding.
    """
    # Algorithmic Risk buckets: < 150 low, 150-499 moderate, >= 500 high.
    if a_risk < 150:
        risk_label = "π’ Low Risk"
        risk_note = "Your usage is within a safe range, likely minimizing algorithmic manipulation."
    elif a_risk < 500:
        risk_label = "π‘ Moderate Risk"
        risk_note = "You have significant exposure to algorithmic feeds. Be mindful of 'rabbit holes'."
    else:
        risk_label = "π΄ High Risk"
        risk_note = "Your attention is heavily concentrated in high-risk algorithmic environments."

    # Cultural Connectedness buckets: < 2.0 low, 2.0-4.99 balanced, >= 5.0 saturated.
    if c_score < 2.0:
        culture_label = "βͺ Low Exposure"
        culture_note = "You likely miss many viral trends, which protects focus but reduces cultural sync."
    elif c_score < 5.0:
        culture_label = "π΅ Balanced Exposure"
        culture_note = "You are plugged into main trends without being overwhelmed."
    else:
        culture_label = "π£ High Saturation"
        culture_note = "You are extremely plugged in. Ensure this doesn't lead to information overload."

    # Diversity buckets: < 1.5 concentrated, 1.5-2.99 moderate, >= 3.0 diverse.
    if d_index < 1.5:
        spread_label = "β οΈ Concentrated"
        spread_note = "Your time is spent almost entirely on one platform (Echo Chamber risk)."
    elif d_index < 3.0:
        spread_label = "βοΈ Moderate Spread"
        spread_note = "You split time across a few apps, offering some variety."
    else:
        spread_label = "π High Diversity"
        spread_note = "Your usage is well-distributed, reducing reliance on any single algorithm."

    return {
        "A_Status": risk_label, "A_Desc": risk_note,
        "C_Status": culture_label, "C_Desc": culture_note,
        "D_Status": spread_label, "D_Desc": spread_note,
    }
|
| 64 |
+
|
| 65 |
+
def analyze_user_data(input_table, variety_rating, fomo_rating):
    """
    Gradio click handler: score the screen-time table and render a report.

    Args:
        input_table: rows of ``[platform_name, minutes_per_week]`` from the
            input Dataframe widget.
        variety_rating: 1-5 self-reported content-variety slider value.
        fomo_rating: 1-5 self-reported FOMO slider value.

    Returns:
        tuple: (markdown summary string, per-platform efficiency DataFrame)
        matching the two output widgets wired up in the interface.
    """
    # Normalise the raw widget rows into a typed frame; non-numeric cells
    # (blanks, stray text) are coerced to 0 minutes instead of raising.
    frame = pd.DataFrame(input_table, columns=['platform_name', 'minutes_per_week'])
    frame['minutes_per_week'] = (
        pd.to_numeric(frame['minutes_per_week'], errors='coerce').fillna(0)
    )

    # Run the CEAR model, then map its raw numbers onto readable labels.
    raw_scores = cear_analyzer.calculate_scores(frame)
    context = interpret_scores(
        raw_scores['C_Score'], raw_scores['A_Risk'], raw_scores['D_Index']
    )

    # Rich Markdown report shown in the output panel.
    summary = f"""
## π Analysis Results

### 1. Algorithmic Risk: {context['A_Status']}
*Score: {raw_scores['A_Risk']:.0f}*
> {context['A_Desc']}

### 2. Cultural Connectedness: {context['C_Status']}
*Score: {raw_scores['C_Score']:.2f}*
> {context['C_Desc']}

### 3. Platform Diversity: {context['D_Status']}
*Index: {raw_scores['D_Index']:.2f}*
> {context['D_Desc']}

---
### π£οΈ Self-Reported Context
* **Variety:** {variety_rating}/5
* **FOMO:** {fomo_rating}/5
"""

    return summary, pd.DataFrame(raw_scores['Per_Platform_Efficiency'])
|
| 99 |
|
| 100 |
+
# --- Interface Definition ---
with gr.Blocks(title="CEAR Model", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# π§ Cultural Exposure & Algorithmic Risk (CEAR) Model")
    gr.Markdown("""
    **Instructions:**
    1. Enter your estimated **minutes per week** for each app (e.g., check your phone's Screen Time).
    2. Adjust the sliders for your personal experience.
    3. Click **Analyze** to see if your habits align with your goals.
    """)

    with gr.Row():
        # Left column: the editable screen-time table, pre-filled with
        # DEFAULT_DATA so users see the expected row shape on load.
        with gr.Column(scale=2):
            input_table = gr.Dataframe(
                headers=['platform_name', 'minutes_per_week'],
                value=DEFAULT_DATA,
                datatype=["str", "number"],
                row_count=7,
                col_count=(2, 'fixed'),
                label="Weekly Screen Time Inputs"
            )

        # Right column: the two self-report sliders feeding the report.
        with gr.Column(scale=1):
            gr.Markdown("### π§ Self-Reflection")
            variety_slider = gr.Slider(1, 5, step=1, value=3, label="Perceived Variety (1=Samey, 5=Diverse)")
            fomo_slider = gr.Slider(1, 5, step=1, value=3, label="FOMO Intensity (1=None, 5=High)")

            analyze_btn = gr.Button("π Analyze Risks", variant="primary")

    with gr.Row():
        output_text = gr.Markdown(label="Detailed Report")
        output_table = gr.Dataframe(label="Cultural Efficiency (Score per Minute)")

    # Wire the button to the analysis callback: table + sliders in,
    # markdown report + efficiency table out.
    analyze_btn.click(
        fn=analyze_user_data,
        inputs=[input_table, variety_slider, fomo_slider],
        outputs=[output_text, output_table]
    )
|
| 137 |
|
|
|
|
| 138 |
# Launch directly only when run as a script; bind to 0.0.0.0 so the app is
# reachable from outside a container, on the fixed port 7860.
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
|