Saurabh502 committed
Commit d659b9e · verified · Parent(s): cca7cbb

Create app.py

Files changed (1): app.py (+234, -0)

app.py ADDED

import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
import plotly.graph_objects as go

# Set page config
st.set_page_config(
    page_title="LLM Evaluation Framework",
    page_icon="🤖",
    layout="wide"
)

# Title and description
st.title("🤖 LLM Quantitative Evaluation Framework")
st.markdown("Data-driven decision making for Large Language Model selection")

# Model data
models_data = {
    "Model": ["GPT-4 Turbo", "Claude 3 Opus", "Claude 3 Sonnet", "Gemini Pro", "Llama 2 70B", "Mistral 7B"],
    "Provider": ["OpenAI", "Anthropic", "Anthropic", "Google", "Meta", "Mistral AI"],
    "Open Source": [False, False, False, False, True, True],
    "Parameters (B)": [1700, 500, 200, 340, 70, 7],
    "Context Length (K)": [128, 200, 200, 32, 4, 8],
    "Input Cost ($/1K tokens)": [0.01, 0.015, 0.003, 0.0005, 0.0007, 0.0002],
    "Output Cost ($/1K tokens)": [0.03, 0.075, 0.015, 0.0015, 0.0009, 0.0002],
    "Speed (tokens/s)": [40, 35, 45, 50, 30, 60],
    "Latency (s)": [2.5, 3.0, 2.0, 1.8, 4.0, 1.5],
    "Uptime (%)": [99.9, 99.8, 99.8, 99.9, 95.0, 94.0],
    "Rate Limit (req/min)": [500, 400, 600, 1000, 200, 100],
    "Knowledge Cutoff": ["2023-04", "2023-08", "2023-08", "2023-11", "2023-07", "2023-09"]
}

df = pd.DataFrame(models_data)
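# NOTE: Parameter counts for the closed models above (GPT-4 Turbo, Claude 3,
# Gemini Pro) are public estimates rather than officially disclosed figures,
# and the prices/speeds are point-in-time snapshots; treat this table as
# illustrative input data, not a maintained benchmark.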

# Sidebar for weights
st.sidebar.header("🎯 Evaluation Criteria Weights")
st.sidebar.markdown("Adjust the importance of each factor (total should equal 100%)")

weights = {}
weights['performance'] = st.sidebar.slider("Performance", 0, 50, 25)
weights['cost'] = st.sidebar.slider("Cost Efficiency", 0, 50, 25)
weights['speed'] = st.sidebar.slider("Speed", 0, 50, 20)
weights['reliability'] = st.sidebar.slider("Reliability", 0, 50, 15)
weights['compliance'] = st.sidebar.slider("Compliance/Open Source", 0, 50, 10)
weights['integration'] = st.sidebar.slider("Integration Ease", 0, 50, 5)

total_weights = sum(weights.values())
st.sidebar.write(f"**Total: {total_weights}%**")
if total_weights != 100:
    st.sidebar.warning("⚠️ Weights should total 100%")
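# A warning (rather than a hard stop) is enough here: the overall score below
# is rescaled by 100 / total_weights, so any nonzero total still produces
# scores on a 0-100 scale.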

# Usage scenario
st.sidebar.header("📊 Usage Scenario")
monthly_requests = st.sidebar.number_input("Monthly Requests", value=100000, step=10000)
avg_input_tokens = st.sidebar.number_input("Avg Input Tokens", value=500, step=50)
avg_output_tokens = st.sidebar.number_input("Avg Output Tokens", value=200, step=50)

# Scoring functions
def calculate_performance_score(row):
    param_score = min((row['Parameters (B)'] / 1700) * 100, 100)
    context_score = min((row['Context Length (K)'] / 200) * 100, 100)
    freshness_score = 100 if row['Knowledge Cutoff'] >= "2023-08" else 70
    return param_score * 0.4 + context_score * 0.4 + freshness_score * 0.2
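# Worked example (Claude 3 Opus row): param_score = min(500/1700*100, 100) ≈ 29.4,
# context_score = min(200/200*100, 100) = 100, and "2023-08" >= "2023-08" gives
# freshness_score = 100, so the total is 29.4*0.4 + 100*0.4 + 100*0.2 ≈ 71.8.
# The lexicographic string comparison on the cutoff works only because the
# dates are zero-padded "YYYY-MM" strings.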

def calculate_cost_score(row):
    monthly_cost = monthly_requests * (
        (avg_input_tokens / 1000) * row['Input Cost ($/1K tokens)'] +
        (avg_output_tokens / 1000) * row['Output Cost ($/1K tokens)']
    )
    max_cost = 5000
    return max(0, 100 - (monthly_cost / max_cost) * 100)
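# Worked example with the default scenario (100,000 requests, 500 input and
# 200 output tokens): GPT-4 Turbo costs 100000 * (0.5*0.01 + 0.2*0.03) = $1,100
# per month and scores 100 - 1100/5000*100 = 78; Mistral 7B costs
# 100000 * (0.5*0.0002 + 0.2*0.0002) = $14 and scores ≈ 99.7. Any model above
# the $5,000/month baseline is clamped to 0.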

def calculate_speed_score(row):
    speed_score = (row['Speed (tokens/s)'] / 60) * 50
    latency_score = max(0, 50 - (row['Latency (s)'] / 5) * 50)
    return speed_score + latency_score
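# Worked example (Llama 2 70B row): speed_score = 30/60*50 = 25 and
# latency_score = 50 - 4.0/5*50 = 10, for a total of 35. Throughput is
# normalized to the fastest model in the table (60 tok/s) and latency to a
# 5 s worst case.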

def calculate_reliability_score(row):
    uptime_score = (row['Uptime (%)'] / 100) * 60
    rate_limit_score = min((row['Rate Limit (req/min)'] / 1000) * 40, 40)
    return uptime_score + rate_limit_score

def calculate_compliance_score(row):
    open_source_bonus = 40 if row['Open Source'] else 0
    return open_source_bonus + 60
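# With the flat 60-point license baseline, every closed model scores exactly 60
# and every open-source model exactly 100 on compliance; only the open-source
# flag differentiates the rows.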

def calculate_integration_score(row):
    api_score = 70 if not row['Open Source'] else 30
    support_score = 30 if row['Provider'] in ["OpenAI", "Google"] else 20
    return min(api_score + support_score, 100)
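# The assumption encoded here is that closed models ship with a managed API
# (70 points) while open-source models require self-hosting (30 points), and
# that OpenAI and Google offer the strongest support (30 vs. 20 points), so
# scores range from 50 (open source, smaller provider) to 100.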

# Calculate scores
df['Performance Score'] = df.apply(calculate_performance_score, axis=1)
df['Cost Score'] = df.apply(calculate_cost_score, axis=1)
df['Speed Score'] = df.apply(calculate_speed_score, axis=1)
df['Reliability Score'] = df.apply(calculate_reliability_score, axis=1)
df['Compliance Score'] = df.apply(calculate_compliance_score, axis=1)
df['Integration Score'] = df.apply(calculate_integration_score, axis=1)

# Calculate weighted overall score
if total_weights > 0:
    df['Overall Score'] = (
        df['Performance Score'] * weights['performance'] / 100 +
        df['Cost Score'] * weights['cost'] / 100 +
        df['Speed Score'] * weights['speed'] / 100 +
        df['Reliability Score'] * weights['reliability'] / 100 +
        df['Compliance Score'] * weights['compliance'] / 100 +
        df['Integration Score'] * weights['integration'] / 100
    ) * (100 / total_weights)
else:
    df['Overall Score'] = 0
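# The trailing factor 100 / total_weights renormalizes the weighted sum: if the
# sliders total 80 instead of 100, each weighted contribution is scaled by 1.25,
# which is equivalent to using the weights as proportions of their own total.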

# Sort by overall score
df_sorted = df.sort_values('Overall Score', ascending=False).reset_index(drop=True)

# Calculate monthly costs
df_sorted['Monthly Cost ($)'] = monthly_requests * (
    (avg_input_tokens / 1000) * df_sorted['Input Cost ($/1K tokens)'] +
    (avg_output_tokens / 1000) * df_sorted['Output Cost ($/1K tokens)']
)

# Main content area
col1, col2 = st.columns([2, 1])

with col1:
    st.header("🏆 Model Rankings")

    # Display top 3 models with medals
    medals = ["🥇", "🥈", "🥉"]
    for i in range(min(3, len(df_sorted))):
        with st.container():
            st.markdown(f"""
            <div style="border: 2px solid {'gold' if i==0 else 'silver' if i==1 else '#CD7F32'};
                        border-radius: 10px; padding: 15px; margin: 10px 0;
                        background-color: {'#FFF8DC' if i==0 else '#F8F8FF' if i==1 else '#FDF5E6'}">
                <h3>{medals[i]} {df_sorted.iloc[i]['Model']} - {df_sorted.iloc[i]['Provider']}</h3>
                <p><strong>Overall Score: {df_sorted.iloc[i]['Overall Score']:.1f}/100</strong></p>
                <p>Monthly Cost: ${df_sorted.iloc[i]['Monthly Cost ($)']:.2f} |
                   Parameters: {df_sorted.iloc[i]['Parameters (B)']}B |
                   Context: {df_sorted.iloc[i]['Context Length (K)']}K tokens</p>
            </div>
            """, unsafe_allow_html=True)

with col2:
    st.header("💰 Cost Analysis")

    # Cost comparison chart
    fig_cost = px.bar(
        df_sorted,
        x='Monthly Cost ($)',
        y='Model',
        orientation='h',
        title="Monthly Cost Comparison",
        color='Monthly Cost ($)',
        color_continuous_scale='RdYlGn_r'
    )
    fig_cost.update_layout(height=400)
    st.plotly_chart(fig_cost, use_container_width=True)

# Detailed comparison table
st.header("📊 Detailed Comparison")
display_cols = ['Model', 'Provider', 'Overall Score', 'Monthly Cost ($)',
                'Performance Score', 'Cost Score', 'Speed Score',
                'Reliability Score', 'Compliance Score', 'Integration Score']
st.dataframe(df_sorted[display_cols].round(1), use_container_width=True)

# Radar chart for top 3 models
st.header("🎯 Multi-Dimensional Analysis")
categories = ['Performance', 'Cost', 'Speed', 'Reliability', 'Compliance', 'Integration']

fig_radar = go.Figure()

colors = ['gold', 'silver', '#CD7F32']
for i in range(min(3, len(df_sorted))):
    model = df_sorted.iloc[i]
    values = [
        model['Performance Score'],
        model['Cost Score'],
        model['Speed Score'],
        model['Reliability Score'],
        model['Compliance Score'],
        model['Integration Score']
    ]

    fig_radar.add_trace(go.Scatterpolar(
        r=values,
        theta=categories,
        fill='toself',
        name=model['Model'],
        line_color=colors[i]
    ))

fig_radar.update_layout(
    polar=dict(
        radialaxis=dict(
            visible=True,
            range=[0, 100]
        )),
    showlegend=True,
    title="Top 3 Models - Multi-Dimensional Comparison"
)

st.plotly_chart(fig_radar, use_container_width=True)

# Methodology
st.header("🔬 Scoring Methodology")
st.markdown("""
**Performance Score (0-100):**
- Parameters: 40% weight (normalized to GPT-4's estimated 1.7T)
- Context Length: 40% weight (normalized to 200K tokens)
- Knowledge Freshness: 20% weight (cutoff of Aug 2023 or later = 100, else 70)

**Cost Efficiency Score (0-100):**
- Based on total monthly cost for your usage scenario
- Normalized against a $5,000/month baseline
- Higher score = lower cost

**Speed Score (0-100):**
- Tokens/second: 50% weight (normalized to 60 tok/s)
- Latency (inverse): 50% weight (normalized to a 5 s maximum)

**Reliability Score (0-100):**
- Uptime percentage: 60% weight
- Rate limits: 40% weight (normalized to 1,000 req/min)

**Compliance Score (0-100):**
- Open-source availability: 40 points
- License/usage baseline: 60 points (currently granted to all models)

**Integration Score (0-100):**
- API availability: 70 points (closed source) or 30 points (open source)
- Provider support quality: 30 points (OpenAI, Google) or 20 points (others)
""")