entropy25 commited on
Commit
0056f17
·
verified ·
1 Parent(s): 7959abc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +340 -196
app.py CHANGED
@@ -4,8 +4,8 @@ import numpy as np
4
  import plotly.express as px
5
  import plotly.graph_objects as go
6
  from datetime import datetime
7
- import google.generativeai as genai
8
  import json
 
9
 
10
  # Page config
11
  st.set_page_config(
@@ -14,72 +14,137 @@ st.set_page_config(
14
  layout="wide"
15
  )
16
 
17
- # Initialize Gemini
18
  @st.cache_resource
19
  def init_gemini():
20
- api_key = st.secrets.get("GOOGLE_API_KEY", "")
21
- if api_key:
22
- genai.configure(api_key=api_key)
23
- return genai.GenerativeModel('gemini-1.5-flash')
24
- return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
  # Data processing functions
27
  @st.cache_data
28
  def process_data(df):
29
  """Process and analyze production data"""
30
- df['date'] = pd.to_datetime(df['date'], format='%m/%d/%Y')
31
- df['day_of_week'] = df['date'].dt.day_name()
32
- df['week'] = df['date'].dt.isocalendar().week
33
- df['month'] = df['date'].dt.month
34
- df['is_weekend'] = df['day_of_week'].isin(['Saturday', 'Sunday'])
35
- return df
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
  def generate_summary(df):
38
  """Generate summary statistics"""
39
- total_production = df['weight_kg'].sum()
40
- total_items = len(df)
41
- daily_avg = df.groupby('date')['weight_kg'].sum().mean()
42
-
43
- summary = {
44
- 'total_production': total_production,
45
- 'total_items': total_items,
46
- 'daily_avg': daily_avg,
47
- 'date_range': f"{df['date'].min().strftime('%Y-%m-%d')} to {df['date'].max().strftime('%Y-%m-%d')}",
48
- 'production_days': df['date'].nunique()
49
- }
50
-
51
- # Material breakdown
52
- material_stats = {}
53
- for material in df['material_type'].unique():
54
- mat_data = df[df['material_type'] == material]
55
- material_stats[material] = {
56
- 'total': mat_data['weight_kg'].sum(),
57
- 'percentage': mat_data['weight_kg'].sum() / total_production * 100,
58
- 'count': len(mat_data)
59
  }
60
-
61
- summary['materials'] = material_stats
62
- return summary
 
 
 
 
 
 
 
 
 
 
 
 
 
63
 
64
  def detect_anomalies(df):
65
  """Detect production anomalies"""
66
  anomalies = {}
67
- for material in df['material_type'].unique():
68
- mat_data = df[df['material_type'] == material]
69
- Q1 = mat_data['weight_kg'].quantile(0.25)
70
- Q3 = mat_data['weight_kg'].quantile(0.75)
71
- IQR = Q3 - Q1
72
- lower_bound = Q1 - 1.5 * IQR
73
- upper_bound = Q3 + 1.5 * IQR
74
-
75
- outliers = mat_data[(mat_data['weight_kg'] < lower_bound) |
76
- (mat_data['weight_kg'] > upper_bound)]
77
-
78
- anomalies[material] = {
79
- 'count': len(outliers),
80
- 'normal_range': f"{lower_bound:.1f} - {upper_bound:.1f} kg",
81
- 'dates': outliers['date'].dt.strftime('%Y-%m-%d').tolist()[:5]
82
- }
 
 
 
 
83
 
84
  return anomalies
85
 
@@ -87,67 +152,116 @@ def create_plots(df):
87
  """Create all visualization plots"""
88
  plots = {}
89
 
90
- # Daily production trend
91
- daily_total = df.groupby('date')['weight_kg'].sum().reset_index()
92
- plots['overview'] = px.line(
93
- daily_total, x='date', y='weight_kg',
94
- title='Daily Production Trend',
95
- labels={'weight_kg': 'Total Weight (kg)', 'date': 'Date'}
96
- )
97
-
98
- # Material comparison
99
- daily_by_material = df.groupby(['date', 'material_type'])['weight_kg'].sum().reset_index()
100
- plots['materials'] = px.line(
101
- daily_by_material, x='date', y='weight_kg', color='material_type',
102
- title='Production by Material Type'
103
- )
104
-
105
- # Weekly pattern
106
- weekly_pattern = df.groupby(['day_of_week', 'material_type'])['weight_kg'].mean().reset_index()
107
- day_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
108
- weekly_pattern['day_of_week'] = pd.Categorical(weekly_pattern['day_of_week'], categories=day_order, ordered=True)
109
- weekly_pattern = weekly_pattern.sort_values('day_of_week')
110
-
111
- plots['weekly'] = px.bar(
112
- weekly_pattern, x='day_of_week', y='weight_kg', color='material_type',
113
- title='Weekly Production Pattern'
114
- )
115
-
116
- # Correlation matrix
117
- daily_pivot = df.groupby(['date', 'material_type'])['weight_kg'].sum().unstack(fill_value=0)
118
- if len(daily_pivot.columns) > 1:
119
- corr_matrix = daily_pivot.corr()
120
- plots['correlation'] = px.imshow(
121
- corr_matrix, title='Material Type Correlation Matrix',
122
- color_continuous_scale='RdBu'
123
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
124
 
125
  return plots
126
 
127
  def query_llm(model, data_summary, user_question):
128
  """Query Gemini with production data context"""
129
- context = f"""
130
- You are a production data analyst. Here's the current production data summary:
131
-
132
- Production Overview:
133
- - Total Production: {data_summary['total_production']:,.0f} kg
134
- - Production Period: {data_summary['date_range']}
135
- - Daily Average: {data_summary['daily_avg']:,.0f} kg
136
- - Production Days: {data_summary['production_days']}
137
-
138
- Material Breakdown:
139
- """
140
-
141
- for material, stats in data_summary['materials'].items():
142
- context += f"- {material.title()}: {stats['total']:,.0f} kg ({stats['percentage']:.1f}%)\n"
143
-
144
- context += f"\nUser Question: {user_question}\n\nPlease provide a concise, data-driven answer based on this production data."
145
 
146
  try:
147
- response = model.generate_content(context)
148
- return response.text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
  except Exception as e:
150
- return f"Error querying AI: {str(e)}"
 
151
 
152
  # Main app
153
  def main():
@@ -165,105 +279,135 @@ def main():
165
  if model:
166
  st.success("🤖 AI Assistant Ready")
167
  else:
168
- st.warning("⚠️ AI Assistant unavailable (API key needed)")
 
 
 
 
 
 
 
 
 
 
 
 
 
169
 
170
  if uploaded_file is not None:
171
  # Load and process data
172
- try:
173
- df = pd.read_csv(uploaded_file, sep='\t')
174
- df = process_data(df)
175
-
176
- # Generate analysis
177
- summary = generate_summary(df)
178
- anomalies = detect_anomalies(df)
179
- plots = create_plots(df)
180
-
181
- # Display results
182
- col1, col2, col3, col4 = st.columns(4)
183
-
184
- with col1:
185
- st.metric("Total Production", f"{summary['total_production']:,.0f} kg")
186
- with col2:
187
- st.metric("Daily Average", f"{summary['daily_avg']:,.0f} kg")
188
- with col3:
189
- st.metric("Production Days", summary['production_days'])
190
- with col4:
191
- st.metric("Material Types", len(summary['materials']))
192
-
193
- # Charts
194
- st.subheader("📈 Production Trends")
195
- col1, col2 = st.columns(2)
196
-
197
- with col1:
198
- st.plotly_chart(plots['overview'], use_container_width=True)
199
- with col2:
200
- st.plotly_chart(plots['materials'], use_container_width=True)
201
-
202
- col3, col4 = st.columns(2)
203
- with col3:
204
- st.plotly_chart(plots['weekly'], use_container_width=True)
205
- with col4:
206
- if 'correlation' in plots:
207
- st.plotly_chart(plots['correlation'], use_container_width=True)
208
-
209
- # Material breakdown
210
- st.subheader("📋 Material Analysis")
211
- for material, stats in summary['materials'].items():
212
- with st.expander(f"{material.title()} - {stats['total']:,.0f} kg ({stats['percentage']:.1f}%)"):
213
- col1, col2, col3 = st.columns(3)
214
- with col1:
215
- st.metric("Total Weight", f"{stats['total']:,.0f} kg")
216
- with col2:
217
- st.metric("Percentage", f"{stats['percentage']:.1f}%")
218
- with col3:
219
- st.metric("Records", stats['count'])
220
-
221
- # Anomaly detection
222
- st.subheader("⚠️ Anomaly Detection")
223
- for material, anom in anomalies.items():
224
- if anom['count'] > 0:
225
- st.warning(f"**{material.title()}**: {anom['count']} anomalies detected")
226
- st.caption(f"Normal range: {anom['normal_range']}")
227
- if anom['dates']:
228
- st.caption(f"Recent anomaly dates: {', '.join(anom['dates'])}")
229
- else:
230
- st.success(f"**{material.title()}**: No anomalies detected")
231
-
232
- # AI Chat Interface
233
- if model:
234
- st.subheader("🤖 AI Production Assistant")
235
 
236
- # Predefined questions
237
- st.markdown("**Quick Questions:**")
238
- quick_questions = [
239
- "What are the key production trends?",
240
- "Which material type shows the best consistency?",
241
- "Are there any concerning patterns in the data?",
242
- "What recommendations do you have for optimization?"
243
- ]
244
 
245
- cols = st.columns(2)
246
- for i, question in enumerate(quick_questions):
247
- with cols[i % 2]:
248
- if st.button(question, key=f"q_{i}"):
249
- with st.spinner("AI analyzing..."):
250
- answer = query_llm(model, summary, question)
251
- st.success(f"**Q:** {question}")
252
- st.write(f"**A:** {answer}")
253
 
254
- # Custom question
255
- st.markdown("**Ask a Custom Question:**")
256
- user_question = st.text_input("Your question about the production data:")
 
 
 
 
257
 
258
- if user_question and st.button("Get AI Answer"):
259
- with st.spinner("AI analyzing..."):
260
- answer = query_llm(model, summary, user_question)
261
- st.success(f"**Q:** {user_question}")
262
- st.write(f"**A:** {answer}")
263
-
264
- except Exception as e:
265
- st.error(f"Error processing file: {str(e)}")
266
- st.info("Please ensure your CSV file has columns: date, weight_kg, material_type")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
267
 
268
  else:
269
  st.info("👆 Please upload a CSV file to begin analysis")
 
4
  import plotly.express as px
5
  import plotly.graph_objects as go
6
  from datetime import datetime
 
7
  import json
8
+ import os
9
 
10
  # Page config
11
  st.set_page_config(
 
14
  layout="wide"
15
  )
16
 
17
# Initialize Gemini with better error handling
@st.cache_resource
def init_gemini():
    """Create and verify a Gemini model, or return None when unavailable.

    Looks for GOOGLE_API_KEY in Streamlit secrets first, then in the
    environment.  A successful lookup is followed by a one-shot test
    query so a bad or revoked key is reported here instead of failing
    later inside the chat UI.

    Returns:
        A configured ``genai.GenerativeModel`` instance, or ``None`` when
        the library is missing, no key is found, or the test query fails.
    """
    try:
        # Imported lazily so the rest of the app still works when the
        # google-generativeai package is not installed.
        import google.generativeai as genai

        api_key = None

        # Method 1: Streamlit secrets.  st.secrets itself may raise when
        # no secrets.toml exists at all, hence the guard.
        try:
            api_key = st.secrets.get("GOOGLE_API_KEY", "")
        except Exception:
            pass

        # Method 2: environment variable fallback.
        # (The former "Method 3" — st.secrets["GOOGLE_API_KEY"] — was a
        # redundant duplicate of Method 1 and has been removed.)
        if not api_key:
            api_key = os.environ.get("GOOGLE_API_KEY", "")

        if api_key and api_key.strip():
            genai.configure(api_key=api_key.strip())
            model = genai.GenerativeModel('gemini-1.5-flash')

            # Test the model with a simple query to verify the key works.
            # NOTE: this spends one request of quota per cache fill.
            try:
                test_response = model.generate_content("Hello, respond with 'API working'")
                if test_response and test_response.text:
                    return model
            except Exception as e:
                st.warning(f"API key provided but model test failed: {str(e)}")
                return None

        return None
    except ImportError:
        st.error("Google Generative AI library not installed")
        return None
    except Exception as e:
        st.warning(f"Error initializing Gemini: {str(e)}")
        return None
63
 
64
  # Data processing functions
65
  @st.cache_data
66
def process_data(df):
    """Process and analyze production data.

    Parses the ``date`` column (several common explicit formats are
    attempted before falling back to pandas' flexible parser), then
    derives ``day_of_week`` / ``week`` / ``month`` / ``is_weekend``
    features used by the charts.

    Args:
        df: Raw production frame; expected to carry a ``date`` column.

    Returns:
        The enriched frame; on failure the frame is returned after
        reporting the error in the Streamlit UI.
    """
    try:
        if 'date' in df.columns:
            # Try a fixed list of explicit formats first — an explicit
            # match is unambiguous, unlike dateutil's guessing.
            for date_format in ['%m/%d/%Y', '%Y-%m-%d', '%d/%m/%Y', '%m-%d-%Y']:
                try:
                    df['date'] = pd.to_datetime(df['date'], format=date_format)
                    break
                # Narrowed from a bare `except:` — only parse failures
                # should move us on to the next candidate format.
                except (ValueError, TypeError):
                    continue

            # Last resort: pandas' automatic parsing; unparseable cells
            # become NaT instead of raising.
            if df['date'].dtype == 'object':
                df['date'] = pd.to_datetime(df['date'], errors='coerce')

        # Time-based features consumed by the weekly/monthly views.
        df['day_of_week'] = df['date'].dt.day_name()
        df['week'] = df['date'].dt.isocalendar().week
        df['month'] = df['date'].dt.month
        df['is_weekend'] = df['day_of_week'].isin(['Saturday', 'Sunday'])

        return df
    except Exception as e:
        st.error(f"Error processing data: {str(e)}")
        return df
93
 
94
def generate_summary(df):
    """Build headline and per-material summary statistics.

    Args:
        df: Processed frame with ``date``, ``weight_kg`` and
            ``material_type`` columns.

    Returns:
        Dict with total_production, total_items, daily_avg, date_range,
        production_days and a ``materials`` sub-dict keyed by material in
        first-appearance order; ``{}`` when anything goes wrong.
    """
    try:
        # Headline totals.
        total = df['weight_kg'].sum()
        per_day = df.groupby('date')['weight_kg'].sum()
        first_day = df['date'].min().strftime('%Y-%m-%d')
        last_day = df['date'].max().strftime('%Y-%m-%d')

        summary = {
            'total_production': total,
            'total_items': len(df),
            'daily_avg': per_day.mean(),
            'date_range': f"{first_day} to {last_day}",
            'production_days': df['date'].nunique(),
        }

        # Per-material breakdown, keyed in first-appearance order.
        material_stats = {}
        for mat in df['material_type'].unique():
            weights = df.loc[df['material_type'] == mat, 'weight_kg']
            material_stats[mat] = {
                'total': weights.sum(),
                'percentage': weights.sum() / total * 100,
                'count': weights.size,
            }

        summary['materials'] = material_stats
        return summary
    except Exception as e:
        st.error(f"Error generating summary: {str(e)}")
        return {}
124
 
125
def detect_anomalies(df):
    """Flag per-material weight outliers via the 1.5*IQR fence.

    Returns a dict keyed by material with the outlier count, the normal
    range as a display string, and up to five outlier dates.
    """
    anomalies = {}
    try:
        # groupby skips NaN materials, matching the old empty-mask guard.
        for material, group in df.groupby('material_type', sort=False):
            weights = group['weight_kg']
            q1 = weights.quantile(0.25)
            q3 = weights.quantile(0.75)
            spread = q3 - q1
            low = q1 - 1.5 * spread
            high = q3 + 1.5 * spread

            # between() is inclusive, so its negation is exactly
            # "below the lower fence or above the upper fence".
            outliers = group[~weights.between(low, high)]

            anomalies[material] = {
                'count': len(outliers),
                'normal_range': f"{low:.1f} - {high:.1f} kg",
                'dates': outliers['date'].dt.strftime('%Y-%m-%d').tolist()[:5],
            }
    except Exception as e:
        st.error(f"Error detecting anomalies: {str(e)}")

    return anomalies
150
 
 
152
  """Create all visualization plots"""
153
  plots = {}
154
 
155
+ try:
156
+ # Daily production trend
157
+ daily_total = df.groupby('date')['weight_kg'].sum().reset_index()
158
+ plots['overview'] = px.line(
159
+ daily_total, x='date', y='weight_kg',
160
+ title='Daily Production Trend',
161
+ labels={'weight_kg': 'Total Weight (kg)', 'date': 'Date'}
162
+ )
163
+
164
+ # Material comparison
165
+ daily_by_material = df.groupby(['date', 'material_type'])['weight_kg'].sum().reset_index()
166
+ plots['materials'] = px.line(
167
+ daily_by_material, x='date', y='weight_kg', color='material_type',
168
+ title='Production by Material Type'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
169
  )
170
+
171
+ # Weekly pattern
172
+ weekly_pattern = df.groupby(['day_of_week', 'material_type'])['weight_kg'].mean().reset_index()
173
+ day_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
174
+ weekly_pattern['day_of_week'] = pd.Categorical(weekly_pattern['day_of_week'], categories=day_order, ordered=True)
175
+ weekly_pattern = weekly_pattern.sort_values('day_of_week')
176
+
177
+ plots['weekly'] = px.bar(
178
+ weekly_pattern, x='day_of_week', y='weight_kg', color='material_type',
179
+ title='Weekly Production Pattern'
180
+ )
181
+
182
+ # Correlation matrix
183
+ daily_pivot = df.groupby(['date', 'material_type'])['weight_kg'].sum().unstack(fill_value=0)
184
+ if len(daily_pivot.columns) > 1:
185
+ corr_matrix = daily_pivot.corr()
186
+ plots['correlation'] = px.imshow(
187
+ corr_matrix, title='Material Type Correlation Matrix',
188
+ color_continuous_scale='RdBu'
189
+ )
190
+ except Exception as e:
191
+ st.error(f"Error creating plots: {str(e)}")
192
 
193
  return plots
194
 
195
def query_llm(model, data_summary, user_question):
    """Query Gemini with production data context.

    Args:
        model: Object exposing ``generate_content`` (or falsy when the
            assistant is unavailable).
        data_summary: Dict produced by generate_summary().
        user_question: Free-text question from the user.

    Returns:
        The model's answer text, or a human-readable error string.
    """
    # Graceful degradation when the model could not be initialized.
    if not model:
        return "AI Assistant is not available. Please check API configuration."

    try:
        # Assemble the prompt: overview block, material breakdown, question.
        context = f"""
    You are a production data analyst. Here's the current production data summary:
    
    Production Overview:
    - Total Production: {data_summary['total_production']:,.0f} kg
    - Production Period: {data_summary['date_range']}
    - Daily Average: {data_summary['daily_avg']:,.0f} kg
    - Production Days: {data_summary['production_days']}
    
    Material Breakdown:
    """

        breakdown = "".join(
            f"- {material.title()}: {stats['total']:,.0f} kg ({stats['percentage']:.1f}%)\n"
            for material, stats in data_summary['materials'].items()
        )
        prompt = (
            context
            + breakdown
            + f"\nUser Question: {user_question}\n\nPlease provide a concise, data-driven answer based on this production data."
        )

        # Low temperature + capped output keeps answers short and factual.
        response = model.generate_content(
            prompt,
            generation_config={
                'temperature': 0.1,
                'top_p': 0.8,
                'top_k': 40,
                'max_output_tokens': 1024,
            },
        )

        if response and response.text:
            return response.text
        return "No response received from AI model."

    except Exception as e:
        # Map well-known failure modes onto friendlier messages; the
        # 403/forbidden check deliberately takes precedence over quota.
        msg = str(e).lower()
        if "403" in msg or "forbidden" in msg:
            return "AI service access denied. Please check your API key permissions and quota."
        if "quota" in msg:
            return "AI service quota exceeded. Please try again later."
        if "timeout" in msg:
            return "AI service timeout. Please try again."
        return f"AI service error: {str(e)}"
244
+
245
# Load data with better error handling
def load_data(uploaded_file):
    """Load data with robust error handling.

    Tries tab/comma/semicolon separators crossed with common encodings
    until a read yields at least the three expected columns.

    BUG FIX: the uploaded file object must be rewound before every
    attempt — without seek(0) the first failed read leaves the stream
    at EOF and every subsequent attempt (including the fallback)
    silently reads nothing.

    Args:
        uploaded_file: File-like object from st.file_uploader (or a path).

    Returns:
        A DataFrame, or None when the file cannot be parsed.
    """
    try:
        for sep in ['\t', ',', ';']:
            for encoding in ['utf-8', 'latin-1', 'cp1252']:
                try:
                    # Rewind so each attempt sees the whole file.
                    if hasattr(uploaded_file, 'seek'):
                        uploaded_file.seek(0)
                    df = pd.read_csv(uploaded_file, sep=sep, encoding=encoding)
                    if len(df.columns) >= 3:  # Minimum expected columns
                        return df
                except Exception:
                    continue

        # If all attempts fail, try with pandas' default settings.
        if hasattr(uploaded_file, 'seek'):
            uploaded_file.seek(0)
        return pd.read_csv(uploaded_file)

    except Exception as e:
        st.error(f"Error loading file: {str(e)}")
        return None
265
 
266
  # Main app
267
  def main():
 
279
  if model:
280
  st.success("🤖 AI Assistant Ready")
281
  else:
282
+ st.warning("⚠️ AI Assistant unavailable")
283
+ with st.expander("API Configuration Help"):
284
+ st.markdown("""
285
+ To enable AI features:
286
+ 1. Get a Google AI API key from https://ai.google.dev/
287
+ 2. Add it to your Streamlit secrets as `GOOGLE_API_KEY`
288
+ 3. Or set it as environment variable `GOOGLE_API_KEY`
289
+
290
+ Common 403 errors:
291
+ - Invalid API key
292
+ - API key lacks permissions
293
+ - Quota exceeded
294
+ - Service not enabled
295
+ """)
296
 
297
  if uploaded_file is not None:
298
  # Load and process data
299
+ df = load_data(uploaded_file)
300
+
301
+ if df is not None:
302
+ try:
303
+ df = process_data(df)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
304
 
305
+ # Validate required columns
306
+ required_cols = ['date', 'weight_kg', 'material_type']
307
+ missing_cols = [col for col in required_cols if col not in df.columns]
 
 
 
 
 
308
 
309
+ if missing_cols:
310
+ st.error(f"Missing required columns: {missing_cols}")
311
+ st.info("Available columns: " + ", ".join(df.columns.tolist()))
312
+ return
 
 
 
 
313
 
314
+ # Generate analysis
315
+ summary = generate_summary(df)
316
+ if not summary:
317
+ return
318
+
319
+ anomalies = detect_anomalies(df)
320
+ plots = create_plots(df)
321
 
322
+ # Display results
323
+ col1, col2, col3, col4 = st.columns(4)
324
+
325
+ with col1:
326
+ st.metric("Total Production", f"{summary['total_production']:,.0f} kg")
327
+ with col2:
328
+ st.metric("Daily Average", f"{summary['daily_avg']:,.0f} kg")
329
+ with col3:
330
+ st.metric("Production Days", summary['production_days'])
331
+ with col4:
332
+ st.metric("Material Types", len(summary['materials']))
333
+
334
+ # Charts
335
+ st.subheader("📈 Production Trends")
336
+ col1, col2 = st.columns(2)
337
+
338
+ with col1:
339
+ if 'overview' in plots:
340
+ st.plotly_chart(plots['overview'], use_container_width=True)
341
+ with col2:
342
+ if 'materials' in plots:
343
+ st.plotly_chart(plots['materials'], use_container_width=True)
344
+
345
+ col3, col4 = st.columns(2)
346
+ with col3:
347
+ if 'weekly' in plots:
348
+ st.plotly_chart(plots['weekly'], use_container_width=True)
349
+ with col4:
350
+ if 'correlation' in plots:
351
+ st.plotly_chart(plots['correlation'], use_container_width=True)
352
+
353
+ # Material breakdown
354
+ st.subheader("📋 Material Analysis")
355
+ for material, stats in summary['materials'].items():
356
+ with st.expander(f"{material.title()} - {stats['total']:,.0f} kg ({stats['percentage']:.1f}%)"):
357
+ col1, col2, col3 = st.columns(3)
358
+ with col1:
359
+ st.metric("Total Weight", f"{stats['total']:,.0f} kg")
360
+ with col2:
361
+ st.metric("Percentage", f"{stats['percentage']:.1f}%")
362
+ with col3:
363
+ st.metric("Records", stats['count'])
364
+
365
+ # Anomaly detection
366
+ st.subheader("⚠️ Anomaly Detection")
367
+ for material, anom in anomalies.items():
368
+ if anom['count'] > 0:
369
+ st.warning(f"**{material.title()}**: {anom['count']} anomalies detected")
370
+ st.caption(f"Normal range: {anom['normal_range']}")
371
+ if anom['dates']:
372
+ st.caption(f"Recent anomaly dates: {', '.join(anom['dates'])}")
373
+ else:
374
+ st.success(f"**{material.title()}**: No anomalies detected")
375
+
376
+ # AI Chat Interface
377
+ if model:
378
+ st.subheader("🤖 AI Production Assistant")
379
+
380
+ # Predefined questions
381
+ st.markdown("**Quick Questions:**")
382
+ quick_questions = [
383
+ "What are the key production trends?",
384
+ "Which material type shows the best consistency?",
385
+ "Are there any concerning patterns in the data?",
386
+ "What recommendations do you have for optimization?"
387
+ ]
388
+
389
+ cols = st.columns(2)
390
+ for i, question in enumerate(quick_questions):
391
+ with cols[i % 2]:
392
+ if st.button(question, key=f"q_{i}"):
393
+ with st.spinner("AI analyzing..."):
394
+ answer = query_llm(model, summary, question)
395
+ st.success(f"**Q:** {question}")
396
+ st.write(f"**A:** {answer}")
397
+
398
+ # Custom question
399
+ st.markdown("**Ask a Custom Question:**")
400
+ user_question = st.text_input("Your question about the production data:")
401
+
402
+ if user_question and st.button("Get AI Answer"):
403
+ with st.spinner("AI analyzing..."):
404
+ answer = query_llm(model, summary, user_question)
405
+ st.success(f"**Q:** {user_question}")
406
+ st.write(f"**A:** {answer}")
407
+
408
+ except Exception as e:
409
+ st.error(f"Error processing file: {str(e)}")
410
+ st.info("Please ensure your CSV file has the required format.")
411
 
412
  else:
413
  st.info("👆 Please upload a CSV file to begin analysis")