entropy25 committed on
Commit
1f60a67
·
verified ·
1 Parent(s): 8f25013

Create ai_engine.py

Browse files
Files changed (1) hide show
  1. ai_engine.py +134 -0
ai_engine.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import google.generativeai as genai
3
+ import pandas as pd
4
+ from typing import Optional, Dict
5
+
6
+ from config import get_settings
7
+
8
@st.cache_resource
def init_ai_model():
    """Configure the Gemini client and return a cached model handle.

    Reads the Google API key from application settings. Returns None when
    no key is configured or when client configuration raises, surfacing
    the failure to the UI via ``st.error`` in the latter case. The result
    is cached for the Streamlit session via ``st.cache_resource``.
    """
    key = get_settings().google_api_key
    if not key:
        # No key configured — callers treat None as "AI unavailable".
        return None

    try:
        genai.configure(api_key=key)
        model = genai.GenerativeModel('gemini-1.5-flash')
    except Exception as e:
        # Surface configuration problems in the Streamlit UI rather than crash.
        st.error(f"AI configuration failed: {str(e)}")
        return None
    return model
22
+
23
def generate_ai_summary(model, df: pd.DataFrame, stats: Dict, outliers: Dict) -> str:
    """Build a production-data briefing and ask the Gemini model to summarize it.

    Args:
        model: A configured generative model exposing ``generate_content``,
            or None/falsy when AI is unavailable.
        df: Production records; expected to contain 'date' and 'weight_kg'
            columns (and optionally 'shift') — TODO confirm against caller.
        stats: Per-material stats keyed by material name, plus a '_total_'
            entry with 'total', 'work_days' and 'daily_avg' figures.
        outliers: Per-material outlier info dicts, each with a 'count' key.

    Returns:
        The model's text response, or a human-readable fallback/error string.
        Never raises: all failures are folded into the returned string.
    """
    if not model:
        return "AI analysis unavailable - Google API key not configured."

    try:
        materials = [k for k in stats.keys() if k != '_total_']

        context_parts = [
            "# Production Data Analysis Context",
            "## Overview",
            f"- Total Production: {stats['_total_']['total']:,.0f} kg",
            f"- Production Period: {stats['_total_']['work_days']} working days",
            f"- Daily Average: {stats['_total_']['daily_avg']:,.0f} kg",
            f"- Materials Tracked: {len(materials)}",
            "",
            "## Material Breakdown:"
        ]

        for material in materials:
            info = stats[material]
            context_parts.append(
                f"- {material.title()}: {info['total']:,.0f} kg ({info['percentage']:.1f}%), "
                f"avg {info['daily_avg']:,.0f} kg/day"
            )

        daily_data = df.groupby('date')['weight_kg'].sum()

        # Guard degenerate data: an empty frame would make iloc[0] raise, and a
        # single day (or zero mean) would make std()/mean() produce NaN or inf,
        # injecting "nan%" into the prompt.
        if daily_data.empty:
            trend_direction = "flat"
            volatility = 0.0
            peak = low = 0.0
        else:
            trend_direction = "increasing" if daily_data.iloc[-1] > daily_data.iloc[0] else "decreasing"
            mean = daily_data.mean()
            if len(daily_data) > 1 and mean:
                volatility = daily_data.std() / mean * 100
            else:
                volatility = 0.0
            peak = daily_data.max()
            low = daily_data.min()

        context_parts.extend([
            "",
            "## Trend Analysis:",
            f"- Overall trend: {trend_direction}",
            f"- Production volatility: {volatility:.1f}% coefficient of variation",
            f"- Peak production: {peak:,.0f} kg",
            f"- Lowest production: {low:,.0f} kg"
        ])

        # .get() tolerates outlier entries that lack a 'count' key.
        total_outliers = sum(info.get('count', 0) for info in outliers.values())
        context_parts.extend([
            "",
            "## Quality Control:",
            f"- Total outliers detected: {total_outliers}",
            f"- Materials with quality issues: {sum(1 for info in outliers.values() if info.get('count', 0) > 0)}"
        ])

        if 'shift' in df.columns:
            shift_stats = df.groupby('shift')['weight_kg'].sum()
            context_parts.extend([
                "",
                "## Shift Performance:",
                f"- Day shift: {shift_stats.get('day', 0):,.0f} kg",
                f"- Night shift: {shift_stats.get('night', 0):,.0f} kg"
            ])

        context_text = "\n".join(context_parts)

        prompt = f"""
{context_text}

As an expert AI analyst for the Production Monitor platform, provide concise analysis.

Structure your response:

**PRODUCTION ASSESSMENT**
Evaluate status (Excellent/Good/Needs Attention) with brief justification.

**KEY FINDINGS**
Identify 3-4 critical insights. Reference platform features like Quality Check module or Production Trend chart.

**RECOMMENDATIONS**
Provide 2-3 actionable steps for management.

Keep under 300 words.
"""

        response = model.generate_content(prompt)
        return response.text

    except Exception as e:
        # Last-resort boundary: return the failure as text so the UI never crashes.
        return f"AI analysis error: {str(e)}"
104
+
105
def query_ai(model, stats: Dict, question: str, df: Optional[pd.DataFrame] = None) -> str:
    """Answer a free-form question about production data via the Gemini model.

    Args:
        model: A configured generative model exposing ``generate_content``,
            or None/falsy when AI is unavailable.
        stats: Per-material stats keyed by material name, plus a '_total_'
            entry with 'total' and 'work_days'.
        question: The user's natural-language question.
        df: Optional raw production records used to enrich the context
            (expects 'weight_kg' and optionally 'shift'/'day_name' columns).

    Returns:
        The model's text answer, or a human-readable fallback/error string.
    """
    if not model:
        return "AI assistant not available - Please configure Google API key"

    # Use .get with defaults so a missing '_total_' entry degrades to zeros
    # instead of raising KeyError to the caller (context building previously
    # ran outside any try block).
    total = stats.get('_total_', {})
    context_parts = [
        "Production Data Summary:",
        *[f"- {mat.title()}: {info['total']:,.0f}kg ({info['percentage']:.1f}%)"
          for mat, info in stats.items() if mat != '_total_'],
        f"\nTotal Production: {total.get('total', 0):,.0f}kg across {total.get('work_days', 0)} work days"
    ]

    if df is not None:
        available_cols = list(df.columns)
        context_parts.append(f"\nAvailable data fields: {', '.join(available_cols)}")

        if 'shift' in df.columns:
            shift_stats = df.groupby('shift')['weight_kg'].sum()
            context_parts.append(f"Shift breakdown: {dict(shift_stats)}")

        if 'day_name' in df.columns:
            day_stats = df.groupby('day_name')['weight_kg'].mean()
            context_parts.append(f"Average daily production: {dict(day_stats.round(0))}")

    context = "\n".join(context_parts) + f"\n\nQuestion: {question}\nAnswer based on available data:"

    try:
        response = model.generate_content(context)
        return response.text
    except Exception as e:
        # Fold API failures into the returned string so the UI never crashes.
        return f"Error getting AI response: {str(e)}"