import streamlit as st
import pandas as pd
from typing import Optional, Dict
import os
from config import get_settings
@st.cache_resource
def init_ai_model():
    """Create and cache a Groq chat client for the Streamlit session.

    Returns:
        A ``groq.Groq`` client, or ``None`` when no API key is configured
        or client construction fails (the failure is surfaced via
        ``st.error``).
    """
    settings = get_settings()
    api_key = settings.groq_api_key
    if not api_key:
        return None
    try:
        from groq import Groq
        # Pass the key explicitly instead of writing it into os.environ:
        # mutating the process environment was a needless global side
        # effect and could clobber a key set elsewhere.
        return Groq(api_key=api_key)
    except Exception as e:
        st.error(f"AI configuration failed: {str(e)}")
        return None
def generate_ai_summary(model, df: pd.DataFrame, stats: Dict, outliers: Dict, lang: str = 'English') -> str:
    """Ask the Groq model for a management-style summary of production data.

    Args:
        model: Groq client from ``init_ai_model()``, or ``None``.
        df: Production records; must have ``date`` and ``weight_kg`` columns
            (``shift`` is optional and adds a shift-performance section).
        stats: Per-material stats keyed by material name, plus a ``_total_``
            entry carrying ``total``, ``work_days`` and ``daily_avg``.
        outliers: Per-material outlier info; each value may carry ``count``.
        lang: ``'Norsk'`` for Norwegian output, anything else for English.

    Returns:
        The model's analysis text, or a human-readable error string
        (never raises).
    """
    if not model:
        return "AI analysis unavailable - Groq API key not configured."
    language_instruction = "Respond in Norwegian." if lang == 'Norsk' else "Respond in English."
    try:
        materials = [k for k in stats.keys() if k != '_total_']
        context_parts = [
            "# Production Data Analysis Context",
            "## Overview",
            f"- Total Production: {stats['_total_']['total']:,.0f} kg",
            f"- Production Period: {stats['_total_']['work_days']} working days",
            f"- Daily Average: {stats['_total_']['daily_avg']:,.0f} kg",
            f"- Materials Tracked: {len(materials)}",
            "",
            "## Material Breakdown:"
        ]
        for material in materials:
            info = stats[material]
            context_parts.append(
                f"- {material.title()}: {info['total']:,.0f} kg ({info['percentage']:.1f}%), "
                f"avg {info['daily_avg']:,.0f} kg/day"
            )
        daily_data = df.groupby('date')['weight_kg'].sum()
        # Fix: equal endpoints used to be reported as "decreasing"; call
        # a flat series "stable" instead.
        if daily_data.iloc[-1] > daily_data.iloc[0]:
            trend_direction = "increasing"
        elif daily_data.iloc[-1] < daily_data.iloc[0]:
            trend_direction = "decreasing"
        else:
            trend_direction = "stable"
        # Fix: with a zero mean or a single day, std/mean produced
        # inf/NaN and the prompt read "nan% coefficient of variation".
        mean_daily = daily_data.mean()
        std_daily = daily_data.std()
        if mean_daily and pd.notna(std_daily):
            volatility = std_daily / mean_daily * 100
        else:
            volatility = 0.0
        context_parts.extend([
            "",
            "## Trend Analysis:",
            f"- Overall trend: {trend_direction}",
            f"- Production volatility: {volatility:.1f}% coefficient of variation",
            f"- Peak production: {daily_data.max():,.0f} kg",
            f"- Lowest production: {daily_data.min():,.0f} kg"
        ])
        # .get() so a malformed outlier entry degrades to 0 instead of
        # bubbling a KeyError into the generic error message below.
        total_outliers = sum(info.get('count', 0) for info in outliers.values())
        context_parts.extend([
            "",
            "## Quality Control:",
            f"- Total outliers detected: {total_outliers}",
            f"- Materials with quality issues: {sum(1 for info in outliers.values() if info.get('count', 0) > 0)}"
        ])
        if 'shift' in df.columns:
            shift_stats = df.groupby('shift')['weight_kg'].sum()
            context_parts.extend([
                "",
                "## Shift Performance:",
                f"- Day shift: {shift_stats.get('day', 0):,.0f} kg",
                f"- Night shift: {shift_stats.get('night', 0):,.0f} kg"
            ])
        context_text = "\n".join(context_parts)
        prompt = f"""
{language_instruction}
{context_text}
As an expert AI analyst for the Production Monitor platform, provide concise analysis.
Structure your response:
**PRODUCTION ASSESSMENT**
Evaluate status (Excellent/Good/Needs Attention) with brief justification.
**KEY FINDINGS**
Identify 3-4 critical insights. Reference platform features like Quality Check module or Production Trend chart.
**RECOMMENDATIONS**
Provide 2-3 actionable steps for management.
Keep under 300 words.
"""
        response = model.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model="llama-3.3-70b-versatile",
            temperature=0.7,
            max_tokens=1000
        )
        return response.choices[0].message.content
    except Exception as e:
        error_msg = str(e)
        if '429' in error_msg or 'quota' in error_msg.lower():
            return "AI analysis temporarily unavailable due to API quota limits. Please try again later."
        return f"AI analysis error: {error_msg}"
def query_ai(model, stats: Dict, question: str, df: Optional[pd.DataFrame] = None, lang: str = 'English') -> str:
    """Answer a free-form question about production stats via the Groq API.

    Builds a compact data summary (per-material totals plus optional
    DataFrame-derived shift/day breakdowns), appends the user's question,
    and returns the model's reply or a human-readable error string.
    """
    if not model:
        return "AI assistant not available - Please configure Groq API key"
    instruction = "Respond in Norwegian." if lang == 'Norsk' else "Respond in English."
    lines = ["Production Data Summary:"]
    for mat, info in stats.items():
        if mat == '_total_':
            continue
        lines.append(f"- {mat.title()}: {info['total']:,.0f}kg ({info['percentage']:.1f}%)")
    totals = stats['_total_']
    lines.append(f"\nTotal Production: {totals['total']:,.0f}kg across {totals['work_days']} work days")
    if df is not None:
        lines.append(f"\nAvailable data fields: {', '.join(list(df.columns))}")
        if 'shift' in df.columns:
            per_shift = df.groupby('shift')['weight_kg'].sum()
            lines.append(f"Shift breakdown: {dict(per_shift)}")
        if 'day_name' in df.columns:
            per_day = df.groupby('day_name')['weight_kg'].mean()
            lines.append(f"Average daily production: {dict(per_day.round(0))}")
    prompt = "\n".join(lines) + f"\n\n{instruction}\n\nQuestion: {question}\nAnswer based on available data:"
    try:
        reply = model.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model="llama-3.3-70b-versatile",
            temperature=0.7,
            max_tokens=500
        )
        return reply.choices[0].message.content
    except Exception as exc:
        msg = str(exc)
        if '429' in msg or 'quota' in msg.lower():
            return "AI assistant temporarily unavailable due to API quota limits. Please try again later."
        return f"Error getting AI response: {msg}"