# ai_engine.py — AI analysis helpers (Groq-backed) for the Production Monitor app.
# Last update by entropy25, commit 7ee1f4e (verified).
import streamlit as st
import pandas as pd
from typing import Optional, Dict
import os
from config import get_settings
@st.cache_resource
def init_ai_model():
    """Create and cache a Groq chat client for the Streamlit session.

    Reads the API key from application settings (``config.get_settings``).

    Returns:
        A ``groq.Groq`` client when a key is configured and the SDK imports
        cleanly, otherwise ``None`` (callers treat ``None`` as "AI disabled").
    """
    settings = get_settings()
    api_key = settings.groq_api_key
    if not api_key:
        # No key configured: AI features are simply unavailable, not an error.
        return None
    try:
        from groq import Groq
        # Pass the key to the client explicitly rather than mutating
        # os.environ, which would leak the credential into the process-wide
        # environment for every other library/subprocess to see.
        return Groq(api_key=api_key)
    except Exception as e:
        # Surface SDK import/initialization problems in the UI, degrade to None.
        st.error(f"AI configuration failed: {str(e)}")
        return None
def generate_ai_summary(model, df: pd.DataFrame, stats: Dict, outliers: Dict, lang: str = 'English') -> str:
    """Build a production-data context and ask the Groq model for a summary.

    Args:
        model: Groq client from ``init_ai_model`` (``None`` disables AI).
        df: Raw production rows; must contain ``date`` and ``weight_kg``
            columns, optionally ``shift``.
        stats: Per-material stats keyed by material name, plus a ``'_total_'``
            entry with ``total``, ``work_days`` and ``daily_avg``.
        outliers: Per-material outlier info; each value has a ``count`` key.
        lang: ``'Norsk'`` for Norwegian output, anything else for English.

    Returns:
        The model's summary text, or a human-readable error/unavailable message.
    """
    if not model:
        return "AI analysis unavailable - Groq API key not configured."
    language_instruction = "Respond in Norwegian." if lang == 'Norsk' else "Respond in English."
    try:
        materials = [k for k in stats.keys() if k != '_total_']
        context_parts = [
            "# Production Data Analysis Context",
            "## Overview",
            f"- Total Production: {stats['_total_']['total']:,.0f} kg",
            f"- Production Period: {stats['_total_']['work_days']} working days",
            f"- Daily Average: {stats['_total_']['daily_avg']:,.0f} kg",
            f"- Materials Tracked: {len(materials)}",
            "",
            "## Material Breakdown:"
        ]
        for material in materials:
            info = stats[material]
            context_parts.append(
                f"- {material.title()}: {info['total']:,.0f} kg ({info['percentage']:.1f}%), "
                f"avg {info['daily_avg']:,.0f} kg/day"
            )
        daily_data = df.groupby('date')['weight_kg'].sum()
        # Guard the empty frame: iloc[-1]/iloc[0] would raise IndexError and
        # max()/min() would be NaN. "stable" is the neutral fallback label.
        if daily_data.empty:
            trend_direction = "stable"
            volatility = 0.0
            peak = lowest = 0.0
        else:
            trend_direction = "increasing" if daily_data.iloc[-1] > daily_data.iloc[0] else "decreasing"
            mean = daily_data.mean()
            # A zero mean would make the coefficient of variation inf/NaN.
            volatility = (daily_data.std() / mean * 100) if mean else 0.0
            # std() over a single day is NaN; report 0 volatility instead.
            if pd.isna(volatility):
                volatility = 0.0
            peak = daily_data.max()
            lowest = daily_data.min()
        context_parts.extend([
            "",
            "## Trend Analysis:",
            f"- Overall trend: {trend_direction}",
            f"- Production volatility: {volatility:.1f}% coefficient of variation",
            f"- Peak production: {peak:,.0f} kg",
            f"- Lowest production: {lowest:,.0f} kg"
        ])
        total_outliers = sum(info['count'] for info in outliers.values())
        context_parts.extend([
            "",
            "## Quality Control:",
            f"- Total outliers detected: {total_outliers}",
            f"- Materials with quality issues: {sum(1 for info in outliers.values() if info['count'] > 0)}"
        ])
        if 'shift' in df.columns:
            shift_stats = df.groupby('shift')['weight_kg'].sum()
            context_parts.extend([
                "",
                "## Shift Performance:",
                f"- Day shift: {shift_stats.get('day', 0):,.0f} kg",
                f"- Night shift: {shift_stats.get('night', 0):,.0f} kg"
            ])
        context_text = "\n".join(context_parts)
        prompt = f"""
{language_instruction}
{context_text}
As an expert AI analyst for the Production Monitor platform, provide concise analysis.
Structure your response:
**PRODUCTION ASSESSMENT**
Evaluate status (Excellent/Good/Needs Attention) with brief justification.
**KEY FINDINGS**
Identify 3-4 critical insights. Reference platform features like Quality Check module or Production Trend chart.
**RECOMMENDATIONS**
Provide 2-3 actionable steps for management.
Keep under 300 words.
"""
        response = model.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model="llama-3.3-70b-versatile",
            temperature=0.7,
            max_tokens=1000
        )
        return response.choices[0].message.content
    except Exception as e:
        error_msg = str(e)
        # Rate-limit / quota errors get a friendlier, retry-later message.
        if '429' in error_msg or 'quota' in error_msg.lower():
            return "AI analysis temporarily unavailable due to API quota limits. Please try again later."
        return f"AI analysis error: {error_msg}"
def query_ai(model, stats: Dict, question: str, df: Optional[pd.DataFrame] = None, lang: str = 'English') -> str:
    """Answer a free-form question about production data via the Groq chat API.

    Args:
        model: Groq client from ``init_ai_model`` (``None`` disables AI).
        stats: Per-material stats plus a ``'_total_'`` entry with ``total``
            and ``work_days``.
        question: The user's natural-language question.
        df: Optional raw rows; when present, column names and ``shift`` /
            ``day_name`` breakdowns are added to the prompt context.
        lang: ``'Norsk'`` for Norwegian output, anything else for English.

    Returns:
        The model's answer text, or a human-readable error/unavailable message.
    """
    if not model:
        return "AI assistant not available - Please configure Groq API key"

    language_instruction = "Respond in Norwegian." if lang == 'Norsk' else "Respond in English."

    # Assemble the data context the model will reason over.
    lines = ["Production Data Summary:"]
    for mat, info in stats.items():
        if mat == '_total_':
            continue
        lines.append(f"- {mat.title()}: {info['total']:,.0f}kg ({info['percentage']:.1f}%)")
    totals = stats['_total_']
    lines.append(f"\nTotal Production: {totals['total']:,.0f}kg across {totals['work_days']} work days")

    if df is not None:
        lines.append(f"\nAvailable data fields: {', '.join(list(df.columns))}")
        if 'shift' in df.columns:
            per_shift = df.groupby('shift')['weight_kg'].sum()
            lines.append(f"Shift breakdown: {dict(per_shift)}")
        if 'day_name' in df.columns:
            per_day = df.groupby('day_name')['weight_kg'].mean()
            lines.append(f"Average daily production: {dict(per_day.round(0))}")

    context = "\n".join(lines) + f"\n\n{language_instruction}\n\nQuestion: {question}\nAnswer based on available data:"

    try:
        response = model.chat.completions.create(
            messages=[{"role": "user", "content": context}],
            model="llama-3.3-70b-versatile",
            temperature=0.7,
            max_tokens=500
        )
        return response.choices[0].message.content
    except Exception as e:
        msg = str(e)
        # Rate-limit / quota errors get a friendlier, retry-later message.
        if '429' in msg or 'quota' in msg.lower():
            return "AI assistant temporarily unavailable due to API quota limits. Please try again later."
        return f"Error getting AI response: {msg}"