# syncmaster8 / ai_questions.py
# aseelflihan — "Initial commit without node_modules" (commit 33d3592)
# ai_questions.py - AI-Powered Question Engine for SyncMaster
import time
import hashlib
from datetime import datetime
from typing import Dict, List, Optional, Tuple, Any
from dataclasses import dataclass, field
import streamlit as st
@dataclass
class QAPair:
    """Represents a question-answer pair"""
    question: str  # The user's question text
    answer: str  # The formatted AI (or fallback) answer
    timestamp: datetime  # When the answer was produced
    question_type: str  # 'template' or 'custom'
    response_time_ms: int  # Wall-clock time taken to obtain the answer
    model_used: str = "Unknown"  # Which AI model was used
@dataclass
class QuestionSession:
    """Represents a question session for a specific text segment"""
    session_id: str  # Short hex ID from AIQuestionEngine.create_session_id
    selected_text: str  # The text the user selected from the broadcast
    segment_id: str  # Broadcast segment identifier
    start_timestamp: int  # Segment start time in milliseconds
    ui_language: str  # Language code used for this session's UI
    conversation: List[QAPair] = field(default_factory=list)  # Q&A history, oldest first
    created_at: datetime = field(default_factory=datetime.now)  # Session creation time
@dataclass
class TextSelection:
    """Represents selected text from broadcast"""
    text: str  # The selected text itself
    segment_id: str  # Segment the selection came from
    start_ms: int  # Selection start time in milliseconds
    end_ms: int  # Selection end time in milliseconds
    # Presumably maps language code -> translated text; not populated in this file — confirm with caller.
    translations: Dict[str, str] = field(default_factory=dict)
    selection_timestamp: int = field(default_factory=lambda: int(time.time() * 1000))  # Epoch ms at selection
class AIQuestionEngine:
    """
    AI-powered question engine for interactive learning
    """
    def __init__(self, translator_instance=None):
        """Initialize the engine.

        Args:
            translator_instance: Backend provider object. Methods in this
                class probe it for `model` (Gemini), `_groq_complete` and
                `_openrouter_complete`. May be None, in which case only the
                rule-based fallback path is usable.
        """
        self.translator = translator_instance
        # session_id -> QuestionSession; one session per selected-text conversation.
        self.conversation_history: Dict[str, QuestionSession] = {}
        # Count of answers produced by each backend in this process.
        self.model_usage_stats = {
            'Gemini AI': 0,
            'Groq AI': 0,
            'OpenRouter AI': 0,
            'Simple Response': 0
        }
        # Question templates in multiple languages
        self.question_templates = {
            'ar': [
                "اشرح هذا النص بالتفصيل",
                "أعطني أمثلة عملية على هذا",
                "ما معنى هذا المصطلح؟",
                "كيف يُستخدم هذا في الواقع؟",
                "ما أهمية هذا الموضوع؟",
                "ما هي النقاط الرئيسية هنا؟",
                "اربط هذا بمفاهيم أخرى",
                "ما هي التطبيقات العملية؟"
            ],
            'en': [
                "Explain this text in detail",
                "Give me practical examples of this",
                "What does this term mean?",
                "How is this used in practice?",
                "Why is this topic important?",
                "What are the key points here?",
                "Connect this to other concepts",
                "What are the practical applications?"
            ],
            'fr': [
                "Expliquez ce texte en détail",
                "Donnez-moi des exemples pratiques",
                "Que signifie ce terme?",
                "Comment cela est-il utilisé en pratique?",
                "Pourquoi ce sujet est-il important?",
                "Quels sont les points clés ici?",
                "Reliez cela à d'autres concepts",
                "Quelles sont les applications pratiques?"
            ],
            'es': [
                "Explica este texto en detalle",
                "Dame ejemplos prácticos de esto",
                "¿Qué significa este término?",
                "¿Cómo se usa esto en la práctica?",
                "¿Por qué es importante este tema?",
                "¿Cuáles son los puntos clave aquí?",
                "Conecta esto con otros conceptos",
                "¿Cuáles son las aplicaciones prácticas?"
            ],
            'de': [
                "Erkläre diesen Text im Detail",
                "Gib mir praktische Beispiele dafür",
                "Was bedeutet dieser Begriff?",
                "Wie wird das in der Praxis verwendet?",
                "Warum ist dieses Thema wichtig?",
                "Was sind die wichtigsten Punkte hier?",
                "Verbinde das mit anderen Konzepten",
                "Was sind die praktischen Anwendungen?"
            ],
            'zh': [
                "详细解释这段文字",
                "给我一些实际例子",
                "这个术语是什么意思?",
                "这在实践中如何使用?",
                "为什么这个话题很重要?",
                "这里的要点是什么?",
                "将此与其他概念联系起来",
                "实际应用有哪些?"
            ]
        }
        # Response formatting templates
        # NOTE(review): only 'ar' and 'en' are defined here although
        # question_templates covers six languages — any direct indexing by
        # another language code raises KeyError.
        self.response_templates = {
            'ar': {
                'context_intro': "بناءً على النص المحدد:",
                'explanation_intro': "الشرح:",
                'examples_intro': "أمثلة:",
                'importance_intro': "الأهمية:",
                'applications_intro': "التطبيقات:",
                'error_message': "عذراً، حدث خطأ في معالجة سؤالك. يرجى المحاولة مرة أخرى."
            },
            'en': {
                'context_intro': "Based on the selected text:",
                'explanation_intro': "Explanation:",
                'examples_intro': "Examples:",
                'importance_intro': "Importance:",
                'applications_intro': "Applications:",
                'error_message': "Sorry, there was an error processing your question. Please try again."
            }
        }
def get_question_templates(self, ui_language: str = 'ar') -> List[str]:
"""Get pre-defined question templates for the specified language"""
return self.question_templates.get(ui_language, self.question_templates['ar'])
def create_session_id(self, selected_text: str, segment_id: str) -> str:
"""Create a unique session ID for a text selection"""
content = f"{selected_text}_{segment_id}_{int(time.time())}"
return hashlib.md5(content.encode()).hexdigest()[:12]
def process_question(self,
selected_text: str,
question: str,
segment_info: Dict[str, Any],
ui_language: str = 'ar',
session_id: Optional[str] = None,
preferred_model: str = 'auto') -> Tuple[Optional[str], Optional[str], str]:
"""
Process a user question about selected text
Args:
selected_text: The text the user selected
question: The user's question
segment_info: Information about the broadcast segment
ui_language: UI language ('ar' or 'en')
session_id: Existing session ID or None for new session
Returns:
Tuple of (answer, error_message, session_id)
"""
if not self.translator:
error_msg = self.response_templates[ui_language]['error_message']
return None, error_msg, session_id or ""
try:
# Create or get session
if not session_id:
session_id = self.create_session_id(selected_text, segment_info.get('id', ''))
session = QuestionSession(
session_id=session_id,
selected_text=selected_text,
segment_id=segment_info.get('id', ''),
start_timestamp=segment_info.get('start_ms', 0),
ui_language=ui_language
)
self.conversation_history[session_id] = session
else:
session = self.conversation_history.get(session_id)
if not session:
# Session not found, create new one
session = QuestionSession(
session_id=session_id,
selected_text=selected_text,
segment_id=segment_info.get('id', ''),
start_timestamp=segment_info.get('start_ms', 0),
ui_language=ui_language
)
self.conversation_history[session_id] = session
# Prepare context for AI
context = self._prepare_question_context(selected_text, question, session, ui_language)
# Get AI response with preferred model
start_time = time.time()
ai_response, error, model_used = self.get_ai_response_with_model(context, ui_language, preferred_model)
response_time = int((time.time() - start_time) * 1000)
if ai_response:
# Format response
formatted_response = self.format_ai_response(ai_response, ui_language)
# Save to conversation history with model info
question_type = 'template' if question in self.get_question_templates(ui_language) else 'custom'
qa_pair = QAPair(
question=question,
answer=formatted_response,
timestamp=datetime.now(),
question_type=question_type,
response_time_ms=response_time
)
# Add model info to the QA pair
qa_pair.model_used = model_used
session.conversation.append(qa_pair)
return formatted_response, None, session_id, model_used
else:
error_msg = error or self.response_templates[ui_language]['error_message']
return None, error_msg, session_id, None
except Exception as e:
error_msg = f"{self.response_templates[ui_language]['error_message']} ({str(e)})"
return None, error_msg, session_id or ""
def _prepare_question_context(self,
selected_text: str,
question: str,
session: QuestionSession,
ui_language: str) -> str:
"""Prepare context for AI question processing"""
templates = self.response_templates[ui_language]
# Build context with conversation history
context_parts = []
# Add selected text context
context_parts.append(f"{templates['context_intro']}")
context_parts.append(f'"{selected_text}"')
context_parts.append("")
# Add conversation history if exists
if session.conversation:
context_parts.append("Previous conversation:")
for qa in session.conversation[-3:]: # Last 3 Q&A pairs for context
context_parts.append(f"Q: {qa.question}")
context_parts.append(f"A: {qa.answer[:200]}...") # Truncate long answers
context_parts.append("")
# Add current question
context_parts.append(f"Current question: {question}")
context_parts.append("")
# Add instructions based on language
language_instructions = {
'ar': """
أجب على السؤال بناءً على النص المحدد. اجعل إجابتك:
- واضحة ومفهومة
- مرتبطة بالنص المحدد
- تحتوي على أمثلة عملية إذا كان ذلك مناسباً
- باللغة العربية الفصحى
- منظمة ومنسقة بشكل جيد
إذا كان السؤال يطلب شرحاً، قدم شرحاً مفصلاً.
إذا كان يطلب أمثلة، قدم أمثلة واقعية ومفيدة.
إذا كان يطلب التوضيح، اشرح المفاهيم بطريقة بسيطة.
""",
'en': """
Answer the question based on the selected text. Make your answer:
- Clear and understandable
- Related to the selected text
- Include practical examples when appropriate
- In English
- Well-organized and formatted
If the question asks for explanation, provide detailed explanation.
If it asks for examples, provide real-world, helpful examples.
If it asks for clarification, explain concepts in simple terms.
""",
'fr': """
Répondez à la question basée sur le texte sélectionné. Rendez votre réponse:
- Claire et compréhensible
- Liée au texte sélectionné
- Incluez des exemples pratiques si approprié
- En français
- Bien organisée et formatée
Si la question demande une explication, fournissez une explication détaillée.
Si elle demande des exemples, fournissez des exemples réels et utiles.
Si elle demande des clarifications, expliquez les concepts en termes simples.
""",
'es': """
Responde la pregunta basada en el texto seleccionado. Haz que tu respuesta sea:
- Clara y comprensible
- Relacionada con el texto seleccionado
- Incluye ejemplos prácticos cuando sea apropiado
- En español
- Bien organizada y formateada
Si la pregunta pide explicación, proporciona explicación detallada.
Si pide ejemplos, proporciona ejemplos reales y útiles.
Si pide aclaración, explica conceptos en términos simples.
""",
'de': """
Beantworte die Frage basierend auf dem ausgewählten Text. Mache deine Antwort:
- Klar und verständlich
- Bezogen auf den ausgewählten Text
- Enthalte praktische Beispiele wenn angemessen
- Auf Deutsch
- Gut organisiert und formatiert
Wenn die Frage nach Erklärung fragt, gib detaillierte Erklärung.
Wenn sie nach Beispielen fragt, gib reale und hilfreiche Beispiele.
Wenn sie nach Klarstellung fragt, erkläre Konzepte in einfachen Begriffen.
""",
'zh': """
根据选定的文本回答问题。让你的回答:
- 清晰易懂
- 与选定文本相关
- 适当时包含实际例子
- 用中文
- 组织良好且格式化
如果问题要求解释,提供详细解释。
如果要求例子,提供真实有用的例子。
如果要求澄清,用简单术语解释概念。
"""
}
instructions = language_instructions.get(ui_language, language_instructions['en'])
context_parts.append(instructions)
return "\n".join(context_parts)
def _get_ai_response(self, context: str, ui_language: str) -> Tuple[Optional[str], Optional[str], Optional[str]]:
"""Get AI response using multiple AI services with fallback
Returns:
Tuple of (response_text, error_message, model_used)
"""
# Model 1: Try Gemini first
try:
if hasattr(self.translator, 'model') and self.translator.model:
response = self.translator.model.generate_content(context)
if response and hasattr(response, 'text') and response.text:
self.model_usage_stats['Gemini AI'] += 1
return response.text.strip(), None, "Gemini AI"
except Exception as e:
error_msg = str(e)
print(f"Gemini AI failed: {error_msg}") # Debug log
# Continue to next model instead of returning error immediately
# Model 2: Try Groq
try:
if hasattr(self.translator, '_groq_complete'):
response, error = self.translator._groq_complete(context)
if response and response.strip():
self.model_usage_stats['Groq AI'] += 1
return response.strip(), None, "Groq AI"
print(f"Groq failed: {error}") # Debug log
except Exception as e:
print(f"Groq exception: {str(e)}") # Debug log
# Model 3: Try OpenRouter
try:
if hasattr(self.translator, '_openrouter_complete'):
response, error = self.translator._openrouter_complete(context)
if response and response.strip():
self.model_usage_stats['OpenRouter AI'] += 1
return response.strip(), None, "OpenRouter AI"
print(f"OpenRouter failed: {error}") # Debug log
except Exception as e:
print(f"OpenRouter exception: {str(e)}") # Debug log
# Fallback: Simple rule-based response
simple_response, _ = self._generate_simple_response(context, ui_language)
self.model_usage_stats['Simple Response'] += 1
return simple_response, None, "Simple Response (AI services unavailable)"
def _try_fallback_services(self, context: str, ui_language: str) -> Tuple[Optional[str], Optional[str]]:
"""Try fallback AI services when Gemini is unavailable"""
# Try OpenRouter (free models)
if hasattr(self.translator, '_openrouter_complete'):
try:
response, error = self.translator._openrouter_complete(context)
if response:
return response.strip(), None
except Exception:
pass
# Try Groq (free tier)
if hasattr(self.translator, '_groq_complete'):
try:
response, error = self.translator._groq_complete(context)
if response:
return response.strip(), None
except Exception:
pass
# Fallback to simple rule-based responses
return self._generate_simple_response(context, ui_language)
def _generate_simple_response(self, context: str, ui_language: str) -> Tuple[Optional[str], Optional[str]]:
"""Generate simple rule-based response when AI services are unavailable"""
# Extract the question from context
lines = context.split('\n')
question = ""
selected_text = ""
for line in lines:
if line.startswith('Current question:'):
question = line.replace('Current question:', '').strip()
elif line.startswith('"') and line.endswith('"'):
selected_text = line.strip('"')
if not question or not selected_text:
return None, "Could not process question"
# Simple rule-based responses
if ui_language == 'ar':
responses = self._get_arabic_simple_responses(question, selected_text)
else:
responses = self._get_english_simple_responses(question, selected_text)
return responses
    def _get_arabic_simple_responses(self, question: str, selected_text: str) -> Tuple[str, None]:
        """Generate simple Arabic responses based on question patterns.

        Matches Arabic keywords in the question (explain / examples /
        meaning) and returns a canned answer built around the selected
        text. Always returns (response, None).
        """
        question_lower = question.lower()
        # "Explain / clarify" style questions
        if any(word in question_lower for word in ['اشرح', 'شرح', 'وضح']):
            response = f"""بناءً على النص المحدد:
"{selected_text}"
هذا النص يتحدث عن موضوع مهم يحتاج إلى فهم عميق. النقاط الرئيسية تشمل المفاهيم والأفكار المطروحة في النص.
للحصول على شرح أكثر تفصيلاً، يُنصح بمراجعة مصادر إضافية أو طرح أسئلة أكثر تحديداً.
ملاحظة: هذه إجابة مبسطة بسبب عدم توفر خدمة الذكاء الاصطناعي حالياً."""
        # "Examples / applications" style questions
        elif any(word in question_lower for word in ['أمثلة', 'مثال', 'تطبيق']):
            response = f"""أمثلة على النص المحدد:
"{selected_text}"
يمكن تطبيق هذا المفهوم في عدة مجالات:
• في الحياة العملية
• في الدراسة والتعلم
• في المشاريع والأعمال
للحصول على أمثلة أكثر تفصيلاً، يُنصح بالبحث في مصادر متخصصة.
ملاحظة: هذه إجابة مبسطة بسبب عدم توفر خدمة الذكاء الاصطناعي حالياً."""
        # "Meaning / definition" style questions
        elif any(word in question_lower for word in ['معنى', 'تعريف', 'ما هو']):
            response = f"""معنى النص المحدد:
"{selected_text}"
هذا النص يشير إلى مفهوم أو فكرة معينة تحتاج إلى تفسير. المعنى العام يتعلق بالموضوع المطروح في السياق.
للحصول على تعريف أكثر دقة، يُنصح بمراجعة المصادر المتخصصة.
ملاحظة: هذه إجابة مبسطة بسبب عدم توفر خدمة الذكاء الاصطناعي حالياً."""
        # Generic fallback response
        else:
            response = f"""بخصوص سؤالك حول:
"{selected_text}"
هذا موضوع مهم يستحق الدراسة والتفكير. النص المحدد يحتوي على معلومات قيمة يمكن الاستفادة منها.
للحصول على إجابة أكثر تفصيلاً، يُنصح بـ:
• مراجعة مصادر إضافية
• طرح أسئلة أكثر تحديداً
• البحث في المراجع المتخصصة
ملاحظة: هذه إجابة مبسطة بسبب عدم توفر خدمة الذكاء الاصطناعي حالياً."""
        return response, None
def _get_english_simple_responses(self, question: str, selected_text: str) -> Tuple[str, None]:
"""Generate simple English responses based on question patterns"""
question_lower = question.lower()
if any(word in question_lower for word in ['explain', 'clarify', 'describe']):
response = f"""Based on the selected text:
"{selected_text}"
This text discusses an important topic that requires deep understanding. The main points include the concepts and ideas presented in the text.
For a more detailed explanation, it's recommended to consult additional sources or ask more specific questions.
Note: This is a simplified response due to AI service being temporarily unavailable."""
elif any(word in question_lower for word in ['example', 'application', 'use']):
response = f"""Examples related to the selected text:
"{selected_text}"
This concept can be applied in several areas:
• In practical life
• In study and learning
• In projects and work
For more detailed examples, it's recommended to search specialized sources.
Note: This is a simplified response due to AI service being temporarily unavailable."""
elif any(word in question_lower for word in ['mean', 'definition', 'what is']):
response = f"""Meaning of the selected text:
"{selected_text}"
This text refers to a specific concept or idea that needs interpretation. The general meaning relates to the topic presented in the context.
For a more precise definition, it's recommended to consult specialized sources.
Note: This is a simplified response due to AI service being temporarily unavailable."""
else:
response = f"""Regarding your question about:
"{selected_text}"
This is an important topic worth studying and thinking about. The selected text contains valuable information that can be beneficial.
For a more detailed answer, it's recommended to:
• Consult additional sources
• Ask more specific questions
• Search specialized references
Note: This is a simplified response due to AI service being temporarily unavailable."""
return response, None
def format_ai_response(self, response: str, ui_language: str) -> str:
"""Format AI response for better display"""
# Clean up response
response = response.strip()
# Remove markdown artifacts
response = response.replace('**', '')
response = response.replace('```', '')
response = response.replace('`', '')
# Remove extra whitespace
lines = [line.strip() for line in response.split('\n')]
response = '\n'.join(line for line in lines if line)
return response
def get_conversation_history(self, session_id: str) -> Optional[QuestionSession]:
"""Get conversation history for a specific session"""
return self.conversation_history.get(session_id)
def clear_conversation(self, session_id: str) -> bool:
"""Clear conversation history for a specific session"""
if session_id in self.conversation_history:
del self.conversation_history[session_id]
return True
return False
def get_all_sessions(self) -> Dict[str, QuestionSession]:
"""Get all active question sessions"""
return self.conversation_history.copy()
def get_model_usage_stats(self) -> Dict[str, int]:
"""Get usage statistics for different AI models"""
return self.model_usage_stats.copy()
    def check_model_availability(self) -> Dict[str, Dict[str, Any]]:
        """Probe each AI backend and report UI-ready status entries.

        Each entry maps a model name to a dict with 'status', 'icon',
        'message' (localized via st.session_state['language']) and 'color'.

        NOTE(review): the Gemini probe issues a real generate_content("Hi")
        call, so every invocation consumes API quota — confirm this is
        intended before calling it frequently.
        """
        models_status = {}
        # Check Gemini
        try:
            if hasattr(self.translator, 'model') and self.translator.model:
                # Try a minimal test — the call itself is the probe; the
                # result is unused, but quota/auth failures raise here.
                test_response = self.translator.model.generate_content("Hi")
                models_status['Gemini AI'] = {
                    'status': 'available',
                    'icon': '✅',
                    'message': 'متاح' if st.session_state.get('language', 'ar') == 'ar' else 'Available',
                    'color': 'green'
                }
            else:
                models_status['Gemini AI'] = {
                    'status': 'unavailable',
                    'icon': '❌',
                    'message': 'غير متاح' if st.session_state.get('language', 'ar') == 'ar' else 'Unavailable',
                    'color': 'red'
                }
        except Exception as e:
            # Distinguish quota exhaustion (HTTP 429 / "quota") from other errors.
            error_str = str(e)
            if "429" in error_str or "quota" in error_str.lower():
                models_status['Gemini AI'] = {
                    'status': 'quota_exceeded',
                    'icon': '⚠️',
                    'message': 'انتهت الحصة' if st.session_state.get('language', 'ar') == 'ar' else 'Quota exceeded',
                    'color': 'orange'
                }
            else:
                models_status['Gemini AI'] = {
                    'status': 'error',
                    'icon': '❌',
                    'message': 'خطأ مؤقت' if st.session_state.get('language', 'ar') == 'ar' else 'Temporary error',
                    'color': 'red'
                }
        # Check Groq — configuration check only (API key present); no live call.
        try:
            if hasattr(self.translator, '_groq_complete') and hasattr(self.translator, 'groq_api_key') and self.translator.groq_api_key:
                models_status['Groq AI'] = {
                    'status': 'available',
                    'icon': '✅',
                    'message': 'متاح' if st.session_state.get('language', 'ar') == 'ar' else 'Available',
                    'color': 'green'
                }
            else:
                models_status['Groq AI'] = {
                    'status': 'not_configured',
                    'icon': '⚙️',
                    'message': 'غير مُعد' if st.session_state.get('language', 'ar') == 'ar' else 'Not configured',
                    'color': 'gray'
                }
        except Exception:
            models_status['Groq AI'] = {
                'status': 'error',
                'icon': '❌',
                'message': 'خطأ' if st.session_state.get('language', 'ar') == 'ar' else 'Error',
                'color': 'red'
            }
        # Check OpenRouter — same configuration-only check as Groq.
        try:
            if hasattr(self.translator, '_openrouter_complete') and hasattr(self.translator, 'openrouter_api_key') and self.translator.openrouter_api_key:
                models_status['OpenRouter AI'] = {
                    'status': 'available',
                    'icon': '✅',
                    'message': 'متاح' if st.session_state.get('language', 'ar') == 'ar' else 'Available',
                    'color': 'green'
                }
            else:
                models_status['OpenRouter AI'] = {
                    'status': 'not_configured',
                    'icon': '⚙️',
                    'message': 'غير مُعد' if st.session_state.get('language', 'ar') == 'ar' else 'Not configured',
                    'color': 'gray'
                }
        except Exception:
            models_status['OpenRouter AI'] = {
                'status': 'error',
                'icon': '❌',
                'message': 'خطأ' if st.session_state.get('language', 'ar') == 'ar' else 'Error',
                'color': 'red'
            }
        # Simple Response is always available (rule-based, no network).
        models_status['Simple Response'] = {
            'status': 'available',
            'icon': '🛡️',
            'message': 'متاح دائماً' if st.session_state.get('language', 'ar') == 'ar' else 'Always available',
            'color': 'blue'
        }
        return models_status
    def get_ai_response_with_model(self, context: str, ui_language: str, preferred_model: str = 'auto') -> Tuple[Optional[str], Optional[str], Optional[str]]:
        """Get AI response using a specific model or auto-selection.

        Returns:
            Tuple of (response_text, error_message, model_used).

        NOTE(review): when the preferred backend exists but errors out,
        this returns the error immediately; the final auto-selection line
        is reached only when the preferred backend is not configured
        (hasattr fails), Gemini returns an empty reply, or the model name
        is unrecognized. The trailing comment overstates the fallback —
        confirm the intended behavior with callers.
        """
        if preferred_model == 'auto':
            return self._get_ai_response(context, ui_language)
        # Try specific model first
        if preferred_model == 'Gemini AI':
            try:
                if hasattr(self.translator, 'model') and self.translator.model:
                    response = self.translator.model.generate_content(context)
                    if response and hasattr(response, 'text') and response.text:
                        self.model_usage_stats['Gemini AI'] += 1
                        return response.text.strip(), None, "Gemini AI"
            except Exception as e:
                return None, f"Gemini AI error: {str(e)}", None
        elif preferred_model == 'Groq AI':
            try:
                if hasattr(self.translator, '_groq_complete'):
                    response, error = self.translator._groq_complete(context)
                    if response and response.strip():
                        self.model_usage_stats['Groq AI'] += 1
                        return response.strip(), None, "Groq AI"
                    else:
                        return None, f"Groq AI error: {error}", None
            except Exception as e:
                return None, f"Groq AI error: {str(e)}", None
        elif preferred_model == 'OpenRouter AI':
            try:
                if hasattr(self.translator, '_openrouter_complete'):
                    response, error = self.translator._openrouter_complete(context)
                    if response and response.strip():
                        self.model_usage_stats['OpenRouter AI'] += 1
                        return response.strip(), None, "OpenRouter AI"
                    else:
                        return None, f"OpenRouter AI error: {error}", None
            except Exception as e:
                return None, f"OpenRouter AI error: {str(e)}", None
        elif preferred_model == 'Simple Response':
            # Rule-based path: never fails, no network required.
            simple_response, _ = self._generate_simple_response(context, ui_language)
            self.model_usage_stats['Simple Response'] += 1
            return simple_response, None, "Simple Response"
        # If preferred model fails, fall back to auto-selection
        return self._get_ai_response(context, ui_language)
def format_conversation_for_export(self, session_id: str, ui_language: str = 'ar') -> Optional[str]:
"""Format conversation for export/copying"""
session = self.conversation_history.get(session_id)
if not session:
return None
export_lines = []
# Header
if ui_language == 'ar':
export_lines.append("محادثة الذكاء الاصطناعي")
export_lines.append(f"النص المحدد: {session.selected_text}")
export_lines.append(f"التاريخ: {session.created_at.strftime('%Y-%m-%d %H:%M:%S')}")
else:
export_lines.append("AI Conversation")
export_lines.append(f"Selected Text: {session.selected_text}")
export_lines.append(f"Date: {session.created_at.strftime('%Y-%m-%d %H:%M:%S')}")
export_lines.append("=" * 50)
export_lines.append("")
# Q&A pairs
for i, qa in enumerate(session.conversation, 1):
if ui_language == 'ar':
export_lines.append(f"السؤال {i}: {qa.question}")
export_lines.append(f"الإجابة {i}: {qa.answer}")
else:
export_lines.append(f"Question {i}: {qa.question}")
export_lines.append(f"Answer {i}: {qa.answer}")
export_lines.append("-" * 30)
export_lines.append("")
return "\n".join(export_lines)
# Global instance (created lazily by get_ai_question_engine)
ai_question_engine = None
def get_ai_question_engine():
    """Return the process-wide AIQuestionEngine, constructing it on first use."""
    global ai_question_engine
    if ai_question_engine is None:
        # Deferred import — presumably avoids a circular import at module load; confirm.
        from translator import get_translator
        ai_question_engine = AIQuestionEngine(get_translator())
    return ai_question_engine