# Gyan.AI — src/components/ai_tutor.py
# (Last updated by cryogenic22, commit 512f375, verified.)
import streamlit as st
from src.services.ai_service import AITutorService
from src.utils.session import get_tutor_context
class AITutor:
    """Streamlit chat interface for the AI tutor, with optional voice (TTS) output."""

    def __init__(self):
        self.service = AITutorService()
        # Legacy attribute, kept in sync for backward compatibility.
        # NOTE: the authoritative copy lives in st.session_state
        # ("last_question"), because Streamlit re-creates this object on
        # every rerun — an instance attribute alone would reset each rerun
        # and let the same question be submitted repeatedly.
        self.last_question = None

    @staticmethod
    def _render_chat(chat_container, context, speak_fn=None):
        """Re-render the full chat history into *chat_container*.

        Args:
            chat_container: an ``st.empty()`` placeholder to render into.
            context: tutor context dict holding ``chat_history``.
            speak_fn: optional callable; when given, it is invoked once per
                assistant message that still carries a truthy ``"speak"``
                flag. The flag is cleared after speaking so a message is
                never re-spoken on subsequent reruns.
        """
        with chat_container.container():
            for message in context['chat_history']:
                with st.chat_message(message["role"]):
                    st.write(message["content"])
                if (
                    speak_fn is not None
                    and message["role"] == "assistant"
                    and message.get("speak")
                ):
                    speak_fn(message["content"])
                    # Clear the flag: speak each reply exactly once.
                    message["speak"] = False

    def display_chat_interface(self):
        """Display the enhanced chat interface with voice output."""
        st.header("AI Tutor")

        # Voice controls
        col1, col2 = st.columns([3, 2])
        with col1:
            # Seed the widget state before the checkbox is created so the
            # default is stable across reruns.
            if "voice_active" not in st.session_state:
                st.session_state.voice_active = False
            voice_active = st.checkbox("Enable Voice", key="voice_active")
            if voice_active:
                st.markdown("""
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M15 8V16M11 8V16M7 12H5C3.89543 12 3 12.8954 3 14V16C3 17.1046 3.89543 18 5 18H7V12ZM19 12V16C19 17.1046 18.1046 18 17 18H15" stroke="#4A90E2" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
Voice On
""", unsafe_allow_html=True)
            else:
                st.markdown("""
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M15 8V16M11 8V16M7 12H5C3.89543 12 3 12.8954 3 14V16C3 17.1046 3.89543 18 5 18H7V12ZM19 12V16C19 17.1046 18.1046 18 17 18H15" stroke="#B0B0B0" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
Voice Off
""", unsafe_allow_html=True)
        with col2:
            if self.service.tts_mode:
                st.info(f"Using {self.service.tts_mode.upper()} TTS")

        # Topic selection
        topics = [None, 'Physics', 'Mathematics', 'Computer Science', 'Artificial Intelligence']
        selected_topic = st.selectbox(
            "Select Topic",
            topics,
            format_func=lambda x: 'All Topics' if x is None else x,
            key="topic_selector"
        )

        context = get_tutor_context()
        if selected_topic != context['current_topic']:
            context['current_topic'] = selected_topic

        # Render chat history; speak only not-yet-spoken assistant replies
        # when voice is enabled.
        chat_container = st.empty()
        speak_fn = self.service.speak if st.session_state.voice_active else None
        self._render_chat(chat_container, context, speak_fn)

        # Chat input — guard against re-submitting the same question on a
        # rerun by tracking the last question in session_state (survives
        # reruns, unlike an attribute on this per-rerun object).
        prompt = st.text_input("Ask your question...", key="chat_input")
        if "last_question" not in st.session_state:
            st.session_state.last_question = None
        if prompt and prompt != st.session_state.last_question:
            self.handle_user_input(prompt, chat_container)
            st.session_state.last_question = prompt
            self.last_question = prompt  # keep legacy attribute in sync

    def handle_user_input(self, user_input: str, chat_container):
        """Process user input and generate a response.

        Appends the user message, re-renders immediately (so the user sees
        their message before the potentially slow model call), then appends
        and renders the AI reply.
        """
        context = get_tutor_context()

        # Add user message
        context['chat_history'].append({
            "role": "user",
            "content": user_input
        })
        # Update the chat container immediately with the user message.
        self._render_chat(chat_container, context)

        # Generate the AI response (may be slow — happens after the user
        # message is already on screen).
        response = self.service.generate_response(user_input, context['current_topic'])

        # Add AI response; "speak": True marks it as not yet spoken so the
        # next voiced render speaks it exactly once.
        context['chat_history'].append({
            "role": "assistant",
            "content": response,
            "speak": True
        })
        self._render_chat(chat_container, context)