cryogenic22 committed on
Commit
ce2143e
·
verified ·
1 Parent(s): 0d44d75

Create components/ai_tutor.py

Browse files
Files changed (1) hide show
  1. src/components/ai_tutor.py +161 -0
src/components/ai_tutor.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from src.services.ai_service import AITutorService
3
+ from src.utils.session import get_tutor_context
4
+ from datetime import datetime
5
+ import speech_recognition as sr
6
+ import pyttsx3
7
+ import threading
8
+ import queue
9
+ import time
10
+
11
class AITutor:
    """Voice-enabled AI tutor chat component for Streamlit.

    Wraps an AITutorService with a chat UI, optional speech input
    (SpeechRecognition + Google recognizer) and speech output (pyttsx3),
    plus a sidebar of engagement metrics.
    """

    def __init__(self):
        self.service = AITutorService()
        self.initialize_speech_components()

    def initialize_speech_components(self):
        """Initialize text-to-speech and speech recognition.

        Engines are cached in st.session_state so they are created only
        once per session and survive Streamlit reruns.
        """
        # Initialize text-to-speech engine
        if 'tts_engine' not in st.session_state:
            st.session_state.tts_engine = pyttsx3.init()
            # Configure voice properties
            st.session_state.tts_engine.setProperty('rate', 150)
            st.session_state.tts_engine.setProperty('volume', 0.9)

            # Prefer a female voice when one is available.
            # Fix: the original indexed voices[0] unconditionally, which
            # raises IndexError on systems that report no voices.
            voices = st.session_state.tts_engine.getProperty('voices')
            if voices:
                female_voice = next(
                    (voice for voice in voices if 'female' in voice.name.lower()),
                    voices[0],
                )
                st.session_state.tts_engine.setProperty('voice', female_voice.id)

        # Initialize speech recognition
        if 'speech_recognizer' not in st.session_state:
            st.session_state.speech_recognizer = sr.Recognizer()
            st.session_state.speech_recognizer.energy_threshold = 4000
            st.session_state.audio_queue = queue.Queue()

    def speak(self, text: str):
        """Make the AI tutor speak the given text (non-blocking)."""
        # Fix: capture the engine on the main thread — st.session_state is
        # not reliably accessible from worker threads in Streamlit.
        engine = st.session_state.tts_engine

        def speak_text():
            engine.say(text)
            engine.runAndWait()

        # Run speech in a daemon thread so a pending utterance never blocks
        # the UI or interpreter shutdown.
        thread = threading.Thread(target=speak_text, daemon=True)
        thread.start()

    def listen(self):
        """Listen on the microphone and return recognized text, or None.

        Returns None on timeout, unintelligible audio, or recognition
        service failure; a warning/error is shown to the user in each case.
        """
        try:
            with sr.Microphone() as source:
                st.write("🎤 Listening...")
                audio = st.session_state.speech_recognizer.listen(source, timeout=5)
                text = st.session_state.speech_recognizer.recognize_google(audio)
                return text
        except sr.WaitTimeoutError:
            st.warning("No speech detected. Please try again.")
        except sr.UnknownValueError:
            st.warning("Could not understand audio. Please try again.")
        except sr.RequestError:
            st.error("Could not access speech recognition service. Please try typing instead.")
        return None

    def display_chat_interface(self):
        """Display the enhanced chat interface with avatar and speech."""
        st.header("AI Tutor")

        # Voice interaction controls
        col1, col2 = st.columns(2)
        with col1:
            voice_enabled = st.toggle("Enable Voice", value=False, key="voice_enabled")
        with col2:
            if voice_enabled:
                if st.button("🎤 Start Speaking"):
                    user_input = self.listen()
                    if user_input:
                        self.handle_user_input(user_input)

        # Display avatar
        self.service.display_avatar(state='neutral')

        # Topic selection (None renders as "All Topics")
        topics = [None, 'Physics', 'Mathematics', 'Computer Science', 'Artificial Intelligence']
        selected_topic = st.selectbox(
            "Select Topic",
            topics,
            format_func=lambda x: 'All Topics' if x is None else x,
            key="topic_selector"
        )

        context = get_tutor_context()
        if selected_topic != context['current_topic']:
            context['current_topic'] = selected_topic

        # Display chat history with avatar states
        chat_container = st.container()
        with chat_container:
            for message in context['chat_history']:
                with st.chat_message(message["role"]):
                    st.write(message["content"])
                    if message["role"] == "assistant":
                        self.service.display_avatar(state='happy')
                        if voice_enabled and message.get('speak', True):
                            self.speak(message["content"])
                            message['speak'] = False  # Prevent speaking the same message again

        # Chat input
        if prompt := st.chat_input("Ask your question or click the microphone to speak"):
            self.handle_user_input(prompt)

    def handle_user_input(self, user_input: str):
        """Process user input, generate a response, and rerun the app."""
        # Show thinking avatar while the response is generated
        self.service.display_avatar(state='thinking')

        # Record the user message
        context = get_tutor_context()
        context['chat_history'].append({
            "role": "user",
            "content": user_input
        })

        # Generate the AI response
        response = self.service.generate_response(user_input)

        # Record the AI response; 'speak' marks it for one-time TTS playback
        context['chat_history'].append({
            "role": "assistant",
            "content": response,
            "speak": True
        })

        # Show happy avatar and rerun so the new messages render
        self.service.display_avatar(state='happy')
        st.rerun()

    def display_learning_metrics(self):
        """Display learning progress and engagement metrics in the sidebar."""
        with st.sidebar:
            st.subheader("Learning Metrics")

            context = get_tutor_context()
            # Engagement score: mean sentiment across recorded interactions
            metrics = context['engagement_metrics']
            if metrics:
                avg_sentiment = sum(m['sentiment_score'] for m in metrics) / len(metrics)
                st.metric(
                    "Engagement Score",
                    f"{avg_sentiment:.2f}",
                    delta="0.1" if avg_sentiment > 0.5 else "-0.1"
                )

            # Interaction stats
            if context['chat_history']:
                st.metric(
                    "Questions Asked",
                    len([m for m in context['chat_history'] if m['role'] == 'user'])
                )

            # Topic focus
            if context['current_topic']:
                st.info(f"Current focus: {context['current_topic']}")