Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,217 +1,273 @@
|
|
| 1 |
-
import os
|
| 2 |
-
import uuid
|
| 3 |
import requests
|
| 4 |
import json
|
| 5 |
import random
|
| 6 |
-
import
|
| 7 |
-
from pydub import AudioSegment
|
| 8 |
import gradio as gr
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
import speech_recognition as sr
|
| 10 |
|
| 11 |
-
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
try:
|
| 20 |
audio = AudioSegment.from_file(input_path)
|
| 21 |
audio.export(output_path, format="wav")
|
| 22 |
-
log_step(f"Audio successfully converted to {output_path}")
|
| 23 |
return output_path
|
| 24 |
except Exception as e:
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
raise RuntimeError(error_msg)
|
| 28 |
-
|
| 29 |
-
def img_detector(model, url):
|
| 30 |
-
api_keys = [
|
| 31 |
-
os.getenv("OPENROUTER_API_KEY_1"),
|
| 32 |
-
os.getenv("OPENROUTER_API_KEY_2"),
|
| 33 |
-
os.getenv("OPENROUTER_API_KEY_3"),
|
| 34 |
-
os.getenv("OPENROUTER_API_KEY_4"),
|
| 35 |
-
]
|
| 36 |
-
api_keys = [k for k in api_keys if k]
|
| 37 |
-
errors = []
|
| 38 |
-
for api_key in api_keys:
|
| 39 |
-
try:
|
| 40 |
-
response = requests.post(
|
| 41 |
-
url="https://openrouter.ai/api/v1/chat/completions",
|
| 42 |
-
headers={
|
| 43 |
-
"Authorization": f"Bearer {api_key}",
|
| 44 |
-
"Content-Type": "application/json",
|
| 45 |
-
},
|
| 46 |
-
data=json.dumps({
|
| 47 |
-
"model": model,
|
| 48 |
-
"messages": [
|
| 49 |
-
{
|
| 50 |
-
"role": "user",
|
| 51 |
-
"content": [
|
| 52 |
-
{"type": "text", "text": "What is appear in this image? Please provide a detailed description."},
|
| 53 |
-
{"type": "image_url", "image_url": {"url": url}}
|
| 54 |
-
]
|
| 55 |
-
}
|
| 56 |
-
]
|
| 57 |
-
})
|
| 58 |
-
)
|
| 59 |
-
if response.status_code == 200:
|
| 60 |
-
data = response.json()
|
| 61 |
-
if 'choices' in data and len(data['choices']) > 0:
|
| 62 |
-
return data['choices'][0]['message']['content']
|
| 63 |
-
else:
|
| 64 |
-
errors.append(f"API key {api_key[:8]}...: No choices in response.")
|
| 65 |
-
else:
|
| 66 |
-
errors.append(f"API key {api_key[:8]}...: Status {response.status_code}")
|
| 67 |
-
except Exception as e:
|
| 68 |
-
errors.append(f"API key {api_key[:8]}...: Exception {e}")
|
| 69 |
-
return f"All VLM API requests failed: {' | '.join(errors)}"
|
| 70 |
|
| 71 |
def transcribe_audio(audio_path):
|
| 72 |
-
|
| 73 |
-
with sr.AudioFile(audio_path) as source:
|
| 74 |
-
audio = recognizer.record(source)
|
| 75 |
try:
|
|
|
|
|
|
|
| 76 |
text = recognizer.recognize_google(audio, language='en-US')
|
| 77 |
return text
|
| 78 |
-
except
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
return f"Could not request results; {e}"
|
| 82 |
-
|
| 83 |
-
def evaluate_spoken_english(vlm_description, transcript_input, lang="english"):
|
| 84 |
-
"""
|
| 85 |
-
Evaluate spoken English based on a VLM image description and a transcript.
|
| 86 |
-
Returns a dict with feedback and scores.
|
| 87 |
-
"""
|
| 88 |
-
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
|
| 89 |
-
if not DEEPSEEK_API_KEY:
|
| 90 |
-
raise EnvironmentError("Missing DEEPSEEK_API_KEY in environment.")
|
| 91 |
-
|
| 92 |
-
prompt = f"""
|
| 93 |
-
You are an expert spoken English tutor evaluating learners' spoken English skills based on CEFR (Common European Framework of Reference for Languages) standards. Your evaluation must consider key criteria such as:
|
| 94 |
-
|
| 95 |
-
- Pronunciation and stress
|
| 96 |
-
- Fluency and rhythm
|
| 97 |
-
- Vocabulary range and appropriateness
|
| 98 |
-
- Coherence and structure
|
| 99 |
-
- Grammar (as inferred from the transcript)
|
| 100 |
-
|
| 101 |
-
Important: The transcript below has **no punctuation**, as it is auto-generated from speech. Focus on what can be evaluated reliably based on the content and structure.
|
| 102 |
-
|
| 103 |
-
The learner was shown the following image (described by a vision-language model):
|
| 104 |
-
---
|
| 105 |
-
{vlm_description}
|
| 106 |
-
---
|
| 107 |
-
|
| 108 |
-
Then the learner described the image by voice. This is the auto-generated transcript:
|
| 109 |
-
---
|
| 110 |
-
{transcript_input}
|
| 111 |
-
---
|
| 112 |
-
|
| 113 |
-
Please evaluate the learner’s spoken English and return a JSON response including:
|
| 114 |
-
1. "relevance_score": Score out of 100 for how relevant the speech was to the image.
|
| 115 |
-
2. "fluency_score": Score out of 100 for fluency and smoothness of speech.
|
| 116 |
-
3. "pronunciation_feedback": Identify any pronunciation issues or common errors.
|
| 117 |
-
4. "mistakes": A list of grammar or vocabulary issues inferred from the transcript.
|
| 118 |
-
5. "corrected_transcript": A corrected version of the transcript with appropriate punctuation and grammar.
|
| 119 |
-
6. "learning_level": Estimated CEFR level (A1–C2).
|
| 120 |
-
7. "tips": Actionable learning advice tailored to the learner’s weaknesses.
|
| 121 |
-
8. "highlight": Something strong or impressive in the learner’s speaking (e.g., good phrasing or word choice).
|
| 122 |
-
9. "motivational_comment": A short, encouraging note to boost the learner’s confidence.
|
| 123 |
-
|
| 124 |
-
Respond in clean JSON format only.
|
| 125 |
-
"""
|
| 126 |
|
| 127 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 128 |
response = requests.post(
|
| 129 |
-
|
| 130 |
-
headers={"Authorization": f"Bearer {
|
| 131 |
json={
|
| 132 |
"model": "deepseek-chat",
|
| 133 |
-
"messages": [
|
| 134 |
-
{
|
| 135 |
-
"role": "system",
|
| 136 |
-
"content": "You are a certified spoken English tutor helping learners improve pronunciation, fluency, and confidence. You follow CEFR guidelines and speak warmly to encourage learners."
|
| 137 |
-
},
|
| 138 |
-
{
|
| 139 |
-
"role": "user",
|
| 140 |
-
"content": prompt.strip()
|
| 141 |
-
}
|
| 142 |
-
],
|
| 143 |
"temperature": random.uniform(0.9, 1),
|
| 144 |
-
"
|
| 145 |
}
|
| 146 |
)
|
|
|
|
|
|
|
| 147 |
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
"pronunciation_feedback": "N/A",
|
| 161 |
-
"mistakes": [],
|
| 162 |
-
"corrected_transcript": "N/A",
|
| 163 |
-
"learning_level": "Unknown",
|
| 164 |
-
"highlight": "N/A",
|
| 165 |
-
"motivational_comment": "N/A",
|
| 166 |
-
"tips": [f"Invalid JSON format: {str(decode_err)}", "Raw content:", clean_json_text]
|
| 167 |
-
}
|
| 168 |
-
else:
|
| 169 |
-
return {
|
| 170 |
-
"relevance_score": 0,
|
| 171 |
-
"fluency_score": 0,
|
| 172 |
-
"pronunciation_feedback": "N/A",
|
| 173 |
-
"mistakes": [],
|
| 174 |
-
"corrected_transcript": "N/A",
|
| 175 |
-
"learning_level": "Unknown",
|
| 176 |
-
"highlight": "N/A",
|
| 177 |
-
"motivational_comment": "N/A",
|
| 178 |
-
"tips": [f"API Error: {response.status_code}"]
|
| 179 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 180 |
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
"motivational_comment": "N/A",
|
| 191 |
-
"tips": [f"Exception occurred: {str(e)}"]
|
| 192 |
-
}
|
| 193 |
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
]
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 215 |
|
|
|
|
| 216 |
if __name__ == "__main__":
|
| 217 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import requests
|
| 2 |
import json
|
| 3 |
import random
|
| 4 |
+
from gradio_client import Client
|
|
|
|
| 5 |
import gradio as gr
|
| 6 |
+
from dotenv import load_dotenv
|
| 7 |
+
import os
|
| 8 |
+
import uuid
|
| 9 |
+
from pydub import AudioSegment
|
| 10 |
import speech_recognition as sr
|
| 11 |
|
| 12 |
+
# Load environment variables
load_dotenv()

# API settings (DeepSeek chat-completions endpoint)
API_URL = "https://api.deepseek.com/v1/chat/completions"
API_KEY = os.getenv("DEEPSEEK_API_KEY")
HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")

# TTS settings
TTS_MODEL = os.getenv("TTS_MODEL", "KindSynapse/Youssef-Ahmed-Private-Text-To-Speech-Unlimited")
# NOTE(review): constructing the Client contacts the remote Space at import
# time — a network failure here prevents the whole app from starting.
TTS_CLIENT = Client(TTS_MODEL, hf_token=HF_TOKEN)
TTS_PASSWORD = os.getenv("TTS_PASSWORD")
TTS_VOICE = os.getenv("TTS_VOICE", "coral")
TTS_SEED = int(os.getenv("TTS_SEED", "12345"))

# Speech recognition setup (shared recognizer instance used by transcribe_audio)
recognizer = sr.Recognizer()

# Fail fast if any required secret is missing from the environment
required_env_vars = {
    "DEEPSEEK_API_KEY": API_KEY,
    "HUGGINGFACE_TOKEN": HF_TOKEN,
    "TTS_PASSWORD": TTS_PASSWORD
}

for var_name, var_value in required_env_vars.items():
    if not var_value:
        raise ValueError(f"Missing required environment variable: {var_name}")
| 40 |
+
|
| 41 |
+
def convert_to_wav(input_path):
    """Convert an arbitrary audio file to WAV.

    Returns the path of the converted file under ``uploads/``, or None when
    the conversion fails (the error is printed, not raised).
    """
    target = os.path.join("uploads", f"converted_{uuid.uuid4()}.wav")
    os.makedirs("uploads", exist_ok=True)
    try:
        AudioSegment.from_file(input_path).export(target, format="wav")
    except Exception as e:
        print(f"Error converting audio: {e}")
        return None
    return target
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
|
| 53 |
def transcribe_audio(audio_path):
    """Transcribe a WAV file to English text via Google Speech Recognition.

    Returns the recognized text, or None on any failure (error is printed).
    """
    try:
        with sr.AudioFile(audio_path) as src:
            captured = recognizer.record(src)
        return recognizer.recognize_google(captured, language='en-US')
    except Exception as e:
        print(f"Error in speech recognition: {e}")
        return None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
|
| 64 |
+
# Main system prompt for the chatbot: fixes the "Sam" persona and the JSON
# reply schema that get_bot_response / format_response depend on.
MAIN_SYSTEM_PROMPT = {
    "role": "system",
    "content": (
        "You are Sam, a friendly and encouraging English conversation tutor. "
        "Your responses must be in JSON with these keys: "
        "'response': Your main response to the user, "
        "'corrections': Grammar or pronunciation corrections if needed, "
        "'vocabulary': Suggested alternative words or phrases, "
        "'level': Assessment of user's English level (beginner/intermediate/advanced), "
        "'encouragement': A motivating comment. "
        "\n\nGuidelines:"
        "\n1. Adapt your language to their level"
        "\n2. Keep conversations natural and engaging"
        "\n3. Focus on their interests and context"
        "\n4. Be patient and supportive"
        "\n5. Provide gentle corrections"
        "\n6. Suggest vocabulary improvements naturally"
        "\n7. Keep responses clear and structured"
    )
}

# Greeting-only system prompt (kept short): asks the model for a single-key
# JSON object {'greeting': ...} consumed by get_welcome_message.
WELCOME_SYSTEM_PROMPT = {
    "role": "system",
    "content": (
        "You are Sam, a friendly English tutor. Create a short, warm welcome message (2-3 sentences max) that: "
        "1) Introduces yourself briefly "
        "2) Asks for the user's name and what they'd like to practice. "
        "Make it casual and friendly. Return ONLY the greeting in JSON format with a single key 'greeting'."
        "Example: {'greeting': 'Hi! I'm Sam, your English buddy. What's your name and what would you like to practice today? 😊'}"
    )
}
|
| 97 |
+
|
| 98 |
+
class EnglishTutor:
    """Stateful tutoring session: DeepSeek chat completions plus remote TTS."""

    def __init__(self):
        # Per-session facts gathered from the model's JSON replies.
        self.user_info = {
            "name": None,
            "level": None,
            "interests": None,
            "goals": None
        }
        # Conversation log sent with every request; seeded with the tutor
        # persona.  (The original assigned chat_history twice — once is enough.)
        self.chat_history = [MAIN_SYSTEM_PROMPT]

    def get_welcome_message(self):
        """Generate a unique greeting via the API and return its text."""
        response = requests.post(
            API_URL,
            headers={"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"},
            json={
                "model": "deepseek-chat",
                "messages": [WELCOME_SYSTEM_PROMPT],
                "temperature": random.uniform(0.9, 1.0),
                "response_format": {"type": "json_object"}
            },
            timeout=60,  # don't hang the UI forever on a stalled request
        )
        # Surface HTTP errors explicitly instead of a confusing KeyError below.
        response.raise_for_status()
        welcome_json = json.loads(response.json()["choices"][0]["message"]["content"])
        return welcome_json["greeting"]

    def get_bot_response(self, user_message):
        """Send the user's message with full history; return the parsed JSON reply."""
        self.chat_history.append({"role": "user", "content": user_message})

        response = requests.post(
            API_URL,
            headers={"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"},
            json={
                "model": "deepseek-chat",
                "messages": self.chat_history,
                "temperature": random.uniform(0.9, 1.0),
                "response_format": {"type": "json_object"}
            },
            timeout=60,
        )
        response.raise_for_status()

        bot_message = response.json()["choices"][0]["message"]["content"]
        bot_json = json.loads(bot_message)

        # Track the model's running assessment of the learner's level.
        if "level" in bot_json:
            self.user_info["level"] = bot_json["level"]

        self.chat_history.append({"role": "assistant", "content": bot_message})
        return bot_json

    def text_to_speech(self, text):
        """Synthesize speech for *text* via the remote TTS Space client."""
        # Strip stray wrapping quotes the model sometimes emits.
        text = text.strip()
        if text.startswith('"') and text.endswith('"'):
            text = text[1:-1]

        tts_emotion = "Warm, encouraging, and clear with a friendly and supportive tone."

        return TTS_CLIENT.predict(
            password=TTS_PASSWORD,
            prompt=text,
            voice=TTS_VOICE,
            emotion=tts_emotion,
            # NOTE(review): use_random_seed=True likely makes specific_seed
            # ignored by the Space — confirm the intended behavior.
            use_random_seed=True,
            specific_seed=TTS_SEED,
            api_name="/text_to_speech_app"
        )
|
| 169 |
+
|
| 170 |
+
# Create a single instance of EnglishTutor
# Module-level singleton: all Gradio callbacks share this one session, so the
# conversation history is global to the app process (not per browser user).
tutor = EnglishTutor()
|
| 172 |
+
|
| 173 |
+
def format_response(response_dict):
    """Format the tutor's JSON reply into an HTML snippet for the chatbot.

    ``response`` is required; the optional sections use ``.get()`` so a reply
    missing a key (the LLM does not always emit every field) renders without
    raising KeyError.
    """
    html = f"<div style='font-size: 16px;'>"
    html += f"<p>{response_dict['response']}</p>"

    if response_dict.get('corrections'):
        html += f"<p><b>✍️ Corrections:</b> {response_dict['corrections']}</p>"

    if response_dict.get('vocabulary'):
        html += f"<p><b>📚 Vocabulary:</b> {response_dict['vocabulary']}</p>"

    if response_dict.get('encouragement'):
        html += f"<p><b>🌟 Encouragement:</b> {response_dict['encouragement']}</p>"

    html += "</div>"
    return html
|
| 189 |
+
|
| 190 |
+
def chat(audio, history):
    """Handle one voice turn: transcribe, query the tutor, return (history, audio).

    Returns the (possibly unchanged) chat history and the path of the
    synthesized reply audio, or None for the audio when the turn is skipped.
    """
    if audio is None:
        # Nothing recorded — leave the conversation untouched.
        return history, None

    # Convert the recording to WAV and transcribe it.
    wav_path = convert_to_wav(audio)
    if wav_path is None:
        return history, None

    audio_text = transcribe_audio(wav_path)
    # Clean up the temporary file regardless of transcription success.
    os.remove(wav_path)

    if not audio_text:
        return history, None

    # Get the structured tutor response.
    response = tutor.get_bot_response(audio_text)

    # Generate audio for the main response text.
    audio_path = tutor.text_to_speech(response["response"])[0]

    # Format the complete response as HTML.
    formatted_response = format_response(response)

    # The chatbot is configured with type="messages", so history entries must
    # be role/content dicts — (user, bot) tuples are rejected by Gradio.
    history = history or []
    history.append({"role": "user", "content": audio_text})
    history.append({"role": "assistant", "content": formatted_response})

    return history, audio_path
|
| 222 |
+
|
| 223 |
+
def show_welcome():
    """Produce the opening greeting (chat history entry + TTS audio) at page load."""
    welcome = tutor.get_welcome_message()
    audio_path = tutor.text_to_speech(welcome)[0]
    # messages-format entry for the type="messages" chatbot — a (None, text)
    # tuple is the legacy format and is rejected by Gradio here.
    return [{"role": "assistant", "content": welcome}], audio_path
|
| 228 |
+
|
| 229 |
+
# Create Gradio interface
with gr.Blocks(css="footer {display: none}") as demo:
    gr.Markdown("# 🤖 Sam - Your English Tutor")
    gr.Markdown("Welcome to your personalized English learning session! Click the microphone and start speaking!")

    chatbot = gr.Chatbot(
        show_label=False,
        height=400,
        type="messages"
    )

    with gr.Row():
        # `sources` (a list) is the Gradio 4.x parameter; the removed
        # pre-4.x `source="microphone"` keyword raises a TypeError on the
        # versions that support type="messages" above.
        audio_input = gr.Audio(
            sources=["microphone"],
            type="filepath",
            label="Speak here",
            show_label=True
        )
        audio_output = gr.Audio(
            label="Sam's Voice",
            show_label=True,
            type="filepath"
        )

    # Handle audio input: each finished recording runs one chat turn.
    audio_input.stop_recording(
        fn=chat,
        inputs=[audio_input, chatbot],
        outputs=[chatbot, audio_output],
        queue=False
    )

    # Show welcome message on page load.  Blocks has no `load_event` method —
    # the load-time hook is `demo.load`.
    demo.load(
        fn=show_welcome,
        inputs=None,
        outputs=[chatbot, audio_output]
    )
|
| 267 |
|
| 268 |
+
# Launch the interface
if __name__ == "__main__":
    # Bind to all interfaces on port 7860 — the standard HF Spaces setup.
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860
    )
|