Update app.py
app.py CHANGED
@@ -1,14 +1,3 @@
-import subprocess
-
-# Install flash attention
-subprocess.run(
-    "pip install flash-attn --no-build-isolation",
-    env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
-    shell=True,
-    check=True  # This will raise an exception if the command fails
-)
-
-# Rest of your app.py code
 import gradio as gr
 import asyncio
 import os
@@ -16,7 +5,6 @@ import thinkingframes
 import soundfile as sf
 import numpy as np
 import logging
-from transformers import pipeline
 from dotenv import load_dotenv
 from policy import user_acceptance_policy
 from styles import theme
@@ -39,21 +27,21 @@ engines = {default_lang: Model(default_lang)}
 # For maintaining user session (to keep track of userID)
 user_state = gr.State(value="")
 
-@spaces.GPU(duration=120)
-def transcribe(audio):
-    lang = "en"
-    model = engines[lang]
-    text = model.stt_file(audio)[0]
-    return text
-
 # Load the Meta-Llama-3-8B model from Hugging Face
-llm =
+llm = gr.load("meta-llama/Meta-Llama-3-8B", src="models")
 
 image_path = "picturePerformance.jpg"
 img_html = get_image_html(image_path)
 
 executor = ThreadPoolExecutor()
 
+@spaces.GPU(duration=120)
+def transcribe(audio):
+    lang = "en"
+    model = engines[lang]
+    text = model.stt_file(audio)[0]
+    return text
+
 @spaces.GPU(duration=120)
 def generate_feedback(user_id, question_choice, strategy_choice, message, feedback_level):
     current_question_index = questions.index(question_choice)
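Note on the new `llm = gr.load("meta-llama/Meta-Llama-3-8B", src="models")` line: with `src="models"`, Gradio builds a demo backed by the Hugging Face Inference API instead of downloading the weights into the Space, which would explain why the runtime flash-attn install and the `transformers` import were dropped. A minimal sketch of that usage, assuming access to the gated meta-llama repo (e.g. an HF token configured for the Space); the prompt string is illustrative:

import gradio as gr

# Sketch only: gr.load with src="models" proxies the HF Inference API; no local weights or GPU.
# Assumes the environment has access to the gated meta-llama/Meta-Llama-3-8B model.
llm = gr.load("meta-llama/Meta-Llama-3-8B", src="models")

# Gradio's "use a loaded demo as a function" pattern with a plain text prompt; whether it also
# accepts a list of chat messages (as generate_feedback passes below) is worth verifying.
print(llm("Give one strength and one improvement for this oral response: ..."))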
@@ -67,7 +55,8 @@ def generate_feedback(user_id, question_choice, strategy_choice, message, feedback_level):
         "content": message
     }]
 
-
+    # Use the loaded model for generating feedback
+    feedback = llm(conversation)[0]["generated_text"]
 
     questionNo = current_question_index + 1
     add_submission(user_id, message, feedback, int(0), "", questionNo)
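The `[0]["generated_text"]` indexing above matches the return shape of a `transformers` text-generation pipeline (the import removed in this commit), which yields a list of dicts. For comparison, a self-contained sketch of that pipeline call, assuming local access to the model weights:

from transformers import pipeline

# Sketch only: a text-generation pipeline returns [{"generated_text": ...}],
# the exact shape the [0]["generated_text"] indexing expects.
generator = pipeline("text-generation", model="meta-llama/Meta-Llama-3-8B")
result = generator("Give feedback on this oral response: ...", max_new_tokens=200)
feedback = result[0]["generated_text"]
print(feedback)

Whether the demo returned by `gr.load` produces the same structure for a message-list input is worth confirming, since it proxies the Inference API rather than a local pipeline.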
@@ -86,13 +75,13 @@ async def predict(question_choice, strategy_choice, feedback_level, audio):
     current_audio_output = None
 
     if audio is None:
-        yield [("Oral Coach
+        yield [("Oral Coach ⚡ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "No audio data received. Please try again.")], current_audio_output
         return
 
     sample_rate, audio_data = audio
 
     if audio_data is None or len(audio_data) == 0:
-        yield [("Oral Coach
+        yield [("Oral Coach ⚡ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "No audio data received. Please try again.")], current_audio_output
         return
 
     audio_path = "audio.wav"
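For reference, the `sample_rate, audio_data = audio` unpacking above matches what a Gradio `Audio` input configured with `type="numpy"` delivers: a `(sample_rate, numpy_array)` tuple that `soundfile` can write straight to disk. A small sketch of that handling (the file name is illustrative):

import numpy as np
import soundfile as sf

def save_recording(audio):
    # gr.Audio(type="numpy") passes a (sample_rate, np.ndarray) tuple.
    sample_rate, audio_data = audio
    if not isinstance(audio_data, np.ndarray):
        raise ValueError("audio_data must be a numpy array")
    sf.write("audio.wav", audio_data, sample_rate)  # format inferred from the .wav extension
    return "audio.wav"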
@@ -100,7 +89,7 @@ async def predict(question_choice, strategy_choice, feedback_level, audio):
         raise ValueError("audio_data must be a numpy array")
     sf.write(audio_path, audio_data, sample_rate)
 
-    chat_history = [("Oral Coach
+    chat_history = [("Oral Coach ⚡ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "Transcribing your audio, please listen to your oral response while waiting ...")]
     yield chat_history, current_audio_output
 
     try:
@@ -108,19 +97,19 @@ async def predict(question_choice, strategy_choice, feedback_level, audio):
         student_response = await asyncio.wrap_future(transcription_future)
 
         if not student_response.strip():
-            yield [("Oral Coach
+            yield [("Oral Coach ⚡ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "Transcription failed. Please try again or seek assistance.")], current_audio_output
             return
 
         chat_history.append(("Student", student_response))
         yield chat_history, current_audio_output
 
-        chat_history.append(("Oral Coach
+        chat_history.append(("Oral Coach ⚡ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "Transcription complete. Generating feedback. Please continue listening to your oral response while waiting ..."))
         yield chat_history, current_audio_output
 
         feedback_future = executor.submit(generate_feedback, int(user_state.value), question_choice, strategy_choice, student_response, feedback_level)
         feedback = await asyncio.wrap_future(feedback_future)
 
-        chat_history.append(("Oral Coach
+        chat_history.append(("Oral Coach ⚡ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", feedback))
         yield chat_history, current_audio_output
 
         audio_future = executor.submit(generate_audio_feedback, feedback)
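The `executor.submit(...)` / `await asyncio.wrap_future(...)` pattern above runs the blocking transcription and feedback calls in a thread pool so the async `predict` generator can keep yielding UI updates in between. A self-contained sketch of the same pattern (the worker function is a stand-in, not from app.py):

import asyncio
import time
from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor()

def blocking_work(prompt):  # stand-in for transcribe / generate_feedback
    time.sleep(1)           # simulate a slow model call
    return f"feedback for: {prompt}"

async def main():
    future = executor.submit(blocking_work, "my oral response")
    result = await asyncio.wrap_future(future)  # awaitable; does not block the event loop
    print(result)

asyncio.run(main())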
@@ -131,7 +120,7 @@ async def predict(question_choice, strategy_choice, feedback_level, audio):
 
     except Exception as e:
         logging.error(f"An error occurred: {str(e)}", exc_info=True)
-        yield [("Oral Coach
+        yield [("Oral Coach ⚡ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "An error occurred. Please try again or seek assistance.")], current_audio_output
 
 # Modify the toggle_oral_coach_visibility function to call add_user_privacy and store the returned user_id in user_state.value
 def toggle_oral_coach_visibility(class_name, index_no, policy_checked):
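Because `predict` is an async generator, each `yield chat_history, current_audio_output` streams an intermediate state to the UI. A minimal sketch of wiring such a generator to Gradio components; the component names and handler here are illustrative, not taken from app.py:

import asyncio
import gradio as gr

async def coach(message):
    history = [("Oral Coach", "Transcribing your audio ...")]
    yield history, None                    # show a status message first
    await asyncio.sleep(1)                 # stand-in for transcription + feedback generation
    history.append(("Oral Coach", f"Feedback on: {message}"))
    yield history, None                    # then stream the final feedback

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    audio_out = gr.Audio()
    textbox = gr.Textbox()
    textbox.submit(coach, inputs=textbox, outputs=[chatbot, audio_out])

demo.launch()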