Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -47,7 +47,6 @@ def transcribe(audio_path):
|
|
| 47 |
text = model.stt_file(audio_file)[0]
|
| 48 |
return text
|
| 49 |
|
| 50 |
-
# Inference function using Hugging Face InferenceClient
|
| 51 |
# Inference function using Hugging Face InferenceClient
|
| 52 |
@spaces.GPU(duration=120)
|
| 53 |
def model(text):
|
|
@@ -63,7 +62,7 @@ def model(text):
|
|
| 63 |
do_sample=True,
|
| 64 |
seed=42,
|
| 65 |
)
|
| 66 |
-
formatted_prompt = system_instructions + text + "[OralCoach]"
|
| 67 |
stream = client.text_generation(
|
| 68 |
formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
|
| 69 |
output = ""
|
|
@@ -80,7 +79,7 @@ async def generate_audio_feedback(feedback_text):
|
|
| 80 |
await communicate.save(tmp_path)
|
| 81 |
return tmp_path
|
| 82 |
|
| 83 |
-
#
|
| 84 |
async def generate_feedback(user_id, question_choice, strategy_choice, message, feedback_level):
|
| 85 |
current_question_index = thinkingframes.questions.index(question_choice)
|
| 86 |
strategy, explanation = thinkingframes.strategy_options[strategy_choice]
|
|
@@ -174,15 +173,6 @@ async def predict(question_choice, strategy_choice, feedback_level, audio):
|
|
| 174 |
chat_history.append(("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "Transcription complete. Generating feedback. Please continue listening to your oral response while waiting ..."))
|
| 175 |
yield chat_history, current_audio_output
|
| 176 |
|
| 177 |
-
moderation_response = client.moderations.create(input=student_response)
|
| 178 |
-
flagged = any(result.flagged for result in moderation_response.results)
|
| 179 |
-
if flagged:
|
| 180 |
-
moderated_message = "The message has been flagged. Please see your teacher to clarify."
|
| 181 |
-
questionNo = thinkingframes.questions.index(question_choice) + 1
|
| 182 |
-
add_submission(int(user_state.value), moderated_message, "", int(0), "", questionNo)
|
| 183 |
-
yield chat_history, current_audio_output
|
| 184 |
-
return
|
| 185 |
-
|
| 186 |
accumulated_feedback = "" # Variable to store the accumulated feedback
|
| 187 |
|
| 188 |
async for feedback_chunk in generate_feedback(int(user_state.value), question_choice, strategy_choice, student_response, feedback_level):
|
|
@@ -268,4 +258,4 @@ with gr.Blocks(title="Oral Coach powered by ZeroGPU⚡ϞϞ(๑⚈ ․̫ ⚈๑)
|
|
| 268 |
create_teachers_dashboard_tab()
|
| 269 |
|
| 270 |
demo.queue(max_size=20)
|
| 271 |
-
demo.launch()
|
|
|
|
| 47 |
text = model.stt_file(audio_file)[0]
|
| 48 |
return text
|
| 49 |
|
|
|
|
| 50 |
# Inference function using Hugging Face InferenceClient
|
| 51 |
@spaces.GPU(duration=120)
|
| 52 |
def model(text):
|
|
|
|
| 62 |
do_sample=True,
|
| 63 |
seed=42,
|
| 64 |
)
|
| 65 |
+
formatted_prompt = system_instructions + text + "[OralCoach]"
|
| 66 |
stream = client.text_generation(
|
| 67 |
formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
|
| 68 |
output = ""
|
|
|
|
| 79 |
await communicate.save(tmp_path)
|
| 80 |
return tmp_path
|
| 81 |
|
| 82 |
+
# Generating feedback for the Oral Coach
|
| 83 |
async def generate_feedback(user_id, question_choice, strategy_choice, message, feedback_level):
|
| 84 |
current_question_index = thinkingframes.questions.index(question_choice)
|
| 85 |
strategy, explanation = thinkingframes.strategy_options[strategy_choice]
|
|
|
|
| 173 |
chat_history.append(("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "Transcription complete. Generating feedback. Please continue listening to your oral response while waiting ..."))
|
| 174 |
yield chat_history, current_audio_output
|
| 175 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 176 |
accumulated_feedback = "" # Variable to store the accumulated feedback
|
| 177 |
|
| 178 |
async for feedback_chunk in generate_feedback(int(user_state.value), question_choice, strategy_choice, student_response, feedback_level):
|
|
|
|
| 258 |
create_teachers_dashboard_tab()
|
| 259 |
|
| 260 |
demo.queue(max_size=20)
|
| 261 |
+
demo.launch(share=False)
|