Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
def get_feedback(session_data):
    """Generate model-written feedback for a Q&A session.

    Builds a single prompt from every question/answer pair in the session,
    runs it through the module-level ``tokenizer``/``model`` pair, and
    decodes the generated sequence back into text.

    Args:
        session_data: Iterable of mappings, each with ``'question'`` and
            ``'answer'`` keys (presumably strings — confirm against caller).

    Returns:
        str: The decoded model output (the feedback text), with special
        tokens stripped.
    """
    # Assemble the prompt with join() instead of repeated += (avoids
    # quadratic string concatenation when sessions are long).
    parts = ["Session Feedback:\n"]
    for qa in session_data:
        parts.append(f"Question: {qa['question']}\nAnswer: {qa['answer']}\n")
    # Plain literal: the original used an f-string with no placeholders.
    parts.append("\nScore:")
    input_text = "".join(parts)

    # Truncate the prompt to 512 tokens so oversized sessions don't error out.
    inputs = tokenizer.encode(input_text, return_tensors="pt", truncation=True, max_length=512)
    # NOTE(review): generate()'s max_length counts the prompt tokens too, so a
    # 512-token prompt already exceeds max_length=500 — consider max_new_tokens
    # instead. Left unchanged here to preserve existing output behavior.
    outputs = model.generate(inputs, max_length=500, num_return_sequences=1, pad_token_id=tokenizer.eos_token_id)

    # Decode the first (and only requested) generated sequence to text.
    feedback = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return feedback
# Gradio Interface Functions
|
| 49 |
def question_interface(response, history):
|
| 50 |
"""
|