Update model_evaluation.py
model_evaluation.py  +12 -3
model_evaluation.py
CHANGED
@@ -81,22 +81,31 @@ def generate_response(question):
 def generate_response_with_context(user_query, relevant_segment):
     """
     Generate a response based on a user query and a relevant segment.
-
+
     Parameters:
     - user_query (str): The user's query.
    - relevant_segment (str): A relevant fact or detail.
-
+
     Returns:
     - str: Formatted response incorporating the relevant segment.
     """
     try:
+        # Prepare the prompt incorporating the relevant segment
         prompt = f"Thank you for your question! Here is an additional fact about your topic: {relevant_segment}"
+
+        # Calculate the maximum tokens allowed for the response
         max_tokens = len(tokenizer(prompt)['input_ids']) + 50
+
+        # Generate the response using the model
         response = gpt_model(prompt, max_length=max_tokens, temperature=0.25)[0]['generated_text']
+
+        # Clean up the response for better formatting and clarity
         return clean_up_response(response, relevant_segment)
+
     except Exception as e:
         print(f"Error generating response: {e}")
-        return ""
+        return "I'm sorry, but there was an error generating your response. Please try again."
+

 def clean_up_response(response, segment):
     """
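For context, the generation call in this hunk matches the Hugging Face transformers text-generation pipeline API, so `tokenizer` and `gpt_model` are presumably created earlier in model_evaluation.py, outside the changed lines. The sketch below is a minimal, hypothetical reproduction of that setup; the "gpt2" checkpoint and the sample segment are placeholders, not taken from the repository. It only illustrates how the prompt, the token budget, and the generation call fit together.

# Hypothetical setup, assuming `tokenizer` and `gpt_model` are a Hugging Face
# tokenizer and text-generation pipeline; "gpt2" is a placeholder checkpoint.
from transformers import AutoTokenizer, pipeline

tokenizer = AutoTokenizer.from_pretrained("gpt2")
gpt_model = pipeline("text-generation", model="gpt2", tokenizer=tokenizer)

# Build the prompt the same way the patched function does.
relevant_segment = "Octopuses have three hearts."  # example retrieved fact
prompt = f"Thank you for your question! Here is an additional fact about your topic: {relevant_segment}"

# max_length counts prompt tokens plus generated tokens, so budgeting
# len(prompt tokens) + 50 leaves roughly 50 tokens for the completion.
max_tokens = len(tokenizer(prompt)["input_ids"]) + 50
result = gpt_model(prompt, max_length=max_tokens, temperature=0.25)
print(result[0]["generated_text"])

Budgeting max_length as prompt length plus a small constant keeps the completion short regardless of how long the retrieved segment is, which is the design choice the new inline comment in the diff points at.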