diana3135 committed
Commit aee476b · Parent(s): 663ed9b
evaluation prompt
utils.py
CHANGED
@@ -69,15 +69,36 @@ def modify_with_suggestion(task_description, text, suggestions, api_key = None):
         f"Modify the answer based on the following suggestions: {suggestions}"
     return generate_text_with_gpt(prompt, api_key)
 
+# def get_evaluation_with_gpt(task_description, text, api_key=None):
+#     prompt = (
+#         f"Given the task: {task_description}, the provided answer is: {text}\n"
+#         f"Please evaluate the answer based on the following criteria, using a scale from 0 to 10, where:\n"
+#         f"0-4 reflects below-average quality, with minimal value or relevance.\n"
+#         f"5 represents acceptable quality, including general, acceptable suggestions but lacks specificity or detailed, actionable advice.\n"
+#         f"6-10 signifies good to very good quality, showing substantial thought, accuracy, and some level of specificity.\n"
+#         f"When evaluating, prioritize responses with specific, actionable suggestions over general, abstract ideas.\n"
+#         f"Provide both a score and a brief comment (1 sentence) for each criterion.\n"
+#         f"Please format the output exactly as follows:\n"
+#         f"Novelty: [Score]\nComment: [Short comment on Novelty]\n"
+#         f"Implementability: [Score]\nComment: [Short comment on Feasibility]\n"
+#         f"Defensibility: [Score]\nComment: [Short comment on Defensibility]\n"
+#         f"Overall Score: [Score]\nOverall Comment: [Overall feedback on the answer]\n"
+#     )
+#     return generate_text_with_gpt(prompt, api_key)
+
 def get_evaluation_with_gpt(task_description, text, api_key=None):
     prompt = (
         f"Given the task: {task_description}, the provided answer is: {text}\n"
-        f"Please evaluate the answer based on the following criteria, using a scale from 0 to 10, where:\n"
-        f"0-4 reflects below-average quality, with minimal value or relevance.\n"
-        f"5 represents acceptable quality, including general, acceptable suggestions but lacks specificity or detailed, actionable advice.\n"
-        f"6-10 signifies good to very good quality, showing substantial thought, accuracy, and some level of specificity.\n"
-        f"When evaluating, prioritize responses with specific, actionable suggestions over general, abstract ideas.\n"
-        f"Provide both a score and a brief comment (1 sentence) for each criterion.\n"
+        f"Evaluate the answer based on these revised criteria, using a scale from 0 to 10. Scores should reflect stricter quality thresholds:\n"
+        f"0-4: Below average. The answer includes some relevant ideas but has no valuable contribution.\n"
+        f"5-7: Fair to average quality. The answer is relevant, with some specific insights but minor weaknesses.\n"
+        f"8-10: Good to excellent quality. The answer is insightful, valuable, and actionable.\n"
+        f"When evaluating, use the entire scoring range and avoid defaulting to mid-range scores unless clearly warranted.\n\n"
+        f"Evaluate based on the criteria:\n\n"
+        f"Novelty: The uniqueness and innovation of the ideas."
+        f"Implementability: The practicality of suggested actions.\n"
+        f"Defensibility: Sustainable competitive advantage and resilience against competitors.\n"
+
         f"Please format the output exactly as follows:\n"
        f"Novelty: [Score]\nComment: [Short comment on Novelty]\n"
        f"Implementability: [Score]\nComment: [Short comment on Feasibility]\n"
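The revised prompt pins the model's reply to a fixed "Criterion: [Score]" / "Comment: ..." layout, which makes it easy to post-process. As a rough sketch (not part of this commit), the reply returned by get_evaluation_with_gpt could be parsed into per-criterion scores; the parse_evaluation helper, its regex, and the sample reply below are illustrative assumptions, not code from utils.py.

import re

# Hypothetical helper (not in utils.py): turn the formatted reply produced by
# get_evaluation_with_gpt into a {criterion: {"score": ..., "comment": ...}} dict.
def parse_evaluation(reply: str) -> dict:
    result = {}
    # Each criterion appears as "Name: <score>" followed by a comment line,
    # e.g. "Novelty: 7\nComment: ..."; "Overall Score" uses "Overall Comment".
    pattern = r"([A-Za-z ]+):\s*(\d+)\s*\n(?:Overall )?Comment:\s*([^\n]*)"
    for criterion, score, comment in re.findall(pattern, reply):
        result[criterion.strip()] = {"score": int(score), "comment": comment.strip()}
    return result

# Illustrative reply in the layout the prompt requests (not real model output).
sample_reply = (
    "Novelty: 7\nComment: Familiar idea with a modest twist.\n"
    "Implementability: 8\nComment: Steps are concrete and low-cost.\n"
)
print(parse_evaluation(sample_reply))
# {'Novelty': {'score': 7, 'comment': 'Familiar idea with a modest twist.'},
#  'Implementability': {'score': 8, 'comment': 'Steps are concrete and low-cost.'}}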