Spaces:
Running
Running
diana3135
committed on
Commit
·
9ccc332
1
Parent(s):
80c4a05
update evaluation prompt
Browse files
utils.py
CHANGED
|
@@ -69,23 +69,6 @@ def modify_with_suggestion(task_description, text, suggestions, api_key = None):
|
|
| 69 |
f"Modify the answer based on the following suggestions: {suggestions}"
|
| 70 |
return generate_text_with_gpt(prompt, api_key)
|
| 71 |
|
| 72 |
-
# def get_evaluation_with_gpt(task_description, text, api_key=None):
|
| 73 |
-
# prompt = (
|
| 74 |
-
# f"Given the task: {task_description}, the provided answer is: {text}\n"
|
| 75 |
-
# f"Please evaluate the answer based on the following criteria, using a scale from 0 to 10, where:\n"
|
| 76 |
-
# f"0-4 reflects below-average quality, with minimal value or relevance.\n"
|
| 77 |
-
# f"5 represents acceptable quality, including general, acceptable suggestions but lacks specificity or detailed, actionable advice.\n"
|
| 78 |
-
# f"6-10 signifies good to very good quality, showing substantial thought, accuracy, and some level of specificity.\n"
|
| 79 |
-
# f"When evaluating, prioritize responses with specific, actionable suggestions over general, abstract ideas.\n"
|
| 80 |
-
# f"Provide both a score and a brief comment (1 sentence) for each criterion.\n"
|
| 81 |
-
# f"Please format the output exactly as follows:\n"
|
| 82 |
-
# f"Novelty: [Score]\nComment: [Short comment on Novelty]\n"
|
| 83 |
-
# f"Implementability: [Score]\nComment: [Short comment on Feasibility]\n"
|
| 84 |
-
# f"Defensibility: [Score]\nComment: [Short comment on Defensibility]\n"
|
| 85 |
-
# f"Overall Score: [Score]\nOverall Comment: [Overall feedback on the answer]\n"
|
| 86 |
-
# )
|
| 87 |
-
# return generate_text_with_gpt(prompt, api_key)
|
| 88 |
-
|
| 89 |
def get_evaluation_with_gpt(task_description, text, api_key=None):
|
| 90 |
prompt = (
|
| 91 |
f"Given the task: {task_description}, the provided answer is: {text}\n"
|
|
@@ -101,7 +84,7 @@ def get_evaluation_with_gpt(task_description, text, api_key=None):
|
|
| 101 |
|
| 102 |
f"Please format the output exactly as follows:\n"
|
| 103 |
f"Novelty: [Score]\nComment: [Short comment on Novelty]\n"
|
| 104 |
-
f"Implementability: [Score]\nComment: [Short comment on Feasibility]\n"
|
| 105 |
f"Defensibility: [Score]\nComment: [Short comment on Defensibility]\n"
|
| 106 |
f"Overall Score: [Score]\nOverall Comment: [Overall feedback on the answer]\n"
|
| 107 |
)
|
|
|
|
| 69 |
f"Modify the answer based on the following suggestions: {suggestions}"
|
| 70 |
return generate_text_with_gpt(prompt, api_key)
|
| 71 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
def get_evaluation_with_gpt(task_description, text, api_key=None):
|
| 73 |
prompt = (
|
| 74 |
f"Given the task: {task_description}, the provided answer is: {text}\n"
|
|
|
|
| 84 |
|
| 85 |
f"Please format the output exactly as follows:\n"
|
| 86 |
f"Novelty: [Score]\nComment: [Short comment on Novelty]\n"
|
| 87 |
+
f"Implementability: [Score]\nComment: [Short comment on Implementability]\n"
|
| 88 |
f"Defensibility: [Score]\nComment: [Short comment on Defensibility]\n"
|
| 89 |
f"Overall Score: [Score]\nOverall Comment: [Overall feedback on the answer]\n"
|
| 90 |
)
|