diana3135 committed on
Commit
663ed9b
·
1 Parent(s): 60cc9a1

update evaluation prompt

Browse files
Files changed (1) hide show
  1. utils.py +4 -5
utils.py CHANGED
@@ -73,11 +73,10 @@ def get_evaluation_with_gpt(task_description, text, api_key=None):
73
  prompt = (
74
  f"Given the task: {task_description}, the provided answer is: {text}\n"
75
  f"Please evaluate the answer based on the following criteria, using a scale from 0 to 10, where:\n"
76
- f"0-2 reflects very poor quality, with minimal value or relevance.\n"
77
- f"3-4 indicates below-average quality with significant shortcomings.\n"
78
- f"5 represents acceptable quality.\n"
79
- f"6-8 signifies good to very good quality, showing substantial thought and accuracy.\n"
80
- f"9-10 represents exceptional quality, with outstanding insight or depth.\n\n"
81
  f"Provide both a score and a brief comment (1 sentence) for each criterion.\n"
82
  f"Please format the output exactly as follows:\n"
83
  f"Novelty: [Score]\nComment: [Short comment on Novelty]\n"
 
73
  prompt = (
74
  f"Given the task: {task_description}, the provided answer is: {text}\n"
75
  f"Please evaluate the answer based on the following criteria, using a scale from 0 to 10, where:\n"
76
+ f"0-4 reflects below-average quality, with minimal value or relevance.\n"
77
+ f"5 represents acceptable quality, including general, acceptable suggestions but lacks specificity or detailed, actionable advice.\n"
78
+ f"6-10 signifies good to very good quality, showing substantial thought, accuracy, and some level of specificity.\n"
79
+ f"When evaluating, prioritize responses with specific, actionable suggestions over general, abstract ideas.\n"
 
80
  f"Provide both a score and a brief comment (1 sentence) for each criterion.\n"
81
  f"Please format the output exactly as follows:\n"
82
  f"Novelty: [Score]\nComment: [Short comment on Novelty]\n"