diana3135 committed on
Commit
37ef775
·
1 Parent(s): b8646be

adjust evaluation prompt

Browse files
Files changed (1) hide show
  1. utils.py +4 -3
utils.py CHANGED
@@ -73,9 +73,10 @@ def get_evaluation_with_gpt(task_description, text, api_key=None):
73
  prompt = (
74
  f"Given the task: {task_description}, the provided answer is: {text}\n"
75
  f"Evaluate the answer using a scale from 0 to 10. Scores should reflect stricter quality thresholds:\n"
76
- f"0-4: Below average. The answer includes some relevant ideas but has no valuable contribution.\n"
77
- f"5-7: Fair to average quality. The answer is relevant, with some specific insights.\n"
78
- f"8-10: Good to excellent quality. The answer is insightful, valuable, and actionable.\n"
 
79
  f"When evaluating, use the entire scoring range and avoid defaulting to mid-range scores.\n\n"
80
  f"Evaluate based on the criteria:\n\n"
81
  f"Novelty: The uniqueness and innovation of the ideas."
 
73
  prompt = (
74
  f"Given the task: {task_description}, the provided answer is: {text}\n"
75
  f"Evaluate the answer using a scale from 0 to 10. Scores should reflect stricter quality thresholds:\n"
76
+ f"0-4 indicates below-average quality with significant shortcomings.\n"
77
+ f"5-6 represents acceptable quality.\n"
78
+ f"7-8 signifies good quality, showing substantial thought.\n"
79
+ f"9-10 represents exceptional quality, with insightful, and highly concrete suggestions.\n\n"
80
  f"When evaluating, use the entire scoring range and avoid defaulting to mid-range scores.\n\n"
81
  f"Evaluate based on the criteria:\n\n"
82
  f"Novelty: The uniqueness and innovation of the ideas."