nryadav18 commited on
Commit
b1bfca8
·
verified ·
1 Parent(s): da089aa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -25,16 +25,16 @@ async def health_check():
25
 
26
  @app.post("/evaluate")
27
  async def evaluate_code(request: EvalRequest):
28
- prompt = f"Task Description:\n{request.task_description}\n\nSubmitted Code:\n{request.python_code}\n\nEvaluate the code against the task. Assign a final score out of 10. Keep your feedback concise and helpful."
 
29
 
30
  response = llm.create_chat_completion(
31
  messages=[
32
- # Framing the model specifically for grading student submissions
33
- {"role": "system", "content": "You are an expert Python instructor. You evaluate student code submissions accurately, checking for logical correctness and task completion."},
34
  {"role": "user", "content": prompt}
35
  ],
36
- max_tokens=250, # Limit response length to keep API fast
37
- temperature=0.2 # Low temperature for consistent scoring
38
  )
39
 
40
  return {"evaluation": response['choices'][0]['message']['content']}
 
25
 
26
  @app.post("/evaluate")
27
  async def evaluate_code(request: EvalRequest):
28
+ # Removed the extra prompt instructions to save tokens and reduce response latency
29
+ prompt = f"{request.task_description}\n\nStudent Code:\n{request.python_code}"
30
 
31
  response = llm.create_chat_completion(
32
  messages=[
33
+ {"role": "system", "content": "You are a friendly Python grader. Output only valid JSON."},
 
34
  {"role": "user", "content": prompt}
35
  ],
36
+ max_tokens=250,
37
+ temperature=0.1 # Lowered slightly for more consistent, deterministic JSON output
38
  )
39
 
40
  return {"evaluation": response['choices'][0]['message']['content']}