IsmaelMousa committed
Commit b5ee2ea · verified · 1 parent: b948c0f

Update app.py

Files changed (1): app.py (+6 −7)
app.py CHANGED
@@ -8,9 +8,9 @@ from json import loads
 checkpoint = "IsmaelMousa/SmolLM2-135M-Instruct-EngSaf-217K"
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-tokenizer = AutoTokenizer.from_pretrained(checkpoint)
-model = AutoModelForCausalLM.from_pretrained(checkpoint)
-assistant = pipeline("text-generation", tokenizer=tokenizer, model=model, device=device)
+tokenizer = AutoTokenizer.from_pretrained(checkpoint)
+model = AutoModelForCausalLM.from_pretrained(checkpoint)
+assistant = pipeline("text-generation", tokenizer=tokenizer, model=model, device=device)
 
 
 def extract(text):
@@ -26,7 +26,7 @@ def extract(text):
     except: return response
 
 
-def grade(question, reference_answer, student_answer, mark_scheme, max_new_tokens):
+def grade(question, reference_answer, student_answer, mark_scheme):
     system_content = "You are a grading assistant. Evaluate student answers based on the mark scheme. Respond only in JSON format with keys \"score\" (int) and \"rationale\" (string)."
 
     user_content = ("Provide both a score and a rationale by evaluating the student's answer strictly within the mark scheme range, "
@@ -40,7 +40,7 @@ def grade(question, reference_answer, student_answer, mark_scheme, max_new_tokens):
 
     inputs = tokenizer.apply_chat_template(messages, tokenize=False)
 
-    output = assistant(inputs, max_new_tokens=max_new_tokens, do_sample=False, return_full_text=False)[0]["generated_text"]
+    output = assistant(inputs, max_new_tokens=128, do_sample=False, return_full_text=False)[0]["generated_text"]
     parsed = extract(output)
 
     return parsed
@@ -51,8 +51,7 @@ demo = gr.Interface(fn =grade,
                     gr.Textbox(label="Reference Answer"),
                     gr.Textbox(label="Student Answer"),
                     gr.Textbox(label="Mark Scheme"),
-                    gr.Slider(minimum=1, maximum=512, value=128, step=1, label="Max new tokens")],
-                    outputs=gr.JSON(label="Evaluation Output"))
+                    outputs=gr.JSON (label="Evaluation Output"))
 
 
 if __name__ == "__main__": demo.launch()
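
Net effect of the commit: grade no longer exposes a max_new_tokens parameter, the matching gr.Slider input is dropped from the interface, and the generation budget is pinned at 128 new tokens inside grade. A minimal sketch of the resulting generation path is below; the messages content is a placeholder standing in for the full system/user prompt that app.py assembles from the question, reference answer, student answer, and mark scheme fields.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

checkpoint = "IsmaelMousa/SmolLM2-135M-Instruct-EngSaf-217K"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)
assistant = pipeline("text-generation", tokenizer=tokenizer, model=model, device=device)

# Placeholder messages; app.py builds the real content from the Gradio fields.
messages = [{"role": "system", "content": "You are a grading assistant. ..."},
            {"role": "user", "content": "..."}]

# Render the chat template to a prompt string, then generate greedily.
inputs = tokenizer.apply_chat_template(messages, tokenize=False)

# After this commit the budget is a fixed 128 new tokens, not a UI slider value.
output = assistant(inputs, max_new_tokens=128, do_sample=False, return_full_text=False)[0]["generated_text"]

Hardcoding the budget simplifies the UI to the four text fields, and 128 new tokens is presumably enough headroom for the short JSON object (score plus rationale) the system prompt requests.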