MindLabUnimib committed on
Commit
2fd18ea
·
verified ·
1 Parent(s): f00394f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -8
app.py CHANGED
@@ -62,7 +62,7 @@ cls_model = AutoModelForSequenceClassification.from_pretrained(cls_model_name, d
62
  cls_tokenizer = AutoTokenizer.from_pretrained(cls_model_name)
63
  cls_model = cls_model.to(device)
64
 
65
- def generate_responses(pipeline, tokenizer, prompts):
66
  messages = [[{"role": "user", "content": prompt}] for prompt in prompts]
67
  responses = pipeline(messages, do_sample=False, max_new_tokens=512, repetition_penalty=1.1)
68
 
@@ -89,18 +89,12 @@ def generate(submission: list[dict[str, str]], team_id: str) -> list[dict[str, s
89
  ids = [s["id"] for s in submission]
90
  prompts = [s["prompt"] for s in submission]
91
 
92
- responses = generate_responses(pipeline, chat_tokenizer, prompts)
93
  print(responses)
94
 
95
  scores = classify_pairs(cls_model, cls_tokenizer, prompts, responses)
96
  print(scores)
97
 
98
- chat_model_name = "sapienzanlp/Minerva-7B-instruct-v1.0"
99
- ids = [s["id"] for s in submission]
100
- prompts = [s["prompt"] for s in submission]
101
- responses = ["This is a placeholder response." for _ in prompts]
102
- scores = [0.5 for _ in prompts]
103
-
104
  outputs = [
105
  {
106
  "id": id,
 
62
  cls_tokenizer = AutoTokenizer.from_pretrained(cls_model_name)
63
  cls_model = cls_model.to(device)
64
 
65
+ def generate_responses(pipeline, prompts):
66
  messages = [[{"role": "user", "content": prompt}] for prompt in prompts]
67
  responses = pipeline(messages, do_sample=False, max_new_tokens=512, repetition_penalty=1.1)
68
 
 
89
  ids = [s["id"] for s in submission]
90
  prompts = [s["prompt"] for s in submission]
91
 
92
+ responses = generate_responses(pipeline, prompts)
93
  print(responses)
94
 
95
  scores = classify_pairs(cls_model, cls_tokenizer, prompts, responses)
96
  print(scores)
97
 
 
 
 
 
 
 
98
  outputs = [
99
  {
100
  "id": id,