MindLabUnimib committed on
Commit
e8c05eb
·
1 Parent(s): f7f608b

feat: receive prompt ids

Browse files
Files changed (1) hide show
  1. app.py +7 -5
app.py CHANGED
@@ -30,8 +30,7 @@ def generate_responses(model, tokenizer, prompts):
30
  repetition_penalty=1.0,
31
  max_new_tokens=512,
32
  )
33
-
34
- prompt_lengths = (model_inputs.input_ids != tokenizer.pad_token_id).sum(dim=1)
35
  generated_ids = [
36
  output_ids[length:] for length, output_ids in zip(prompt_lengths, generated_ids)
37
  ]
@@ -55,13 +54,16 @@ def classify_pairs(model, tokenizer, prompts, responses):
55
 
56
 
57
  @spaces.GPU()
58
- def generate(prompts: list[str]) -> list[dict[str, str | float]]:
 
 
 
59
  responses = generate_responses(chat_model, chat_tokenizer, prompts)
60
  scores = classify_pairs(moderator_model, moderator_tokenizer, prompts, responses)
61
 
62
  return [
63
- {"prompt": prompt, "response": response, "score": score}
64
- for prompt, response, score in zip(prompts, responses, scores)
65
  ]
66
 
67
 
 
30
  repetition_penalty=1.0,
31
  max_new_tokens=512,
32
  )
33
+ prompt_lengths = model_inputs["attention_mask"].sum(dim=1)
 
34
  generated_ids = [
35
  output_ids[length:] for length, output_ids in zip(prompt_lengths, generated_ids)
36
  ]
 
54
 
55
 
56
  @spaces.GPU()
57
+ def generate(submission: list[dict[str, str]]) -> list[dict[str, str | float]]:
58
+ ids = [s["id"] for s in submission]
59
+ prompts = [s["prompt"] for s in submission]
60
+
61
  responses = generate_responses(chat_model, chat_tokenizer, prompts)
62
  scores = classify_pairs(moderator_model, moderator_tokenizer, prompts, responses)
63
 
64
  return [
65
+ {"id": id, "prompt": prompt, "response": response, "score": score}
66
+ for id, prompt, response, score in zip(ids, prompts, responses, scores)
67
  ]
68
 
69