Commit 4cf07ab · 1 parent: 931e551 · debug

app.py CHANGED
@@ -15,7 +15,7 @@ moderator_model_name = "saiteki-kai/QA-DeBERTa-v3-large"
 moderator_model = AutoModelForSequenceClassification.from_pretrained(moderator_model_name)
 moderator_tokenizer = AutoTokenizer.from_pretrained(moderator_model_name)
 
-
+@spaces.GPU()
 def generate_responses(model, tokenizer, prompts):
     messages = [[{"role": "user", "content": message}] for message in prompts]
 
@@ -38,7 +38,7 @@ def generate_responses(model, tokenizer, prompts):
 
     return responses
 
-
+@spaces.GPU()
 def classify_pairs(model, tokenizer, prompts, responses):
     texts = [
         prompt + "[SEP]" + response for prompt, response in zip(prompts, responses)
@@ -62,9 +62,8 @@ def generate(submission: list[dict[str, str]], team_id: str) -> list[dict[str, s
     prompts = [s["prompt"] for s in submission]
 
     responses = generate_responses(chat_model, chat_tokenizer, prompts)
-
-
-
+    scores = classify_pairs(moderator_model, moderator_tokenizer, prompts, responses)
+
     return [
         {"id": id, "prompt": prompt, "response": response, "score": score, "model": chat_model_name, "team_id": team_id}
         for id, prompt, response, score in zip(ids, prompts, responses, scores)
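
In short, the commit decorates both model-calling functions with @spaces.GPU() and wires the moderator's scores into the returned records. For context, here is a minimal, self-contained sketch of the ZeroGPU pattern being applied, assuming the spaces package that Hugging Face provides on ZeroGPU hardware; the chat model name below is a hypothetical placeholder, since the diff never shows the app's actual chat_model_name:

import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical placeholder; the app's real chat model is not visible in this diff.
chat_model_name = "some-org/some-chat-model"
chat_tokenizer = AutoTokenizer.from_pretrained(chat_model_name)
chat_model = AutoModelForCausalLM.from_pretrained(chat_model_name, torch_dtype=torch.float16)

@spaces.GPU()  # ZeroGPU attaches a GPU only for the duration of this call
def generate_one(prompt: str) -> str:
    chat_model.to("cuda")
    inputs = chat_tokenizer(prompt, return_tensors="pt").to("cuda")
    output = chat_model.generate(**inputs, max_new_tokens=64)
    return chat_tokenizer.decode(output[0], skip_special_tokens=True)

On ZeroGPU Spaces, CUDA work outside a @spaces.GPU()-decorated function fails because no GPU is attached at that point, which is a plausible motivation for a commit message like "debug".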
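
The diff also truncates the body of classify_pairs after the texts list. A hedged sketch of how the scoring step might continue, assuming the moderator is an ordinary Transformers sequence-classification model as loaded above; the padding/truncation settings and the reduction of logits to a single score per pair are assumptions, not shown in the diff:

import torch

def classify_pairs(model, tokenizer, prompts, responses):
    # Join each pair with a literal "[SEP]" string, as the diff shows.
    texts = [
        prompt + "[SEP]" + response for prompt, response in zip(prompts, responses)
    ]
    inputs = tokenizer(texts, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    # Assumption: treat the head as multi-label and report the highest
    # per-label probability as the pair's score; the real label mapping
    # for saiteki-kai/QA-DeBERTa-v3-large may differ.
    return torch.sigmoid(logits).max(dim=-1).values.tolist()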