- demo.py +18 -0
- special_tokens_map.json +1 -0
- vocab.json +0 -0
demo.py
ADDED
@@ -0,0 +1,18 @@
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
+import torch
+
+model_card = "EvgeniaKomleva/rpt"  # you can try other model cards listed in the table above
+tokenizer = AutoTokenizer.from_pretrained(model_card)
+model = AutoModelForSequenceClassification.from_pretrained(model_card)
+
+def score(cxt, hyp):
+    model_input = tokenizer.encode(cxt + "<|endoftext|>" + hyp, return_tensors="pt")  # join context and hypothesis with the separator token
+    result = model(model_input, return_dict=True)
+    return torch.sigmoid(result.logits)  # map the classifier logit to a score in [0, 1]
+
+cxt = "I love NLP!"
+hyp_A = "Me too!"
+hyp_B = "Here’s a free textbook (URL) in case anyone needs it."
+
+print('%.3f %s' % (score(cxt, hyp_A).squeeze(), hyp_A))
+print('%.3f %s' % (score(cxt, hyp_B).squeeze(), hyp_B))
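The two print statements above score each hypothesis against the context independently. A minimal sketch of reusing the same score() function to rank several candidate replies for one context; the candidates list below is hypothetical example data, not part of the commit:

candidates = ["Me too!", "I don't know.", "Here’s a free textbook (URL) in case anyone needs it."]  # hypothetical candidates
ranked = sorted(candidates, key=lambda hyp: float(score(cxt, hyp).squeeze()), reverse=True)  # highest score first
for hyp in ranked:
    print('%.3f %s' % (score(cxt, hyp).squeeze(), hyp))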
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"pad_token": "<|endoftext|>"}
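This file maps the tokenizer's pad_token to <|endoftext|>, the same token demo.py uses to separate context from hypothesis, so padded batches reuse an id the model already knows. A quick check, assuming the tokenizer was loaded as in demo.py; the id shown is an assumption based on the standard GPT-2 vocabulary:

print(tokenizer.pad_token)     # <|endoftext|>
print(tokenizer.pad_token_id)  # resolved against vocab.json; 50256 in the standard GPT-2 vocab (assumption)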
vocab.json
ADDED
The diff for this file is too large to render.