Update README.md
#1
by
ls-da3m0ns
- opened
README.md
CHANGED
|
@@ -24,20 +24,23 @@ This method is surprisingly effective in many cases, particularly when used with
|
|
| 24 |
|
| 25 |
With the zero-shot classification pipeline
|
| 26 |
The model can be loaded with the zero-shot-classification pipeline like so:
|
| 27 |
-
|
| 28 |
from transformers import pipeline
|
| 29 |
classifier = pipeline("zero-shot-classification",
|
| 30 |
model="facebook/bart-large-mnli")
|
| 31 |
-
You can then use this pipeline to classify sequences into any of the class names you specify.
|
| 32 |
|
|
|
|
|
|
|
|
|
|
| 33 |
sequence_to_classify = "one day I will see the world"
|
| 34 |
candidate_labels = ['travel', 'cooking', 'dancing']
|
| 35 |
classifier(sequence_to_classify, candidate_labels)
|
| 36 |
#{'labels': ['travel', 'dancing', 'cooking'],
|
| 37 |
# 'scores': [0.9938651323318481, 0.0032737774308770895, 0.002861034357920289],
|
| 38 |
# 'sequence': 'one day I will see the world'}
|
|
|
|
| 39 |
If more than one candidate label can be correct, pass multi_class=True so that each class probability is calculated independently:
|
| 40 |
-
|
| 41 |
candidate_labels = ['travel', 'cooking', 'dancing', 'exploration']
|
| 42 |
classifier(sequence_to_classify, candidate_labels, multi_class=True)
|
| 43 |
#{'labels': ['travel', 'exploration', 'dancing', 'cooking'],
|
|
@@ -46,7 +49,9 @@ classifier(sequence_to_classify, candidate_labels, multi_class=True)
|
|
| 46 |
# 0.0057061901316046715,
|
| 47 |
# 0.0018193122232332826],
|
| 48 |
# 'sequence': 'one day I will see the world'}
|
|
|
|
| 49 |
With manual PyTorch
|
|
|
|
| 50 |
# pose sequence as an NLI premise and label as a hypothesis
|
| 51 |
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
| 52 |
nli_model = AutoModelForSequenceClassification.from_pretrained('facebook/bart-large-mnli')
|
|
@@ -64,4 +69,5 @@ logits = nli_model(x.to(device))[0]
|
|
| 64 |
# "entailment" (2) as the probability of the label being true
|
| 65 |
entail_contradiction_logits = logits[:,[0,2]]
|
| 66 |
probs = entail_contradiction_logits.softmax(dim=1)
|
| 67 |
-
prob_label_is_true = probs[:,1]
|
|
|
|
|
|
| 24 |
|
| 25 |
With the zero-shot classification pipeline
|
| 26 |
The model can be loaded with the zero-shot-classification pipeline like so:
|
| 27 |
+
```
|
| 28 |
from transformers import pipeline
|
| 29 |
classifier = pipeline("zero-shot-classification",
|
| 30 |
model="facebook/bart-large-mnli")
|
|
|
|
| 31 |
|
| 32 |
+
```
|
| 33 |
+
You can then use this pipeline to classify sequences into any of the class names you specify.
|
| 34 |
+
```
|
| 35 |
sequence_to_classify = "one day I will see the world"
|
| 36 |
candidate_labels = ['travel', 'cooking', 'dancing']
|
| 37 |
classifier(sequence_to_classify, candidate_labels)
|
| 38 |
#{'labels': ['travel', 'dancing', 'cooking'],
|
| 39 |
# 'scores': [0.9938651323318481, 0.0032737774308770895, 0.002861034357920289],
|
| 40 |
# 'sequence': 'one day I will see the world'}
|
| 41 |
+
```
|
| 42 |
If more than one candidate label can be correct, pass multi_class=True so that each class probability is calculated independently:
|
| 43 |
+
```
|
| 44 |
candidate_labels = ['travel', 'cooking', 'dancing', 'exploration']
|
| 45 |
classifier(sequence_to_classify, candidate_labels, multi_class=True)
|
| 46 |
#{'labels': ['travel', 'exploration', 'dancing', 'cooking'],
|
|
|
|
| 49 |
# 0.0057061901316046715,
|
| 50 |
# 0.0018193122232332826],
|
| 51 |
# 'sequence': 'one day I will see the world'}
|
| 52 |
+
```
|
| 53 |
With manual PyTorch
|
| 54 |
+
```
|
| 55 |
# pose sequence as an NLI premise and label as a hypothesis
|
| 56 |
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
| 57 |
nli_model = AutoModelForSequenceClassification.from_pretrained('facebook/bart-large-mnli')
|
|
|
|
| 69 |
# "entailment" (2) as the probability of the label being true
|
| 70 |
entail_contradiction_logits = logits[:,[0,2]]
|
| 71 |
probs = entail_contradiction_logits.softmax(dim=1)
|
| 72 |
+
prob_label_is_true = probs[:,1]
|
| 73 |
+
```
|