Update README.md
Browse files
README.md
CHANGED
|
@@ -11,7 +11,20 @@ Requires Pytorch
|
|
| 11 |
|
| 12 |
How to use for text inference
|
| 13 |
```python
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
type = "gpt2-large"
|
| 15 |
tokenizer = AutoTokenizer.from_pretrained(type)
|
| 16 |
model = AutoModelForCausalLM.from_pretrained(type)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
```
|
|
|
|
| 11 |
|
| 12 |
How to use for text inference
|
| 13 |
```python
|
| 14 |
+
|
| 15 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 16 |
+
import torch
|
| 17 |
+
|
| 18 |
type = "gpt2-large"
|
| 19 |
tokenizer = AutoTokenizer.from_pretrained(type)
|
| 20 |
model = AutoModelForCausalLM.from_pretrained(type)
|
| 21 |
+
|
| 22 |
+
model_path = '../model.pt'
|
| 23 |
+
|
| 24 |
+
model = torch.load(model_path)
|
| 25 |
+
|
| 26 |
+
your_text = "A courier received 50 packages yesterday and twice as many today. All of these should be delivered tomorrow. How many packages should be delivered tomorrow?"
|
| 27 |
+
encoded_text = tokenizer.encode(your_text, return_tensors='pt')
|
| 28 |
+
outputs = model.generate(encoded_text, max_length=64, do_sample=True, temperature=0.5, top_p=1)
|
| 29 |
+
outputs = [tokenizer.decode(output) for output in outputs]
|
| 30 |
```
|