Update README.md #3
by kamalkraj - opened

README.md CHANGED
@@ -12,9 +12,9 @@ set a seed for reproducibility:
 
 ```python
 >>> from transformers import pipeline, set_seed
->>> from transformers import BioGptTokenizer,
->>> model =
->>> tokenizer = BioGptTokenizer.from_pretrained("
+>>> from transformers import BioGptTokenizer, BioGptForCausalLM
+>>> model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
+>>> tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
 >>> generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
 >>> set_seed(42)
 >>> generator("COVID-19 is", max_length=20, num_return_sequences=5, do_sample=True)
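The added lines build the model and tokenizer explicitly and hand them to `pipeline`. For a quick check of the same behaviour, `pipeline` can also resolve both from the checkpoint id on its own; a minimal equivalent sketch, assuming the `microsoft/biogpt` id from the added lines:

```python
from transformers import pipeline, set_seed

# pipeline() loads both the tokenizer and the model from the checkpoint id.
generator = pipeline("text-generation", model="microsoft/biogpt")

set_seed(42)  # fix the sampling seed so the five samples are reproducible
print(generator("COVID-19 is", max_length=20, num_return_sequences=5, do_sample=True))
```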
@@ -28,9 +28,9 @@ set a seed for reproducibility:
 Here is how to use this model to get the features of a given text in PyTorch:
 
 ```python
-from transformers import BioGptTokenizer,
-tokenizer = BioGptTokenizer.from_pretrained("
-model =
+from transformers import BioGptTokenizer, BioGptForCausalLM
+tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
+model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
 text = "Replace me by any text you'd like."
 encoded_input = tokenizer(text, return_tensors='pt')
 output = model(**encoded_input)
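The updated snippet stops at the raw forward pass, so `output` holds language-model logits rather than features directly. One common way to get token-level features is the standard `output_hidden_states=True` flag in transformers; a minimal sketch under that assumption:

```python
import torch
from transformers import BioGptTokenizer, BioGptForCausalLM

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors="pt")
with torch.no_grad():  # inference only, no gradients needed
    output = model(**encoded_input, output_hidden_states=True)

# Final-layer hidden states, shape (batch_size, sequence_length, hidden_size).
features = output.hidden_states[-1]
```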
@@ -40,10 +40,10 @@ Beam-search decoding:
 
 ```python
 import torch
-from transformers import BioGptTokenizer,
+from transformers import BioGptTokenizer, BioGptForCausalLM, set_seed
 
-tokenizer = BioGptTokenizer.from_pretrained("
-model =
+tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
+model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
 
 sentence = "COVID-19 is"
 inputs = tokenizer(sentence, return_tensors="pt")
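This hunk is cut off before the decoding step it is named after, so the `generate` call itself does not appear in the diff. A hedged completion of the beam-search example, with illustrative parameter values that are assumptions rather than values taken from the diff:

```python
import torch
from transformers import BioGptTokenizer, BioGptForCausalLM

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

sentence = "COVID-19 is"
inputs = tokenizer(sentence, return_tensors="pt")

with torch.no_grad():
    # Beam search: track the 5 highest-scoring continuations and stop
    # early once every beam has produced a finished sequence.
    beam_output = model.generate(
        **inputs,
        min_length=100,
        max_length=1024,
        num_beams=5,
        early_stopping=True,
    )
print(tokenizer.decode(beam_output[0], skip_special_tokens=True))
```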