update usage code
README.md CHANGED
````diff
@@ -39,27 +39,18 @@ We would like to thank the Baseten team for their contributions in deploying and
 
 ## Usage :
 ```python
-import os
 import torch
-
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-# set HF_TOKEN in terminal as export HF_TOKEN=hf_***
-auth_token = os.environ.get("HF_TOKEN", True)
-
 model_name = "Writer/camel-5b-hf"
 
-tokenizer = AutoTokenizer.from_pretrained(
-    model_name, use_auth_token=auth_token
-)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(
-
+    model_name,
     device_map="auto",
-    torch_dtype=torch.float16
-    use_auth_token=auth_token,
+    torch_dtype=torch.float16
 )
 
-
 instruction = "Describe a futuristic device that revolutionizes space travel."
 
 
@@ -85,16 +76,14 @@ text = (
 model_inputs = tokenizer(text, return_tensors="pt").to("cuda")
 output_ids = model.generate(
     **model_inputs,
-    max_length=
+    max_length=256,
 )
 output_text = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
 clean_output = output_text.split("### Response:")[1].strip()
 
 print(clean_output)
-
 ```
 
-`
 
 ### Limitations and Biases
 
````
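The hunks above skip the prompt-building lines (old 66–84 / new 57–75), so here is the updated snippet end to end as a minimal sketch. The `text = (...)` template is not visible in this diff: the Alpaca-style instruction format below is an assumption inferred from the `output_text.split("### Response:")` post-processing. Running it as written needs a CUDA GPU, and `device_map="auto"` needs the `accelerate` package.

```python
# Minimal sketch of the updated usage code.
# ASSUMPTION: the prompt template is not shown in the diff; an Alpaca-style
# format is inferred from the split on "### Response:" below.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "Writer/camel-5b-hf"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",          # automatic device placement; requires accelerate
    torch_dtype=torch.float16,  # half precision to reduce GPU memory use
)

instruction = "Describe a futuristic device that revolutionizes space travel."

# Assumed instruction template; it must end with "### Response:" so the
# completion can be recovered by the split further down.
text = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    f"### Instruction:\n{instruction}\n\n### Response:"
)

model_inputs = tokenizer(text, return_tensors="pt").to("cuda")
output_ids = model.generate(
    **model_inputs,
    max_length=256,  # counts prompt + completion; max_new_tokens bounds only the completion
)
output_text = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
clean_output = output_text.split("### Response:")[1].strip()

print(clean_output)
```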
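The commit also drops the `HF_TOKEN` handling entirely, which works because the checkpoint is public; the removed `os.environ.get("HF_TOKEN", True)` defaulted to `True`, meaning "use the token cached by `huggingface-cli login`". For a gated or private model you would still need to authenticate. A minimal sketch, assuming a recent transformers release where `token=` supersedes the deprecated `use_auth_token=`:

```python
import os
from transformers import AutoTokenizer

# Only needed for gated/private checkpoints; public models load without a token.
# `token=` replaces the deprecated `use_auth_token=` kwarg in recent transformers.
auth_token = os.environ.get("HF_TOKEN")  # set via: export HF_TOKEN=hf_***
tokenizer = AutoTokenizer.from_pretrained("Writer/camel-5b-hf", token=auth_token)
```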