Update README.md
README.md CHANGED
@@ -50,6 +50,7 @@ print(res)
 
 Alternatively, if you prefer to not use trust_remote_code=True you can download instruct_pipeline.py, store it alongside your notebook, and construct the pipeline yourself from the loaded model and tokenizer:
 
+```python
 from instruct_pipeline import InstructionTextGenerationPipeline
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
@@ -58,3 +59,4 @@ tokenizer = AutoTokenizer.from_pretrained("aisquared/dlite-v2-1_5b", padding_sid
 model = AutoModelForCausalLM.from_pretrained("aisquared/dlite-v2-1_5b", device_map="auto", torch_dtype=torch.bfloat16)
 
 generate_text = InstructionTextGenerationPipeline(model=model, tokenizer=tokenizer)
+```
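
Once constructed this way, the pipeline is used the same way as the trust_remote_code version earlier in the README (whose `print(res)` line appears in the first hunk's context). The snippet below is a minimal usage sketch; the instruction string is only an illustrative placeholder, not part of the change.

```python
# Minimal usage sketch for the locally constructed pipeline.
# The instruction text below is an illustrative placeholder.
res = generate_text("Explain the difference between a list and a tuple in Python.")
print(res)
```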