Update README.md
README.md CHANGED
@@ -80,10 +80,11 @@ To use this model for inference, you need to load the fine-tuned model and token
|
|
| 80 |
|
| 81 |
Running on CPU
|
| 82 |
```python
|
| 83 |
-
|
|
|
|
| 84 |
|
| 85 |
-
tokenizer =
|
| 86 |
-
model =
|
| 87 |
|
| 88 |
input_text = "Your conversation Here"
|
| 89 |
input_ids = tokenizer(input_text, return_tensors="pt").input_ids
|
|
@@ -95,10 +96,10 @@ print(tokenizer.decode(outputs[0]))
|
|
| 95 |
Running on GPU
|
| 96 |
```python
|
| 97 |
# pip install accelerate
|
| 98 |
-
from transformers import
|
| 99 |
|
| 100 |
-
tokenizer =
|
| 101 |
-
model =
|
| 102 |
|
| 103 |
input_text = "Your conversation Here"
|
| 104 |
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
|
|
|
|
| 80 |
|
| 81 |
Running on CPU
|
| 82 |
```python
|
| 83 |
+
# Load model directly
|
| 84 |
+
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
| 85 |
|
| 86 |
+
tokenizer = AutoTokenizer.from_pretrained("Falconsai/arc_of_conversation")
|
| 87 |
+
model = AutoModelForSeq2SeqLM.from_pretrained("Falconsai/arc_of_conversation")
|
| 88 |
|
| 89 |
input_text = "Your conversation Here"
|
| 90 |
input_ids = tokenizer(input_text, return_tensors="pt").input_ids
|
|
|
|
| 96 |
Running on GPU
|
| 97 |
```python
|
| 98 |
# pip install accelerate
|
| 99 |
+
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
| 100 |
|
| 101 |
+
tokenizer = AutoTokenizer.from_pretrained("Falconsai/arc_of_conversation")
|
| 102 |
+
model = AutoModelForSeq2SeqLM.from_pretrained("Falconsai/arc_of_conversation", device_map="auto")
|
| 103 |
|
| 104 |
input_text = "Your conversation Here"
|
| 105 |
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
|
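The updated snippets stop at `input_ids`; the second hunk's context line shows the README continues with `print(tokenizer.decode(outputs[0]))`, so a generation step sits between the two hunks. A minimal end-to-end sketch of the CPU path, assuming that intervening step is a plain `model.generate(...)` call (the `max_new_tokens` cap below is an illustrative choice, not from the README):

```python
# End-to-end sketch of the CPU path from the updated README.
# Assumption: the step between the two hunks is a plain model.generate(...)
# call; max_new_tokens=256 is an illustrative cap, not from the source.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("Falconsai/arc_of_conversation")
model = AutoModelForSeq2SeqLM.from_pretrained("Falconsai/arc_of_conversation")

input_text = "Your conversation Here"
input_ids = tokenizer(input_text, return_tensors="pt").input_ids

outputs = model.generate(input_ids, max_new_tokens=256)
print(tokenizer.decode(outputs[0]))
```

For the GPU path, `device_map="auto"` (which requires `accelerate`, hence the `pip install` comment) lets transformers place the model weights on available devices, while `.to("cuda")` moves only the input tensor; the two changes in the second hunk work together.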