Update README.md
README.md CHANGED
@@ -90,7 +90,7 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
 tokenizer = AutoTokenizer.from_pretrained("Bochkov/emergent-semantics-model-64-float-272m")
-model = AutoModelForCausalLM.from_pretrained("Bochkov/emergent-semantics-model-64-float-272m", trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("Bochkov/emergent-semantics-model-64-float-272m", trust_remote_code=True).to('cuda')
 
 inputs = torch.tensor([tokenizer.encode("Question: What is the capital of Japan?\nAnswer:")], dtype=torch.long, device='cuda')
 
@@ -101,6 +101,10 @@ outputs = model.generate(
 )
 print(tokenizer.decode(outputs[0].tolist()))
 
+#Question: What is the capital of Japan?
+#Answer:Japan
+# </s><|
+
 ```
 
 ---
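For reference, a minimal, self-contained sketch of how the updated README snippet reads after this commit. The arguments to `model.generate(...)` are not visible in the diff (only the closing parenthesis appears as hunk context), so the `max_new_tokens` value below is a placeholder assumption rather than the repository's actual setting.

```python
# Sketch of the README example after this commit.
# NOTE: the generate() arguments are assumed; the diff only shows the
# closing ")" of the call, not its parameters.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Bochkov/emergent-semantics-model-64-float-272m")

# The commit adds .to('cuda') so the model lives on the same device
# as the input tensor created with device='cuda' below.
model = AutoModelForCausalLM.from_pretrained(
    "Bochkov/emergent-semantics-model-64-float-272m",
    trust_remote_code=True,
).to('cuda')

inputs = torch.tensor(
    [tokenizer.encode("Question: What is the capital of Japan?\nAnswer:")],
    dtype=torch.long,
    device='cuda',
)

outputs = model.generate(
    inputs,
    max_new_tokens=32,  # assumed value, not shown in the diff
)
print(tokenizer.decode(outputs[0].tolist()))
```

The only functional change in the commit is the `.to('cuda')` call, which moves the model onto the same device as the input tensor; the other added lines are comments documenting the expected output of the example.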