Update README.md
Browse files
README.md
CHANGED
|
@@ -6,7 +6,7 @@ library_name: transformers
|
|
| 6 |
# Mamba
|
| 7 |
|
| 8 |
<!-- Provide a quick summary of what the model is/does. -->
|
| 9 |
-
This repository contains the `transformers` compatible `mamba
|
| 10 |
|
| 11 |
# Usage
|
| 12 |
|
|
@@ -30,8 +30,8 @@ You can use the classic `generate` API:
|
|
| 30 |
>>> from transformers import MambaConfig, MambaForCausalLM, AutoTokenizer
|
| 31 |
>>> import torch
|
| 32 |
|
| 33 |
-
>>> tokenizer = AutoTokenizer.from_pretrained("
|
| 34 |
-
>>> model = MambaForCausalLM.from_pretrained("
|
| 35 |
>>> input_ids = tokenizer("Hey how are you doing?", return_tensors="pt")["input_ids"]
|
| 36 |
|
| 37 |
>>> out = model.generate(input_ids, max_new_tokens=10)
|
|
|
|
| 6 |
# Mamba
|
| 7 |
|
| 8 |
<!-- Provide a quick summary of what the model is/does. -->
|
| 9 |
+
This repository contains the `transformers` compatible `mamba`. The checkpoints are untouched, but the full `config.json` and tokenizer are pushed to this repo.
|
| 10 |
|
| 11 |
# Usage
|
| 12 |
|
|
|
|
| 30 |
>>> from transformers import MambaConfig, MambaForCausalLM, AutoTokenizer
|
| 31 |
>>> import torch
|
| 32 |
|
| 33 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("Jo1uck/mamba-11b-back")
|
| 34 |
+
>>> model = MambaForCausalLM.from_pretrained("Jo1uck/mamba-11b-back")
|
| 35 |
>>> input_ids = tokenizer("Hey how are you doing?", return_tensors="pt")["input_ids"]
|
| 36 |
|
| 37 |
>>> out = model.generate(input_ids, max_new_tokens=10)
|