Update README.md
README.md CHANGED
@@ -52,6 +52,23 @@ We would like to take this opportunity to thank
- BAD: あなたは○○ができます ("You can do ○○.")
- GOOD: あなたは○○をします ("You do ○○.")
## Performing inference
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer from the Hugging Face Hub.
model = AutoModelForCausalLM.from_pretrained("Local-Novel-LLM-project/Ninja-v1-128k", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("Local-Novel-LLM-project/Ninja-v1-128k")

prompt = "Once upon a time,"
input_ids = tokenizer.encode(prompt, return_tensors="pt")

# Sample up to 100 tokens and decode the first (and only) sequence in the batch.
output = model.generate(input_ids, max_length=100, do_sample=True)
generated_text = tokenizer.decode(output[0], skip_special_tokens=True)

print(generated_text)
```
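The snippet above loads the weights in full float32 precision on the CPU by default, which can be slow and memory-hungry for a model of this size. As a variant sketch (not part of the original card, and assuming a CUDA-capable GPU and the `accelerate` package), the model can be loaded in bfloat16 with automatic device placement:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# bfloat16 roughly halves memory versus float32; device_map="auto"
# (requires `pip install accelerate`) places layers on the available GPU(s)/CPU.
model = AutoModelForCausalLM.from_pretrained(
    "Local-Novel-LLM-project/Ninja-v1-128k",
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("Local-Novel-LLM-project/Ninja-v1-128k")

prompt = "Once upon a time,"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=100, do_sample=True)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```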
## Merge recipe
@@ -60,5 +77,8 @@ We would like to take this opportunity to thank
- VT0.2on0.1 = VT0.1 + VT0.2
- VT1 = all VT Series + LoRA + Ninja 128k and Normal
## Other points to keep in mind
- The training data may be biased, so review the generated text carefully.
- Memory usage can be high for long-context inference.
- If possible, we recommend running inference with llama.cpp rather than Transformers (a rough sketch follows below).
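The llama.cpp recommendation above is only a pointer; this card does not ship a llama.cpp example. A rough sketch using the llama-cpp-python bindings, assuming you have a GGUF conversion of the model on disk (the file name below is a placeholder), could look like this:

```python
from llama_cpp import Llama

# Placeholder path: point this at whatever GGUF conversion of Ninja-v1-128k you actually have.
llm = Llama(
    model_path="./Ninja-v1-128k.Q4_K_M.gguf",
    n_ctx=8192,  # raise toward 128k only if you have enough RAM for the KV cache
)

out = llm("Once upon a time,", max_tokens=100)
print(out["choices"][0]["text"])
```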
|