Update README.md
Browse files
README.md
CHANGED
|
@@ -56,7 +56,7 @@ You can run Lightning-1.7B using the `transformers` library.
|
|
| 56 |
import torch
|
| 57 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 58 |
|
| 59 |
-
model_name = "
|
| 60 |
|
| 61 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 62 |
model = AutoModelForCausalLM.from_pretrained(
|
|
@@ -85,7 +85,14 @@ outputs = model.generate(
|
|
| 85 |
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
|
| 86 |
# Output: "movie guy lives in computer takes red pill matrix plot"
|
| 87 |
```
|
| 88 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
|
| 90 |
📊 Performance & Benchmarks
|
| 91 |
|
|
|
|
| 56 |
import torch
|
| 57 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 58 |
|
| 59 |
+
model_name = "TitleOS/Lightning-1.7B"
|
| 60 |
|
| 61 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 62 |
model = AutoModelForCausalLM.from_pretrained(
|
|
|
|
| 85 |
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
|
| 86 |
# Output: "movie guy lives in computer takes red pill matrix plot"
|
| 87 |
```
|
| 88 |
+
|
| 89 |
+
Merged FP16 and Quantizations:
|
| 90 |
+
|
| 91 |
+
FP16: https://huggingface.co/TitleOS/Lightning-1.7B
|
| 92 |
+
|
| 93 |
+
Q4_K_M: https://huggingface.co/TitleOS/Lightning-1.7B-Q4_K_M-GGUF
|
| 94 |
+
|
| 95 |
+
Q8: https://huggingface.co/TitleOS/Lightning-1.7B-Q8_0-GGUF
|
| 96 |
|
| 97 |
📊 Performance & Benchmarks
|
| 98 |
|