Update README.md
# open-llama-2-ko based model with in-house dataset

This is a Korean model based on:
* [beomi/open-llama-2-ko-7b]

GPU code example (interactive generation with 4-bit quantization):

```
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# v2 models
model_path = "colable/llama-ko-peft"

tokenizer = AutoTokenizer.from_pretrained(model_path, use_default_system_prompt=False)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float32,   # dtype for the non-quantized modules
    device_map="auto",           # place layers on available devices automatically
    local_files_only=False,
    load_in_4bit=True,           # 4-bit quantization; requires bitsandbytes
)
print(model)

# Interactive loop: an empty prompt ends the session.
prompt = input("please input prompt:")
while len(prompt) > 0:
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to("cuda")
    generation_output = model.generate(
        input_ids=input_ids, max_new_tokens=500, repetition_penalty=1.2
    )
    print(tokenizer.decode(generation_output[0]))
    prompt = input("please input prompt:")
```
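
Note: recent transformers releases deprecate passing `load_in_4bit=True` directly to `from_pretrained` in favor of an explicit `BitsAndBytesConfig`. Below is a minimal sketch of the equivalent load, assuming a recent transformers version with bitsandbytes and accelerate installed; the `torch.float16` compute dtype is an assumption, not part of the original example.

```
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_path = "colable/llama-ko-peft"

# Equivalent 4-bit load via an explicit quantization config.
# torch.float16 compute dtype is an assumption, not from the original example.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

tokenizer = AutoTokenizer.from_pretrained(model_path, use_default_system_prompt=False)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    quantization_config=bnb_config,
    device_map="auto",
)
```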