Commit e5c662f (parent: f1c51c9) — "Update README.md"
File changed: README.md
@@ -1,3 +1,26 @@
|
|
| 1 |
---
|
| 2 |
license: other
|
| 3 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
license: other
|
| 3 |
---
|
| 4 |
+
```
|
| 5 |
+
from transformers import AutoConfig, AutoModel, AutoTokenizer
import os
import torch

# Example: load ChatGLM-6B with a P-Tuning v2 prefix checkpoint and run one chat turn.
# NOTE(review): requires network access to pull "THUDM/chatglm-6b" and a CUDA device.

# Load the tokenizer and a config that reserves room for the learned prefix.
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
config = AutoConfig.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True, pre_seq_len=128)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", config=config, trust_remote_code=True)

# Load the fine-tuned prefix-encoder weights and strip the module path prefix
# so the keys match the bare prefix_encoder submodule.
raw_state_dict = torch.load(os.path.join("./Adjust_ChatGLM_6B/", "pytorch_model.bin"))
_PREFIX = "transformer.prefix_encoder."
trimmed_state_dict = {
    key[len(_PREFIX):]: value
    for key, value in raw_state_dict.items()
    if key.startswith(_PREFIX)
}
model.transformer.prefix_encoder.load_state_dict(trimmed_state_dict)

# Quantize to 4-bit, move to GPU in fp16, but keep the prefix encoder in fp32.
model = model.quantize(4)
model = model.half().cuda()
model.transformer.prefix_encoder.float()
model = model.eval()

response, history = model.chat(tokenizer, "生成衬衣的广告词", history=[])
print(response)
|
| 26 |
+
```
|