NewBreaker committed
Commit: 9768cd8
1 Parent(s): 3f712ba
auto git

Browse files:
- config.json +0 -1
- demo_pipeline.py +1 -1
- load_model.py +8 -7
config.json CHANGED
@@ -8,7 +8,6 @@
     "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration",
     "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration"
   },
-  "device": "cuda",
   "bos_token_id": 130004,
   "eos_token_id": 130005,
   "mask_token_id": 130000,
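The deleted "device": "cuda" entry is not a standard Transformers config field, so device placement belongs in the loading script rather than in config.json. A minimal sketch of doing that at load time; the model path and the half-precision step are assumptions, not part of this commit:

# Sketch: choose the device in the loading script instead of config.json.
# The model path and half-precision call are assumptions, not from this commit.
import torch
from transformers import AutoTokenizer, AutoModel

model_path = "THUDM/chatglm-6b"  # hypothetical; load_model.py loads from ".\\"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
if torch.cuda.is_available():
    model = model.half().cuda()  # common GPU setup for ChatGLM-6B
else:
    model = model.float()        # CPU fallback needs full precision
model = model.eval()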
demo_pipeline.py CHANGED
@@ -1,4 +1,4 @@
 
 from transformers import pipeline
-nlp = pipeline('text2text-generation',model ='
+nlp = pipeline('text2text-generation',model ='THUDM/chatglm-6b',trust_remote_code=True)
 # response = nlp
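The new line completes the previously truncated pipeline() call with the Hub model id and trust_remote_code=True, which ChatGLM-6B's custom modeling code requires. A hedged usage sketch; the prompt is illustrative, and whether ChatGLM's remote code fully supports the text2text-generation pipeline is an assumption this commit does not verify:

from transformers import pipeline

# Build the pipeline exactly as demo_pipeline.py now does.
nlp = pipeline('text2text-generation', model='THUDM/chatglm-6b',
               trust_remote_code=True)

# Illustrative call; the commit itself leaves "# response = nlp" commented out.
result = nlp("你好")  # "你好" = "hello", the same prompt load_model.py used
print(result)        # e.g. [{'generated_text': '...'}]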
load_model.py CHANGED
@@ -1,9 +1,10 @@
-from transformers import AutoTokenizer, AutoModel
+from transformers import AutoTokenizer, AutoModel,pipeline
 
 #
-tokenizer = AutoTokenizer.from_pretrained(".\\", trust_remote_code=True)
-# model = AutoModel.from_pretrained(".\\", trust_remote_code=True).float()
-model = AutoModel.from_pretrained(".\\", trust_remote_code=True)
-model = model.eval()
-response, history = model.chat(tokenizer, "你好", history=[])
-print("response:", response)
+# tokenizer = AutoTokenizer.from_pretrained(".\\", trust_remote_code=True)
+# # model = AutoModel.from_pretrained(".\\", trust_remote_code=True).float()
+# model = AutoModel.from_pretrained(".\\", trust_remote_code=True)
+# model = model.eval()
+# response, history = model.chat(tokenizer, "你好", history=[])
+# print("response:", response)
+npl = pipeline('')
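Note that the committed replacement, npl = pipeline(''), is a placeholder: the name looks like a typo for nlp, and transformers.pipeline() rejects an empty task string, so the script no longer runs as committed. For reference, a runnable sketch of the chat flow this commit comments out, assuming the model weights sit in the working directory as the original ".\\" path implies:

from transformers import AutoTokenizer, AutoModel

# Load tokenizer and model from the current directory, as the original script did.
tokenizer = AutoTokenizer.from_pretrained(".\\", trust_remote_code=True)
model = AutoModel.from_pretrained(".\\", trust_remote_code=True)
model = model.eval()

# chat() is ChatGLM's custom remote-code API: it returns the reply plus the
# updated conversation history for multi-turn use.
response, history = model.chat(tokenizer, "你好", history=[])  # "你好" = "hello"
print("response:", response)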