quincyqiang committed
Commit c259ee3 · 1 parent: 3ba55ac
merge 14GB

Files changed:
- config.json +2 -2
- generation_config.json +1 -1
- pytorch_model-00001-of-00002.bin +2 -2
- pytorch_model-00002-of-00002.bin +2 -2
- pytorch_model.bin.index.json +1 -1
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
config.json CHANGED

@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/data/searchgpt/yq/
+  "_name_or_path": "/data/searchgpt/yq/Firefly/output/llama2-7b-moss-sft/checkpoint-17000",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -20,7 +20,7 @@
   "rope_scaling": null,
   "tie_word_embeddings": false,
   "torch_dtype": "float16",
-  "transformers_version": "4.
+  "transformers_version": "4.31.0",
   "use_cache": false,
   "vocab_size": 68420
 }
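The updated config points the merged weights at a Firefly MOSS-SFT checkpoint saved in float16. As a minimal loading sketch under those settings (the local path is a placeholder, not taken from the commit):

# Minimal loading sketch; assumes transformers>=4.31.0 and a local copy of this repo.
import torch
from transformers import AutoModelForCausalLM

model_path = "./llama2-7b-moss-merged"  # placeholder path, not from the commit
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,  # matches "torch_dtype": "float16" in config.json
)
print(model.config.vocab_size)  # 68420, per the config above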
generation_config.json CHANGED

@@ -5,5 +5,5 @@
   "pad_token_id": 32000,
   "temperature": 0.9,
   "top_p": 0.6,
-  "transformers_version": "4.
+  "transformers_version": "4.31.0"
 }
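A short sampling sketch using the generation defaults above (temperature 0.9, top_p 0.6, pad_token_id 32000); the path and prompt are placeholders, not part of the commit:

# Sampling with the defaults from generation_config.json.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "./llama2-7b-moss-merged"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16)

inputs = tokenizer("你好，请介绍一下你自己。", return_tensors="pt")
outputs = model.generate(
    **inputs,
    do_sample=True,
    temperature=0.9,
    top_p=0.6,
    pad_token_id=32000,
    max_new_tokens=64,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))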
pytorch_model-00001-of-00002.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:451e9fa08494310706144cec474237dfa2f4d58c7cb153b85d00bcbfdcbc1daf
+size 9970884397
pytorch_model-00002-of-00002.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:52181a67541f108853b196708f0c9a4693e0d375d4a37486af66fe992cff928e
+size 4102775441
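The two .bin entries are Git LFS pointer files: the repo tracks only a sha256 and a byte size, and the actual shards are fetched through LFS. A sketch of checking downloaded shards against the pointers above (the hashing loop is generic, not part of the commit):

# Verify downloaded shards against their Git LFS pointers (oid/size from the diffs above).
import hashlib
import os

expected = {
    "pytorch_model-00001-of-00002.bin": (
        "451e9fa08494310706144cec474237dfa2f4d58c7cb153b85d00bcbfdcbc1daf", 9970884397),
    "pytorch_model-00002-of-00002.bin": (
        "52181a67541f108853b196708f0c9a4693e0d375d4a37486af66fe992cff928e", 4102775441),
}

for name, (oid, size) in expected.items():
    h = hashlib.sha256()
    with open(name, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    assert os.path.getsize(name) == size, f"{name}: size mismatch"
    assert h.hexdigest() == oid, f"{name}: sha256 mismatch"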
pytorch_model.bin.index.json CHANGED

@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size":
+    "total_size": 14073544704
   },
   "weight_map": {
     "lm_head.weight": "pytorch_model-00002-of-00002.bin",
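The index file is what tells transformers which shard holds each tensor. A small inspection sketch (standard library only, file name as in the diff):

# Inspect the sharded-checkpoint index: total parameter size and tensor-to-shard mapping.
import json
from collections import Counter

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])        # 14073544704 bytes of parameters
print(Counter(index["weight_map"].values()))  # number of tensors stored in each shard
print(index["weight_map"]["lm_head.weight"])  # pytorch_model-00002-of-00002.bin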
tokenizer.json ADDED

The diff for this file is too large to render. See raw diff.
tokenizer_config.json CHANGED

@@ -18,6 +18,7 @@
     "rstrip": false,
     "single_word": false
   },
+  "legacy": true,
   "model_max_length": 4096,
   "pad_token": null,
   "padding_side": "right",
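transformers 4.31 added a legacy switch to the Llama tokenizer; setting "legacy": true here keeps the pre-4.31 tokenization behaviour and suppresses the corresponding warning at load time. A loading sketch (placeholder path as before):

# Tokenizer loading sketch; "legacy": true in tokenizer_config.json opts into the
# pre-4.31 Llama tokenization behaviour.
from transformers import AutoTokenizer

model_path = "./llama2-7b-moss-merged"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(model_path)
print(tokenizer.model_max_length)  # 4096, per tokenizer_config.json
print(len(tokenizer))              # should line up with vocab_size 68420 (plus any added tokens)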