Upload folder using huggingface_hub
- config.json +4 -2
- model-00001-of-00001.safetensors +1 -1
- special_tokens_map.json +3 -3
- tokenizer.json +28 -0
- tokenizer_config.json +27 -3
config.json
CHANGED

@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "GamblerOnTrain/danke30IPA01",
   "architectures": [
     "StableLmForCausalLM"
   ],
@@ -18,12 +18,14 @@
   "num_hidden_layers": 24,
   "num_key_value_heads": 32,
   "partial_rotary_factor": 0.25,
+  "qk_layernorm": false,
   "rope_scaling": null,
   "rope_theta": 10000,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.
+  "transformers_version": "4.40.0",
   "use_cache": false,
+  "use_parallel_residual": false,
   "use_qkv_bias": true,
   "vocab_size": 100352
 }
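The two flags added here, "qk_layernorm" and "use_parallel_residual", are StableLmConfig fields in transformers. A minimal sketch to confirm the uploaded config resolves them, assuming the repo id from "_name_or_path" above is loadable from the Hub and transformers >= 4.40.0 is installed:

from transformers import AutoConfig

# Repo id taken from the "_name_or_path" value in this diff.
config = AutoConfig.from_pretrained("GamblerOnTrain/danke30IPA01")

# Fields introduced by this commit; expected values come from the diff.
print(config.qk_layernorm)           # False
print(config.use_parallel_residual)  # False
print(config.transformers_version)   # "4.40.0"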
model-00001-of-00001.safetensors
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:398cc168a80a48e7d53fc9e960a2b25dec303a5ef05fa2c919f8dc9904d7d8c4
 size 3289069520
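Only the sha256 oid of the Git LFS pointer changed; the 3.3 GB weight payload itself is fetched at download time. A sketch for checking a local copy against the new oid (the file path is whatever the download produced):

import hashlib

# Hash the downloaded file in 1 MiB chunks to avoid loading 3.3 GB at once.
h = hashlib.sha256()
with open("model-00001-of-00001.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

# oid taken verbatim from the LFS pointer in this commit.
assert h.hexdigest() == "398cc168a80a48e7d53fc9e960a2b25dec303a5ef05fa2c919f8dc9904d7d8c4"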
special_tokens_map.json
CHANGED

@@ -35,14 +35,14 @@
     "<|extra0|>"
   ],
   "bos_token": {
-    "content": "
+    "content": "<s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
-    "content": "
+    "content": "</s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -56,7 +56,7 @@
     "single_word": false
   },
   "unk_token": {
-    "content": "
+    "content": "<unk>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json
CHANGED

@@ -299,6 +299,33 @@
       "rstrip": false,
       "normalized": false,
       "special": true
+    },
+    {
+      "id": 100289,
+      "content": "<s>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 100290,
+      "content": "</s>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 100291,
+      "content": "<unk>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
     }
   ],
   "normalizer": null,
@@ -336,6 +363,7 @@
   "end_of_word_suffix": "",
   "fuse_unk": false,
   "byte_fallback": false,
+  "ignore_merges": false,
   "vocab": {
     "!": 0,
     "\"": 1,
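This commit registers <s>, </s>, and <unk> as added special tokens at ids 100289-100291 and adds the newer "ignore_merges" BPE field. A quick check that the ids line up, again assuming the repo loads from the Hub:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("GamblerOnTrain/danke30IPA01")

# Ids taken from the added_tokens entries in this diff.
print(tok.convert_tokens_to_ids("<s>"))    # 100289
print(tok.convert_tokens_to_ids("</s>"))   # 100290
print(tok.convert_tokens_to_ids("<unk>"))  # 100291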
tokenizer_config.json
CHANGED

@@ -264,6 +264,30 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "100289": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100290": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100291": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
     }
   },
   "additional_special_tokens": [
@@ -301,12 +325,12 @@
   "<|reg7|>",
   "<|extra0|>"
 ],
-  "bos_token": "
+  "bos_token": "<s>",
   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
   "clean_up_tokenization_spaces": true,
-  "eos_token": "
+  "eos_token": "</s>",
   "model_max_length": 2048,
   "pad_token": "<|endoftext|>",
   "tokenizer_class": "GPT2Tokenizer",
-  "unk_token": "
+  "unk_token": "<unk>"
 }
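The chat_template is the Zephyr-style template: each turn is wrapped in a <|system|>/<|user|>/<|assistant|> block and closed with eos_token, which this commit retargets to </s>. A sketch of rendering a prompt with it, under the same Hub-loading assumption:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("GamblerOnTrain/danke30IPA01")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# add_generation_prompt=True appends the trailing '<|assistant|>' block
# via the template's loop.last branch.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # each completed turn now ends with "</s>"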