Training in progress, step 69
- chat_template.jinja +5 -54
- config.json +23 -48
- debug.log +0 -0
- generation_config.json +7 -8
- model.safetensors +2 -2
- tokenizer.json +2 -2
- tokenizer_config.json +9 -24
- training_args.bin +2 -2
chat_template.jinja
CHANGED
@@ -1,54 +1,5 @@
-{
-
-
-
-
-{{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}
-{%- endif %}
-{{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
-{%- for tool in tools %}
-{{- "\n" }}
-{{- tool | tojson }}
-{%- endfor %}
-{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
-{%- else %}
-{%- if messages[0]['role'] == 'system' %}
-{{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
-{%- else %}
-{{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
-{%- endif %}
-{%- endif %}
-{%- for message in messages %}
-{%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
-{{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
-{%- elif message.role == "assistant" %}
-{{- '<|im_start|>' + message.role }}
-{%- if message.content %}
-{{- '\n' + message.content }}
-{%- endif %}
-{%- for tool_call in message.tool_calls %}
-{%- if tool_call.function is defined %}
-{%- set tool_call = tool_call.function %}
-{%- endif %}
-{{- '\n<tool_call>\n{"name": "' }}
-{{- tool_call.name }}
-{{- '", "arguments": ' }}
-{{- tool_call.arguments | tojson }}
-{{- '}\n</tool_call>' }}
-{%- endfor %}
-{{- '<|im_end|>\n' }}
-{%- elif message.role == "tool" %}
-{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
-{{- '<|im_start|>user' }}
-{%- endif %}
-{{- '\n<tool_response>\n' }}
-{{- message.content }}
-{{- '\n</tool_response>' }}
-{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
-{{- '<|im_end|>\n' }}
-{%- endif %}
-{%- endif %}
-{%- endfor %}
-{%- if add_generation_prompt %}
-{{- '<|im_start|>assistant\n' }}
-{%- endif %}
+{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>
+
+'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>
+
+' }}{% endif %}
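The template switches from the Qwen ChatML format (<|im_start|>/<|im_end|>, including tool-call handling) to the Llama 3 header format. Below is a minimal sketch of what the new template produces when rendered with plain Jinja2; the template string is copied from the added lines above, and the example messages are invented for illustration.

# Sketch: render the new chat template with plain Jinja2 (no checkpoint needed).
from jinja2 import Template

chat_template = (
    "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}"
    "{% set loop_messages = messages %}{% for message in loop_messages %}"
    "{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'"
    "+ message['content'] | trim + '<|eot_id|>' %}"
    "{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}"
    "{{ content }}{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

rendered = Template(chat_template).render(
    messages=messages,
    bos_token="<|begin_of_text|>",
    add_generation_prompt=True,
)
print(rendered)
# <|begin_of_text|><|start_header_id|>system<|end_header_id|>
#
# You are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>
#
# Hello!<|eot_id|><|start_header_id|>assistant<|end_header_id|>
#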
config.json
CHANGED
@@ -1,61 +1,36 @@
 {
   "architectures": [
-    "
+    "LlamaForCausalLM"
   ],
+  "attention_bias": false,
   "attention_dropout": 0.0,
-  "bos_token_id":
+  "bos_token_id": 128000,
   "dtype": "float32",
-  "eos_token_id":
+  "eos_token_id": 128009,
+  "head_dim": 64,
   "hidden_act": "silu",
-  "hidden_size":
+  "hidden_size": 2048,
   "initializer_range": 0.02,
-  "intermediate_size":
-  "
-
-
-
-
-
-
-
-
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention"
-  ],
-  "max_position_embeddings": 32768,
-  "max_window_layers": 21,
-  "model_type": "qwen2",
-  "num_attention_heads": 12,
-  "num_hidden_layers": 28,
-  "num_key_value_heads": 2,
-  "pad_token_id": 151643,
-  "rms_norm_eps": 1e-06,
+  "intermediate_size": 8192,
+  "max_position_embeddings": 131072,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 16,
+  "num_key_value_heads": 8,
+  "pad_token_id": 128001,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
   "rope_parameters": {
-    "
-    "
+    "factor": 32.0,
+    "high_freq_factor": 4.0,
+    "low_freq_factor": 1.0,
+    "original_max_position_embeddings": 8192,
+    "rope_theta": 500000.0,
+    "rope_type": "llama3"
   },
-  "sliding_window": null,
   "tie_word_embeddings": true,
   "transformers_version": "5.0.0",
   "use_cache": false,
-  "
-  "vocab_size": 151936
+  "vocab_size": 128256
 }
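The new config describes a 16-layer Llama-style model with grouped-query attention, replacing the previous Qwen2 configuration. A small sanity-check sketch over the values above (pure Python, no checkpoint download; the numbers are copied from the new config.json):

# Sketch: check a few derived quantities implied by the new config.
config = {
    "hidden_size": 2048,
    "intermediate_size": 8192,
    "num_hidden_layers": 16,
    "num_attention_heads": 32,
    "num_key_value_heads": 8,
    "head_dim": 64,
    "vocab_size": 128256,
}

# head_dim is consistent with hidden_size / num_attention_heads.
assert config["hidden_size"] // config["num_attention_heads"] == config["head_dim"]

# Grouped-query attention: each key/value head is shared by this many query heads.
gqa_group_size = config["num_attention_heads"] // config["num_key_value_heads"]
print(f"query heads per KV head: {gqa_group_size}")  # 4

# With "tie_word_embeddings": true, the input embedding matrix is reused as the LM head.
embedding_params = config["vocab_size"] * config["hidden_size"]
print(f"embedding parameters: {embedding_params:,}")  # 262,668,288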
debug.log
CHANGED
The diff for this file is too large to render.
generation_config.json
CHANGED
@@ -1,14 +1,13 @@
 {
-  "bos_token_id":
+  "bos_token_id": 128000,
   "do_sample": true,
   "eos_token_id": [
-
-
+    128001,
+    128008,
+    128009
   ],
-  "pad_token_id":
-  "
-  "
-  "top_k": 20,
-  "top_p": 0.8,
+  "pad_token_id": 128001,
+  "temperature": 0.6,
+  "top_p": 0.9,
   "transformers_version": "5.0.0"
 }
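The sampling defaults move to Llama 3 style values (temperature 0.6, top_p 0.9, no top_k) and the eos/pad ids switch to the new vocabulary. A minimal sketch of the same settings expressed as a transformers GenerationConfig object; the values are copied from the new generation_config.json, and in practice they would usually be loaded with GenerationConfig.from_pretrained from the checkpoint directory:

# Sketch: the updated sampling settings as a GenerationConfig object.
from transformers import GenerationConfig

gen_config = GenerationConfig(
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
    bos_token_id=128000,
    eos_token_id=[128001, 128008, 128009],  # generation stops on any of these ids
    pad_token_id=128001,
)
print(gen_config)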
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:049b14823f6c179ed101bfa80b8b9c83eca5c3d7a67ef2785bfc3e2ed43eb97e
+size 5993947576
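model.safetensors (like tokenizer.json and training_args.bin below) is stored through Git LFS, so the diff only shows the updated pointer, i.e. the sha256 oid and byte size, not the weights themselves. A small sketch for checking a locally downloaded copy against the pointer; the local path is an assumption for illustration:

# Sketch: verify a downloaded model.safetensors against the LFS pointer above.
import hashlib
from pathlib import Path

path = Path("model.safetensors")  # hypothetical local copy
expected_oid = "049b14823f6c179ed101bfa80b8b9c83eca5c3d7a67ef2785bfc3e2ed43eb97e"
expected_size = 5993947576

sha = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        sha.update(chunk)

assert path.stat().st_size == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("pointer matches local file")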
tokenizer.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b
+size 17209920
tokenizer_config.json
CHANGED
@@ -1,29 +1,14 @@
 {
-  "add_prefix_space": false,
   "backend": "tokenizers",
-  "bos_token": "<|
-  "clean_up_tokenization_spaces":
-  "eos_token": "<|
-  "errors": "replace",
-  "extra_special_tokens": [
-    "<|im_start|>",
-    "<|im_end|>",
-    "<|object_ref_start|>",
-    "<|object_ref_end|>",
-    "<|box_start|>",
-    "<|box_end|>",
-    "<|quad_start|>",
-    "<|quad_end|>",
-    "<|vision_start|>",
-    "<|vision_end|>",
-    "<|vision_pad|>",
-    "<|image_pad|>",
-    "<|video_pad|>"
-  ],
+  "bos_token": "<|begin_of_text|>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|eot_id|>",
   "is_local": false,
+  "model_input_names": [
+    "input_ids",
+    "attention_mask"
+  ],
   "model_max_length": 131072,
-  "pad_token": "<|
-  "
-  "tokenizer_class": "Qwen2Tokenizer",
-  "unk_token": null
+  "pad_token": "<|end_of_text|>",
+  "tokenizer_class": "TokenizersBackend"
 }
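The tokenizer moves from the Qwen2 special tokens to the Llama 3 ones (<|begin_of_text|>, <|eot_id|>, <|end_of_text|>). A minimal sketch of how the new setup looks once the checkpoint is loaded; "./checkpoint-69" is a hypothetical local path, and the ids shown in the comments follow bos_token_id, eos_token_id, and pad_token_id from config.json above:

# Sketch: inspect the new special-token setup from a local checkpoint directory.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-69")  # hypothetical path

print(tok.bos_token, tok.bos_token_id)  # expected: <|begin_of_text|> 128000
print(tok.eos_token, tok.eos_token_id)  # expected: <|eot_id|>        128009
print(tok.pad_token, tok.pad_token_id)  # expected: <|end_of_text|>   128001

# Padding uses <|end_of_text|>, matching pad_token_id 128001 in config.json
# and generation_config.json.
batch = tok(["Hello!", "A longer example sentence."], padding=True)
print(batch["input_ids"])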
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:956b4a5cc1fa36a1f1ae16061ee1c063868c773f4e2e2f705b946f16c0ad45e2
+size 7697