Zhaoming213 committed on
Commit b91e9f5 · verified · 1 Parent(s): 0574826

Upload 6 files

chat_template.jinja ADDED
@@ -0,0 +1,74 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].role == 'system' %}
+ {{- messages[0].content + '\n\n' }}
+ {%- endif %}
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0]['role'] == 'system' -%}
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+ {%- else -%}
+ {{- '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+ {%- set index = (messages|length - 1) - loop.index0 %}
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
+ {%- set ns.multi_step_tool = false %}
+ {%- set ns.last_query_index = index %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+ {%- if message.content is string %}
+ {%- set content = message.content %}
+ {%- else %}
+ {%- set content = '' %}
+ {%- endif %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- if message.tool_calls %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if (loop.first and content) or (not loop.first) %}
+ {{- '\n' }}
+ {%- endif %}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {%- if tool_call.arguments is string %}
+ {{- tool_call.arguments }}
+ {%- else %}
+ {{- tool_call.arguments | tojson }}
+ {%- endif %}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- if enable_thinking is defined and enable_thinking is false %}
+ {{- '<think>\n\n</think>\n\n' }}
+ {%- endif %}
+ {%- endif %}
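
For reference, a minimal sketch of exercising this template through `transformers`; the local path and the example tool schema are illustrative assumptions, not part of this upload:

```python
# A minimal sketch, assuming the files in this commit are downloaded to a
# local directory; the path and the tool schema below are hypothetical.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./local-checkpoint-dir")

tools = [{
    "type": "function",
    "function": {  # hypothetical example tool, not part of the upload
        "name": "get_weather",
        "description": "Look up the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

messages = [
    {"role": "system", "content": "You are a helpful assistant"},
    {"role": "user", "content": "What's the weather in Paris?"},
]

# Renders the ChatML-style prompt: tool schemas are injected into the
# <tools>...</tools> system block, and add_generation_prompt appends
# '<|im_start|>assistant\n'.
prompt = tokenizer.apply_chat_template(
    messages,
    tools=tools,
    add_generation_prompt=True,
    tokenize=False,
    enable_thinking=False,  # template then emits an empty <think> block
)
print(prompt)
```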
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "dtype": "float16",
+ "eos_token_id": 2,
+ "head_dim": 64,
+ "hidden_act": "silu",
+ "hidden_size": 512,
+ "initializer_range": 0.02,
+ "intermediate_size": 1408,
+ "max_position_embeddings": 32768,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "num_attention_heads": 8,
+ "num_hidden_layers": 8,
+ "num_key_value_heads": 2,
+ "pad_token_id": null,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_parameters": {
+ "rope_theta": 1000000.0,
+ "rope_type": "default"
+ },
+ "tie_word_embeddings": true,
+ "transformers_version": "5.3.0",
+ "use_cache": true,
+ "vocab_size": 6400
+ }
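
A back-of-the-envelope check of the model size implied by this config; a sketch that assumes the standard `LlamaForCausalLM` weight layout (GQA attention, gated SiLU MLP, two RMSNorms per layer):

```python
# Parameter count estimated from config.json above (a sketch; shapes assume
# the standard transformers LlamaForCausalLM layout).
hidden, inter, vocab = 512, 1408, 6400
layers, heads, kv_heads, head_dim = 8, 8, 2, 64

embed = vocab * hidden                    # tied with lm_head, counted once
attn = hidden * heads * head_dim * 2      # q_proj + o_proj
attn += hidden * kv_heads * head_dim * 2  # k_proj + v_proj (GQA: 2 kv heads)
mlp = hidden * inter * 3                  # gate_proj, up_proj, down_proj
norms = hidden * 2                        # two RMSNorms per layer
total = embed + layers * (attn + mlp + norms) + hidden  # + final norm

print(total)      # 25,829,888 parameters
print(total * 2)  # ~51.66 MB in float16, consistent with model.safetensors
                  # below (51,667,832 bytes including the safetensors header)
```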
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "transformers_version": "5.3.0",
+ "use_cache": true
+ }
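
These defaults are picked up by `generate()` automatically; a minimal sketch, again assuming the checkpoint sits in a hypothetical local directory:

```python
# A minimal sketch; "./local-checkpoint-dir" is an assumed local path.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./local-checkpoint-dir")
model = AutoModelForCausalLM.from_pretrained(
    "./local-checkpoint-dir", torch_dtype=torch.float16
)

inputs = tokenizer("Hello", return_tensors="pt")
# bos/eos token ids (1/2) and use_cache come from generation_config.json.
out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```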
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4227458238624957ac5f191fc8794ab58bf206f1c35ab2a9cf2a271dec4c10a4
+ size 51667832
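
`model.safetensors` is stored via Git LFS, so the diff shows only the pointer file; the actual ~51.7 MB weight blob can be checked against the pointer's hash after download:

```python
# A small sketch: verify a downloaded model.safetensors against the
# sha256 oid recorded in the LFS pointer above.
import hashlib

EXPECTED = "4227458238624957ac5f191fc8794ab58bf206f1c35ab2a9cf2a271dec4c10a4"

with open("model.safetensors", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert digest == EXPECTED, "downloaded file does not match the LFS pointer"
```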
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "add_prefix_space": false,
+ "backend": "tokenizers",
+ "bos_token": "<|im_start|>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "extra_special_tokens": {},
+ "is_local": true,
+ "legacy": true,
+ "model_max_length": 32768,
+ "pad_token": "<|endoftext|>",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "PreTrainedTokenizerFast",
+ "unk_token": "<|endoftext|>"
+ }
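
The special-token assignments above can be sanity-checked once the tokenizer loads (a sketch, same assumed local path as before):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./local-checkpoint-dir")  # assumed path
# Expected per tokenizer_config.json: <|im_start|> <|im_end|> <|endoftext|>
print(tok.bos_token, tok.eos_token, tok.pad_token)
```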