bimabk commited on
Commit
4814b81
·
verified ·
1 Parent(s): b827c8e

Upload task output 1

Browse files
README.md CHANGED
@@ -3,7 +3,7 @@ base_model: None
3
  library_name: peft
4
  pipeline_tag: text-generation
5
  tags:
6
- - base_model:adapter:/cache/models/Qwen--Qwen2.5-3B-Instruct
7
  - grpo
8
  - lora
9
  - transformers
 
3
  library_name: peft
4
  pipeline_tag: text-generation
5
  tags:
6
+ - base_model:adapter:/cache/models/codellama--CodeLlama-7b-Instruct-hf
7
  - grpo
8
  - lora
9
  - transformers
adapter_config.json CHANGED
@@ -16,7 +16,7 @@
16
  "layers_pattern": null,
17
  "layers_to_transform": null,
18
  "loftq_config": {},
19
- "lora_alpha": 256,
20
  "lora_bias": false,
21
  "lora_dropout": 0.05,
22
  "megatron_config": null,
@@ -25,16 +25,16 @@
25
  "peft_type": "LORA",
26
  "peft_version": "0.18.1",
27
  "qalora_group_size": 16,
28
- "r": 128,
29
  "rank_pattern": {},
30
  "revision": null,
31
  "target_modules": [
32
- "up_proj",
33
- "o_proj",
34
  "v_proj",
35
- "gate_proj",
36
  "down_proj",
 
 
37
  "k_proj",
 
38
  "q_proj"
39
  ],
40
  "target_parameters": null,
 
16
  "layers_pattern": null,
17
  "layers_to_transform": null,
18
  "loftq_config": {},
19
+ "lora_alpha": 64,
20
  "lora_bias": false,
21
  "lora_dropout": 0.05,
22
  "megatron_config": null,
 
25
  "peft_type": "LORA",
26
  "peft_version": "0.18.1",
27
  "qalora_group_size": 16,
28
+ "r": 32,
29
  "rank_pattern": {},
30
  "revision": null,
31
  "target_modules": [
 
 
32
  "v_proj",
 
33
  "down_proj",
34
+ "up_proj",
35
+ "o_proj",
36
  "k_proj",
37
+ "gate_proj",
38
  "q_proj"
39
  ],
40
  "target_parameters": null,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:51dcc60062ae1b1cf80ce9f9cf0b07ba4a42abdffd827bf4c19f5b4c6152a313
3
- size 957942768
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d4e9eebb2b73438828d23b8d54ab10ce0c2534a61b6aa0633e1866c67f9e1d8
3
+ size 319876032
chat_template.jinja CHANGED
@@ -1,54 +1 @@
1
- {%- if tools %}
2
- {{- '<|im_start|>system\n' }}
3
- {%- if messages[0]['role'] == 'system' %}
4
- {{- messages[0]['content'] }}
5
- {%- else %}
6
- {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}
7
- {%- endif %}
8
- {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
- {%- for tool in tools %}
10
- {{- "\n" }}
11
- {{- tool | tojson }}
12
- {%- endfor %}
13
- {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
- {%- else %}
15
- {%- if messages[0]['role'] == 'system' %}
16
- {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
- {%- else %}
18
- {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
19
- {%- endif %}
20
- {%- endif %}
21
- {%- for message in messages %}
22
- {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
- {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
- {%- elif message.role == "assistant" %}
25
- {{- '<|im_start|>' + message.role }}
26
- {%- if message.content %}
27
- {{- '\n' + message.content }}
28
- {%- endif %}
29
- {%- for tool_call in message.tool_calls %}
30
- {%- if tool_call.function is defined %}
31
- {%- set tool_call = tool_call.function %}
32
- {%- endif %}
33
- {{- '\n<tool_call>\n{"name": "' }}
34
- {{- tool_call.name }}
35
- {{- '", "arguments": ' }}
36
- {{- tool_call.arguments | tojson }}
37
- {{- '}\n</tool_call>' }}
38
- {%- endfor %}
39
- {{- '<|im_end|>\n' }}
40
- {%- elif message.role == "tool" %}
41
- {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
- {{- '<|im_start|>user' }}
43
- {%- endif %}
44
- {{- '\n<tool_response>\n' }}
45
- {{- message.content }}
46
- {{- '\n</tool_response>' }}
47
- {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
- {{- '<|im_end|>\n' }}
49
- {%- endif %}
50
- {%- endif %}
51
- {%- endfor %}
52
- {%- if add_generation_prompt %}
53
- {{- '<|im_start|>assistant\n' }}
54
- {%- endif %}
 
1
+ {% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\n' + system_message + '\n<</SYS>>\n\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content | trim + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content | trim + ' ' + eos_token }}{% endif %}{% endfor %}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
loss.txt CHANGED
@@ -1 +1 @@
1
- 156,-1.1483332872390748
 
1
+ 75,no_eval
special_tokens_map.json CHANGED
@@ -1,28 +1,27 @@
1
  {
2
  "additional_special_tokens": [
3
- "<|im_start|>",
4
- "<|im_end|>",
5
- "<|object_ref_start|>",
6
- "<|object_ref_end|>",
7
- "<|box_start|>",
8
- "<|box_end|>",
9
- "<|quad_start|>",
10
- "<|quad_end|>",
11
- "<|vision_start|>",
12
- "<|vision_end|>",
13
- "<|vision_pad|>",
14
- "<|image_pad|>",
15
- "<|video_pad|>"
16
  ],
 
 
 
 
 
 
 
17
  "eos_token": {
18
- "content": "<|im_end|>",
19
  "lstrip": false,
20
  "normalized": false,
21
  "rstrip": false,
22
  "single_word": false
23
  },
24
- "pad_token": {
25
- "content": "<|endoftext|>",
 
26
  "lstrip": false,
27
  "normalized": false,
28
  "rstrip": false,
 
1
  {
2
  "additional_special_tokens": [
3
+ "▁<PRE>",
4
+ "▁<MID>",
5
+ "▁<SUF>",
6
+ "▁<EOT>"
 
 
 
 
 
 
 
 
 
7
  ],
8
+ "bos_token": {
9
+ "content": "<s>",
10
+ "lstrip": false,
11
+ "normalized": false,
12
+ "rstrip": false,
13
+ "single_word": false
14
+ },
15
  "eos_token": {
16
+ "content": "</s>",
17
  "lstrip": false,
18
  "normalized": false,
19
  "rstrip": false,
20
  "single_word": false
21
  },
22
+ "pad_token": "</s>",
23
+ "unk_token": {
24
+ "content": "<unk>",
25
  "lstrip": false,
26
  "normalized": false,
27
  "rstrip": false,
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3fd169731d2cbde95e10bf356d66d5997fd885dd8dbb6fb4684da3f23b2585d8
3
- size 11421892
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2206073a6598893988ad5f1d96aa193d274fd6ad2a8d8c7ab8f56b29b6d4d0aa
3
+ size 3620731
tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45ccb9c8b6b561889acea59191d66986d314e7cbd6a78abc6e49b139ca91c1e6
3
+ size 500058
tokenizer_config.json CHANGED
@@ -1,29 +1,84 @@
1
  {
2
- "add_prefix_space": false,
3
- "backend": "tokenizers",
4
- "bos_token": null,
5
- "clean_up_tokenization_spaces": false,
6
- "eos_token": "<|im_end|>",
7
- "errors": "replace",
8
- "extra_special_tokens": [
9
- "<|im_start|>",
10
- "<|im_end|>",
11
- "<|object_ref_start|>",
12
- "<|object_ref_end|>",
13
- "<|box_start|>",
14
- "<|box_end|>",
15
- "<|quad_start|>",
16
- "<|quad_end|>",
17
- "<|vision_start|>",
18
- "<|vision_end|>",
19
- "<|vision_pad|>",
20
- "<|image_pad|>",
21
- "<|video_pad|>"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  ],
23
- "is_local": true,
24
- "model_max_length": 131072,
25
- "pad_token": "<|endoftext|>",
26
- "split_special_tokens": false,
27
- "tokenizer_class": "Qwen2Tokenizer",
28
- "unk_token": null
 
 
 
 
 
 
 
 
 
 
29
  }
 
1
  {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<unk>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<s>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "32007": {
30
+ "content": "▁<PRE>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "32008": {
38
+ "content": "▁<SUF>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "32009": {
46
+ "content": "▁<MID>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "32010": {
54
+ "content": "▁<EOT>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ }
61
+ },
62
+ "additional_special_tokens": [
63
+ "▁<PRE>",
64
+ "▁<MID>",
65
+ "▁<SUF>",
66
+ "▁<EOT>"
67
  ],
68
+ "bos_token": "<s>",
69
+ "clean_up_tokenization_spaces": false,
70
+ "eos_token": "</s>",
71
+ "eot_token": "▁<EOT>",
72
+ "extra_special_tokens": {},
73
+ "fill_token": "<FILL_ME>",
74
+ "legacy": null,
75
+ "middle_token": "▁<MID>",
76
+ "model_max_length": 1000000000000000019884624838656,
77
+ "pad_token": "</s>",
78
+ "prefix_token": "▁<PRE>",
79
+ "sp_model_kwargs": {},
80
+ "suffix_token": "▁<SUF>",
81
+ "tokenizer_class": "CodeLlamaTokenizer",
82
+ "unk_token": "<unk>",
83
+ "use_default_system_prompt": false
84
  }
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:906bc07f18d85f3fdbe47d01e60bbe6f967852d19caecc88d502ce07c5e4aa78
3
- size 7185
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb0e24b2a66d930ca7964cb444586aae8c29ce9e3b9f7dbedde1321db29e772e
3
+ size 8081