aaronatfar committed b39de99 (verified) · Parent: 0db79be

Upload folder using huggingface_hub
adapter_config.json CHANGED
@@ -13,31 +13,31 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 1024,
+  "lora_alpha": 32,
   "lora_bias": false,
   "lora_dropout": 0.0,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 512,
+  "r": 16,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "model.layers.0.mlp.down_proj",
     "model.layers.1.self_attn.v_proj",
-    "model.layers.0.self_attn.o_proj",
-    "model.layers.0.self_attn.k_proj",
-    "model.layers.0.self_attn.v_proj",
-    "model.layers.1.mlp.down_proj",
     "model.layers.1.self_attn.q_proj",
-    "model.layers.1.mlp.gate_proj",
+    "model.layers.0.mlp.up_proj",
     "model.layers.1.mlp.up_proj",
-    "model.layers.0.mlp.gate_proj",
+    "model.layers.1.self_attn.k_proj",
+    "model.layers.1.mlp.gate_proj",
+    "model.layers.0.self_attn.o_proj",
+    "model.layers.1.mlp.down_proj",
     "model.layers.1.self_attn.o_proj",
     "model.layers.0.self_attn.q_proj",
-    "model.layers.0.mlp.up_proj",
-    "model.layers.1.self_attn.k_proj",
-    "model.layers.0.mlp.down_proj"
+    "model.layers.0.mlp.gate_proj",
+    "model.layers.0.self_attn.k_proj",
+    "model.layers.0.self_attn.v_proj"
   ],
   "task_type": "CAUSAL_LM",
   "trainable_token_indices": null,
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e2324fd01baa069b213e40b09329392972609bcd3e155fcdb9a3577faf13b65e
-size 72362139
+oid sha256:9c407ebffc2778b2c34cdc35c99cf83f8a58cf658964cf177f3bf95271fe0881
+size 2271259
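
The smaller LFS pointer is consistent with the config change: LoRA parameter count scales linearly with r, so cutting r from 512 to 16 should shrink adapter_model.bin by roughly 32x. A quick sanity check on the two sizes (a sketch; the small remainder is serialization overhead):

# Rough sanity check: the adapter should be about 512/16 = 32x smaller.
old_size, new_size = 72_362_139, 2_271_259
print(old_size / new_size)   # ~31.9, close to the expected 32x
print(old_size * 16 // 512)  # ~2_261_316 bytes predicted vs 2_271_259 actual
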
special_tokens_map.json CHANGED
@@ -13,13 +13,7 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "pad_token": "<unk>",
   "unk_token": {
     "content": "<unk>",
     "lstrip": false,
tokenizer_config.json CHANGED
@@ -34,7 +34,7 @@
   "extra_special_tokens": {},
   "legacy": true,
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "</s>",
+  "pad_token": "<unk>",
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",