yuanyun committed
Commit f62404f · 1 Parent(s): 147733b

feat: add Mistral-lora

Files changed (2):
  1. adapter_config.json (+4 -4)
  2. adapter_model.bin (+2 -2)
adapter_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "auto_mapping": null,
-  "base_model_name_or_path": "./quantized/test",
+  "base_model_name_or_path": "../data/Mistral-7B-v0.1",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -8,14 +8,14 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "lora_alpha": 16.0,
-  "lora_dropout": 0.05,
+  "lora_dropout": 0.1,
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 64,
+  "r": 16,
   "revision": null,
   "target_modules": [
-    "o_proj",
     "q_proj",
+    "o_proj",
     "gate_proj",
     "up_proj",
     "down_proj",
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab2ef5577c310f5e8e45e79985ec21ac751ba0fe888e7c445428809c409524de
-size 279957837
+oid sha256:6585ab6e73bab173e6f36d89a2569c5e44aa5f3cdbfb5de0b37d5b2ba5b7d2d8
+size 84047501
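
The checkpoint shrinks from ~280 MB to ~84 MB, which is consistent with the config change: LoRA adapter parameter count scales linearly with r, so lowering r from 64 to 16 accounts for most of the size drop. Below is a minimal sketch of loading the new adapter, assuming the relative base-model path from adapter_config.json resolves locally and the adapter files sit in the current directory.

from transformers import AutoModelForCausalLM
from peft import PeftModel

# Load the frozen Mistral base, then attach the LoRA weights from this repo.
base = AutoModelForCausalLM.from_pretrained("../data/Mistral-7B-v0.1")
model = PeftModel.from_pretrained(base, ".")  # dir with adapter_config.json + adapter_model.bin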