gpol13 committed on
Commit
1bc4732
·
verified ·
1 Parent(s): a9f5dbc

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. README.md +6 -0
  2. adapter_config.json +14 -2
  3. tokenizer_config.json +4 -33
README.md CHANGED
@@ -1,6 +1,11 @@
1
  ---
2
  base_model: mistralai/Mistral-7B-v0.1
3
  library_name: peft
 
 
 
 
 
4
  ---
5
 
6
  # Model Card for Model ID
@@ -199,4 +204,5 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
199
  [More Information Needed]
200
  ### Framework versions
201
 
 
202
  - PEFT 0.12.0
 
1
  ---
2
  base_model: mistralai/Mistral-7B-v0.1
3
  library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:mistralai/Mistral-7B-v0.1
7
+ - lora
8
+ - transformers
9
  ---
10
 
11
  # Model Card for Model ID
 
204
  [More Information Needed]
205
  ### Framework versions
206
 
207
+ - PEFT 0.18.0
208
  - PEFT 0.12.0
adapter_config.json CHANGED
@@ -1,8 +1,14 @@
1
  {
 
2
  "alpha_pattern": {},
 
3
  "auto_mapping": null,
4
  "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
5
  "bias": "none",
 
 
 
 
6
  "fan_in_fan_out": false,
7
  "inference_mode": true,
8
  "init_lora_weights": true,
@@ -11,21 +17,27 @@
11
  "layers_to_transform": null,
12
  "loftq_config": {},
13
  "lora_alpha": 32,
 
14
  "lora_dropout": 0.05,
15
  "megatron_config": null,
16
  "megatron_core": "megatron.core",
17
  "modules_to_save": null,
18
  "peft_type": "LORA",
 
 
19
  "r": 16,
20
  "rank_pattern": {},
21
  "revision": null,
22
  "target_modules": [
23
- "q_proj",
24
  "o_proj",
25
  "v_proj",
26
- "k_proj"
 
27
  ],
 
28
  "task_type": "CAUSAL_LM",
 
29
  "use_dora": false,
 
30
  "use_rslora": false
31
  }
 
1
  {
2
+ "alora_invocation_tokens": null,
3
  "alpha_pattern": {},
4
+ "arrow_config": null,
5
  "auto_mapping": null,
6
  "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
7
  "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
  "fan_in_fan_out": false,
13
  "inference_mode": true,
14
  "init_lora_weights": true,
 
17
  "layers_to_transform": null,
18
  "loftq_config": {},
19
  "lora_alpha": 32,
20
+ "lora_bias": false,
21
  "lora_dropout": 0.05,
22
  "megatron_config": null,
23
  "megatron_core": "megatron.core",
24
  "modules_to_save": null,
25
  "peft_type": "LORA",
26
+ "peft_version": "0.18.0",
27
+ "qalora_group_size": 16,
28
  "r": 16,
29
  "rank_pattern": {},
30
  "revision": null,
31
  "target_modules": [
 
32
  "o_proj",
33
  "v_proj",
34
+ "k_proj",
35
+ "q_proj"
36
  ],
37
+ "target_parameters": null,
38
  "task_type": "CAUSAL_LM",
39
+ "trainable_token_indices": null,
40
  "use_dora": false,
41
+ "use_qalora": false,
42
  "use_rslora": false
43
  }
tokenizer_config.json CHANGED
@@ -2,47 +2,18 @@
2
  "add_bos_token": true,
3
  "add_eos_token": false,
4
  "add_prefix_space": null,
5
- "added_tokens_decoder": {
6
- "0": {
7
- "content": "<unk>",
8
- "lstrip": false,
9
- "normalized": false,
10
- "rstrip": false,
11
- "single_word": false,
12
- "special": true
13
- },
14
- "1": {
15
- "content": "<s>",
16
- "lstrip": false,
17
- "normalized": false,
18
- "rstrip": false,
19
- "single_word": false,
20
- "special": true
21
- },
22
- "2": {
23
- "content": "</s>",
24
- "lstrip": false,
25
- "normalized": false,
26
- "rstrip": false,
27
- "single_word": false,
28
- "special": true
29
- }
30
- },
31
- "additional_special_tokens": [],
32
  "bos_token": "<s>",
33
  "clean_up_tokenization_spaces": false,
34
  "eos_token": "</s>",
35
- "extra_special_tokens": {},
36
- "legacy": false,
37
- "max_length": 256,
38
  "model_max_length": 1000000000000000019884624838656,
39
  "pad_token": "</s>",
40
  "sp_model_kwargs": {},
41
  "spaces_between_special_tokens": false,
42
- "stride": 0,
43
  "tokenizer_class": "LlamaTokenizer",
44
- "truncation_side": "right",
45
- "truncation_strategy": "longest_first",
46
  "unk_token": "<unk>",
47
  "use_default_system_prompt": false
48
  }
 
2
  "add_bos_token": true,
3
  "add_eos_token": false,
4
  "add_prefix_space": null,
5
+ "additional_special_tokens": null,
6
+ "backend": "tokenizers",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  "bos_token": "<s>",
8
  "clean_up_tokenization_spaces": false,
9
  "eos_token": "</s>",
10
+ "extra_special_tokens": [],
11
+ "is_local": false,
 
12
  "model_max_length": 1000000000000000019884624838656,
13
  "pad_token": "</s>",
14
  "sp_model_kwargs": {},
15
  "spaces_between_special_tokens": false,
 
16
  "tokenizer_class": "LlamaTokenizer",
 
 
17
  "unk_token": "<unk>",
18
  "use_default_system_prompt": false
19
  }