ManthanKulakarni committed
Commit 0f5cf21 · 1 Parent(s): 2f96190

Upload model
Files changed (2):
  1. README.md +0 -17
  2. adapter_config.json +3 -3
README.md CHANGED
@@ -1,20 +1,3 @@
 ---
 library_name: peft
 ---
-## Training procedure
-
-
-The following `bitsandbytes` quantization config was used during training:
-- load_in_8bit: True
-- load_in_4bit: False
-- llm_int8_threshold: 6.0
-- llm_int8_skip_modules: None
-- llm_int8_enable_fp32_cpu_offload: False
-- llm_int8_has_fp16_weight: False
-- bnb_4bit_quant_type: fp4
-- bnb_4bit_use_double_quant: False
-- bnb_4bit_compute_dtype: float32
-### Framework versions
-
-
-- PEFT 0.4.0.dev0
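For reference, the section removed from the README described the 8-bit quantization setup used during training. Below is a minimal sketch of that setup with the transformers `BitsAndBytesConfig` API; the values mirror the removed list, while the base model id is taken from the previous adapter_config.json and is only an assumption about how the model was loaded, not part of this commit.

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Quantization config as listed in the removed README section.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)

# Assumed usage: load the base model in 8-bit before attaching the LoRA adapter.
# The model id comes from the old adapter_config.json, not from this commit.
base_model = AutoModelForCausalLM.from_pretrained(
    "decapoda-research/llama-7b-hf",
    quantization_config=bnb_config,
    device_map="auto",
)
```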
adapter_config.json CHANGED
@@ -1,16 +1,16 @@
 {
-  "base_model_name_or_path": "decapoda-research/llama-7b-hf",
+  "base_model_name_or_path": "models/decapoda-research_llama-7b-hf",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
   "layers_pattern": null,
   "layers_to_transform": null,
-  "lora_alpha": 16,
+  "lora_alpha": 128,
   "lora_dropout": 0.05,
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 8,
+  "r": 256,
   "revision": null,
   "target_modules": [
     "q_proj",