Logistic12 committed
Commit b513274 · verified · 1 Parent(s): 64b8e4b

workmate-pretrain

README.md CHANGED
@@ -1,9 +1,9 @@
 ---
+library_name: peft
 license: gemma
 base_model: google/gemma-2b
 tags:
 - generated_from_trainer
-library_name: peft
 model-index:
 - name: lora_optput
   results: []
@@ -34,17 +34,17 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 0.0002
-- train_batch_size: 4
-- eval_batch_size: 8
+- train_batch_size: 8
+- eval_batch_size: 16
 - seed: 42
-- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: linear
 - num_epochs: 3
 
 ### Framework versions
 
-- PEFT 0.10.0
-- Transformers 4.40.0
-- Pytorch 2.2.2
-- Datasets 2.18.0
-- Tokenizers 0.19.1
+- PEFT 0.14.0
+- Transformers 4.47.0
+- Pytorch 2.5.1+cu121
+- Datasets 3.3.1
+- Tokenizers 0.21.0
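For readers reproducing this run, here is a minimal sketch of how the updated README hyperparameters map onto `transformers.TrainingArguments`. The `output_dir` and anything not listed in the README are assumptions, not values taken from the repo.

```python
# Minimal sketch, assuming the README hyperparameters above; everything
# else (output_dir, dataset, trainer wiring) is an assumption.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="lora_optput",        # assumed from the model-index name above
    learning_rate=2e-4,              # learning_rate: 0.0002
    per_device_train_batch_size=8,   # train_batch_size: 8
    per_device_eval_batch_size=16,   # eval_batch_size: 16
    seed=42,                         # seed: 42
    optim="adamw_torch",             # optimizer: adamw_torch, default betas/epsilon
    lr_scheduler_type="linear",      # lr_scheduler_type: linear
    num_train_epochs=3,              # num_epochs: 3
)
```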
adapter_config.json CHANGED
@@ -3,6 +3,8 @@
   "auto_mapping": null,
   "base_model_name_or_path": "google/gemma-2b",
   "bias": "none",
+  "eva_config": null,
+  "exclude_modules": null,
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
@@ -11,6 +13,7 @@
   "layers_to_transform": null,
   "loftq_config": {},
   "lora_alpha": 16,
+  "lora_bias": false,
   "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
@@ -23,7 +26,7 @@
     "q_proj",
     "v_proj"
   ],
-  "task_type": "CASUSL_LM",
+  "task_type": "CAUSAL_LM",
   "use_dora": false,
   "use_rslora": false
 }
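The added keys (`eva_config`, `exclude_modules`, `lora_bias`) are fields serialized by newer PEFT releases such as the 0.14.0 listed above, which is why they appear as additions when the config is re-saved; the substantive fix is the `task_type` typo. A sketch of a `peft.LoraConfig` matching the fields visible in these hunks follows; the rank `r` does not appear in the diff, so the value below is the PEFT default, an assumption rather than a value from the file.

```python
# Sketch of a LoraConfig consistent with the diff above. `r` is assumed
# (PEFT default); all other values are taken from the visible hunks.
from peft import LoraConfig

lora_config = LoraConfig(
    base_model_name_or_path="google/gemma-2b",
    r=8,                                  # assumed: rank is not shown in the diff
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",                # the typo "CASUSL_LM" this commit fixes
    use_dora=False,
    use_rslora=False,
)
```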
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:be175e019b4eb86e0bb9856d7c95e8357179df757175e83de0d5b2a447e5b32e
+oid sha256:25745ee49d1a33572183beb853dc7c134155c206df28c5c95383830f9b1365e3
 size 3695848
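This change is only a Git LFS pointer swap: the retrained adapter gets a new content hash but keeps the same 3695848-byte size, as expected for an identical LoRA shape. A hedged sketch of loading the updated adapter over the base model, assuming a local copy at the hypothetical path `./lora_optput`:

```python
# Load the base model, then attach this repo's LoRA adapter.
# "./lora_optput" is a hypothetical local path, not the actual repo id.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("google/gemma-2b")
model = PeftModel.from_pretrained(base, "./lora_optput")
```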
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2b3892a11c5d0a47eadf12115d7f8d66a43b3794edccc4dd9b9af0428e8c66b1
-size 4920
+oid sha256:5d74ebef0249a792f06f792ff1851821ca3aea4f39fe455bbed250acae5f9978
+size 5304
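`training_args.bin` is the pickled `TrainingArguments` object the Trainer saves alongside the model; the size growth (4920 to 5304 bytes) is consistent with the newer Transformers version serializing additional fields. One way to inspect it, assuming a local download:

```python
# training_args.bin is a pickled object, so weights_only=False is
# required on recent PyTorch; only load files you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.optim)
```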