Louis GERARD committed on
Commit a85c59c · verified · 1 Parent(s): 9e3a1ab

Training in progress, epoch 1

README.md CHANGED
@@ -4,8 +4,8 @@ library_name: transformers
  model_name: gemma-text-to-sql
  tags:
  - generated_from_trainer
- - sft
  - trl
+ - sft
  licence: license
  ---
 
@@ -27,7 +27,7 @@ print(output["generated_text"])
 
  ## Training procedure
 
- [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/alternis-universit-de-gen-ve/datamixture-gemma/runs/ipghx4kn)
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/alternis-universit-de-gen-ve/gemma-text-to-sql/runs/qgksc4x6)
 
 
  This model was trained with SFT.
@@ -35,7 +35,7 @@ This model was trained with SFT.
  ### Framework versions
 
  - TRL: 0.19.1
- - Transformers: 4.53.3
+ - Transformers: 4.54.0
  - Pytorch: 2.7.1
  - Datasets: 4.0.0
  - Tokenizers: 0.21.2
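For reference, the hunk context above (`print(output["generated_text"])`) comes from the model card's quick-start snippet, which is not otherwise visible in this diff. A minimal sketch of what that usage typically looks like with the transformers pipeline API; the repo id and prompt below are placeholders, not values taken from this commit:

```python
# Hedged sketch only: repo id and prompt are assumptions, not part of this commit.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="<your-username>/gemma-text-to-sql",  # hypothetical repo id
)

question = "List the names of all customers who placed an order in 2024."
output = generator(question, max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```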
adapter_config.json CHANGED
@@ -18,26 +18,23 @@
   "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
- "modules_to_save": [
-   "lm_head",
-   "embed_tokens"
- ],
+ "modules_to_save": [],
   "peft_type": "LORA",
   "qalora_group_size": 16,
   "r": 16,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-   "v_proj",
+   "fc2",
    "up_proj",
+   "down_proj",
    "fc1",
+   "out_proj",
    "k_proj",
    "gate_proj",
-   "q_proj",
-   "fc2",
-   "out_proj",
-   "down_proj",
-   "o_proj"
+   "v_proj",
+   "o_proj",
+   "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "trainable_token_indices": null,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5b33ac2e3e39b56ec433472abf2c6b46f009ed5a722d59731326df211e6f3d19
- size 2839126480
+ oid sha256:aa8d84ba828f8f725e83a56bde0f0c83a887871b97aa668eac9746e9fff9b327
+ size 154116312
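The adapter file shrinks from roughly 2.8 GB to about 154 MB, consistent with `modules_to_save` now being empty (the full `lm_head` and `embed_tokens` weights are no longer bundled with the LoRA weights). A minimal sketch of loading such an adapter on top of its base model; the base checkpoint id is a placeholder, since `base_model_name_or_path` is not visible in this diff:

```python
# Hedged sketch: "<base-model-id>" and the repo id are placeholders; the real
# base model is recorded in adapter_config.json but not shown in this commit.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("<base-model-id>")
model = PeftModel.from_pretrained(base, "<your-username>/gemma-text-to-sql")
tokenizer = AutoTokenizer.from_pretrained("<base-model-id>")
```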
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:00dbb7ed35fc501014606f48a80bfa9cb279c301bf79a822ff860ae4d5831b77
- size 6161
+ oid sha256:15f5869ef0e8dd0cd9445975cd45cf2640bdbaff3a8bc1e02b94bd43a2c7c25c
+ size 6225