Training in progress, step 1684
- README.md +1 -1
- adapter_config.json +4 -4
- adapter_model.safetensors +1 -1
- tokenizer_config.json +1 -1
- training_args.bin +1 -1
README.md
CHANGED
@@ -28,7 +28,7 @@ print(output["generated_text"])

## Training procedure

-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/themachinefan/gemma-innoc-finetune/runs/
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/themachinefan/gemma-innoc-finetune/runs/u3ctzb53)


This model was trained with SFT.
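The README links a Weights & Biases run and notes that the model was trained with SFT. As a rough, hedged sketch of how such a run could be set up with TRL and logged to that project (the base checkpoint, dataset, and run name below are assumptions, not recorded in this commit):

```python
# Hedged sketch only -- the base model, dataset, and hyperparameters below are
# placeholders; this commit does not record them.
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

train_dataset = load_dataset("json", data_files="train.jsonl", split="train")  # assumed dataset

args = SFTConfig(
    output_dir="gemma-innoc-finetune",
    report_to="wandb",                # produces a run like the one linked by the README badge
    run_name="gemma-innoc-finetune",  # assumed run name
)

trainer = SFTTrainer(
    model="google/gemma-2b",          # assumed base checkpoint; not named in this commit
    args=args,
    train_dataset=train_dataset,
)
trainer.train()
```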
adapter_config.json
CHANGED
@@ -33,13 +33,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
-    "
+    "q_proj",
    "v_proj",
    "gate_proj",
-    "
-    "q_proj",
+    "down_proj",
    "o_proj",
-    "
+    "k_proj",
+    "up_proj"
  ],
  "target_parameters": null,
  "task_type": "CAUSAL_LM",
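With this change the adapter targets all of Gemma's attention projections (q_proj, k_proj, v_proj, o_proj) and MLP projections (gate_proj, up_proj, down_proj). A minimal PEFT sketch of a LoraConfig matching the new list; only target_modules and task_type come from this file, while rank, alpha, and dropout are placeholders:

```python
# Hedged sketch: only target_modules and task_type come from the updated
# adapter_config.json; r, lora_alpha, and lora_dropout are placeholders.
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,               # assumed rank
    lora_alpha=32,      # assumed scaling factor
    lora_dropout=0.05,  # assumed dropout
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",  # attention projections
        "gate_proj", "up_proj", "down_proj",     # MLP projections
    ],
    task_type="CAUSAL_LM",
)
```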
adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1beb34b6d7632dbe493fe6582fea035c85ff0ec08843a1e133b4610fb1e65033
size 166182480
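adapter_model.safetensors is stored as a Git LFS pointer, so only the blob hash changes here; the adapter stays at about 166 MB. A hedged sketch of applying such an adapter on top of a base checkpoint (both repo ids below are placeholders, not taken from this commit):

```python
# Hedged sketch: both repo ids are placeholders, not read from this commit.
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("google/gemma-2b")               # assumed base model
model = PeftModel.from_pretrained(base, "your-username/your-adapter-repo")   # placeholder adapter repo
```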
tokenizer_config.json
CHANGED
@@ -2005,7 +2005,7 @@
  "extra_special_tokens": {},
  "model_max_length": 8192,
  "pad_token": "<pad>",
-  "padding_side": "
+  "padding_side": "right",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "GemmaTokenizer",
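Right padding is the common choice while training decoder-only models (left padding is usually preferred at generation time), which matches the SFT setup described in the README. A hedged sketch of loading the tokenizer with the same setting (the repo id is a placeholder):

```python
# Hedged sketch: the repo id is a placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "your-username/your-adapter-repo",  # placeholder repo id
    padding_side="right",               # matches the updated tokenizer_config.json
)
print(tokenizer.padding_side)  # -> "right"
```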
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:584bef55c65df354224064656aa57e99320a45f5eea1ea0b49ef1773aef51dd8
size 6289
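training_args.bin is the pickled TrainingArguments object that the Trainer saves alongside the checkpoint, also tracked via Git LFS. A short sketch of inspecting the hyperparameters it records (recent PyTorch versions need weights_only=False to unpickle it):

```python
# Hedged sketch: loads the pickled TrainingArguments saved next to the adapter.
import torch

args = torch.load("training_args.bin", weights_only=False)  # weights_only=False needed on recent torch
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```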