rayymaxx committed · Commit 1205a20 · verified · 1 Parent(s): 403475f

Training in progress, epoch 1

Files changed (3):
  1. README.md +1 -1
  2. adapter_config.json +5 -5
  3. adapter_model.safetensors +1 -1
README.md CHANGED
@@ -28,7 +28,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/rayymondodhiambo-direct-ed/huggingface/runs/5f7565my)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/rayymondodhiambo-direct-ed/huggingface/runs/41vprhri)
 
 
 This model was trained with SFT.
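The README context above ends with `print(output["generated_text"])`, which matches the output format of a `transformers` text-generation pipeline. As a minimal sketch (not part of this commit), the adapter could be loaded for inference roughly as follows; the repo id `rayymaxx/<adapter-repo>` is a placeholder, not the actual Hub path:

```python
# Minimal inference sketch, assuming transformers + peft are installed and the
# adapter repo id is known. "rayymaxx/<adapter-repo>" is a placeholder.
from transformers import pipeline

generator = pipeline("text-generation", model="rayymaxx/<adapter-repo>")
output = generator("Hello, world")[0]        # first generation result
print(output["generated_text"])              # matches the README snippet above
```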
adapter_config.json CHANGED
@@ -25,13 +25,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
-    "down_proj",
-    "gate_proj",
     "v_proj",
-    "o_proj",
+    "gate_proj",
+    "down_proj",
+    "up_proj",
+    "q_proj",
     "k_proj",
-    "up_proj"
+    "o_proj"
   ],
   "target_parameters": null,
   "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2cc03a608251f6d0ffcc835447e7d664184f87ff7e9c1cd4c3345ace61215886
+oid sha256:d6c56e57bf5d0c060a87493716036c9007fec8de9f45d50a8c2d20d8b03be7ca
 size 83945296
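The safetensors change is only a Git LFS pointer update: the file keeps the same 83,945,296-byte size but gets a new content hash. A small sketch for checking a locally downloaded `adapter_model.safetensors` against this pointer (hash and size are taken from the diff above):

```python
# Verify a downloaded adapter_model.safetensors against the LFS pointer in this commit.
import hashlib
from pathlib import Path

EXPECTED_OID = "d6c56e57bf5d0c060a87493716036c9007fec8de9f45d50a8c2d20d8b03be7ca"
EXPECTED_SIZE = 83945296

data = Path("adapter_model.safetensors").read_bytes()
assert len(data) == EXPECTED_SIZE, f"size mismatch: {len(data)}"
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("adapter file matches the LFS pointer")
```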