unknown167943 committed
Commit 8bde28f · verified · 1 Parent(s): 67cb50e

Model save

README.md CHANGED
@@ -1,6 +1,5 @@
 ---
 base_model: HuggingFaceTB/SmolLM2-135M-Instruct
-datasets: louisbrulenaudet/legalkit
 library_name: transformers
 model_name: judge.bro
 tags:
@@ -12,7 +11,7 @@ licence: license
 
 # Model Card for judge.bro
 
-This model is a fine-tuned version of [HuggingFaceTB/SmolLM2-135M-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct) on the [louisbrulenaudet/legalkit](https://huggingface.co/datasets/louisbrulenaudet/legalkit) dataset.
+This model is a fine-tuned version of [HuggingFaceTB/SmolLM2-135M-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
@@ -28,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/arnouxmartin0-universit-savoie-mont-blanc/huggingface/runs/xy85ewdh)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/arnouxmartin0-universit-savoie-mont-blanc/huggingface/runs/fwz1sb7a)
 
 
 This model was trained with SFT.
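
The `print(output["generated_text"])` line visible in the last hunk header comes from the README's Quick start block, which this diff does not touch. For context, a minimal sketch of that usage following the standard TRL model-card template; the repo id `<user>/judge.bro` is a placeholder, not this model's actual Hub path:

```python
# Sketch of the README's Quick start, assuming the standard TRL template.
# "<user>/judge.bro" is a placeholder for the model's real Hub repo id.
from transformers import pipeline

generator = pipeline("text-generation", model="<user>/judge.bro")
question = [{"role": "user", "content": "What does this model do?"}]
output = generator(question, max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```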
final_checkpoint/adapter_config.json CHANGED
@@ -12,21 +12,21 @@
 "layers_pattern": null,
 "layers_to_transform": null,
 "loftq_config": {},
-"lora_alpha": 32,
+"lora_alpha": 64,
 "lora_bias": false,
 "lora_dropout": 0.05,
 "megatron_config": null,
 "megatron_core": "megatron.core",
 "modules_to_save": null,
 "peft_type": "LORA",
-"r": 16,
+"r": 32,
 "rank_pattern": {},
 "revision": null,
 "target_modules": [
+"v_proj",
 "k_proj",
 "o_proj",
-"q_proj",
-"v_proj"
+"q_proj"
 ],
 "task_type": "CAUSAL_LM",
 "use_dora": false,
final_checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:257f583a1fa00d811716fd810f2f41801193cf959abc0013e831d53756497ccc
-size 7404368
+oid sha256:76fb2afa026ff0450c5601bdda9e27585810738a672d8c1eb0a895ca0a04148c
+size 14777360
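
What is versioned here is a Git LFS pointer (oid and size), not the weights themselves. A minimal sketch of loading the updated adapter from a local clone with LFS files pulled, assuming `final_checkpoint/` targets the base model named in the README:

```python
# Minimal sketch: attach the final_checkpoint adapter to its base model.
# Assumes a local clone of this repository with `git lfs pull` done;
# paths are relative to the clone root.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/SmolLM2-135M-Instruct")
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-135M-Instruct")
model = PeftModel.from_pretrained(base, "final_checkpoint")  # reads adapter_config.json + adapter_model.safetensors
```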
final_checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1cb2d7a037c030c43b32fc6deaab955aa2e848cf64b6d8525fb34caeae80fae4
+oid sha256:6c4b91911aff3b9bc62c8fcdcafde276d0c74aac5fc940bd8981b85d7e39f4b2
 size 5688