williamplacroix committed · verified
Commit 4e06c79 · 1 Parent(s): 554c103

Finished finetuning grade 6

README.md CHANGED
@@ -16,11 +16,12 @@ should probably proofread and complete it, then remove this comment. -->
 [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/uds/Graded%20text%20simplification%20training/runs/s2qn3a6p)
 [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/uds/Graded%20text%20simplification%20training/runs/qz6551rc)
 [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/uds/Graded%20text%20simplification%20training/runs/lm55kzij)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/uds/Graded%20text%20simplification%20training/runs/nifhcrxt)
 # text-simplification
 
 This model is a fine-tuned version of [openai-community/gpt2](https://huggingface.co/openai-community/gpt2) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.5203
+- Loss: 0.4966
 
 ## Model description
 
@@ -51,8 +52,10 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| 0.5464 | 1.0 | 775 | 0.5197 |
-| 0.5402 | 2.0 | 1550 | 0.5203 |
+| 0.5345 | 1.0 | 1429 | 0.4962 |
+| 0.5295 | 2.0 | 2858 | 0.4955 |
+| 0.526 | 3.0 | 4287 | 0.4938 |
+| 0.5234 | 4.0 | 5716 | 0.4966 |
 
 
 ### Framework versions
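
The updated card reports the best validation loss at epoch 3 (0.4938), with the final epoch at 0.4966. A minimal inference sketch for the resulting checkpoint, assuming the adapter loads from the `gpt2-grade-6-finetuned` directory committed here and that plain prompting works (the card does not document a prompt format):

```python
# Minimal inference sketch. Assumptions: the adapter lives under
# "gpt2-grade-6-finetuned" in this repo, and no special prompt
# format is required (the card does not document one).
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "openai-community/gpt2"
tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id)

# Attach the LoRA adapter weights on top of the frozen GPT-2 base.
model = PeftModel.from_pretrained(base, "gpt2-grade-6-finetuned")
model.eval()

inputs = tokenizer("The committee reached a unanimous decision.", return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=40,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```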
gpt2-grade-6-finetuned/adapter_config.json CHANGED
@@ -3,6 +3,8 @@
   "auto_mapping": null,
   "base_model_name_or_path": "openai-community/gpt2",
   "bias": "none",
+  "eva_config": null,
+  "exclude_modules": null,
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
@@ -11,6 +13,7 @@
   "layers_to_transform": null,
   "loftq_config": {},
   "lora_alpha": 32,
+  "lora_bias": false,
   "lora_dropout": 0.01,
   "megatron_config": null,
   "megatron_core": "megatron.core",
@@ -20,10 +23,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "c_proj",
-    "c_attn",
     "lm_head",
-    "c_fc"
+    "c_proj",
+    "c_fc",
+    "c_attn"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
gpt2-grade-6-finetuned/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:db4dfb1d006ccf2ef00c486f335f312da00050a6ef6d277ee3350dc45677d7fb
-size 83581271
+oid sha256:b580abb3245554e7cf41c13eaa4b9a2514944850fab04c032209dd4033e8a129
+size 160776023
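
Both `.bin` files are Git LFS pointers, so the diff records only the payload's sha256 oid and size; the adapter payload grew from 83,581,271 to 160,776,023 bytes. A sketch for checking a downloaded file against the new pointer:

```python
# Sketch: verify a downloaded adapter_model.bin against the sha256
# oid recorded in the new LFS pointer above.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "b580abb3245554e7cf41c13eaa4b9a2514944850fab04c032209dd4033e8a129"
actual = sha256_of("gpt2-grade-6-finetuned/adapter_model.bin")
assert actual == expected, f"hash mismatch: {actual}"
```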
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6541f06e89dc26ef2d9baa29792b43a2ddc3dbe3b5ff119019273887c9d4332c
+oid sha256:58257b67a6f74d22ae4b935e2e85a8c390512a3eada62376979f705295c8fc9d
 size 5496