Toadoum committed on
Commit
fc2745f
·
verified ·
1 Parent(s): f48541c

Toadoum/nllb-200-yoruba-english

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: peft
3
+ license: cc-by-nc-4.0
4
+ base_model: facebook/nllb-200-3.3B
5
+ tags:
6
+ - base_model:adapter:facebook/nllb-200-3.3B
7
+ - lora
8
+ - transformers
9
+ metrics:
10
+ - bleu
11
+ model-index:
12
+ - name: eng_yor_unidirection
13
+ results: []
14
+ ---
15
+
16
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
17
+ should probably proofread and complete it, then remove this comment. -->
18
+
19
+ # eng_yor_unidirection
20
+
21
+ This model is a fine-tuned version of [facebook/nllb-200-3.3B](https://huggingface.co/facebook/nllb-200-3.3B) on an unknown dataset.
22
+ It achieves the following results on the evaluation set:
23
+ - Loss: 1.6342
24
+ - Bleu: 17.1195
25
+ - Gen Len: 46.6583
26
+
27
+ ## Model description
28
+
29
+ More information needed
30
+
31
+ ## Intended uses & limitations
32
+
33
+ More information needed
34
+
35
+ ## Training and evaluation data
36
+
37
+ More information needed
38
+
39
+ ## Training procedure
40
+
41
+ ### Training hyperparameters
42
+
43
+ The following hyperparameters were used during training:
44
+ - learning_rate: 1e-05
45
+ - train_batch_size: 1
46
+ - eval_batch_size: 1
47
+ - seed: 42
48
+ - gradient_accumulation_steps: 8
49
+ - total_train_batch_size: 8
50
+ - optimizer: Use OptimizerNames.ADAMW_TORCH_FUSED with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
51
+ - lr_scheduler_type: linear
52
+ - num_epochs: 5
53
+ - mixed_precision_training: Native AMP
54
+
55
+ ### Training results
56
+
57
+ | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
58
+ |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|
59
+ | 5.3266 | 1.0 | 3441 | 1.7253 | 15.867 | 46.9732 |
60
+ | 2.0388 | 2.0 | 6882 | 1.6761 | 16.4011 | 47.0961 |
61
+ | 3.3007 | 3.0 | 10323 | 1.6516 | 16.6914 | 46.9297 |
62
+ | 2.1298 | 4.0 | 13764 | 1.6385 | 17.0557 | 46.9065 |
63
+ | 2.8537 | 5.0 | 17205 | 1.6342 | 17.1195 | 46.6583 |
64
+
65
+
66
+ ### Framework versions
67
+
68
+ - PEFT 0.18.1
69
+ - Transformers 5.3.0
70
+ - Pytorch 2.10.0+cu128
71
+ - Datasets 4.6.1
72
+ - Tokenizers 0.22.2
adapter_config.json ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "facebook/nllb-200-3.3B",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 32,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.1,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.1",
27
+ "qalora_group_size": 16,
28
+ "r": 8,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "q_proj",
33
+ "v_proj"
34
+ ],
35
+ "target_parameters": null,
36
+ "task_type": "SEQ_2_SEQ_LM",
37
+ "trainable_token_indices": null,
38
+ "use_dora": false,
39
+ "use_qalora": false,
40
+ "use_rslora": false
41
+ }
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53995575ece4918e3f9be6a3003bd1ceed693c8de94bbe759ae53eb6ce60fde2
3
+ size 18915136
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca9c1a8f115baac9f292390dedd60d039d238a7eb4c10b3f962b2c87062d7e63
3
+ size 32240234
tokenizer_config.json ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "backend": "tokenizers",
3
+ "bos_token": "<s>",
4
+ "cls_token": "<s>",
5
+ "eos_token": "</s>",
6
+ "extra_special_tokens": [
7
+ "ace_Arab",
8
+ "ace_Latn",
9
+ "acm_Arab",
10
+ "acq_Arab",
11
+ "aeb_Arab",
12
+ "afr_Latn",
13
+ "ajp_Arab",
14
+ "aka_Latn",
15
+ "amh_Ethi",
16
+ "apc_Arab",
17
+ "arb_Arab",
18
+ "ars_Arab",
19
+ "ary_Arab",
20
+ "arz_Arab",
21
+ "asm_Beng",
22
+ "ast_Latn",
23
+ "awa_Deva",
24
+ "ayr_Latn",
25
+ "azb_Arab",
26
+ "azj_Latn",
27
+ "bak_Cyrl",
28
+ "bam_Latn",
29
+ "ban_Latn",
30
+ "bel_Cyrl",
31
+ "bem_Latn",
32
+ "ben_Beng",
33
+ "bho_Deva",
34
+ "bjn_Arab",
35
+ "bjn_Latn",
36
+ "bod_Tibt",
37
+ "bos_Latn",
38
+ "bug_Latn",
39
+ "bul_Cyrl",
40
+ "cat_Latn",
41
+ "ceb_Latn",
42
+ "ces_Latn",
43
+ "cjk_Latn",
44
+ "ckb_Arab",
45
+ "crh_Latn",
46
+ "cym_Latn",
47
+ "dan_Latn",
48
+ "deu_Latn",
49
+ "dik_Latn",
50
+ "dyu_Latn",
51
+ "dzo_Tibt",
52
+ "ell_Grek",
53
+ "eng_Latn",
54
+ "epo_Latn",
55
+ "est_Latn",
56
+ "eus_Latn",
57
+ "ewe_Latn",
58
+ "fao_Latn",
59
+ "pes_Arab",
60
+ "fij_Latn",
61
+ "fin_Latn",
62
+ "fon_Latn",
63
+ "fra_Latn",
64
+ "fur_Latn",
65
+ "fuv_Latn",
66
+ "gla_Latn",
67
+ "gle_Latn",
68
+ "glg_Latn",
69
+ "grn_Latn",
70
+ "guj_Gujr",
71
+ "hat_Latn",
72
+ "hau_Latn",
73
+ "heb_Hebr",
74
+ "hin_Deva",
75
+ "hne_Deva",
76
+ "hrv_Latn",
77
+ "hun_Latn",
78
+ "hye_Armn",
79
+ "ibo_Latn",
80
+ "ilo_Latn",
81
+ "ind_Latn",
82
+ "isl_Latn",
83
+ "ita_Latn",
84
+ "jav_Latn",
85
+ "jpn_Jpan",
86
+ "kab_Latn",
87
+ "kac_Latn",
88
+ "kam_Latn",
89
+ "kan_Knda",
90
+ "kas_Arab",
91
+ "kas_Deva",
92
+ "kat_Geor",
93
+ "knc_Arab",
94
+ "knc_Latn",
95
+ "kaz_Cyrl",
96
+ "kbp_Latn",
97
+ "kea_Latn",
98
+ "khm_Khmr",
99
+ "kik_Latn",
100
+ "kin_Latn",
101
+ "kir_Cyrl",
102
+ "kmb_Latn",
103
+ "kon_Latn",
104
+ "kor_Hang",
105
+ "kmr_Latn",
106
+ "lao_Laoo",
107
+ "lvs_Latn",
108
+ "lij_Latn",
109
+ "lim_Latn",
110
+ "lin_Latn",
111
+ "lit_Latn",
112
+ "lmo_Latn",
113
+ "ltg_Latn",
114
+ "ltz_Latn",
115
+ "lua_Latn",
116
+ "lug_Latn",
117
+ "luo_Latn",
118
+ "lus_Latn",
119
+ "mag_Deva",
120
+ "mai_Deva",
121
+ "mal_Mlym",
122
+ "mar_Deva",
123
+ "min_Latn",
124
+ "mkd_Cyrl",
125
+ "plt_Latn",
126
+ "mlt_Latn",
127
+ "mni_Beng",
128
+ "khk_Cyrl",
129
+ "mos_Latn",
130
+ "mri_Latn",
131
+ "zsm_Latn",
132
+ "mya_Mymr",
133
+ "nld_Latn",
134
+ "nno_Latn",
135
+ "nob_Latn",
136
+ "npi_Deva",
137
+ "nso_Latn",
138
+ "nus_Latn",
139
+ "nya_Latn",
140
+ "oci_Latn",
141
+ "gaz_Latn",
142
+ "ory_Orya",
143
+ "pag_Latn",
144
+ "pan_Guru",
145
+ "pap_Latn",
146
+ "pol_Latn",
147
+ "por_Latn",
148
+ "prs_Arab",
149
+ "pbt_Arab",
150
+ "quy_Latn",
151
+ "ron_Latn",
152
+ "run_Latn",
153
+ "rus_Cyrl",
154
+ "sag_Latn",
155
+ "san_Deva",
156
+ "sat_Beng",
157
+ "scn_Latn",
158
+ "shn_Mymr",
159
+ "sin_Sinh",
160
+ "slk_Latn",
161
+ "slv_Latn",
162
+ "smo_Latn",
163
+ "sna_Latn",
164
+ "snd_Arab",
165
+ "som_Latn",
166
+ "sot_Latn",
167
+ "spa_Latn",
168
+ "als_Latn",
169
+ "srd_Latn",
170
+ "srp_Cyrl",
171
+ "ssw_Latn",
172
+ "sun_Latn",
173
+ "swe_Latn",
174
+ "swh_Latn",
175
+ "szl_Latn",
176
+ "tam_Taml",
177
+ "tat_Cyrl",
178
+ "tel_Telu",
179
+ "tgk_Cyrl",
180
+ "tgl_Latn",
181
+ "tha_Thai",
182
+ "tir_Ethi",
183
+ "taq_Latn",
184
+ "taq_Tfng",
185
+ "tpi_Latn",
186
+ "tsn_Latn",
187
+ "tso_Latn",
188
+ "tuk_Latn",
189
+ "tum_Latn",
190
+ "tur_Latn",
191
+ "twi_Latn",
192
+ "tzm_Tfng",
193
+ "uig_Arab",
194
+ "ukr_Cyrl",
195
+ "umb_Latn",
196
+ "urd_Arab",
197
+ "uzn_Latn",
198
+ "vec_Latn",
199
+ "vie_Latn",
200
+ "war_Latn",
201
+ "wol_Latn",
202
+ "xho_Latn",
203
+ "ydd_Hebr",
204
+ "yor_Latn",
205
+ "yue_Hant",
206
+ "zho_Hans",
207
+ "zho_Hant",
208
+ "zul_Latn"
209
+ ],
210
+ "is_local": false,
211
+ "legacy_behaviour": false,
212
+ "mask_token": "<mask>",
213
+ "model_max_length": 1024,
214
+ "pad_token": "<pad>",
215
+ "sep_token": "</s>",
216
+ "sp_model_kwargs": {},
217
+ "src_lang": "eng_Latn",
218
+ "tgt_lang": "yor_Latn",
219
+ "tokenizer_class": "NllbTokenizer",
220
+ "unk_token": "<unk>"
221
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6a1e7d599a03d7610cae916bf9c9f36720059461b1235f23cd941ae258eaf4c
3
+ size 5329