Hailay Kidu Teklehaymanot committed
Commit 666a104 · Parent(s): 47c8e6a
Initial commit of MachineT_TigEng model

Files changed:
- .gitattributes +2 -0
- README.md +71 -0
- config.json +55 -0
- generation_config.json +16 -0
- source.spm +3 -0
- special_tokens_map.json +5 -0
- target.spm +3 -0
- tokenizer_config.json +39 -0
- vocab.json +0 -0
.gitattributes CHANGED

@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+source.spm filter=lfs diff=lfs merge=lfs -text
+target.spm filter=lfs diff=lfs merge=lfs -text
README.md ADDED
# Machine Translation Model: English ↔ Tigrinya

This model is a fine-tuned machine translation model for translating between English and Tigrinya. It was trained on a parallel corpus of English and Tigrinya sentences.

## Model Overview

- **Model Type**: MarianMT (multilingual transformer model)
- **Languages**: English ↔ Tigrinya
- **Model Architecture**: MarianMT, fine-tuned for English ↔ Tigrinya translation
- **Training Framework**: Hugging Face Transformers, PyTorch

## Training Details

The key hyperparameters and logged metrics are listed below; a minimal reproduction sketch follows the list.

- **Training Dataset**: NLLB parallel corpus (English ↔ Tigrinya)
- **Training Epochs**: 3
- **Batch Size**: 8
- **Max Length**: 128 tokens
- **Learning Rate**: starts from `1.44e-07` and decays during training
- **Training Loss**:
  - Final training loss: 0.4756
  - Per-epoch loss:
    - Epoch 1: 0.443
    - Epoch 2: 0.4077
    - Epoch 3: 0.4379
- **Gradient Norms**:
  - Epoch 1: 1.14
  - Epoch 2: 1.11
  - Epoch 3: 1.06
- **Training Time**: 43,376.7 seconds (~12 hours)
- **Training Speed**:
  - Training samples per second: 96.7
  - Training steps per second: 12.08
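The sketch below is a minimal, hypothetical fine-tuning loop wired up with the hyperparameters reported above. It is not the author's actual training script: the README does not name the base checkpoint (the published model id is used as a stand-in), and a one-pair toy dataset stands in for the NLLB corpus; `output_dir` is illustrative.

```python
from datasets import Dataset
from transformers import (
    DataCollatorForSeq2Seq,
    MarianMTModel,
    MarianTokenizer,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
)

# Stand-in checkpoint: the published model itself; the README does not
# name the base model that was fine-tuned.
checkpoint = "Hailay/MachineT_TigEng"
tokenizer = MarianTokenizer.from_pretrained(checkpoint)
model = MarianMTModel.from_pretrained(checkpoint)

# Toy stand-in for the NLLB English-Tigrinya parallel corpus.
pairs = Dataset.from_dict({"en": ["Hello"], "ti": ["ሰላም"]})

def tokenize(batch):
    # max_length=128 matches the value reported above
    return tokenizer(batch["en"], text_target=batch["ti"],
                     max_length=128, truncation=True)

train = pairs.map(tokenize, batched=True, remove_columns=pairs.column_names)

args = Seq2SeqTrainingArguments(
    output_dir="machinet_tigeng",   # illustrative
    num_train_epochs=3,             # epochs reported above
    per_device_train_batch_size=8,  # batch size reported above
)

Seq2SeqTrainer(
    model=model,
    args=args,
    train_dataset=train,
    data_collator=DataCollatorForSeq2Seq(tokenizer, model=model),
).train()
```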
## Model Usage

This model can be used for translating English sentences to Tigrinya and vice versa.

### Example Usage (Python)
```python
from transformers import MarianMTModel, MarianTokenizer

# Load the model and tokenizer
model_name = "Hailay/MachineT_TigEng"
model = MarianMTModel.from_pretrained(model_name)
tokenizer = MarianTokenizer.from_pretrained(model_name)

# Translate an English sentence to Tigrinya
english_text = "We must obey the Lord and leave them alone"
encoded_input = tokenizer(english_text, return_tensors="pt", padding=True, truncation=True)
translated = model.generate(**encoded_input)
translated_text = tokenizer.decode(translated[0], skip_special_tokens=True)

print(f"Translated text: {translated_text}")
```
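Equivalently, the high-level `pipeline` API wraps the same load, tokenize, and generate steps in one call; a brief sketch reusing the repo id from the example above:

```python
from transformers import pipeline

# One-liner equivalent of the example above.
translator = pipeline("translation", model="Hailay/MachineT_TigEng")
print(translator("We must obey the Lord and leave them alone")[0]["translation_text"])
```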
## Model Card

This model is trained to handle general English to Tigrinya translation tasks. It is suitable for a wide range of text, but might not perform well on domain-specific language or specialized terminology unless fine-tuned further.

## Model Architecture

The model is based on the MarianMT architecture, a transformer model designed for multilingual machine translation. It has been fine-tuned on English ↔ Tigrinya data.

## Acknowledgements

- Corpus name: NLLB
- Package: NLLB.am-en in Moses format
- Website: NLLB Corpus

If you use this model or the NLLB corpus in your work, please cite it as follows:
config.json ADDED

{
  "_num_labels": 3,
  "activation_dropout": 0.0,
  "activation_function": "swish",
  "add_bias_logits": false,
  "add_final_layer_norm": false,
  "architectures": [
    "MarianMTModel"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 0,
  "classif_dropout": 0.0,
  "classifier_dropout": 0.0,
  "d_model": 512,
  "decoder_attention_heads": 8,
  "decoder_ffn_dim": 2048,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 6,
  "decoder_start_token_id": 63049,
  "decoder_vocab_size": 63050,
  "dropout": 0.1,
  "encoder_attention_heads": 8,
  "encoder_ffn_dim": 2048,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 6,
  "eos_token_id": 0,
  "forced_eos_token_id": 0,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2"
  },
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2
  },
  "max_length": null,
  "max_position_embeddings": 512,
  "model_type": "marian",
  "normalize_before": false,
  "normalize_embedding": false,
  "num_beams": null,
  "num_hidden_layers": 6,
  "pad_token_id": 63049,
  "scale_embedding": true,
  "share_encoder_decoder_embeddings": true,
  "static_position_embeddings": true,
  "torch_dtype": "float32",
  "transformers_version": "4.51.3",
  "use_cache": true,
  "vocab_size": 63050
}
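A quick way to sanity-check these values without reading the raw JSON is to load the configuration through the Transformers API; a minimal sketch, assuming the repo id used in the README:

```python
from transformers import AutoConfig

# Load the configuration above directly from the Hub and read a few fields.
config = AutoConfig.from_pretrained("Hailay/MachineT_TigEng")
print(config.model_type)                             # marian
print(config.d_model)                                # 512
print(config.encoder_layers, config.decoder_layers)  # 6 6
print(config.vocab_size)                             # 63050
```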
generation_config.json ADDED

{
  "bad_words_ids": [
    [
      63049
    ]
  ],
  "bos_token_id": 0,
  "decoder_start_token_id": 63049,
  "eos_token_id": 0,
  "forced_eos_token_id": 0,
  "max_length": 512,
  "num_beams": 6,
  "pad_token_id": 63049,
  "renormalize_logits": true,
  "transformers_version": "4.51.3"
}
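These defaults are applied automatically whenever `model.generate()` runs; passing the same values explicitly is equivalent. A minimal sketch, assuming the repo id used in the README:

```python
from transformers import MarianMTModel, MarianTokenizer

model_name = "Hailay/MachineT_TigEng"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

inputs = tokenizer("We must obey the Lord and leave them alone",
                   return_tensors="pt")
# num_beams and max_length mirror generation_config.json above.
outputs = model.generate(**inputs, num_beams=6, max_length=512)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```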
source.spm ADDED

version https://git-lfs.github.com/spec/v1
oid sha256:06c7982b8faa707054b6d5ee3a6831ee9d91413e5508bb19f152619d4c0b74cb
size 972082
special_tokens_map.json ADDED

{
  "eos_token": "</s>",
  "pad_token": "<pad>",
  "unk_token": "<unk>"
}
target.spm ADDED

version https://git-lfs.github.com/spec/v1
oid sha256:4d163fc242604fed1a55fa9932322050f94b737b59ef2d8c14e1506e3d227251
size 818925
tokenizer_config.json ADDED

{
  "added_tokens_decoder": {
    "0": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "63049": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "extra_special_tokens": {},
  "model_max_length": 512,
  "pad_token": "<pad>",
  "separate_vocabs": false,
  "source_lang": "ti",
  "sp_model_kwargs": {},
  "target_lang": "en",
  "tokenizer_class": "MarianTokenizer",
  "unk_token": "<unk>"
}
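The special-token ids declared in `added_tokens_decoder` above can be verified from the loaded tokenizer; a minimal sketch, assuming the repo id used in the README:

```python
from transformers import MarianTokenizer

tokenizer = MarianTokenizer.from_pretrained("Hailay/MachineT_TigEng")

# Ids mirror added_tokens_decoder in the file above.
print(tokenizer.eos_token, tokenizer.eos_token_id)  # </s> 0
print(tokenizer.unk_token, tokenizer.unk_token_id)  # <unk> 1
print(tokenizer.pad_token, tokenizer.pad_token_id)  # <pad> 63049
print(tokenizer.model_max_length)                   # 512
```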
vocab.json ADDED

The diff for this file is too large to render. See raw diff.