Update README.md
Browse files
README.md
CHANGED
|
@@ -44,134 +44,100 @@ Sequence to Sequence Model for Transliterating Romanised Malayalam (Manglish)
|
|
| 44 |
- **Repository:** https://github.com/VRCLC-DUK/ml-en-transliteration
|
| 45 |
- **Paper:** https://arxiv.org/abs/2412.09957
|
| 46 |
- **Demo:** https://huggingface.co/spaces/vrclc/en-ml-transliteration
|
|
|
|
| 47 |
|
| 48 |
## How to Get Started with the Model
|
| 49 |
|
| 50 |
-
The model needs to have an
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
[More Information Needed]
|
| 144 |
-
|
| 145 |
-
#### Software
|
| 146 |
-
|
| 147 |
-
[More Information Needed]
|
| 148 |
-
|
| 149 |
-
## Citation [optional]
|
| 150 |
-
|
| 151 |
-
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
|
| 152 |
-
|
| 153 |
-
**BibTeX:**
|
| 154 |
-
|
| 155 |
-
[More Information Needed]
|
| 156 |
-
|
| 157 |
-
**APA:**
|
| 158 |
-
|
| 159 |
-
[More Information Needed]
|
| 160 |
-
|
| 161 |
-
## Glossary [optional]
|
| 162 |
-
|
| 163 |
-
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
|
| 164 |
-
|
| 165 |
-
[More Information Needed]
|
| 166 |
-
|
| 167 |
-
## More Information [optional]
|
| 168 |
-
|
| 169 |
-
[More Information Needed]
|
| 170 |
-
|
| 171 |
-
## Model Card Authors [optional]
|
| 172 |
-
|
| 173 |
-
[More Information Needed]
|
| 174 |
-
|
| 175 |
-
## Model Card Contact
|
| 176 |
-
|
| 177 |
-
[More Information Needed]
|
|
|
|
| 44 |
- **Repository:** https://github.com/VRCLC-DUK/ml-en-transliteration
|
| 45 |
- **Paper:** https://arxiv.org/abs/2412.09957
|
| 46 |
- **Demo:** https://huggingface.co/spaces/vrclc/en-ml-transliteration
|
| 47 |
+
- Developed as a shared task submission to [INDONLP Workshop](https://indonlp-workshop.github.io/IndoNLP-Workshop/) at [COLING 2025](https://coling2025.org//), Abu Dhabi.
|
| 48 |
|
| 49 |
## How to Get Started with the Model
|
| 50 |
|
| 51 |
+
The model needs user-defined tokenizers for the source and target scripts. The model is trained on words. If your use case involves transliterating full sentences, split the sentences into words before passing them to the model.
|
| 52 |
+
|
| 53 |
+
### Load Dependencies
|
| 54 |
+
```
|
| 55 |
+
import keras
|
| 56 |
+
import huggingface_hub
|
| 57 |
+
import tensorflow as tf
|
| 58 |
+
import numpy as np
|
| 59 |
+
from tensorflow.keras.preprocessing.text import Tokenizer
|
| 60 |
+
from tensorflow.keras.preprocessing.sequence import pad_sequences
|
| 61 |
+
from huggingface_hub import from_pretrained_keras
|
| 62 |
+
import re
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
### Load Model
|
| 66 |
+
```
|
| 67 |
+
model = from_pretrained_keras("vrclc/transliteration")
|
| 68 |
+
```
|
| 69 |
+
|
| 70 |
+
### Define Tokens:
|
| 71 |
+
|
| 72 |
+
```
|
| 73 |
+
source_tokens = list('abcdefghijklmnopqrstuvwxyz ')
|
| 74 |
+
source_tokenizer = Tokenizer(char_level=True, filters='')
|
| 75 |
+
source_tokenizer.fit_on_texts(source_tokens)
|
| 76 |
+
|
| 77 |
+
target_tokens = [
|
| 78 |
+
# Independent vowels
|
| 79 |
+
'അ', 'ആ', 'ഇ', 'ഈ', 'ഉ', 'ഊ', 'ഋ', 'ൠ', 'ഌ', 'ൡ', 'എ', 'ഏ', 'ഐ', 'ഒ', 'ഓ', 'ഔ',
|
| 80 |
+
# Consonants
|
| 81 |
+
'ക', 'ഖ', 'ഗ', 'ഘ', 'ങ', 'ച', 'ഛ', 'ജ', 'ഝ', 'ഞ',
|
| 82 |
+
'ട', 'ഠ', 'ഡ', 'ഢ', 'ണ', 'ത', 'ഥ', 'ദ', 'ധ', 'ന',
|
| 83 |
+
'പ', 'ഫ', 'ബ', 'ഭ', 'മ', 'യ', 'ര', 'ല', 'വ', 'ശ',
|
| 84 |
+
'ഷ', 'സ', 'ഹ', 'ള', 'ഴ', 'റ',
|
| 85 |
+
# Chillu letters
|
| 86 |
+
'ൺ', 'ൻ', 'ർ', 'ൽ', 'ൾ',
|
| 87 |
+
# Additional characters
|
| 88 |
+
'ം', 'ഃ', '്',
|
| 89 |
+
# Vowel modifiers / Signs
|
| 90 |
+
'ാ', 'ി', 'ീ', 'ു', 'ൂ', 'ൃ', 'ൄ', 'െ', 'േ', 'ൈ', 'ൊ', 'ോ', 'ൌ', 'ൗ', ' '
|
| 91 |
+
]
|
| 92 |
+
target_tokenizer = Tokenizer(char_level=True, filters='')
|
| 93 |
+
target_tokenizer.fit_on_texts(target_tokens)
|
| 94 |
+
```
|
| 95 |
+
|
| 96 |
+
### Wrapper script to split input sentences to words before passing to the model
|
| 97 |
+
|
| 98 |
+
```
|
| 99 |
+
def transliterate_with_split_tokens(input_text, model, source_tokenizer, target_tokenizer, max_seq_length):
|
| 100 |
+
"""
|
| 101 |
+
Transliterates input text in roman script, retains all other characters (including punctuation, spaces, etc.)
|
| 102 |
+
"""
|
| 103 |
+
# Regular expression to split the text into tokens and non-tokens
|
| 104 |
+
tokens_and_non_tokens = re.findall(r"([a-zA-Z]+)|([^a-zA-Z]+)", input_text)
|
| 105 |
+
|
| 106 |
+
transliterated_text = ""
|
| 107 |
+
for token_or_non_token in tokens_and_non_tokens:
|
| 108 |
+
token = token_or_non_token[0]
|
| 109 |
+
non_token = token_or_non_token[1]
|
| 110 |
+
|
| 111 |
+
if token:
|
| 112 |
+
input_sequence = source_tokenizer.texts_to_sequences([token])[0]
|
| 113 |
+
input_sequence_padded = pad_sequences([input_sequence], maxlen=max_seq_length, padding='post')
|
| 114 |
+
predicted_sequence = model.predict(input_sequence_padded)
|
| 115 |
+
predicted_indices = np.argmax(predicted_sequence, axis=-1)[0]
|
| 116 |
+
transliterated_word = ''.join([target_tokenizer.index_word[idx] for idx in predicted_indices if idx != 0])
|
| 117 |
+
transliterated_text += transliterated_word
|
| 118 |
+
elif non_token:
|
| 119 |
+
transliterated_text += non_token
|
| 120 |
+
|
| 121 |
+
return transliterated_text
|
| 122 |
+
```
|
| 123 |
+
|
| 124 |
+
### Usage
|
| 125 |
+
```
|
| 126 |
+
input_text = "ente veedu"
|
| 127 |
+
transliterated_text = transliterate_with_split_tokens(input_text, model, source_tokenizer, target_tokenizer, max_seq_length)
|
| 128 |
+
|
| 129 |
+
print(transliterated_text)
|
| 130 |
+
```
|
| 131 |
+
[More Information Needed]
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
## Citation
|
| 135 |
+
|
| 136 |
+
```
|
| 137 |
+
@article{baiju2024romanized,
|
| 138 |
+
title={Romanized to Native Malayalam Script Transliteration Using an Encoder-Decoder Framework},
|
| 139 |
+
author={Baiju, Bajiyo and Pillai, Leena G and Manohar, Kavya and Sherly, Elizabeth},
|
| 140 |
+
journal={arXiv preprint arXiv:2412.09957},
|
| 141 |
+
year={2024}
|
| 142 |
+
}
|
| 143 |
+
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|