Upload 3 files
- Ewondo_NER_Dataset.ipynb +566 -0
- Ewondo_Tokenizer_Test.ipynb +339 -0
- Successful_Ewondo_Bible_Scarping_Python.ipynb +0 -0
Ewondo_NER_Dataset.ipynb
ADDED
Cell 1 (code):

```python
import pandas as pd

# Load the Excel file containing Ewondo Bible sentences
file_path = '/content/Bible_EN_EWO.xlsx'
df = pd.read_excel(file_path)

# The sentences are in the column named 'Bible text (EWO)'
sentences_column = 'Bible text (EWO)'

# Extract the sentences and store them in a list
ewondo_sentences = df[sentences_column].dropna().tolist()

print(f"Loaded {len(ewondo_sentences)} ewondo sentences.")
```

Output:

```
Loaded 7944 ewondo sentences.
```
Cell 2 (code):

```python
# Define your dictionaries of named entities

person_names = [
    'Yesus', 'Yesus Kristus',  # Jesus / Jesus Christ
    'David', 'Isaak',          # David, Isaac
    'Yakòb',                   # Jacob
    'Yuda',                    # Judah
    'Yosef',                   # Joseph
    'Elias',                   # Elijah
    'Yohannes',                # John
    'Simun',                   # Simon
    'Andreas',                 # Andrew
    'Bartolomeus',             # Bartholomew
    'Tomas',                   # Thomas
    'Filip',                   # Philip
    'Petrus',                  # Peter
    'Paulus',                  # Paul
    'Timoteus',                # Timothy
    'Mikael',                  # Michael
    'Moses',                   # Moses
    'Maria Magdalena',         # Mary Magdalene
    'Maria',                   # Mary (mother of Jesus)
    'Marta'                    # Martha
]

locations = [
    'Yerusalem',          # Jerusalem
    'Betleem',            # Bethlehem
    'Galilea',            # Galilee
    'Yordan',             # Jordan
    'Golgota',            # Golgotha
    'Kafarnao',           # Capernaum
    'Nazaret',            # Nazareth
    'Yeriko',             # Jericho
    'Sodoma ai Gomorra'   # Sodom and Gomorrah
]

organizations = [
    'Bëfarisea',    # Pharisees
    'Bësadukea',    # Sadducees
    'Bëyègè',       # Disciples
    'Tempel',       # The Temple
    'Tesalonika',   # Thessalonica
    'Italien',      # Italy
    'Masedonia',    # Macedonia
    'Roma'          # Rome
]

# Combine the named entities into a dictionary with their respective types
entity_dict = {name: 'PER' for name in person_names}
entity_dict.update({loc: 'LOC' for loc in locations})
entity_dict.update({org: 'ORG' for org in organizations})

# Print the result
print(f"Created a dictionary of {len(entity_dict)} named entities.")
```

Output:

```
Created a dictionary of 39 named entities.
```
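Before annotating, it can be worth checking how often each gazetteer entry actually occurs in the corpus, since an entity that never appears contributes nothing to the dataset. A minimal sketch, assuming `ewondo_sentences` and `entity_dict` from the cells above; this check is not part of the original notebook:

```python
from collections import Counter

# Count raw string occurrences of each entity across the corpus.
coverage = Counter()
for sentence in ewondo_sentences:
    for entity in entity_dict:
        if entity in sentence:
            coverage[entity] += 1

# Entities that never occur would be silent no-ops during annotation.
missing = [e for e in entity_dict if coverage[e] == 0]
print(coverage.most_common(10))
print(f"{len(missing)} entities never appear verbatim: {missing}")
```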
Cell 3 (code):

```python
import pandas as pd
from tokenizers import Tokenizer, models, normalizers, pre_tokenizers, trainers, processors
from transformers import BertTokenizerFast

# Load the Ewondo dataset
def load_dataset(filepath):
    df = pd.read_excel(filepath)
    ewondo_sentences = df["Bible text (EWO)"].tolist()
    return ewondo_sentences

# Define Ewondo consonants, vowels, tones, and other special characters
ewondo_consonants = [
    'p', 'b', 't', 'd', 'ʈ', 'ɖ', 'c', 'ɟ', 'k', 'g', 'q', 'ɢ',
    'ʔ', 'm', 'ɱ', 'n', 'ɳ', 'ɲ', 'ŋ', 'ɴ', 'ʙ', 'r', 'ʀ',
    'ɾ', 'ɽ', 'ɸ', 'β', 'f', 'v', 'θ', 'ð', 's', 'z', 'ʃ',
    'ʒ', 'ʂ', 'ʐ', 'ç', 'ʝ', 'x', 'ɣ', 'χ', 'ʁ', 'ħ', 'ʕ',
    'h', 'ɦ', 'ɬ', 'ɮ', 'ʋ', 'ɹ', 'ɻ', 'j', 'ɰ', 'l', 'ɭ',
    'ʎ', 'ʟ', 'ƥ', 'ɓ', 'ƭ', 'ɗ', 'ƈ', 'ʄ', 'ƙ', 'ɠ', 'ʠ',
    'ʛ'
]

ewondo_vowels = [
    'i', 'y', 'ɨ', 'ʉ', 'ɯ', 'u', 'ɪ', 'ʏ', 'ʊ', 'e', 'ø',
    'ɘ', 'ɵ', 'ɤ', 'ə', 'ɛ', 'œ', 'ɜ', 'ɞ', 'ʌ', 'ɔ',
    'æ', 'ɐ', 'a', 'ɶ', 'ɑ', 'ɒ'
]

ewondo_tones = ['́', '̀', '̂', '̃', '̄']
other_special_characters = ["...", "-", "—", "–", "_", "(", ")", "[", "]", "<", ">", " "]

# Combine special tokens
special_tokens = ["[UNK]", "[PAD]", "[CLS]", "[SEP]", "[MASK]"] + \
                 ewondo_consonants + ewondo_vowels + ewondo_tones + other_special_characters

# Train a BERT-style WordPiece tokenizer from scratch for the Ewondo language
def train_bert_tokenizer(filepath):
    # Load sentences from the dataset
    ewondo_sentences = load_dataset(filepath)

    tokenizer = Tokenizer(models.WordPiece(unk_token="[UNK]"))

    # 1. Normalization
    tokenizer.normalizer = normalizers.Sequence([
        normalizers.NFD(),       # Decomposes characters
        normalizers.Lowercase()  # Lowercases the text
    ])

    # 2. Pre-Tokenization
    tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()

    # 3. Model Training
    trainer = trainers.WordPieceTrainer(vocab_size=25000, special_tokens=special_tokens)
    tokenizer.train_from_iterator(ewondo_sentences, trainer=trainer)

    # 4. Post-Processing
    cls_token_id = tokenizer.token_to_id("[CLS]")
    sep_token_id = tokenizer.token_to_id("[SEP]")

    tokenizer.post_processor = processors.TemplateProcessing(
        single="[CLS]:0 $A:0 [SEP]:0",
        pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
        special_tokens=[
            ("[CLS]", cls_token_id),
            ("[SEP]", sep_token_id),
        ],
    )

    # Wrap the tokenizer inside Transformers for easy use
    bert_tokenizer = BertTokenizerFast(tokenizer_object=tokenizer)
    return bert_tokenizer

# Train the Ewondo tokenizer
ewondo_bert_tokenizer = train_bert_tokenizer('/content/Bible_EN_EWO.xlsx')

# Save the tokenizer in /content
save_directory = '/content/ewondo_bert_tokenizer'
ewondo_bert_tokenizer.save_pretrained(save_directory)

# Example: Test tokenization on sample sentences
sample_sentences = [
    "Kalara mvòṅ bod Yesus Kristus, man David, man Abraham.",
    "Abraham abye Isaak, Isaak ny'abye Yakòb, Yakòb abye Yuda ai babënyaṅ.",
    "Yuda abye Farès ban Zara, ai Tamar, Farès abye Esrom, Esrom abye Aram.",
    "Aram abye Aminadab, Aminadab abye Naason, Naason abye Salmon.",
    "Salmon abye Boaz ai Rahab, Boaz abye Yobèd ai Ruth, Yobèd abye Yesse.",
    "Yesse nyè abye nkukuma David. David abye Salomon ai mininga Uria.",
    "Salomon abye Roboam, Roboam abye Abia, Abia abye Asaf.",
    "Asaf abye Yosafat, Yosafat abye Yoram, Yoram abye Ozia.",
    "Ozia abye Yoatam, Yoatam abye Akas, Akas abye Ezekias.",
    "Ezekias abye Manasse, Manasse abye Amos, Amos abye Yosia."
]

# Test tokenizer on sample sentences
for sentence in sample_sentences:
    tokens = ewondo_bert_tokenizer.tokenize(sentence)
    print(f"Original Sentence: {sentence}")
    print(f"Tokens: {tokens}\n")

# Evaluate the Tokenizer
vocab_size = len(ewondo_bert_tokenizer.get_vocab())
print(f"Vocabulary Size: {vocab_size}")

# Measure tokenization efficiency
def calculate_tokenization_efficiency(tokenizer, sentences):
    total_tokens = 0
    total_sentences = len(sentences)

    for sentence in sentences:
        encoding = tokenizer(sentence)
        total_tokens += len(encoding['input_ids'])  # Count the number of tokens for each sentence

    avg_tokens_per_sentence = total_tokens / total_sentences
    print(f"Average tokens per sentence: {avg_tokens_per_sentence:.2f}")

# Test tokenization efficiency on sample sentences
calculate_tokenization_efficiency(ewondo_bert_tokenizer, sample_sentences)

# Calculate the Out-of-Vocabulary (OOV) rate
def calculate_oov_rate(tokenizer, sentences):
    oov_count = 0
    total_tokens = 0

    for sentence in sentences:
        encoding = tokenizer(sentence)
        total_tokens += len(encoding['input_ids'])
        oov_count += encoding['input_ids'].count(tokenizer.unk_token_id)

    oov_rate = (oov_count / total_tokens) * 100
    print(f"OOV Rate: {oov_rate:.2f}%")

# Evaluate the OOV rate
calculate_oov_rate(ewondo_bert_tokenizer, sample_sentences)

# Test decoding accuracy
sentence = "Ezekias abye Manasse, Manasse abye Amos, Amos abye Yosia."
encoded = ewondo_bert_tokenizer(sentence)['input_ids']
decoded_sentence = ewondo_bert_tokenizer.decode(encoded)

print(f"Original Sentence: {sentence}")
print(f"Decoded Sentence: {decoded_sentence}")
```

Output (stderr):

```
/usr/local/lib/python3.10/dist-packages/transformers/tokenization_utils_base.py:1601: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be depracted in transformers v4.45, and will be then set to `False` by default. For more details check this issue: https://github.com/huggingface/transformers/issues/31884
  warnings.warn(
```

Output (stdout):

```
Original Sentence: Kalara mvòṅ bod Yesus Kristus, man David, man Abraham.
Tokens: ['k', 'a', 'l', 'a', 'r', 'a', ' ', 'm', 'v', 'òṅ', ' ', 'b', 'o', 'd', ' ', 'y', 'e', 's', 'u', 's', ' ', 'k', 'r', 'i', 's', 't', 'u', 's', ',', ' ', 'm', 'a', 'n', ' ', 'd', 'a', 'v', 'i', 'd', ',', ' ', 'm', 'a', 'n', ' ', 'a', 'b', 'r', 'a', 'h', 'a', 'm', '.']

Original Sentence: Abraham abye Isaak, Isaak ny'abye Yakòb, Yakòb abye Yuda ai babënyaṅ.
Tokens: ['a', 'b', 'r', 'a', 'h', 'a', 'm', ' ', 'a', 'b', 'y', 'e', ' ', 'i', 's', 'a', 'a', 'k', ',', ' ', 'i', 's', 'a', 'a', 'k', ' ', 'n', 'y', "'", 'a', 'b', 'y', 'e', ' ', 'y', 'a', 'k', 'ò', 'b', ',', ' ', 'y', 'a', 'k', 'ò', 'b', ' ', 'a', 'b', 'y', 'e', ' ', 'y', 'u', 'd', 'a', ' ', 'a', 'i', ' ', 'b', 'a', 'b', 'ë', 'n', 'y', 'a', 'ṅ', '.']

Original Sentence: Yuda abye Farès ban Zara, ai Tamar, Farès abye Esrom, Esrom abye Aram.
Tokens: ['y', 'u', 'd', 'a', ' ', 'a', 'b', 'y', 'e', ' ', 'f', 'a', 'r', 'e', '##̀', 's', ' ', 'b', 'a', 'n', ' ', 'z', 'a', 'r', 'a', ',', ' ', 'a', 'i', ' ', 't', 'a', 'm', 'a', 'r', ',', ' ', 'f', 'a', 'r', 'e', '##̀', 's', ' ', 'a', 'b', 'y', 'e', ' ', 'e', 's', 'r', 'o', 'm', ',', ' ', 'e', 's', 'r', 'o', 'm', ' ', 'a', 'b', 'y', 'e', ' ', 'a', 'r', 'a', 'm', '.']

Original Sentence: Aram abye Aminadab, Aminadab abye Naason, Naason abye Salmon.
Tokens: ['a', 'r', 'a', 'm', ' ', 'a', 'b', 'y', 'e', ' ', 'a', 'm', 'i', 'n', 'a', 'd', 'a', 'b', ',', ' ', 'a', 'm', 'i', 'n', 'a', 'd', 'a', 'b', ' ', 'a', 'b', 'y', 'e', ' ', 'n', 'a', 'a', 's', 'o', 'n', ',', ' ', 'n', 'a', 'a', 's', 'o', 'n', ' ', 'a', 'b', 'y', 'e', ' ', 's', 'a', 'l', 'm', 'o', 'n', '.']

Original Sentence: Salmon abye Boaz ai Rahab, Boaz abye Yobèd ai Ruth, Yobèd abye Yesse.
Tokens: ['s', 'a', 'l', 'm', 'o', 'n', ' ', 'a', 'b', 'y', 'e', ' ', 'bo', 'a', 'z', ' ', 'a', 'i', ' ', 'r', 'a', 'h', 'a', 'b', ',', ' ', 'bo', 'a', 'z', ' ', 'a', 'b', 'y', 'e', ' ', 'yo', 'b', 'e', '##̀', 'd', ' ', 'a', 'i', ' ', 'r', 'u', 't', 'h', ',', ' ', 'yo', 'b', 'e', '##̀', 'd', ' ', 'a', 'b', 'y', 'e', ' ', 'y', 'e', 's', 's', 'e', '.']

Original Sentence: Yesse nyè abye nkukuma David. David abye Salomon ai mininga Uria.
Tokens: ['y', 'e', 's', 's', 'e', ' ', 'n', 'y', 'e', '##̀', ' ', 'a', 'b', 'y', 'e', ' ', 'n', 'k', 'u', 'k', 'u', 'm', 'a', ' ', 'd', 'a', 'v', 'i', 'd', '.', ' ', 'd', 'a', 'v', 'i', 'd', ' ', 'a', 'b', 'y', 'e', ' ', 's', 'a', 'l', 'o', 'm', 'o', 'n', ' ', 'a', 'i', ' ', 'm', 'i', 'n', 'i', 'n', 'g', 'a', ' ', 'u', 'r', 'i', 'a', '.']

Original Sentence: Salomon abye Roboam, Roboam abye Abia, Abia abye Asaf.
Tokens: ['s', 'a', 'l', 'o', 'm', 'o', 'n', ' ', 'a', 'b', 'y', 'e', ' ', 'r', '##o', 'b', 'o', 'a', 'm', ',', ' ', 'r', '##o', 'b', 'o', 'a', 'm', ' ', 'a', 'b', 'y', 'e', ' ', 'a', 'b', 'i', 'a', ',', ' ', 'a', 'b', 'i', 'a', ' ', 'a', 'b', 'y', 'e', ' ', 'a', 's', 'a', 'f', '.']

Original Sentence: Asaf abye Yosafat, Yosafat abye Yoram, Yoram abye Ozia.
Tokens: ['a', 's', 'a', 'f', ' ', 'a', 'b', 'y', 'e', ' ', 'yo', 's', 'a', 'f', 'a', 't', ',', ' ', 'yo', 's', 'a', 'f', 'a', 't', ' ', 'a', 'b', 'y', 'e', ' ', 'yo', 'r', 'a', 'm', ',', ' ', 'yo', 'r', 'a', 'm', ' ', 'a', 'b', 'y', 'e', ' ', 'o', 'z', 'i', 'a', '.']

Original Sentence: Ozia abye Yoatam, Yoatam abye Akas, Akas abye Ezekias.
Tokens: ['o', 'z', 'i', 'a', ' ', 'a', 'b', 'y', 'e', ' ', 'yo', 'a', 't', 'a', 'm', ',', ' ', 'yo', 'a', 't', 'a', 'm', ' ', 'a', 'b', 'y', 'e', ' ', 'a', 'k', 'a', 's', ',', ' ', 'a', 'k', 'a', 's', ' ', 'a', 'b', 'y', 'e', ' ', 'e', 'z', 'e', 'k', 'i', 'a', 's', '.']

Original Sentence: Ezekias abye Manasse, Manasse abye Amos, Amos abye Yosia.
Tokens: ['e', 'z', 'e', 'k', 'i', 'a', 's', ' ', 'a', 'b', 'y', 'e', ' ', 'm', 'a', 'n', 'a', 's', 's', 'e', ',', ' ', 'm', 'a', 'n', 'a', 's', 's', 'e', ' ', 'a', 'b', 'y', 'e', ' ', 'a', 'm', 'o', 's', ',', ' ', 'a', 'm', 'o', 's', ' ', 'a', 'b', 'y', 'e', ' ', 'yo', 's', 'i', 'a', '.']

Vocabulary Size: 10230
Average tokens per sentence: 62.10
OOV Rate: 0.00%
Original Sentence: Ezekias abye Manasse, Manasse abye Amos, Amos abye Yosia.
Decoded Sentence: [CLS] e z e k i a s a b y e m a n a s s e, m a n a s s e a b y e a m o s, a m o s a b y e yo s i a. [SEP]
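The token dumps above show heavy character-level splitting. One plausible contributor is that every consonant, vowel, tone mark, and even the space character is registered as a special token, and special tokens are matched verbatim in the raw text before normal WordPiece segmentation can apply. A minimal sketch of a comparison run, assuming `ewondo_sentences` from Cell 1 is in memory; this variant is not part of the original notebook:

```python
from tokenizers import Tokenizer, models, normalizers, pre_tokenizers, trainers

# Hypothetical variant for comparison: same pipeline, but without
# pre-seeding every character (and the space) as a special token.
alt_tokenizer = Tokenizer(models.WordPiece(unk_token="[UNK]"))
alt_tokenizer.normalizer = normalizers.Sequence([normalizers.NFD(), normalizers.Lowercase()])
alt_tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()

alt_trainer = trainers.WordPieceTrainer(
    vocab_size=25000,
    special_tokens=["[UNK]", "[PAD]", "[CLS]", "[SEP]", "[MASK]"],  # only the standard five
)
alt_tokenizer.train_from_iterator(ewondo_sentences, trainer=alt_trainer)

# Longer subwords should now appear in the output.
print(alt_tokenizer.encode("Kalara mvòṅ bod Yesus Kristus, man David, man Abraham.").tokens)
```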
Cell 4 (code):

```python
import pandas as pd
from transformers import AutoTokenizer

# Load the locally saved Ewondo tokenizer
ewondo_bert_tokenizer = AutoTokenizer.from_pretrained("/content/ewondo_bert_tokenizer")

# Function to annotate sentences with BIO tags
def annotate_sentence(sentence, entity_dict):
    tokens = ewondo_bert_tokenizer.tokenize(sentence)
    bio_tags = ['O'] * len(tokens)

    for entity, entity_type in entity_dict.items():
        entity_tokens = ewondo_bert_tokenizer.tokenize(entity)
        entity_length = len(entity_tokens)

        # Find entity in sentence and assign BIO tags
        for i in range(len(tokens) - entity_length + 1):
            if tokens[i:i+entity_length] == entity_tokens:
                bio_tags[i] = f'B-{entity_type}'
                for j in range(1, entity_length):
                    bio_tags[i+j] = f'I-{entity_type}'

    return tokens, bio_tags

# Load the Bible sentences from the Excel file
file_path = '/content/Bible_EN_EWO.xlsx'
df = pd.read_excel(file_path)

# The Ewondo sentences are in the 'Bible text (EWO)' column
ewondo_sentences = df['Bible text (EWO)'].tolist()

# Apply annotation to the Ewondo sentences
annotated_corpus = []
for sentence in ewondo_sentences:
    tokens, bio_tags = annotate_sentence(sentence, entity_dict)
    annotated_corpus.append({"tokens": tokens, "ner_tags": bio_tags})

annotated_df = pd.DataFrame(annotated_corpus)

# Save the annotated dataset as an Excel file
excel_file_path = '/content/annotated_bible_dataset.xlsx'
annotated_df.to_excel(excel_file_path, index=False)

print(f"Annotated {len(annotated_corpus)} sentences.")
```

Output:

```
Annotated 7944 sentences.
```
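To sanity-check the annotation, it helps to print tokens and tags side by side for one sentence. A small illustrative sketch, assuming the cell above has run; it reuses `annotate_sentence` and `entity_dict`, and takes one of the sample sentences from Cell 3:

```python
# Inspect one annotated sentence token by token.
tokens, tags = annotate_sentence(
    "Kalara mvòṅ bod Yesus Kristus, man David, man Abraham.", entity_dict
)
for token, tag in zip(tokens, tags):
    if tag != 'O':
        print(f"{token!r}: {tag}")  # show only the tokens inside tagged entities
```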
Cell 5 (code):

```python
pip install datasets
```

Output:

```
Collecting datasets
  Downloading datasets-3.0.1-py3-none-any.whl.metadata (20 kB)
Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from datasets) (3.16.1)
Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from datasets) (1.26.4)
Requirement already satisfied: pyarrow>=15.0.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (16.1.0)
Collecting dill<0.3.9,>=0.3.0 (from datasets)
  Downloading dill-0.3.8-py3-none-any.whl.metadata (10 kB)
Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from datasets) (2.2.2)
Requirement already satisfied: requests>=2.32.2 in /usr/local/lib/python3.10/dist-packages (from datasets) (2.32.3)
Requirement already satisfied: tqdm>=4.66.3 in /usr/local/lib/python3.10/dist-packages (from datasets) (4.66.5)
Collecting xxhash (from datasets)
  Downloading xxhash-3.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (12 kB)
Collecting multiprocess (from datasets)
  Downloading multiprocess-0.70.17-py310-none-any.whl.metadata (7.2 kB)
Requirement already satisfied: fsspec<=2024.6.1,>=2023.1.0 in /usr/local/lib/python3.10/dist-packages (from fsspec[http]<=2024.6.1,>=2023.1.0->datasets) (2024.6.1)
Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from datasets) (3.10.9)
Requirement already satisfied: huggingface-hub>=0.22.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (0.24.7)
Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from datasets) (24.1)
Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from datasets) (6.0.2)
Requirement already satisfied: aiohappyeyeballs>=2.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (2.4.3)
Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (1.3.1)
Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (24.2.0)
Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (1.4.1)
Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (6.1.0)
Requirement already satisfied: yarl<2.0,>=1.12.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (1.13.1)
Requirement already satisfied: async-timeout<5.0,>=4.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (4.0.3)
Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub>=0.22.0->datasets) (4.12.2)
Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.32.2->datasets) (3.3.2)
Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.32.2->datasets) (3.10)
Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.32.2->datasets) (2.2.3)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.32.2->datasets) (2024.8.30)
INFO: pip is looking at multiple versions of multiprocess to determine which version is compatible with other requirements. This could take a while.
  Downloading multiprocess-0.70.16-py310-none-any.whl.metadata (7.2 kB)
Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.10/dist-packages (from pandas->datasets) (2.8.2)
Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->datasets) (2024.2)
Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.10/dist-packages (from pandas->datasets) (2024.2)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.8.2->pandas->datasets) (1.16.0)
Downloading datasets-3.0.1-py3-none-any.whl (471 kB)
  471.6/471.6 kB 8.0 MB/s eta 0:00:00
Downloading dill-0.3.8-py3-none-any.whl (116 kB)
  116.3/116.3 kB 10.1 MB/s eta 0:00:00
Downloading multiprocess-0.70.16-py310-none-any.whl (134 kB)
  134.8/134.8 kB 10.1 MB/s eta 0:00:00
Downloading xxhash-3.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (194 kB)
  194.1/194.1 kB 15.0 MB/s eta 0:00:00
Installing collected packages: xxhash, dill, multiprocess, datasets
Successfully installed datasets-3.0.1 dill-0.3.8 multiprocess-0.70.16 xxhash-3.5.0
```
Cell 6 (code):

```python
import pandas as pd
from sklearn.model_selection import train_test_split
from datasets import Dataset, DatasetDict

# Split into train (80%), validation (10%), and test (10%)
train_data, temp_data = train_test_split(annotated_corpus, test_size=0.2, random_state=42)
validation_data, test_data = train_test_split(temp_data, test_size=0.5, random_state=42)

# Create Hugging Face DatasetDict
train_dataset = Dataset.from_dict({"tokens": [item['tokens'] for item in train_data], "ner_tags": [item['ner_tags'] for item in train_data]})
validation_dataset = Dataset.from_dict({"tokens": [item['tokens'] for item in validation_data], "ner_tags": [item['ner_tags'] for item in validation_data]})
test_dataset = Dataset.from_dict({"tokens": [item['tokens'] for item in test_data], "ner_tags": [item['ner_tags'] for item in test_data]})

dataset = DatasetDict({
    "train": train_dataset,
    "validation": validation_dataset,
    "test": test_dataset
})

print(f"Created Hugging Face dataset: {dataset}")

# Convert datasets to DataFrames
train_df = pd.DataFrame({"tokens": train_dataset["tokens"], "ner_tags": train_dataset["ner_tags"]})
validation_df = pd.DataFrame({"tokens": validation_dataset["tokens"], "ner_tags": validation_dataset["ner_tags"]})
test_df = pd.DataFrame({"tokens": test_dataset["tokens"], "ner_tags": test_dataset["ner_tags"]})

# Save the train, validation, and test sets as Excel files
train_file_path = '/content/train_dataset.xlsx'
validation_file_path = '/content/validation_dataset.xlsx'
test_file_path = '/content/test_dataset.xlsx'

train_df.to_excel(train_file_path, index=False)
validation_df.to_excel(validation_file_path, index=False)
test_df.to_excel(test_file_path, index=False)

print(f"Train dataset saved to '{train_file_path}'")
print(f"Validation dataset saved to '{validation_file_path}'")
print(f"Test dataset saved to '{test_file_path}'")
```

Output:

```
Created Hugging Face dataset: DatasetDict({
    train: Dataset({
        features: ['tokens', 'ner_tags'],
        num_rows: 6355
    })
    validation: Dataset({
        features: ['tokens', 'ner_tags'],
        num_rows: 794
    })
    test: Dataset({
        features: ['tokens', 'ner_tags'],
        num_rows: 795
    })
})
Train dataset saved to '/content/train_dataset.xlsx'
Validation dataset saved to '/content/validation_dataset.xlsx'
Test dataset saved to '/content/test_dataset.xlsx'
```
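The splits store `ner_tags` as BIO strings, while most token-classification trainers expect integer label ids, so a conversion step is usually needed before training. A minimal sketch, assuming `annotated_corpus` and `dataset` from the cells above; the `labels` column name is just a convention, not part of the original notebook:

```python
# Build a label inventory from the tags that actually occur, then map
# each BIO string to an integer id in a new "labels" column.
label_list = sorted({tag for item in annotated_corpus for tag in item["ner_tags"]})
label2id = {label: i for i, label in enumerate(label_list)}
id2label = {i: label for label, i in label2id.items()}

dataset = dataset.map(
    lambda example: {"labels": [label2id[tag] for tag in example["ner_tags"]]}
)

print(label_list)
print(dataset["train"][0]["labels"][:10])
```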
Ewondo_Tokenizer_Test.ipynb
ADDED
Cell 1 (code): the entire snippet is wrapped in a triple-quoted string, so running the cell only echoes the source back as a string (the recorded output is that echo, which duplicates the code below):

```python
"""import pandas as pd

def load_dataset(file_path):
    # Load the Ewondo sentences from the Excel file
    df = pd.read_excel(file_path)
    ewondo_sentences = df['Ewondo'].tolist()

    # Phonetic data and additional info
    phonetic_data = {
        "alphabet": [
            'Alpha', 'a', 'b', 'd', 'e', 'ə', 'f', 'g', 'i', 'k', 'l',
            'm', 'n', 'ŋ', 'o', 'ɔ', 's', 't', 'u', 'v', 'w', 'y', 'z'
        ],
        "consonants": [
            'p', 'b', 't', 'd', 'ʈ', 'ɖ', 'c', 'ɟ', 'k', 'g', 'q', 'ɢ',
            'ʔ', 'm', 'ɱ', 'n', 'ɳ', 'ɲ', 'ŋ', 'ɴ', 'ʙ', 'r', 'ʀ',
            'ɾ', 'ɽ', 'ɸ', 'β', 'f', 'v', 'θ', 'ð', 's', 'z', 'ʃ',
            'ʒ', 'ʂ', 'ʐ', 'ç', 'ʝ', 'x', 'ɣ', 'χ', 'ʁ', 'ħ', 'ʕ',
            'h', 'ɦ', 'ɬ', 'ɮ', 'ʋ', 'ɹ', 'ɻ', 'j', 'ɰ', 'l', 'ɭ',
            'ʎ', 'ʟ', 'ƥ', 'ɓ', 'ƭ', 'ɗ', 'ƈ', 'ʄ', 'ƙ', 'ɠ', 'ʠ',
            'ʛ'
        ],
        "vowels": [
            'i', 'y', 'ɨ', 'ʉ', 'ɯ', 'u', 'ɪ', 'ʏ', 'ʊ', 'e', 'ø',
            'ɘ', 'ɵ', 'ɤ', 'ə', 'ɛ', 'œ', 'ɜ', 'ɞ', 'ʌ', 'ɔ',
            'æ', 'ɐ', 'a', 'ɶ', 'ɑ', 'ɒ'
        ],
        "numerals": {
            "0": "zəzə",
            "1": "fɔ́g",
            "2": "bɛ̄",
            "3": "lɛ́",
            "4": "nyii",
            "5": "tán",
            "6": "saman",
            "7": "zəmgbál",
            "8": "moom",
            "9": "ebûl",
            "10": "awôn",
            "11": "awôn ai mbɔ́g",
            "12": "awôn ai bɛ̄bɛ̄ɛ̄",
            "13": "awôn ai bɛ̄lɛ́",
            "14": "awôn ai bɛ̄nyii",
            "15": "awôn ai bɛ̄tán",
            "16": "awôn ai saman",
            "17": "awôn ai zəmgbál",
            "18": "awôn ai moom",
            "19": "awôn ai ebûl",
            # Include more numerals here if needed
        }
    }

    return ewondo_sentences, phonetic_data

# Example usage
file_path = "/content/alphabet_and_numbers.xlsx"
ewondo_sentences, phonetic_data = load_dataset(file_path)

# Access the data
print(ewondo_sentences)
print(phonetic_data)"""
```
Cell 2 (code): identical to the tokenizer-training cell (Cell 3) of Ewondo_NER_Dataset.ipynb above, except that this earlier test version does not save the tokenizer to disk (no `save_pretrained` step).

Output: the same FutureWarning and per-sentence token dumps as in that cell, ending with:

```
Vocabulary Size: 10228
Average tokens per sentence: 62.10
OOV Rate: 0.00%
Original Sentence: Ezekias abye Manasse, Manasse abye Amos, Amos abye Yosia.
Decoded Sentence: [CLS] e z e k i a s a b y e m a n a s s e, m a n a s s e a b y e a m o s, a m o s a b y e yo s i a. [SEP]
```

(The learned vocabulary size differs slightly between the two runs: 10,228 here versus 10,230 above.)
Cell 3 (markdown):

# Explanation of Results

*Vocabulary Size:* **10,228**

Vocabulary size is the number of unique tokens the tokenizer knows. With 10,228 entries learned from roughly 7,900 sentences, the tokenizer covers a wide range of Ewondo words and subwords, which is a solid base for recognizing and processing text.

*Average Tokens per Sentence:* **62.10**

This tells us how many pieces (tokens) the tokenizer breaks each sentence into, on average. 62.10 tokens per sentence is high: as the token dumps above show, most words are split down to single characters. Fine-grained splits can capture nuances of the language, but they also make sequences long and more expensive to process.

*Out-of-Vocabulary (OOV) Rate:* **0.00%**

The OOV rate is the share of tokens the tokenizer could not recognize (i.e. mapped to [UNK]). A rate of 0.00% means every word in the sample sentences could be represented. Note, though, that because every single character was seeded into the vocabulary, the tokenizer can always fall back to character-level splits, so a near-zero OOV rate is largely guaranteed by construction.

*Original Sentence:*

The sentence used for testing is:
"Ezekias abye Manasse, Manasse abye Amos, Amos abye Yosia." This is a real example from the Ewondo dataset, and it shows how the tokenizer behaves in practice.

*Decoded Sentence:*

The decoded version looks like this:
[CLS] e z e k i a s a b y e m a n a s s e, m a n a s s e a b y e a m o s, a m o s a b y e yo s i a. [SEP]
The [CLS] and [SEP] tokens are markers telling the model where the sequence starts and ends. The remaining tokens show how the words were split, here almost entirely into single characters.

# Conclusion
Overall, these results are promising: the pipeline runs end to end, the tokenizer has a sizeable vocabulary, represents every sample sentence without [UNK] tokens, and round-trips text through encode and decode. The main caveat is the heavy character-level fragmentation (about 62 tokens per sentence), which is worth reducing before the tokenizer is used for downstream training.
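As a complement to the averages above, tokens per word (sometimes called fertility) removes sentence length from the picture and measures fragmentation directly. A minimal sketch, assuming the `ewondo_bert_tokenizer` and `sample_sentences` defined in Cell 2 above; the helper name is ours, not part of the original notebook:

```python
# Fertility = subword tokens produced per whitespace-separated word.
# A value near 1 means mostly whole-word tokens; large values mean
# heavy fragmentation.
def fertility(tokenizer, sentences):
    n_words = sum(len(s.split()) for s in sentences)
    n_tokens = sum(len(tokenizer.tokenize(s)) for s in sentences)
    return n_tokens / n_words

print(f"Tokens per word: {fertility(ewondo_bert_tokenizer, sample_sentences):.2f}")
```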
Successful_Ewondo_Bible_Scarping_Python.ipynb
ADDED
The diff for this file is too large to render.
See raw diff