Upload Arabic_QG_Pipeline.ipynb
Browse files- Arabic_QG_Pipeline.ipynb +810 -0
Arabic_QG_Pipeline.ipynb
ADDED
|
@@ -0,0 +1,810 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": null,
|
| 6 |
+
"id": "a4080da7",
|
| 7 |
+
"metadata": {
|
| 8 |
+
"id": "a4080da7"
|
| 9 |
+
},
|
| 10 |
+
"outputs": [],
|
| 11 |
+
"source": [
|
| 12 |
+
"!pip install -q stanza transformers sentencepiece torch sentence-transformers arabert pyarabic yake bert-score python-bidi"
|
| 13 |
+
]
|
| 14 |
+
},
|
| 15 |
+
{
|
| 16 |
+
"cell_type": "code",
|
| 17 |
+
"execution_count": null,
|
| 18 |
+
"id": "0b9b3227",
|
| 19 |
+
"metadata": {
|
| 20 |
+
"id": "0b9b3227"
|
| 21 |
+
},
|
| 22 |
+
"outputs": [],
|
| 23 |
+
"source": [
|
| 24 |
+
"import re\n",
|
| 25 |
+
"import difflib\n",
|
| 26 |
+
"import numpy as np\n",
|
| 27 |
+
"import torch\n",
|
| 28 |
+
"import pyarabic.araby as araby\n",
|
| 29 |
+
"import stanza\n",
|
| 30 |
+
"from transformers import AutoTokenizer, AutoModel\n",
|
| 31 |
+
"from sentence_transformers import SentenceTransformer, util\n",
|
| 32 |
+
"import arabert.preprocess\n",
|
| 33 |
+
"import yake\n",
|
| 34 |
+
"from bert_score import score as bertscore\n",
|
| 35 |
+
"from sentence_transformers import util\n",
|
| 36 |
+
"\n",
|
| 37 |
+
"torch.set_grad_enabled(False)\n",
|
| 38 |
+
"\n",
|
| 39 |
+
"ARAELECTRA_NAME = \"aubmindlab/araelectra-base-discriminator\"\n",
|
| 40 |
+
"SBERT_MODEL = \"sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2\"\n",
|
| 41 |
+
"QG_MODEL = \"Mihakram/AraT5-base-question-generation\"\n",
|
| 42 |
+
"print(\"ARAELECTRA_NAME:\", ARAELECTRA_NAME)\n",
|
| 43 |
+
"print(\"SBERT_MODEL:\", SBERT_MODEL)\n",
|
| 44 |
+
"print(\"QG_MODEL:\", QG_MODEL)\n"
|
| 45 |
+
]
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"cell_type": "code",
|
| 49 |
+
"execution_count": null,
|
| 50 |
+
"id": "1ecb47e2",
|
| 51 |
+
"metadata": {
|
| 52 |
+
"colab": {
|
| 53 |
+
"base_uri": "https://localhost:8080/"
|
| 54 |
+
},
|
| 55 |
+
"id": "1ecb47e2",
|
| 56 |
+
"outputId": "0e68e4f5-e34e-482b-d77b-c8cfd165db31"
|
| 57 |
+
},
|
| 58 |
+
"outputs": [
|
| 59 |
+
{
|
| 60 |
+
"data": {
|
| 61 |
+
"application/vnd.jupyter.widget-view+json": {
|
| 62 |
+
"model_id": "49432146f2b74114be2ae0548515d574",
|
| 63 |
+
"version_major": 2,
|
| 64 |
+
"version_minor": 0
|
| 65 |
+
},
|
| 66 |
+
"text/plain": [
|
| 67 |
+
"Downloading https://raw.githubusercontent.com/stanfordnlp/stanza-resources/main/resources_1.10.0.json: 0%| …"
|
| 68 |
+
]
|
| 69 |
+
},
|
| 70 |
+
"metadata": {},
|
| 71 |
+
"output_type": "display_data"
|
| 72 |
+
},
|
| 73 |
+
{
|
| 74 |
+
"metadata": {
|
| 75 |
+
"tags": null
|
| 76 |
+
},
|
| 77 |
+
"name": "stderr",
|
| 78 |
+
"output_type": "stream",
|
| 79 |
+
"text": [
|
| 80 |
+
"INFO:stanza:Downloaded file to /root/stanza_resources/resources.json\n",
|
| 81 |
+
"INFO:stanza:Downloading default packages for language: ar (Arabic) ...\n"
|
| 82 |
+
]
|
| 83 |
+
},
|
| 84 |
+
{
|
| 85 |
+
"data": {
|
| 86 |
+
"application/vnd.jupyter.widget-view+json": {
|
| 87 |
+
"model_id": "b2db9589812540deb72baf8c80312dbf",
|
| 88 |
+
"version_major": 2,
|
| 89 |
+
"version_minor": 0
|
| 90 |
+
},
|
| 91 |
+
"text/plain": [
|
| 92 |
+
"Downloading https://huggingface.co/stanfordnlp/stanza-ar/resolve/v1.10.0/models/default.zip: 0%| | …"
|
| 93 |
+
]
|
| 94 |
+
},
|
| 95 |
+
"metadata": {},
|
| 96 |
+
"output_type": "display_data"
|
| 97 |
+
},
|
| 98 |
+
{
|
| 99 |
+
"metadata": {
|
| 100 |
+
"tags": null
|
| 101 |
+
},
|
| 102 |
+
"name": "stderr",
|
| 103 |
+
"output_type": "stream",
|
| 104 |
+
"text": [
|
| 105 |
+
"INFO:stanza:Downloaded file to /root/stanza_resources/ar/default.zip\n",
|
| 106 |
+
"INFO:stanza:Finished downloading models and saved to /root/stanza_resources\n",
|
| 107 |
+
"INFO:stanza:Checking for updates to resources.json in case models have been updated. Note: this behavior can be turned off with download_method=None or download_method=DownloadMethod.REUSE_RESOURCES\n"
|
| 108 |
+
]
|
| 109 |
+
},
|
| 110 |
+
{
|
| 111 |
+
"data": {
|
| 112 |
+
"application/vnd.jupyter.widget-view+json": {
|
| 113 |
+
"model_id": "a7acd3cf34cc4fe8aa89fcfe87adbd66",
|
| 114 |
+
"version_major": 2,
|
| 115 |
+
"version_minor": 0
|
| 116 |
+
},
|
| 117 |
+
"text/plain": [
|
| 118 |
+
"Downloading https://raw.githubusercontent.com/stanfordnlp/stanza-resources/main/resources_1.10.0.json: 0%| …"
|
| 119 |
+
]
|
| 120 |
+
},
|
| 121 |
+
"metadata": {},
|
| 122 |
+
"output_type": "display_data"
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"metadata": {
|
| 126 |
+
"tags": null
|
| 127 |
+
},
|
| 128 |
+
"name": "stderr",
|
| 129 |
+
"output_type": "stream",
|
| 130 |
+
"text": [
|
| 131 |
+
"INFO:stanza:Downloaded file to /root/stanza_resources/resources.json\n",
|
| 132 |
+
"WARNING:stanza:Language ar package default expects mwt, which has been added\n"
|
| 133 |
+
]
|
| 134 |
+
}
|
| 135 |
+
],
|
| 136 |
+
"source": [
|
| 137 |
+
"stanza.download('ar')\n",
|
| 138 |
+
"nlp = stanza.Pipeline(lang='ar', processors='tokenize,pos,lemma,depparse', tokenize_no_ssplit=False)\n",
|
| 139 |
+
"arabert_prep = arabert.preprocess.ArabertPreprocessor(ARAELECTRA_NAME)\n",
|
| 140 |
+
"\n",
|
| 141 |
+
"tokenizer_electra = AutoTokenizer.from_pretrained(ARAELECTRA_NAME)\n",
|
| 142 |
+
"model_electra = AutoModel.from_pretrained(ARAELECTRA_NAME)\n",
|
| 143 |
+
"\n",
|
| 144 |
+
"sbert = SentenceTransformer(SBERT_MODEL)\n",
|
| 145 |
+
"\n",
|
| 146 |
+
"from transformers import AutoTokenizer as HFTokenizer, AutoModelForSeq2SeqLM\n",
|
| 147 |
+
"qg_tokenizer = HFTokenizer.from_pretrained(QG_MODEL)\n",
|
| 148 |
+
"qg_model = AutoModelForSeq2SeqLM.from_pretrained(QG_MODEL)\n",
|
| 149 |
+
"\n",
|
| 150 |
+
"device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
|
| 151 |
+
"qg_model = qg_model.to(device)"
|
| 152 |
+
]
|
| 153 |
+
},
|
| 154 |
+
{
|
| 155 |
+
"cell_type": "code",
|
| 156 |
+
"execution_count": 33,
|
| 157 |
+
"id": "9c2da7d8",
|
| 158 |
+
"metadata": {
|
| 159 |
+
"colab": {
|
| 160 |
+
"base_uri": "https://localhost:8080/"
|
| 161 |
+
},
|
| 162 |
+
"id": "9c2da7d8",
|
| 163 |
+
"outputId": "7a362049-8853-408d-ba7a-07f0086fb174"
|
| 164 |
+
},
|
| 165 |
+
"outputs": [
|
| 166 |
+
{
|
| 167 |
+
"output_type": "stream",
|
| 168 |
+
"name": "stdout",
|
| 169 |
+
"text": [
|
| 170 |
+
"The Original:\n",
|
| 171 |
+
" يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءاً من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان\n"
|
| 172 |
+
]
|
| 173 |
+
}
|
| 174 |
+
],
|
| 175 |
+
"source": [
|
| 176 |
+
"# text = \"\"\"المحور العصبي هو نتوء طويل ونحيل، يحمل النبضات الكهربائية بعيدًا عن جسم الخلية إلى الخلايا العصبية الأخرى، أو العضلات، أو الغدد.\n",
|
| 177 |
+
"# يُغطى العديد من المحاور العصبية بمادة دهنية تُسمى غمد الميالين، والتي تعمل كعازل وتُسرّع من نقل الإشارات.\n",
|
| 178 |
+
"# ينتهي المحور العصبي عند نهايات المحور العصبي، حيث يتم إطلاق النواقل العصبية للتواصل مع الخلايا العصبية الأخرى، أو الخلايا المستهدفة.\"\"\"\n",
|
| 179 |
+
"# text = \"\"\"الجهاز العصبي المركزي:\n",
|
| 180 |
+
"# كثُر في الآونة الأخيرة انتشار حالات السكتة الدماغية، وهي حالة تحدث نتيجة عدم وصول الدم المحمّل بالأكسجين إلى الدماغ؛ كحالة طبية طارئة تبدأ فيها خلايا الدماغ بالموت بعد بضع دقائق من عدم وصول الأكسجين. وهناك نوعان رئيسان من السكتة هما السكتة الدماغية التي تحدث بسبب الجلطات الدموية، وتشكل\n",
|
| 181 |
+
"# 87% من الحالات، والسكتة الدماغية التي تحدث بسبب النزيف في الدماغ أو حوله.\n",
|
| 182 |
+
"# وتختلف أعراضها، إذ تشمل: الخدر المفاجئ، وعدم القدرة على تحريك الوجه أو الذراع أو الساق (لاسيما في أحد جانبي الجسم)، والارتباك، ومشاكل في التحدث والرؤية والدوخة، وصعوبة في المشي، وفقدان التوازن، والصداع المفاجئ والشديد، ومشاكل في التنفس، وفقدان الوعي.\"\"\"\n",
|
| 183 |
+
"text = \"يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءاً من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان\"\n",
|
| 184 |
+
"print(\"The Original:\\n\", text)\n"
|
| 185 |
+
]
|
| 186 |
+
},
|
| 187 |
+
{
|
| 188 |
+
"cell_type": "code",
|
| 189 |
+
"execution_count": 34,
|
| 190 |
+
"id": "0f2a3f6a",
|
| 191 |
+
"metadata": {
|
| 192 |
+
"colab": {
|
| 193 |
+
"base_uri": "https://localhost:8080/"
|
| 194 |
+
},
|
| 195 |
+
"id": "0f2a3f6a",
|
| 196 |
+
"outputId": "39092572-679c-4a58-bfdc-ba47492227de"
|
| 197 |
+
},
|
| 198 |
+
"outputs": [
|
| 199 |
+
{
|
| 200 |
+
"output_type": "stream",
|
| 201 |
+
"name": "stdout",
|
| 202 |
+
"text": [
|
| 203 |
+
"Text after normalization:\n",
|
| 204 |
+
" يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءا من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان\n"
|
| 205 |
+
]
|
| 206 |
+
}
|
| 207 |
+
],
|
| 208 |
+
"source": [
|
| 209 |
+
def normalize(s: str) -> str:
    """Light orthographic normalization for Arabic text.

    Strips diacritics (tashkeel), unifies alef variants (آ/أ/إ -> ا) and
    alef maqsura (ى -> ي), removes tatweel (ـ), and collapses runs of
    whitespace to single spaces.
    """
    out = araby.strip_tashkeel(s)
    # Character-level substitutions; the empty replacement drops tatweel.
    for variant, canonical in (('آ', 'ا'), ('أ', 'ا'), ('إ', 'ا'), ('ى', 'ي'), ('ـ', '')):
        out = out.replace(variant, canonical)
    return ' '.join(out.split())
|
| 215 |
+
"text_norm = normalize(text)\n",
|
| 216 |
+
"print(\"Text after normalization:\\n\", text_norm)"
|
| 217 |
+
]
|
| 218 |
+
},
|
| 219 |
+
{
|
| 220 |
+
"cell_type": "code",
|
| 221 |
+
"execution_count": 35,
|
| 222 |
+
"id": "b7db97d3",
|
| 223 |
+
"metadata": {
|
| 224 |
+
"id": "b7db97d3"
|
| 225 |
+
},
|
| 226 |
+
"outputs": [],
|
| 227 |
+
"source": [
|
| 228 |
+
def build_char_map(src: str, tgt: str):
    """Build a per-character index map from `src` positions to `tgt` positions.

    Uses difflib opcodes: characters inside 'equal' runs map one-to-one;
    characters that were replaced or deleted all map to the start of the
    corresponding target region; insertions add nothing on the source side.
    Any position left unmapped inherits the most recent mapped position
    (0 before the first one), so the result is monotone and total.
    """
    mapping = [-1] * len(src)
    matcher = difflib.SequenceMatcher(a=src, b=tgt)
    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
        if tag == 'equal':
            mapping[i1:i2] = range(j1, j2)
        elif tag in ('replace', 'delete'):
            mapping[i1:i2] = [j1] * (i2 - i1)
        # 'insert' affects only the target string; nothing to record here.
    # Forward-fill the remaining -1 slots with the last resolved position.
    last_seen = 0
    for idx, value in enumerate(mapping):
        if value == -1:
            mapping[idx] = last_seen
        else:
            last_seen = value
    return mapping
|
| 247 |
+
"\n",
|
| 248 |
+
def map_span_src_to_tgt(src2tgt, start, end, tgt_len):
    """Project a half-open character span [start, end) through `src2tgt`.

    `src2tgt` is the per-character map from build_char_map. Out-of-range
    inputs are clamped rather than raised, and the result is always a
    non-empty span (te >= ts + 1), matching how downstream token-overlap
    lookups expect at least one character.

    Returns:
        (ts, te): the projected span in target coordinates.
    """
    # Fix: the original indexed src2tgt[0] unconditionally and raised
    # IndexError for an empty map. Fall back to the minimal span instead,
    # consistent with the te >= ts + 1 invariant below.
    if not src2tgt:
        return 0, 1
    if start >= len(src2tgt):
        start = max(0, len(src2tgt) - 1)
    if end == 0:
        end = 1
    if end - 1 >= len(src2tgt):
        end = len(src2tgt)
    ts = src2tgt[start]
    te = src2tgt[end - 1] + 1
    # Clamp into [0, tgt_len) / (ts, tgt_len], keeping the span non-empty.
    ts = max(0, min(ts, max(0, tgt_len - 1)))
    te = max(ts + 1, min(te, tgt_len))
    return ts, te
|
| 257 |
+
"\n",
|
| 258 |
+
def token_indices_overlapping_span(offsets, span_start, span_end):
    """Return indices of tokenizer offsets (s, e) overlapping [span_start, span_end).

    An offset pair overlaps when it ends after the span starts and starts
    before the span ends; zero-width offsets therefore never match.
    """
    return [
        idx
        for idx, (tok_start, tok_end) in enumerate(offsets)
        if tok_end > span_start and tok_start < span_end
    ]
|
| 264 |
+
"\n",
|
| 265 |
+
def electra_hidden_states(prep_text):
    """Run AraELECTRA over preprocessed text.

    Returns (offsets, H): the tokenizer's per-token character offsets (as a
    list of [start, end] pairs) and the last-layer hidden states with the
    batch dimension removed.
    """
    encoding = tokenizer_electra(
        prep_text,
        return_tensors="pt",
        return_offsets_mapping=True,
        padding=False,
        truncation=True,
    )
    # The model does not accept offset_mapping; pop it for our own use.
    offsets = encoding.pop('offset_mapping')[0].tolist()
    with torch.no_grad():
        hidden = model_electra(**encoding).last_hidden_state
    return offsets, hidden.squeeze(0)
|
| 272 |
+
"\n",
|
| 273 |
+
def word_span_list_from_stanza(doc):
    """Flatten a Stanza document into one record per word.

    Each record carries the word's text and labels plus the character span
    of its enclosing token (words of a multi-word token share that span)
    and the sentence/token position it came from.
    """
    records = []
    for sent_idx, sentence in enumerate(doc.sentences):
        for tok_idx, token in enumerate(sentence.tokens):
            for word in token.words:
                records.append({
                    "text": word.text,
                    "start": token.start_char,
                    "end": token.end_char,
                    "upos": word.upos,
                    # feats may be absent on some word objects; default to None.
                    "feats": getattr(word, "feats", None),
                    "deprel": word.deprel,
                    "head": word.head,
                    "sent_idx": sent_idx,
                    "tok_idx": tok_idx
                })
    return records
|
| 290 |
+
"\n",
|
| 291 |
+
def electra_phrase_vec_via_offsets(span_start, span_end, src2tgt, prep_text, offsets, H):
    """Mean-pool ELECTRA hidden states over the tokens covering a source span.

    The span is given in normalized-text coordinates; `src2tgt` projects it
    into preprocessed-text coordinates before matching tokenizer offsets.
    Returns None when no token overlaps the projected span.
    """
    proj_start, proj_end = map_span_src_to_tgt(src2tgt, span_start, span_end, len(prep_text))
    covering = token_indices_overlapping_span(offsets, proj_start, proj_end)
    if not covering:
        return None
    # Average the selected token vectors into a single phrase vector.
    return H[covering].mean(dim=0)
|
| 298 |
+
]
|
| 299 |
+
},
|
| 300 |
+
{
|
| 301 |
+
"cell_type": "code",
|
| 302 |
+
"execution_count": 36,
|
| 303 |
+
"id": "c6a6d4ff",
|
| 304 |
+
"metadata": {
|
| 305 |
+
"colab": {
|
| 306 |
+
"base_uri": "https://localhost:8080/"
|
| 307 |
+
},
|
| 308 |
+
"id": "c6a6d4ff",
|
| 309 |
+
"outputId": "a132a016-6217-4050-f798-5d8d501b48db"
|
| 310 |
+
},
|
| 311 |
+
"outputs": [
|
| 312 |
+
{
|
| 313 |
+
"output_type": "stream",
|
| 314 |
+
"name": "stdout",
|
| 315 |
+
"text": [
|
| 316 |
+
"\n",
|
| 317 |
+
"=== Sentence 1 ===\n",
|
| 318 |
+
"Word: يتشكل UPOS: VERB Dep: root Head: ROOT Feats: Aspect=Imp|Gender=Masc|Mood=Ind|Number=Sing|Person=3|VerbForm=Fin|Voice=Act\n",
|
| 319 |
+
"Word: الماضي UPOS: ADJ Dep: nsubj Head: يتشكل Feats: Case=Nom|Definite=Def|Gender=Masc|Number=Sing\n",
|
| 320 |
+
"Word: غمد UPOS: NOUN Dep: nsubj Head: يتشكل Feats: Case=Nom|Definite=Cons|Number=Sing\n",
|
| 321 |
+
"Word: النخاعين UPOS: NOUN Dep: nmod Head: غمد Feats: Case=Gen|Definite=Def|Number=Dual\n",
|
| 322 |
+
"Word: في UPOS: ADP Dep: case Head: الجهاز Feats: AdpType=Prep\n",
|
| 323 |
+
"Word: الجهاز UPOS: NOUN Dep: nmod Head: غمد Feats: Case=Gen|Definite=Def|Number=Sing\n",
|
| 324 |
+
"Word: العصبي UPOS: ADJ Dep: amod Head: الجهاز Feats: Case=Gen|Definite=Def|Gender=Masc|Number=Sing\n",
|
| 325 |
+
"Word: المركزي UPOS: ADJ Dep: amod Head: الجهاز Feats: Case=Gen|Definite=Def|Gender=Masc|Number=Sing\n",
|
| 326 |
+
"Word: بدءا UPOS: NOUN Dep: obl Head: يتشكل Feats: Case=Acc|Definite=Ind|Number=Sing\n",
|
| 327 |
+
"Word: من UPOS: ADP Dep: fixed Head: بدءا Feats: AdpType=Prep\n",
|
| 328 |
+
"Word: خلايا UPOS: NOUN Dep: nmod Head: بدءا Feats: Case=Gen|Definite=Cons|Number=Plur\n",
|
| 329 |
+
"Word: الدبق UPOS: NOUN Dep: nmod Head: خلايا Feats: Case=Gen|Definite=Def|Number=Sing\n",
|
| 330 |
+
"Word: قليلة UPOS: ADJ Dep: amod Head: الدبق Feats: Case=Gen|Definite=Cons|Gender=Fem|Number=Sing\n",
|
| 331 |
+
"Word: الاستطالات UPOS: NOUN Dep: nmod Head: قليلة Feats: Case=Gen|Definite=Def|Number=Plur\n",
|
| 332 |
+
"Word: و UPOS: CCONJ Dep: cc Head: الجهاز Feats: _\n",
|
| 333 |
+
"Word: في UPOS: ADP Dep: case Head: الجهاز Feats: AdpType=Prep\n",
|
| 334 |
+
"Word: الجهاز UPOS: NOUN Dep: conj Head: خلايا Feats: Case=Gen|Definite=Def|Number=Sing\n",
|
| 335 |
+
"Word: العصبي UPOS: ADJ Dep: amod Head: الجهاز Feats: Case=Gen|Definite=Def|Gender=Masc|Number=Sing\n",
|
| 336 |
+
"Word: المحيطي UPOS: ADJ Dep: amod Head: الجهاز Feats: Case=Gen|Definite=Def|Gender=Masc|Number=Sing\n",
|
| 337 |
+
"Word: من UPOS: ADP Dep: case Head: خلايا Feats: AdpType=Prep\n",
|
| 338 |
+
"Word: خلايا UPOS: X Dep: obl Head: المحيطي Feats: _\n",
|
| 339 |
+
"Word: شوان UPOS: X Dep: nmod Head: خلايا Feats: _\n"
|
| 340 |
+
]
|
| 341 |
+
}
|
| 342 |
+
],
|
| 343 |
+
"source": [
|
| 344 |
+
# Parse the normalized text and print one dependency row per word.
doc = nlp(text_norm)
for si, sentence in enumerate(doc.sentences, start=1):
    print(f"\n=== Sentence {si} ===")
    for w in sentence.words:
        # Stanza leaves feats empty/None when a word has no morphological features.
        feats = w.feats if w.feats else "_"
        # w.head is 1-based; 0 marks the root. Guard against out-of-range heads.
        head_text = sentence.words[w.head-1].text if w.head and w.head-1 < len(sentence.words) else "ROOT"
        print(f"Word: {w.text:<15} UPOS: {w.upos:<6} Dep: {w.deprel:<9} Head: {head_text:<12} Feats: {feats}")
|
| 351 |
+
]
|
| 352 |
+
},
|
| 353 |
+
{
|
| 354 |
+
"cell_type": "code",
|
| 355 |
+
"execution_count": 37,
|
| 356 |
+
"id": "bbde14e6",
|
| 357 |
+
"metadata": {
|
| 358 |
+
"colab": {
|
| 359 |
+
"base_uri": "https://localhost:8080/"
|
| 360 |
+
},
|
| 361 |
+
"id": "bbde14e6",
|
| 362 |
+
"outputId": "05fb00d4-262c-4c1e-acb2-c6c1d347ce2d"
|
| 363 |
+
},
|
| 364 |
+
"outputs": [
|
| 365 |
+
{
|
| 366 |
+
"output_type": "stream",
|
| 367 |
+
"name": "stdout",
|
| 368 |
+
"text": [
|
| 369 |
+
"Number of nominal phrases : 6\n",
|
| 370 |
+
" 1. غمد النخاعين في الجهاز (span=6:28, head=غمد)\n",
|
| 371 |
+
" 2. الجهاز العصبي المركزي (span=22:43, head=الجهاز)\n",
|
| 372 |
+
" 3. بدءا من خلايا (span=44:57, head=بدءا)\n",
|
| 373 |
+
" 4. خلايا الدبق (span=52:63, head=خلايا)\n",
|
| 374 |
+
" 5. الدبق قليلة (span=58:69, head=الدبق)\n",
|
| 375 |
+
" 6. الجهاز العصبي المحيطي (span=85:106, head=الجهاز)\n"
|
| 376 |
+
]
|
| 377 |
+
}
|
| 378 |
+
],
|
| 379 |
+
"source": [
|
| 380 |
+
def build_noun_phrases(doc, text_norm):
    """Extract candidate noun phrases from a dependency-parsed document.

    For every NOUN/PROPN head, collects its amod/compound/nmod dependents,
    orders them around the head by character offset, and slices the phrase
    text out of `text_norm`. Single-word candidates are kept only for PROPN
    heads. Duplicate phrase texts keep the occurrence with the widest
    character span.

    Args:
        doc: Stanza Document (.sentences -> .tokens -> .words, where words
            expose id/text/upos/deprel/head and tokens start_char/end_char).
        text_norm: the normalized text the document was parsed from.

    Returns:
        List of dicts with keys text/start/end/head_text/sent_idx/token_indices.
    """
    noun_phrases = []
    for si, sent in enumerate(doc.sentences):
        # Flatten the sentence into one record per word, carrying the
        # enclosing token's character span (multi-word tokens share it).
        words_info = []
        for ti, tok in enumerate(sent.tokens):
            for w in tok.words:
                words_info.append({
                    "id": w.id,
                    "text": w.text,
                    "upos": w.upos,
                    "deprel": w.deprel,
                    "head": w.head,
                    "start": tok.start_char,
                    "end": tok.end_char,
                    "tok_idx": ti
                })
        # (Removed an unused id->record dict that was rebuilt per sentence.)

        for wi in words_info:
            if wi["upos"] not in {"NOUN", "PROPN"}:
                continue
            head = wi
            left_mods, right_mods = [], []
            for cj in words_info:
                if cj["head"] == head["id"] and cj["deprel"] in {"amod", "compound", "nmod"}:
                    if cj["start"] <= head["start"]:
                        left_mods.append(cj)
                    else:
                        right_mods.append(cj)
            left_mods.sort(key=lambda x: x["start"])
            right_mods.sort(key=lambda x: x["start"])
            phrase_tokens = left_mods + [head] + right_mods
            # Bare common nouns are too generic to be keyphrases; keep
            # single-word phrases only when the head is a proper noun.
            # (phrase_tokens always contains head, so no emptiness check needed.)
            if len(phrase_tokens) < 2 and head["upos"] != "PROPN":
                continue
            span_start = min(t["start"] for t in phrase_tokens)
            span_end = max(t["end"] for t in phrase_tokens)
            phrase_text = re.sub(r"\s+", " ", text_norm[span_start:span_end].strip())
            if len(phrase_text) < 2:
                continue
            noun_phrases.append({
                "text": phrase_text,
                "start": span_start,
                "end": span_end,
                "head_text": head["text"],
                "sent_idx": si,
                "token_indices": [t["tok_idx"] for t in phrase_tokens]
            })
    # Deduplicate by surface text, preferring the widest character span.
    uniq = {}
    for np_item in noun_phrases:
        key = np_item["text"]
        if key not in uniq or (np_item["end"] - np_item["start"]) > (uniq[key]["end"] - uniq[key]["start"]):
            uniq[key] = np_item
    return list(uniq.values())
|
| 436 |
+
"\n",
|
| 437 |
+
"nps = build_noun_phrases(doc, text_norm)\n",
|
| 438 |
+
"print(\"Number of nominal phrases :\", len(nps))\n",
|
| 439 |
+
"for i, p in enumerate(nps[:20], 1):\n",
|
| 440 |
+
" print(f\"{i:>2}. {p['text']} (span={p['start']}:{p['end']}, head={p['head_text']})\")"
|
| 441 |
+
]
|
| 442 |
+
},
|
| 443 |
+
{
|
| 444 |
+
"cell_type": "code",
|
| 445 |
+
"execution_count": 38,
|
| 446 |
+
"id": "47771183",
|
| 447 |
+
"metadata": {
|
| 448 |
+
"colab": {
|
| 449 |
+
"base_uri": "https://localhost:8080/"
|
| 450 |
+
},
|
| 451 |
+
"id": "47771183",
|
| 452 |
+
"outputId": "7cf0361c-d250-4c85-bcb2-07856fb752a4"
|
| 453 |
+
},
|
| 454 |
+
"outputs": [
|
| 455 |
+
{
|
| 456 |
+
"output_type": "stream",
|
| 457 |
+
"name": "stdout",
|
| 458 |
+
"text": [
|
| 459 |
+
"MMR selection (various): ['الجهاز العصبي المركزي', 'الجهاز العصبي المحيطي', 'بدءا من خلايا', 'خلايا الدبق', 'غمد النخاعين في الجهاز', 'الدبق قليلة']\n",
|
| 460 |
+
"\n",
|
| 461 |
+
"Top 15 by blended (phrase, blended, sBERT, ELECTRA_ctx):\n",
|
| 462 |
+
"الجهاز العصبي المركزي -> 0.8126 | sBERT=0.7804 | ELECTRA=0.9415\n",
|
| 463 |
+
"الجهاز العصبي المحيطي -> 0.7930 | sBERT=0.7494 | ELECTRA=0.9674\n",
|
| 464 |
+
"خلايا الدبق -> 0.6421 | sBERT=0.5611 | ELECTRA=0.9663\n",
|
| 465 |
+
"بدءا من خلايا -> 0.6291 | sBERT=0.5483 | ELECTRA=0.9524\n",
|
| 466 |
+
"الدبق قليلة -> 0.3127 | sBERT=0.1528 | ELECTRA=0.9525\n",
|
| 467 |
+
"غمد النخاعين في الجهاز -> 0.2822 | sBERT=0.1064 | ELECTRA=0.9855\n"
|
| 468 |
+
]
|
| 469 |
+
}
|
| 470 |
+
],
|
| 471 |
+
"source": [
|
| 472 |
+
def mmr_select(doc_emb, cand_embs, candidates, k=10, lam=0.7):
    """Select up to k candidates by Maximal Marginal Relevance (MMR).

    Greedily picks candidates similar to the document embedding (weight
    `lam`) while penalizing similarity to already-chosen candidates
    (weight 1 - lam). Returns chosen candidates in selection order.

    NOTE(review): doc_emb/cand_embs are assumed to be sentence-transformers
    tensors compatible with util.cos_sim — confirm at the call site.
    """
    if len(candidates) == 0: return []
    chosen_idx, cand_idx = [], list(range(len(candidates)))
    # Relevance of every candidate to the document.
    sim_doc = util.cos_sim(doc_emb, cand_embs)[0]
    # Seed the selection with the single most relevant candidate.
    first = int(np.argmax(sim_doc.cpu().numpy()))
    chosen_idx.append(first); cand_idx.remove(first)
    if len(candidates) == 1 or k == 1:
        return [candidates[first]]
    # Pairwise candidate similarities, used as the redundancy term below.
    sim_between = util.cos_sim(cand_embs, cand_embs)
    for _ in range(min(k, len(candidates)) - 1):
        best_i, best_score = None, -1e9
        for i in cand_idx:
            # Redundancy = highest similarity to anything already selected.
            redundancy = max(sim_between[i, j].item() for j in chosen_idx) if chosen_idx else 0.0
            score = lam*sim_doc[i].item() - (1-lam)*redundancy
            if score > best_score:
                best_score, best_i = score, i
        chosen_idx.append(best_i); cand_idx.remove(best_i)
    return [candidates[i] for i in chosen_idx]
|
| 490 |
+
"\n",
|
| 491 |
+
def rank_keyphrases_with_mmr(text_norm, nps, arabert_prep, sbert, TOP_K=12, alpha=0.8, lam=0.7):
    """Score noun-phrase candidates and pick a diverse subset.

    Blends two document-similarity signals per phrase — sBERT sentence
    embeddings (weight `alpha`) and span-pooled AraELECTRA hidden states
    (weight 1 - alpha) — then additionally runs MMR over the sBERT
    embeddings for a diversity-aware selection.

    Returns:
        ranked: list of (phrase, blended, sbert_sim, electra_sim) sorted
            by blended score, descending.
        top_diverse: up to TOP_K phrases chosen by MMR (lam trades
            relevance vs. redundancy).
    """
    if not nps: return [], []
    phrases = [p["text"] for p in nps]
    # AraBERT preprocessing changes the text, so build a char map from the
    # normalized text to the preprocessed text for span projection.
    text_prep = arabert_prep.preprocess(text_norm)
    src2tgt = build_char_map(text_norm, text_prep)
    # sBERT: document and per-phrase embeddings, cosine similarities.
    doc_emb_sbert = sbert.encode([text_prep], convert_to_tensor=True)
    phr_embs_sbert = sbert.encode(phrases, convert_to_tensor=True)
    sims_sbert = util.cos_sim(doc_emb_sbert, phr_embs_sbert).cpu().numpy()[0]
    # ELECTRA doc vec: mean of all token hidden states.
    prep_offsets, prep_H = electra_hidden_states(text_prep)
    with torch.no_grad():
        doc_vec_electra = prep_H.mean(dim=0)
    # ELECTRA phrase sims via span: pool tokens under each projected span
    # and take manual cosine similarity against the document vector.
    sims_electra = []
    for p in nps:
        v = electra_phrase_vec_via_offsets(p["start"], p["end"], src2tgt, text_prep, prep_offsets, prep_H)
        if v is None:
            # No tokens covered the span; treat as zero similarity.
            sims_electra.append(0.0)
        else:
            num = torch.dot(doc_vec_electra, v).item()
            # Epsilon guards against division by zero for degenerate vectors.
            den = (doc_vec_electra.norm().item() * v.norm().item() + 1e-9)
            sims_electra.append(num/den)
    sims_electra = np.array(sims_electra)
    final_scores = alpha * sims_sbert + (1 - alpha) * sims_electra
    order = np.argsort(-final_scores)
    ranked = [(phrases[i], float(final_scores[i]), float(sims_sbert[i]), float(sims_electra[i])) for i in order]
    # Diversity-aware pick is driven by the sBERT embeddings only.
    top_diverse = mmr_select(doc_emb_sbert, phr_embs_sbert, phrases, k=min(TOP_K, len(phrases)), lam=lam)
    return ranked, top_diverse
|
| 520 |
+
"\n",
|
| 521 |
+
"ranked, top_diverse = rank_keyphrases_with_mmr(text_norm, nps, arabert_prep, sbert, TOP_K=12, alpha=0.8, lam=0.7)\n",
|
| 522 |
+
"print(\"MMR selection (various):\", top_diverse[:10])\n",
|
| 523 |
+
"\n",
|
| 524 |
+
"print(\"\\nTop 15 by blended (phrase, blended, sBERT, ELECTRA_ctx):\")\n",
|
| 525 |
+
"for phr, sc, sb, el in ranked[:15]:\n",
|
| 526 |
+
" print(f\"{phr:<40s} -> {sc:.4f} | sBERT={sb:.4f} | ELECTRA={el:.4f}\")\n"
|
| 527 |
+
]
|
| 528 |
+
},
|
| 529 |
+
{
|
| 530 |
+
"cell_type": "code",
|
| 531 |
+
"execution_count": 39,
|
| 532 |
+
"id": "7e47f31f",
|
| 533 |
+
"metadata": {
|
| 534 |
+
"colab": {
|
| 535 |
+
"base_uri": "https://localhost:8080/"
|
| 536 |
+
},
|
| 537 |
+
"id": "7e47f31f",
|
| 538 |
+
"outputId": "6235fb61-57de-4f36-849d-c900ee119610"
|
| 539 |
+
},
|
| 540 |
+
"outputs": [
|
| 541 |
+
{
|
| 542 |
+
"output_type": "stream",
|
| 543 |
+
"name": "stdout",
|
| 544 |
+
"text": [
|
| 545 |
+
"Top 15 (Blended SEM+YAKE):\n",
|
| 546 |
+
"الجهاز العصبي المركزي -> final=0.8688 | sem=0.8126 | yake=1.0000 | sBERT=0.7804 | ELECTRA=0.9415\n",
|
| 547 |
+
"الجهاز العصبي المحيطي -> final=0.8551 | sem=0.7930 | yake=1.0000 | sBERT=0.7494 | ELECTRA=0.9674\n",
|
| 548 |
+
"خلايا الدبق -> final=0.4495 | sem=0.6421 | yake=0.0000 | sBERT=0.5611 | ELECTRA=0.9663\n",
|
| 549 |
+
"بدءا من خلايا -> final=0.4404 | sem=0.6291 | yake=0.0000 | sBERT=0.5483 | ELECTRA=0.9524\n",
|
| 550 |
+
"غمد النخاعين في الجهاز -> final=0.4177 | sem=0.2822 | yake=0.7339 | sBERT=0.1064 | ELECTRA=0.9855\n",
|
| 551 |
+
"الدبق قليلة -> final=0.2565 | sem=0.3127 | yake=0.1254 | sBERT=0.1528 | ELECTRA=0.9525\n"
|
| 552 |
+
]
|
| 553 |
+
}
|
| 554 |
+
],
|
| 555 |
+
"source": [
|
| 556 |
+
def yake_scores_for_phrases(text_norm, phrases, max_ngram_size=5, lan='ar'):
    """Look up raw YAKE scores (lower = better) for the given phrases.

    Runs YAKE once over the whole text, then matches each candidate phrase
    after whitespace collapsing and lowercasing. Phrases YAKE did not
    extract map to None.
    """
    extractor = yake.KeywordExtractor(lan=lan, n=max_ngram_size, dedupLim=0.9, top=1000)

    def canon(s):
        # Collapse whitespace and lowercase so surface variants still match.
        return re.sub(r"\s+", " ", s).strip().lower()

    extracted = {canon(kw): sc for kw, sc in extractor.extract_keywords(text_norm)}
    return {p: extracted.get(canon(p)) for p in phrases}
|
| 566 |
+
"\n",
|
| 567 |
+
def invert_and_minmax_yake(score_map):
    """Turn raw YAKE scores into [0, 1] goodness scores.

    YAKE scores are lower-is-better, so each score v is inverted to
    1 / (1 + v), then min-max scaled over the phrases that have a score.
    Phrases with no YAKE score (None) get 0.0. If no phrase has a score,
    everything is 0.0.
    """
    inverted = {k: (None if v is None else 1.0 / (1.0 + v)) for k, v in score_map.items()}
    present = [x for x in inverted.values() if x is not None]
    if not present:
        return {k: 0.0 for k in score_map}
    lo, hi = min(present), max(present)
    # When all scores coincide, divide by 1.0 so every phrase maps to 0.0.
    spread = (hi - lo) if hi > lo else 1.0
    return {k: (0.0 if x is None else (x - lo) / spread) for k, x in inverted.items()}
|
| 580 |
+
"\n",
|
| 581 |
+
def blend_semantic_with_yake(ranked_semantic, yake_norm_map, w_sem=0.7, w_yake=0.3):
    """Blend semantic scores with normalized YAKE scores.

    Args:
        ranked_semantic: tuples (phrase, semantic_score, sbert_sim, electra_sim).
        yake_norm_map: phrase -> YAKE score in [0, 1]; missing phrases count as 0.0.
        w_sem, w_yake: blend weights for the two signals.

    Returns:
        Tuples (phrase, final, semantic, yake, sbert, electra) sorted by
        final score, descending.
    """
    blended = []
    for phr, sem_sc, sb, el in ranked_semantic:
        yake_sc = yake_norm_map.get(phr, 0.0)
        blended.append((phr, w_sem * sem_sc + w_yake * yake_sc, sem_sc, yake_sc, sb, el))
    return sorted(blended, key=lambda row: -row[1])
|
| 589 |
+
"\n",
|
| 590 |
+
"phrases = [r[0] for r in ranked]\n",
|
| 591 |
+
"yake_raw = yake_scores_for_phrases(text_norm, phrases, max_ngram_size=5, lan='ar')\n",
|
| 592 |
+
"yake_norm = invert_and_minmax_yake(yake_raw)\n",
|
| 593 |
+
"\n",
|
| 594 |
+
"ranked_blended = blend_semantic_with_yake(ranked, yake_norm, w_sem=0.7, w_yake=0.3)\n",
|
| 595 |
+
"\n",
|
| 596 |
+
"print(\"Top 15 (Blended SEM+YAKE):\")\n",
|
| 597 |
+
"for phr, final, sem_sc, yake_sc, sb, el in ranked_blended[:15]:\n",
|
| 598 |
+
" print(f\"{phr:<40s} -> final={final:.4f} | sem={sem_sc:.4f} | yake={yake_sc:.4f} | sBERT={sb:.4f} | ELECTRA={el:.4f}\")\n"
|
| 599 |
+
]
|
| 600 |
+
},
|
| 601 |
+
{
|
| 602 |
+
"cell_type": "code",
|
| 603 |
+
"source": [
|
| 604 |
+
"def split_by_dots(text: str):\n",
|
| 605 |
+
" parts = re.split(r\"\\.{1,}\\s*\", text)\n",
|
| 606 |
+
" sentences = [p.strip() for p in parts if p.strip()]\n",
|
| 607 |
+
" return sentences\n",
|
| 608 |
+
"\n",
|
| 609 |
+
"def sentence_kind_from_root(stanza_sentence):\n",
|
| 610 |
+
" root = next((w for w in stanza_sentence.words if w.deprel == \"root\"), None)\n",
|
| 611 |
+
" if not root:\n",
|
| 612 |
+
" return \"unknown\"\n",
|
| 613 |
+
" return \"verbal\" if root.upos == \"VERB\" else \"nominal\"\n",
|
| 614 |
+
"\n",
|
| 615 |
+
"def split_and_tag_nominal_verbal_by_dots(text_norm, nlp):\n",
|
| 616 |
+
" sents = split_by_dots(text_norm)\n",
|
| 617 |
+
" tagged = []\n",
|
| 618 |
+
" for s in sents:\n",
|
| 619 |
+
" doc_s = nlp(s)\n",
|
| 620 |
+
" if not doc_s.sentences:\n",
|
| 621 |
+
" tagged.append({\"text\": s, \"kind\": \"unknown\"})\n",
|
| 622 |
+
" continue\n",
|
| 623 |
+
" kind = sentence_kind_from_root(doc_s.sentences[0])\n",
|
| 624 |
+
" tagged.append({\"text\": s, \"kind\": kind})\n",
|
| 625 |
+
" return tagged\n",
|
| 626 |
+
"def link_phrases_to_sentences_by_dots(text_norm, phrases, nlp, sbert, top_k_per_phrase=2):\n",
|
| 627 |
+
" sentences_tagged = split_and_tag_nominal_verbal_by_dots(text_norm, nlp)\n",
|
| 628 |
+
" if not sentences_tagged:\n",
|
| 629 |
+
" return [], {p: [] for p in phrases}\n",
|
| 630 |
+
"\n",
|
| 631 |
+
" sent_texts = [m[\"text\"] for m in sentences_tagged]\n",
|
| 632 |
+
" sent_embs = sbert.encode(sent_texts, convert_to_tensor=True)\n",
|
| 633 |
+
"\n",
|
| 634 |
+
" phrase_links = {}\n",
|
| 635 |
+
" for p in phrases:\n",
|
| 636 |
+
" p_emb = sbert.encode([p], convert_to_tensor=True)\n",
|
| 637 |
+
" sims = util.cos_sim(p_emb, sent_embs)[0].cpu().numpy()\n",
|
| 638 |
+
" order = np.argsort(-sims)\n",
|
| 639 |
+
" links = []\n",
|
| 640 |
+
" for idx in order[:min(top_k_per_phrase, len(order))]:\n",
|
| 641 |
+
" links.append({\n",
|
| 642 |
+
" \"sent\": sent_texts[idx],\n",
|
| 643 |
+
" \"sim\": float(sims[idx]),\n",
|
| 644 |
+
" \"kind\": sentences_tagged[idx][\"kind\"]\n",
|
| 645 |
+
" })\n",
|
| 646 |
+
" phrase_links[p] = links\n",
|
| 647 |
+
"\n",
|
| 648 |
+
" return sentences_tagged, phrase_links\n",
|
| 649 |
+
"\n",
|
| 650 |
+
"\n",
|
| 651 |
+
"tagged_sents = split_and_tag_nominal_verbal_by_dots(text_norm, nlp)\n",
|
| 652 |
+
"\n",
|
| 653 |
+
"print(\"\\nSentences (divided by points only) and their classification:\")\n",
|
| 654 |
+
"for i, it in enumerate(tagged_sents, 1):\n",
|
| 655 |
+
" print(f\"{i:>2}. ({it['kind']}) {it['text']}\")\n",
|
| 656 |
+
"\n",
|
| 657 |
+
"topK_for_support = 1\n",
|
| 658 |
+
"phr_top = [x[0] for x in ranked_blended[:5]]\n",
|
| 659 |
+
"sentences_tagged, phrase_links = link_phrases_to_sentences_by_dots(\n",
|
| 660 |
+
" text_norm, phr_top, nlp, sbert, top_k_per_phrase=topK_for_support\n",
|
| 661 |
+
")\n",
|
| 662 |
+
"\n",
|
| 663 |
+
"print(\"\\nLinking phrases to supporting sentences (highest similarity):\")\n",
|
| 664 |
+
"for p in phr_top:\n",
|
| 665 |
+
" print(f\"- عبارة: {p}\")\n",
|
| 666 |
+
" for l in phrase_links.get(p, []):\n",
|
| 667 |
+
" print(f\" • ({l['kind']}) sim={l['sim']:.3f} | {l['sent']}\")"
|
| 668 |
+
],
|
| 669 |
+
"metadata": {
|
| 670 |
+
"colab": {
|
| 671 |
+
"base_uri": "https://localhost:8080/"
|
| 672 |
+
},
|
| 673 |
+
"id": "msca6UdcjU09",
|
| 674 |
+
"outputId": "39e28ae0-d184-455d-c2f2-b014bbb3c5ee"
|
| 675 |
+
},
|
| 676 |
+
"id": "msca6UdcjU09",
|
| 677 |
+
"execution_count": 40,
|
| 678 |
+
"outputs": [
|
| 679 |
+
{
|
| 680 |
+
"output_type": "stream",
|
| 681 |
+
"name": "stdout",
|
| 682 |
+
"text": [
|
| 683 |
+
"\n",
|
| 684 |
+
"Sentences (divided by points only) and their classification:\n",
|
| 685 |
+
" 1. (verbal) يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءا من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان\n",
|
| 686 |
+
"\n",
|
| 687 |
+
"Linking phrases to supporting sentences (highest similarity):\n",
|
| 688 |
+
"- عبارة: الجهاز العصبي المركزي\n",
|
| 689 |
+
" • (verbal) sim=0.780 | يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءا من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان\n",
|
| 690 |
+
"- عبارة: الجهاز العصبي المحيطي\n",
|
| 691 |
+
" • (verbal) sim=0.749 | يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءا من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان\n",
|
| 692 |
+
"- عبارة: خلايا الدبق\n",
|
| 693 |
+
" • (verbal) sim=0.561 | يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءا من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان\n",
|
| 694 |
+
"- عبارة: بدءا من خلايا\n",
|
| 695 |
+
" • (verbal) sim=0.548 | يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءا من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان\n",
|
| 696 |
+
"- عبارة: غمد النخاعين في الجهاز\n",
|
| 697 |
+
" • (verbal) sim=0.106 | يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءا من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان\n"
|
| 698 |
+
]
|
| 699 |
+
}
|
| 700 |
+
]
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"cell_type": "code",
|
| 704 |
+
"source": [
|
| 705 |
+
"def gen_unified_question_freeform(phrases, supports, context_text, max_len=96, num_beams=5):\n",
|
| 706 |
+
" context_short = context_text.strip()[:600]\n",
|
| 707 |
+
" items_block = \"\\n\".join(\n",
|
| 708 |
+
" [f\"- العبارة: {p}\\n جملة داعمة: {s}\" for p, s in zip(phrases, supports)]\n",
|
| 709 |
+
" )\n",
|
| 710 |
+
" prompt = (\n",
|
| 711 |
+
" \"حوّل العبارات التالية إلى سؤال واحد شامل بالعربية يعتمد على السياق. \"\n",
|
| 712 |
+
" \"يجب أن يغطي جميع العبارات بشكل موجز وواضح.\\n\"\n",
|
| 713 |
+
" f\"{items_block}\\n\"\n",
|
| 714 |
+
" f\"سياق: {context_short}\\n\"\n",
|
| 715 |
+
" \"السؤال الموحد:\"\n",
|
| 716 |
+
" )\n",
|
| 717 |
+
"\n",
|
| 718 |
+
" inputs = qg_tokenizer(prompt, return_tensors=\"pt\", truncation=True).to(device)\n",
|
| 719 |
+
" outputs = qg_model.generate(\n",
|
| 720 |
+
" **inputs,\n",
|
| 721 |
+
" max_length=max_len,\n",
|
| 722 |
+
" num_beams=num_beams,\n",
|
| 723 |
+
" early_stopping=True,\n",
|
| 724 |
+
" no_repeat_ngram_size=3\n",
|
| 725 |
+
" )\n",
|
| 726 |
+
" q = qg_tokenizer.decode(outputs[0], skip_special_tokens=True).strip()\n",
|
| 727 |
+
" q = q.rstrip(\"?.؟\")\n",
|
| 728 |
+
" if q and not q.endswith(\"؟\"):\n",
|
| 729 |
+
" q += \"؟\"\n",
|
| 730 |
+
" return q\n",
|
| 731 |
+
"\n",
|
| 732 |
+
"def unified_question_from_top5_phrases(text_norm, ranked_blended, nlp, sbert, top_k=5):\n",
|
| 733 |
+
" if not ranked_blended:\n",
|
| 734 |
+
" print(\"لا توجد عبارات.\")\n",
|
| 735 |
+
" return {\"phrases\": [], \"supports\": [], \"question\": \"\"}\n",
|
| 736 |
+
" top_n = min(top_k, len(ranked_blended))\n",
|
| 737 |
+
" phrases = [ranked_blended[i][0] for i in range(top_n)]\n",
|
| 738 |
+
"\n",
|
| 739 |
+
" supports = []\n",
|
| 740 |
+
" for p in phrases:\n",
|
| 741 |
+
" s = best_support_sentence_by_dots(text_norm, p, nlp, sbert)\n",
|
| 742 |
+
" supports.append(s)\n",
|
| 743 |
+
"\n",
|
| 744 |
+
" unified_q = gen_unified_question_freeform(phrases, supports, context_text=text_norm)\n",
|
| 745 |
+
" print(\"The context : :\\n\", text_norm, \"\\n\")\n",
|
| 746 |
+
" print(\"The selected phrase (Top):\")\n",
|
| 747 |
+
" for i, p in enumerate(phrases, 1):\n",
|
| 748 |
+
" print(f\"{i}. {p}\")\n",
|
| 749 |
+
" print(\"\\The supporting sentences :\")\n",
|
| 750 |
+
" for i, s in enumerate(supports, 1):\n",
|
| 751 |
+
" print(f\"{i}. {s}\")\n",
|
| 752 |
+
" print(\"\\nUnified Generated Question:\")\n",
|
| 753 |
+
" print(unified_q)\n",
|
| 754 |
+
"\n",
|
| 755 |
+
" return {\"phrases\": phrases, \"supports\": supports, \"question\": unified_q}\n",
|
| 756 |
+
"unified_result = unified_question_from_top5_phrases(text_norm, ranked_blended, nlp, sbert, top_k=5)"
|
| 757 |
+
],
|
| 758 |
+
"metadata": {
|
| 759 |
+
"colab": {
|
| 760 |
+
"base_uri": "https://localhost:8080/"
|
| 761 |
+
},
|
| 762 |
+
"id": "mGtgQgSAwv8H",
|
| 763 |
+
"outputId": "a901de5f-ba37-49ee-8eb9-15750e1b3ce8"
|
| 764 |
+
},
|
| 765 |
+
"id": "mGtgQgSAwv8H",
|
| 766 |
+
"execution_count": 42,
|
| 767 |
+
"outputs": [
|
| 768 |
+
{
|
| 769 |
+
"output_type": "stream",
|
| 770 |
+
"name": "stdout",
|
| 771 |
+
"text": [
|
| 772 |
+
"السياق:\n",
|
| 773 |
+
" يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءا من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان \n",
|
| 774 |
+
"\n",
|
| 775 |
+
"العبارات المختارة (Top):\n",
|
| 776 |
+
"1. الجهاز العصبي المركزي\n",
|
| 777 |
+
"2. الجهاز العصبي المحيطي\n",
|
| 778 |
+
"3. خلايا الدبق\n",
|
| 779 |
+
"4. بدءا من خلايا\n",
|
| 780 |
+
"5. غمد النخاعين في الجهاز\n",
|
| 781 |
+
"\n",
|
| 782 |
+
"الجمل الداعمة:\n",
|
| 783 |
+
"1. يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءا من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان\n",
|
| 784 |
+
"2. يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءا من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان\n",
|
| 785 |
+
"3. يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءا من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان\n",
|
| 786 |
+
"4. يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءا من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان\n",
|
| 787 |
+
"5. يتشكل غمد النخاعين في الجهاز العصبي المركزي بدءا من خلايا الدبق قليلة الاستطالات وفي الجهاز العصبي المحيطي من خلايا شوان\n",
|
| 788 |
+
"\n",
|
| 789 |
+
"السؤال الموحد المولّد:\n",
|
| 790 |
+
"question: من أين يتكون غمد النخاعين؟\n"
|
| 791 |
+
]
|
| 792 |
+
}
|
| 793 |
+
]
|
| 794 |
+
}
|
| 795 |
+
],
|
| 796 |
+
"metadata": {
|
| 797 |
+
"colab": {
|
| 798 |
+
"provenance": []
|
| 799 |
+
},
|
| 800 |
+
"language_info": {
|
| 801 |
+
"name": "python"
|
| 802 |
+
},
|
| 803 |
+
"kernelspec": {
|
| 804 |
+
"name": "python3",
|
| 805 |
+
"display_name": "Python 3"
|
| 806 |
+
}
|
| 807 |
+
},
|
| 808 |
+
"nbformat": 4,
|
| 809 |
+
"nbformat_minor": 5
|
| 810 |
+
}
|