| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T05:58:52.444337Z" |
| }, |
| "title": "Quranic Verses Semantic Relatedness Using AraBERT", |
| "authors": [ |
| { |
| "first": "Abdullah", |
| "middle": [ |
| "N" |
| ], |
| "last": "Alsaleh", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Leeds", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Atwell", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Leeds", |
| "location": {} |
| }, |
| "email": "e.s.atwell@leeds.ac.uk" |
| }, |
| { |
| "first": "Abdulrahman", |
| "middle": [], |
| "last": "Altahhan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Leeds", |
| "location": {} |
| }, |
| "email": "a.altahhan@leeds.ac.uk" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Bidirectional Encoder Representations from Transformers (BERT) has gained popularity in recent years producing state-of-the-art performances across Natural Language Processing tasks. In this paper, we used AraBERT language model to binary classify pairs of verses provided by the QurSim dataset to either be semantically related or not. We have pre-processed the QurSim dataset and formed three datasets for comparisons. Also, we have used both versions of AraBERT, which are AraBERTv0.2 and AraBERTv2, to recognise which version performs the best with the given datasets. The best result was AraBERTv0.2 with 92% accuracy score using a dataset comprised of label '2' and label '-1', the latter was generated outside of QurSim dataset.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Bidirectional Encoder Representations from Transformers (BERT) has gained popularity in recent years producing state-of-the-art performances across Natural Language Processing tasks. In this paper, we used AraBERT language model to binary classify pairs of verses provided by the QurSim dataset to either be semantically related or not. We have pre-processed the QurSim dataset and formed three datasets for comparisons. Also, we have used both versions of AraBERT, which are AraBERTv0.2 and AraBERTv2, to recognise which version performs the best with the given datasets. The best result was AraBERTv0.2 with 92% accuracy score using a dataset comprised of label '2' and label '-1', the latter was generated outside of QurSim dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In recent years, Natural Language Processing (NLP) has been evolved with the introduction of Transformer architecture by Vaswani et al. (2017) . BERT, built on the transformer layer, has showed and produced state-of-the-art accuracy in a number of NLP tasks such as machine translation and text classification (Devlin et al., 2019) . There are two stages of BERT: pre-training and fine-tuning. Pre-training is used for masked language modeling and next sentence prediction. For fine-tuning, is to add one or more layers designed for specific task on top of the final encoder layer (Rogers et al., 2020) . Google provides pre-trained models for English and other languages including Arabic. Several studies provide their own language model based on BERT to perform better on specific tasks. AraBERT is recently published that contributes to Arabic language model that is pre-trained to suit a wide range of Arabic NLP related tasks. In this paper, we have used AraBERT language model to classify QurSim dataset in semantic relatedness task. Section 2 will outline the related work in semantic similarity and relatedness while section 3 will discuss AraBERT. Section 4 will discuss Qur-Sim dataset and filtration process. Finally, section 5 will showcase the results of using QurSim datasets using AraBERT. Several studies that involved Arabic and Quranic text using different methods to extract semantic similarity or relatedness. Mohamed et al. (2015) built a system Al-Bayan for evaluating semantic interpreter between Arabic questions and answers.", |
| "cite_spans": [ |
| { |
| "start": 121, |
| "end": 142, |
| "text": "Vaswani et al. (2017)", |
| "ref_id": null |
| }, |
| { |
| "start": 310, |
| "end": 331, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 581, |
| "end": 602, |
| "text": "(Rogers et al., 2020)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1430, |
| "end": 1451, |
| "text": "Mohamed et al. (2015)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The system used Morphological Analysis and Disambiguation for Arabic (Habash et al., 2009) as preprocessing tool and Decision Tree Classifier to predict the label. The proposed system achieved 74.5% accuracy score. Another study by Al-Bataineh et al. (2019) presented a system to identify similar Arabic questions in Quora using ELMo (Peters et al., 2018) , Word2vec (Mikolov et al., 2013a ) (Mikolov et al., 2013b) and Sent2vec (Pagliardini et al., 2018) . The dataset used for training word and sentence embeddings includes Mawdoo3 question-to-question (Q2Q) (Seelawi et al., 2019b) and Madar Dialect Q2Q (Bouamor et al., 2018) datasets. The study found that ELMo performed better with 93% in Modern Standard Arabic and 82% in Arabic Dialects compared to other models such as Sent2vec and Word2vec.", |
| "cite_spans": [ |
| { |
| "start": 69, |
| "end": 90, |
| "text": "(Habash et al., 2009)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 334, |
| "end": 355, |
| "text": "(Peters et al., 2018)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 367, |
| "end": 389, |
| "text": "(Mikolov et al., 2013a", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 392, |
| "end": 415, |
| "text": "(Mikolov et al., 2013b)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 429, |
| "end": 455, |
| "text": "(Pagliardini et al., 2018)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 561, |
| "end": 584, |
| "text": "(Seelawi et al., 2019b)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 607, |
| "end": 629, |
| "text": "(Bouamor et al., 2018)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There are studies that use BERT models for identifying semantic similarity between text. A team competing at NSURL-2019 used BERT model with pre-trained multilingual to detect similar Arabic questions (Al-Theiabat and Al-Sadi, 2019). The dataset used for this competition mostly from Mad-woo3 (Seelawi et al., 2019a) . The results showcased BERT model outperform other models with F1-Score of 95.92%. Another study by Peinelt et al.", |
| "cite_spans": [ |
| { |
| "start": 293, |
| "end": 316, |
| "text": "(Seelawi et al., 2019a)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Similarity using BERT", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "(2020) combines topic model and BERT (tBERT) to enhance semantic similarity detection and prediction between pairs of English sentences. Reimers and Gurevych (2019) presented Sentence-BERT (SBERT) that uses siamese and triplets network structure with modified pre-trained BERT to derive semantically meaningful text embeddings that can be compared using cosine-similarity. However, to the best of our knowledge there is no research on semantic similarity or relatedness using BERT or AraBERT on Holy Quran text.", |
| "cite_spans": [ |
| { |
| "start": 137, |
| "end": 164, |
| "text": "Reimers and Gurevych (2019)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Similarity using BERT", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "AraBERT is an Arabic language model in which BERT was trained on a large Arabic corpus (Antoun et al., 2020) . The dataset includes Arabic Wikipedia, 1.5 billion words from Arabic corpora (El-Khair, 2016) and the Open Source International Arabic News Corpus (Zeroual et al., 2019) . The corpus covers news articles from several Arab news media with different topics and from different Arab countries. The size of the pre-training dataset is 70 million sentences that amounts to approximately 24GB of text (Antoun et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 87, |
| "end": 108, |
| "text": "(Antoun et al., 2020)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 258, |
| "end": 280, |
| "text": "(Zeroual et al., 2019)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 505, |
| "end": 526, |
| "text": "(Antoun et al., 2020)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "AraBERT", |
| "sec_num": "3" |
| }, |
| { |
| "text": "AraBERT has produced better results on various Arabic NLP tasks. In sentiment analysis, AraBERT performed better than mBERT, which is a multilingual BERT model developed by Google, on most tested datasets (Antoun et al., 2020) . Also, AraBERT outperformed mBERT and TF-IDF on the new Twitter-based benchmark dataset for Arabic Sentiment Analysis (Alharbi et al., 2020). In Named Entity Recognition (NER), AraBERTv01 had better results over Bi-LSTM-CRF model with macro-F1 score of 84.2, in which AraBERT new state of the art for NER on ANERcorp (Antoun et al., 2020) . AraBERT also outperformed other Arabic NER tools such MADAMIRA and FARASA in NER tasks using AQMAR and NEWS datasets (Helwe et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 205, |
| "end": 226, |
| "text": "(Antoun et al., 2020)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 545, |
| "end": 566, |
| "text": "(Antoun et al., 2020)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 686, |
| "end": 706, |
| "text": "(Helwe et al., 2020)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "AraBERT", |
| "sec_num": "3" |
| }, |
| { |
| "text": "QurSim is a work of Arabic text that pertains to the Holy Quran (Sharaf and Atwell, 2012) . The dataset showcases 7679 pairs of verses that are similar or related verses according to comments of Ibn Kathir's Tafsir, which is highly respected for its interpretation of the Holy Quran. It also improves its dataset using lexical similarity approach such as Term Frequency-Inverse Document Frequency (TF-IDF). QurSim dataset classifies pairs of Quranic verses that are related into three classes.", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 89, |
| "text": "(Sharaf and Atwell, 2012)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "QurSim", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Label '2' indicates that the two verses are strongly related and they share similar lexicon. Label '1' means that the two verses are related based on the main topics that were mentioned in these verses but they share less similar lexicon. Finally, label '0' indicates that there is no obvious relation between the verses as, for example, it draws analogies that are different from each other but they serve the same purpose. Label '2', '1' and '0' comprise 40.09%, 48.41% and 11.49% respectively of the total QurSim dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "QurSim", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The QurSim data contains numeric values that entail the location of chapters and verses. However, for this research, BERT needs text as inputs in order to perform its models including BERTforSe-quenceClassification. Therefore, we needed to map Quranic numeric verses to texts. A Quran dataset by Aloufi (2019) has been used to map the numeric verses to their texts. Then, the verses were manually checked to see if they are in correct order.", |
| "cite_spans": [ |
| { |
| "start": 296, |
| "end": 309, |
| "text": "Aloufi (2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mapping QurSim dataset to Quranic Verses Text", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We found there were duplicated pairs of verses in QurSim datasets with the total of 764 records of duplicated pairs of verses. When we examined the duplicates, there were 592 records of duplicated pairs were labelled the same but ordered differently. For example, chapter 2 verse 2 is paired with chapter 2 verse 3 and the pair is labelled '2' and vice versa, shown in Table 1 . For this case, we opted to remove the duplicates since they were redundant considering that the relationship is naturally bidirectional.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 369, |
| "end": 376, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Duplicated Pairs Elimination", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "ID SS SV TS TV Label 91 2 2 2 3 1 98 2 3 2 2 1 The other 172 records of duplicated pairs of verses were labelled differently. For example, chapter 1 verse 5 paired with chapter 73 verse 9 were labelled '2'; however, they were labelled '1' when ordered differently, shown in Table 2 . Since the label assignments were mainly influenced by Ibn Kathir's Tafsir, it is difficult for the authors to interpret Ibn Kathirs's comments in order to assign which appropriate label to pair of verses. So, it was important for this research to remove all pairs of verses that were labelled differently to ensure the dataset.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 274, |
| "end": 281, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Duplicated Pairs Elimination", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "SS SV TS TV Label 66 1 5 73 9 2 7339 73 9 1 5 1 The purpose of the paper is to binary classify pairs of Quranic verses for semantic relatedness using AraBERT. Label '1' pairs are related, however, the degree of similarity/relatedness is weaker than label '2'. Pairs of verses that share the label '1' have fewer words and concepts in common, which could affect the results based on AraBERT limitation on classical Arabic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ID", |
| "sec_num": null |
| }, |
| { |
| "text": "And the weighing [of deeds] that Day will be the truth. So those whose scales are heavy -it is they who will be successful. [7:8] So when the Horn is blown, no relationship will there be among them that Day, nor will they ask about one another [23:101] In this example, the two verses are labelled '1' and they are describing events that happen on the judgment day. Both verses are related in which they share the topic of judgement day; however, they are mentioning different events that happen on the judgement day. Both verses also do not share any Arabic lexical item except for 'That Day' which references to the judgement day.", |
| "cite_spans": [ |
| { |
| "start": 124, |
| "end": 129, |
| "text": "[7:8]", |
| "ref_id": null |
| }, |
| { |
| "start": 244, |
| "end": 252, |
| "text": "[23:101]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ID", |
| "sec_num": null |
| }, |
| { |
| "text": "Also, this is a preliminary study to use the AraBERT model for semantic similarity and the authors chose the extreme similarity label, which is label '2', for training and testing. Therefore, pairs of verses that have been assigned label '1' were removed from the dataset. In future studies, the authors may expand to include and train label '1' for multi-classification research.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ID", |
| "sec_num": null |
| }, |
| { |
| "text": "After removing duplicated pairs of verses and label '1', we found that the dataset was imbalanced between label '2' and '0'. Label '2' has 2548 records while label '0' has 857 records. This was an issue because the dataset is imbalanced and could produce poorer results as we will mention it in the results section. We also needed pairs of verses that are not related to train and test against label '2'. Therefore, we randomly generated 2548 pairs of verses from the Holy Quran that are not in the Qur-Sim dataset and labelled them as '-1'. Since the QurSim dataset is according to Ibn Kathir's Tafsir, we assume that these randomly generated pairs of verses are not related according to Ibn Kathir. Therefore, it is fair to say that we are building a model to test the relatedness of pairs of verses that is only based on Ibn Kathir's opinions and interpretations. We have experimented with the latest two different versions of AraBERT, which are AraBERTv0.2 and AraBERTv2 (Antoun et al., 2020) . AraBERTv2 version uses Farasa to segment the words into stems, prefix and suffix. AraBERTv0.2 does not require any Farasa segmentation. We could not use the LARGE version of the models as we only used the BASE version due to hardware limitations. The BASE model of both AraBERT versions has 136M parameters and 200 million sentences with a size of 77G of text. Furthermore, there are three groups of experiments based on dataset parameters which we will detail in this section. Summary of the experiments are shown in Table 3 .", |
| "cite_spans": [ |
| { |
| "start": 975, |
| "end": 996, |
| "text": "(Antoun et al., 2020)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1517, |
| "end": 1524, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset Balancing", |
| "sec_num": "4.2.3" |
| }, |
| { |
| "text": "The experiments were performed using Google Colab. AraBERT along with Transformers library were installed while performing the experiments. The model was fine tuned for semantic similarity task. All of the experiments have batch size of 32, learning rate is 2e-5 and 8 epochs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": null |
| }, |
| { |
| "text": "The first dataset has label '2' and label '0' of QurSim dataset after the filtration process. Label '2' has 2548 records while label '0' has 857 records. The dataset was split into training set, validation set and testing set. The training set has 2052 records, the validation set has 229 records and the testing set has 1124 records. AraBERTv0.2 scored better accuracy score metric than AraBERTv2. Although the dataset is small and imbalanced, both AraBERT versions achieved good accuracy score, which is shown in Table 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 515, |
| "end": 522, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": null |
| }, |
| { |
| "text": "The second dataset has label '2' from QurSim dataset and label '-1' for pairs that were generated randomly. Both labels have the same number of records, which are 2548 pairs of verses. The dataset was split into training set, validation set and testing set. The training set has 3072 records, the validation set has 342 records and the testing set has 1682 records. We found that AraBERTv0.2 performed better than AraBERTv2 with the accuracy score of 92%. AraBERTv0.2 scored the best accuracy score compared to the other datasets. This is due to generated pairs of verses labelled '-1' being not related.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": null |
| }, |
| { |
| "text": "The third and final dataset comprises of label '2' and label '0' augmented with '-1'. They also have the same number of records of 2548 pairs of verses. The reason to combine label '0' and label '-1' for the experiment is to test the BERT model if it would identify the label '0' and '-1' as one class. The dataset was again split into training set (3072 records), validation set (342 records) and testing set (1682 records). The results turned out to be worse than previous two for AraBERTv0.2 while AraBERTv2 maintaining consistency with its accuracy score, which similar to the previous two experiments. ID SS SV TS TV Label 5648 32 27 80 24 1 5649 32 27 80 25 1 5650 32 27 80 26 1 5651 32 27 80 27 1 5652 32 27 80 28 1 5653 32 27 80 29 1 5654 32 27 80 30 1 5655 32 27 80 31 1 5656 32 27 80 32 2 Table 4 : Series of verses are related to a single verse", |
| "cite_spans": [ |
| { |
| "start": 607, |
| "end": 609, |
| "text": "ID", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 619, |
| "end": 835, |
| "text": "TV Label 5648 32 27 80 24 1 5649 32 27 80 25 1 5650 32 27 80 26 1 5651 32 27 80 27 1 5652 32 27 80 28 1 5653 32 27 80 29 1 5654 32 27 80 30 1 5655 32 27 80 31 1 5656 32 27 80 32 2 Table 4", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": null |
| }, |
| { |
| "text": "Although the results were promising, we have looked at the output files of these experiments and we found common problems that we will examine in this section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "In regards to the dataset, we found there are a series of verses that are discussing and describing a particular topic and are related to one verse. However, in the QurSim dataset, those series of verses are paired with a single verse, an example shown in Table 4 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 256, |
| "end": 263, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "[As] enjoyment [i.e., provision] for you, and for your livestock. [80:32] Do they not see how We conduct the water to a dry land, and with it We produce vegetation, from which their livestock eat, and themselves? Do they not see? [32:27] In this example, both verses [80:32] and [32:27] are related as they are mentioning the livelihood for people and the cattle. In verse [32:27] , Allah mentions His kindness by providing water to dry land, and herewith bring forth crops and vegetation for people and their cattle to eat. The verse [80:32] is the last verse of a series of verses describing the same meaning of verse [32:27] ; however, they were broken into several small verses. Therefore, the model failed to predict that both verses are related.", |
| "cite_spans": [ |
| { |
| "start": 66, |
| "end": 73, |
| "text": "[80:32]", |
| "ref_id": null |
| }, |
| { |
| "start": 230, |
| "end": 237, |
| "text": "[32:27]", |
| "ref_id": null |
| }, |
| { |
| "start": 267, |
| "end": 286, |
| "text": "[80:32] and [32:27]", |
| "ref_id": null |
| }, |
| { |
| "start": 373, |
| "end": 380, |
| "text": "[32:27]", |
| "ref_id": null |
| }, |
| { |
| "start": 535, |
| "end": 542, |
| "text": "[80:32]", |
| "ref_id": null |
| }, |
| { |
| "start": 620, |
| "end": 627, |
| "text": "[32:27]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "We also found that in QurSim dataset, a very few pairs of verses that are strongly related have less similar words; however, they use different words to achieve the same meaning. Therefore, AraBERT did not predict a few of those verses to be related. Also, there are phrases in classical Arabic that are pertain to Islamic teachings that the model did not predict correctly between related verses. 23:13] , Allah draws the analogy of the sperm or zygote as an insignificant fluid or a disdained liquid to serve the purpose of its weakness in comparison to the power of the Creator. The model did not understand the analogy and the context of these verses which is why the model failed to correctly predict to be related.", |
| "cite_spans": [ |
| { |
| "start": 398, |
| "end": 404, |
| "text": "23:13]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical Synonyms", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "Arise [to pray] the night, except for a little [73:2] Their sides shun their beds, as they pray to their Lord, out of reverence and hope; and from Our provisions to them, they spend. [32:16] As for this example, both verses [73:2] and [32:16] are related as they mention praying during the night; however, the model did not predict they were related. The verse [73:2] mentions \"Arise the night\" as staying up at night, which is correct literal translation; however, the phrase in the classical Arabic religious context entails that a person staying up the night to pray. Therefore, the model did not predict both verses to be related.", |
| "cite_spans": [ |
| { |
| "start": 47, |
| "end": 53, |
| "text": "[73:2]", |
| "ref_id": null |
| }, |
| { |
| "start": 183, |
| "end": 190, |
| "text": "[32:16]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical Synonyms", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "The paper presented experiments on the QurSim dataset using fine-tuned AraBERT model to classify pairs of verses either to be semantically related or not. The paper applied data filtration to the QurSim dataset to avoid redundancy and generate unrelated pairs of verses. Also, the paper used both versions of AraBERT and the experiments suggested that AraBERTv0.2 has better results than AraBERTv2 across all three datasets. The best performance was achieved by AraBERTv0.2 with 92% accuracy score. However, by examining the results, AraBERT could not identify some of the classical Arabic lexical synonyms and religious context which could be a result of the limited coverage of classical Arabic in the AraBERT corpus. Finally, this study has a lot of potential for improvement on both the datasets and fine-tuning the AraBERT model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Deep contextualized pairwise semantic similarity for arabic language questions", |
| "authors": [ |
| { |
| "first": "Hesham", |
| "middle": [], |
| "last": "Al-Bataineh", |
| "suffix": "" |
| }, |
| { |
| "first": "Wael", |
| "middle": [], |
| "last": "Farhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmad", |
| "middle": [], |
| "last": "Mustafa", |
| "suffix": "" |
| }, |
| { |
| "first": "Haitham", |
| "middle": [], |
| "last": "Seelawi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hussein T Al-Natsheh", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "2019 IEEE 31st International Conference on Tools with Artificial Intelligence (ICTAI)", |
| "volume": "", |
| "issue": "", |
| "pages": "1586--1591", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hesham Al-Bataineh, Wael Farhan, Ahmad Mustafa, Haitham Seelawi, and Hussein T Al-Natsheh. 2019. Deep contextualized pairwise semantic similarity for arabic language questions. In 2019 IEEE 31st Inter- national Conference on Tools with Artificial Intelli- gence (ICTAI), pages 1586-1591. IEEE.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "The inception team at NSURL-2019 task 8: Semantic question similarity in Arabic", |
| "authors": [ |
| { |
| "first": "Hana", |
| "middle": [], |
| "last": "Al-Theiabat", |
| "suffix": "" |
| }, |
| { |
| "first": "Aisha", |
| "middle": [], |
| "last": "Al-Sadi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of The First International Workshop on NLP Solutions for Under Resourced Languages (NSURL 2019) co-located with ICNLSP 2019 -Short Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "112--117", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hana Al-Theiabat and Aisha Al-Sadi. 2019. The incep- tion team at NSURL-2019 task 8: Semantic question similarity in Arabic. In Proceedings of The First International Workshop on NLP Solutions for Un- der Resourced Languages (NSURL 2019) co-located with ICNLSP 2019 -Short Papers, pages 112-117, Trento, Italy. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Inji Ibrahim Jaber, and Xiangliang Zhang. 2020. Asad: A twitter-based benchmark arabic sentiment analysis dataset", |
| "authors": [ |
| { |
| "first": "Hind", |
| "middle": [], |
| "last": "Basma Alharbi", |
| "suffix": "" |
| }, |
| { |
| "first": "Manal", |
| "middle": [], |
| "last": "Alamro", |
| "suffix": "" |
| }, |
| { |
| "first": "Zuhair", |
| "middle": [], |
| "last": "Alshehri", |
| "suffix": "" |
| }, |
| { |
| "first": "Manal", |
| "middle": [], |
| "last": "Khayyat", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kalkatawi", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Basma Alharbi, Hind Alamro, Manal Alshehri, Zuhair Khayyat, Manal Kalkatawi, Inji Ibrahim Jaber, and Xiangliang Zhang. 2020. Asad: A twitter-based benchmark arabic sentiment analysis dataset.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Arabert: Transformer-based model for arabic language understanding", |
| "authors": [ |
| { |
| "first": "Wissam", |
| "middle": [], |
| "last": "Antoun", |
| "suffix": "" |
| }, |
| { |
| "first": "Fady", |
| "middle": [], |
| "last": "Baly", |
| "suffix": "" |
| }, |
| { |
| "first": "Hazem", |
| "middle": [], |
| "last": "Hajj", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "LREC 2020 Workshop Language Resources and Evaluation Conference 11-16", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wissam Antoun, Fady Baly, and Hazem Hajj. 2020. Arabert: Transformer-based model for arabic lan- guage understanding. In LREC 2020 Workshop Lan- guage Resources and Evaluation Conference 11-16 May 2020, page 9.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "The MADAR Arabic dialect corpus and lexicon", |
| "authors": [ |
| { |
| "first": "Houda", |
| "middle": [], |
| "last": "Bouamor", |
| "suffix": "" |
| }, |
| { |
| "first": "Nizar", |
| "middle": [], |
| "last": "Habash", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Salameh", |
| "suffix": "" |
| }, |
| { |
| "first": "Wajdi", |
| "middle": [], |
| "last": "Zaghouani", |
| "suffix": "" |
| }, |
| { |
| "first": "Owen", |
| "middle": [], |
| "last": "Rambow", |
| "suffix": "" |
| }, |
| { |
| "first": "Dana", |
| "middle": [], |
| "last": "Abdulrahim", |
| "suffix": "" |
| }, |
| { |
| "first": "Ossama", |
| "middle": [], |
| "last": "Obeid", |
| "suffix": "" |
| }, |
| { |
| "first": "Salam", |
| "middle": [], |
| "last": "Khalifa", |
| "suffix": "" |
| }, |
| { |
| "first": "Fadhl", |
| "middle": [], |
| "last": "Eryani", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Erdmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Kemal", |
| "middle": [], |
| "last": "Oflazer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Houda Bouamor, Nizar Habash, Mohammad Salameh, Wajdi Zaghouani, Owen Rambow, Dana Abdul- rahim, Ossama Obeid, Salam Khalifa, Fadhl Eryani, Alexander Erdmann, and Kemal Oflazer. 2018. The MADAR Arabic dialect corpus and lexicon. In Proceedings of the Eleventh International Confer- ence on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Re- sources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
"title": "1.5 billion words arabic corpus",
| "authors": [ |
| { |
| "first": "I", |
| "middle": [ |
| "A" |
| ], |
| "last": "El-Khair", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "I. A. El-Khair. 2016. 1.5 billion words arabic corpus. ArXiv, abs/1611.04033.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Mada+tokan: A toolkit for arabic tokenization, diacritization, morphological disambiguation, pos tagging, stemming and lemmatization", |
| "authors": [ |
| { |
| "first": "Nizar", |
| "middle": [], |
| "last": "Habash", |
| "suffix": "" |
| }, |
| { |
| "first": "Owen", |
| "middle": [], |
| "last": "Rambow", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Second International Conference on Arabic Language Resources and Tools", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nizar Habash, Owen Rambow, and Ryan Roth. 2009. Mada+tokan: A toolkit for arabic tokenization, dia- critization, morphological disambiguation, pos tag- ging, stemming and lemmatization. In Proceedings of the Second International Conference on Arabic Language Resources and Tools, Cairo, Egypt. The MEDAR Consortium.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A semi-supervised BERT approach for Arabic named entity recognition", |
| "authors": [ |
| { |
| "first": "Chadi", |
| "middle": [], |
| "last": "Helwe", |
| "suffix": "" |
| }, |
| { |
| "first": "Ghassan", |
| "middle": [], |
| "last": "Dib", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohsen", |
| "middle": [], |
| "last": "Shamas", |
| "suffix": "" |
| }, |
| { |
| "first": "Shady", |
| "middle": [], |
| "last": "Elbassuoni", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fifth Arabic Natural Language Processing Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "49--57", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chadi Helwe, Ghassan Dib, Mohsen Shamas, and Shady Elbassuoni. 2020. A semi-supervised BERT approach for Arabic named entity recognition. In Proceedings of the Fifth Arabic Natural Language Processing Workshop, pages 49-57, Barcelona, Spain (Online). Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013a. Efficient estimation of word represen- tations in vector space.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Cor- rado, and Jeffrey Dean. 2013b. Distributed repre- sentations of words and phrases and their composi- tionality.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Albayan: A knowledge-based system for Arabic answer selection", |
| "authors": [ |
| { |
| "first": "Reham", |
| "middle": [], |
| "last": "Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "Maha", |
| "middle": [], |
| "last": "Ragab", |
| "suffix": "" |
| }, |
| { |
| "first": "Heba", |
| "middle": [], |
| "last": "Abdelnasser", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Nagwa", |
| "suffix": "" |
| }, |
| { |
| "first": "Marwan", |
| "middle": [], |
| "last": "El-Makky", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Torki", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 9th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "226--230", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/S15-2040" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reham Mohamed, Maha Ragab, Heba Abdelnasser, Nagwa M. El-Makky, and Marwan Torki. 2015. Al- bayan: A knowledge-based system for Arabic an- swer selection. In Proceedings of the 9th Interna- tional Workshop on Semantic Evaluation (SemEval 2015), pages 226-230, Denver, Colorado. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Unsupervised learning of sentence embeddings using compositional n-gram features", |
| "authors": [ |
| { |
| "first": "Matteo", |
| "middle": [], |
| "last": "Pagliardini", |
| "suffix": "" |
| }, |
| { |
| "first": "Prakhar", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Jaggi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/n18-1049" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matteo Pagliardini, Prakhar Gupta, and Martin Jaggi. 2018. Unsupervised learning of sentence embed- dings using compositional n-gram features. Pro- ceedings of the 2018 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 1 (Long Papers).", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
"title": "tBERT: Topic models and BERT joining forces for semantic similarity detection",
| "authors": [ |
| { |
| "first": "Nicole", |
| "middle": [], |
| "last": "Peinelt", |
| "suffix": "" |
| }, |
| { |
| "first": "Dong", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Liakata", |
| "suffix": "" |
| } |
| ], |
"year": 2020,
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "7047--7055", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.630" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nicole Peinelt, Dong Nguyen, and Maria Liakata. 2020. tBERT: Topic models and BERT joining forces for semantic similarity detection. In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 7047-7055, Online. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "E" |
| ], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Sentencebert: Sentence embeddings using siamese bertnetworks", |
| "authors": [ |
| { |
| "first": "Nils", |
| "middle": [], |
| "last": "Reimers", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- bert: Sentence embeddings using siamese bert- networks.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "A primer in bertology: What we know about how bert works", |
| "authors": [ |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Rogers", |
| "suffix": "" |
| }, |
| { |
| "first": "Olga", |
| "middle": [], |
| "last": "Kovaleva", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Rumshisky", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anna Rogers, Olga Kovaleva, and Anna Rumshisky. 2020. A primer in bertology: What we know about how bert works.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Nsurl-2019 shared task 8: Semantic question similarity in arabic", |
| "authors": [ |
| { |
| "first": "Haitham", |
| "middle": [], |
| "last": "Seelawi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmad", |
| "middle": [], |
| "last": "Mustafa", |
| "suffix": "" |
| }, |
| { |
| "first": "Hesham", |
| "middle": [], |
| "last": "Al-Bataineh", |
| "suffix": "" |
| }, |
| { |
| "first": "Wael", |
| "middle": [], |
| "last": "Farhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hussein", |
| "middle": [ |
| "T" |
| ], |
| "last": "Al-Natsheh", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Haitham Seelawi, Ahmad Mustafa, Hesham Al- Bataineh, Wael Farhan, and Hussein T. Al-Natsheh. 2019a. Nsurl-2019 shared task 8: Semantic question similarity in arabic.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "NSURL-2019 task 8: Semantic question similarity in Arabic", |
| "authors": [ |
| { |
| "first": "Haitham", |
| "middle": [], |
| "last": "Seelawi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmad", |
| "middle": [], |
| "last": "Mustafa", |
| "suffix": "" |
| }, |
| { |
| "first": "Hesham", |
| "middle": [], |
| "last": "Al-Bataineh", |
| "suffix": "" |
| }, |
| { |
| "first": "Wael", |
| "middle": [], |
| "last": "Farhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hussein", |
| "middle": [ |
| "T" |
| ], |
| "last": "Al-Natsheh", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of The First International Workshop on NLP Solutions for Under Resourced Languages (NSURL 2019) co-located with ICNLSP 2019 -Short Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Haitham Seelawi, Ahmad Mustafa, Hesham Al- Bataineh, Wael Farhan, and Hussein T. Al-Natsheh. 2019b. NSURL-2019 task 8: Semantic question similarity in Arabic. In Proceedings of The First International Workshop on NLP Solutions for Un- der Resourced Languages (NSURL 2019) co-located with ICNLSP 2019 -Short Papers, pages 1-8, Trento, Italy. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Qursim: A corpus for evaluation of relatedness in short texts", |
| "authors": [ |
| { |
| "first": "Abdul-Baquee", |
| "middle": [], |
| "last": "Sharaf", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Atwell", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.13140/2.1.4007.0088" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abdul-Baquee Sharaf and Eric Atwell. 2012. Qursim: A corpus for evaluation of relatedness in short texts.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Osian: Open source international arabic news corpus -preparation and integration into the clarininfrastructure", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Zeroual", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Goldhahn", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Eckart", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Lakhouaja", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "WANLP@ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "I. Zeroual, Dirk Goldhahn, T. Eckart, and A. Lakhouaja. 2019. Osian: Open source international arabic news corpus -preparation and integration into the clarin- infrastructure. In WANLP@ACL 2019.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "num": null, |
"text": "Duplicated pairs of verses with same label, where SS stands for source Soura (chapter), SV is the source verse, TS is target Soura and TV is target verse",
| "type_str": "table", |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF2": { |
| "num": null, |
"text": "Duplicated pairs of verses with different label, where SS stands for source Soura (chapter), SV is the source verse, TS is target Soura and TV is target verse",
| "type_str": "table", |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF4": { |
| "num": null, |
| "text": "Results of both versions of AraBERT with three datasets.", |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null |
| } |
| } |
| } |
| } |