| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T05:59:10.025564Z" |
| }, |
| "title": "DiaLex: A Benchmark for Evaluating Multidialectal Arabic Word Embeddings", |
| "authors": [ |
| { |
| "first": "Muhammad", |
| "middle": [], |
| "last": "Abdul-Mageed", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Natural Language Processing Lab", |
| "institution": "The University of British Columbia", |
| "location": { |
| "settlement": "Vancouver", |
| "country": "Canada" |
| } |
| }, |
| "email": "muhammad.mageed@ubc.ca" |
| }, |
| { |
| "first": "Shady", |
| "middle": [], |
| "last": "Elbassuoni", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "American University of Beirut", |
| "location": { |
| "settlement": "Beirut", |
| "country": "Lebanon" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Jad", |
| "middle": [], |
| "last": "Doughman", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "American University of Beirut", |
| "location": { |
| "settlement": "Beirut", |
| "country": "Lebanon" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Abdelrahim", |
| "middle": [], |
| "last": "Elmadany", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Natural Language Processing Lab", |
| "institution": "The University of British Columbia", |
| "location": { |
| "settlement": "Vancouver", |
| "country": "Canada" |
| } |
| }, |
| "email": "a.elmadany@ubc.ca" |
| }, |
| { |
| "first": "El", |
| "middle": [], |
| "last": "Moatez", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Billah", |
| "middle": [], |
| "last": "Nagoudi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Natural Language Processing Lab", |
| "institution": "The University of British Columbia", |
| "location": { |
| "settlement": "Vancouver", |
| "country": "Canada" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Yorgo", |
| "middle": [], |
| "last": "Zoughby", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "American University of Beirut", |
| "location": { |
| "settlement": "Beirut", |
| "country": "Lebanon" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ahmad", |
| "middle": [], |
| "last": "Shaher", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Natural Language Processing Lab", |
| "institution": "The University of British Columbia", |
| "location": { |
| "settlement": "Vancouver", |
| "country": "Canada" |
| } |
| }, |
| "email": "ahmad-shaher@ubc.ca" |
| }, |
| { |
| "first": "Iskander", |
| "middle": [], |
| "last": "Gaba", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "American University of Beirut", |
| "location": { |
| "settlement": "Beirut", |
| "country": "Lebanon" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ahmed", |
| "middle": [], |
| "last": "Helal", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Concordia University", |
| "location": { |
| "settlement": "Montreal", |
| "country": "Canada" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Mohammed", |
| "middle": [], |
| "last": "El-Razzaz", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "mohammed.elrzzaz@gmail.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Word embeddings are a core component of modern natural language processing systems, making the ability to thoroughly evaluate them a vital task. We describe DiaLex, a benchmark for intrinsic evaluation of dialectal Arabic word embeddings. DiaLex covers five important Arabic dialects: Algerian, Egyptian, Lebanese, Syrian, and Tunisian. Across these dialects, DiaLex provides a testbank for six syntactic and semantic relations, namely male to female, singular to dual, singular to plural, antonym, comparative, and genitive to past tense. DiaLex thus consists of a collection of word pairs representing each of the six relations in each of the five dialects. To demonstrate the utility of DiaLex, we use it to evaluate a set of existing and new Arabic word embeddings that we developed. Beyond evaluation of word embeddings, DiaLex supports efforts to integrate dialects into the Arabic language curriculum. It can be easily translated into Modern Standard Arabic and English, which can be useful for evaluating word translation. Our benchmark, evaluation code, and new word embedding models will be publicly available. 1", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Word embeddings are a core component of modern natural language processing systems, making the ability to thoroughly evaluate them a vital task. We describe DiaLex, a benchmark for intrinsic evaluation of dialectal Arabic word embeddings. DiaLex covers five important Arabic dialects: Algerian, Egyptian, Lebanese, Syrian, and Tunisian. Across these dialects, DiaLex provides a testbank for six syntactic and semantic relations, namely male to female, singular to dual, singular to plural, antonym, comparative, and genitive to past tense. DiaLex thus consists of a collection of word pairs representing each of the six relations in each of the five dialects. To demonstrate the utility of DiaLex, we use it to evaluate a set of existing and new Arabic word embeddings that we developed. Beyond evaluation of word embeddings, DiaLex supports efforts to integrate dialects into the Arabic language curriculum. It can be easily translated into Modern Standard Arabic and English, which can be useful for evaluating word translation. Our benchmark, evaluation code, and new word embedding models will be publicly available. 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Word embeddings are the backbone of modern natural language processing (NLP) systems. They encode semantic and syntactic relations between words by representing them in a low-dimensional space. Many techniques have been proposed to learn such embeddings (Pennington et al., 2014; Mikolov et al., 2013a; Mnih and Kavukcuoglu, 2013) from large text corpora. As of today, a 1 https://github.com/UBC-NLP/dialex. large number of such embeddings are available in many languages including Arabic. Due to their importance, it is vital to be able to evaluate word embeddings, and various methods have been proposed for evaluating them. These methods can be broadly categorized into intrinsic evaluation methods and extrinsic evaluation ones. For extrinsic evaluation, word embeddings are assessed based on performance in downstream applications. For intrinsic evaluation, they are assessed based on how well they capture syntactic and semantic relations between words.", |
| "cite_spans": [ |
| { |
| "start": 254, |
| "end": 279, |
| "text": "(Pennington et al., 2014;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 280, |
| "end": 302, |
| "text": "Mikolov et al., 2013a;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 303, |
| "end": 330, |
| "text": "Mnih and Kavukcuoglu, 2013)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Although there exists a benchmark for evaluating modern standard Arabic (MSA) word embeddings (Elrazzaz et al., 2017) , no such resource that we know of exists for Arabic dialects. This makes it difficult to measure progress on Arabic dialect processing. In this paper, our goal is to facilitate intrinsic evaluation of dialectal Arabic word embeddings. To this end, we build a new benchmark spanning five different Arabic dialects, from Eastern, Middle, and Western Arab World. Namely, our benchmark covers Algerian (ALG), Egyptian (EGY), Lebanese (LEB), Syr-ian (SYR), and Tunisian (TUN). Figure 1 shows a map of the five Arab countries covered by Di-aLex. For each one of these dialects, DiaLex consists of a set of word pairs that are syntactically or semantically related by one of six different relations: Male to Female, Singular to Plural, Singular to Dual, Antonym, Comparative, and Genitive to Past Tense. Overall, DiaLex consists of over 3, 000 word pairs in those five dialects, evenly distributed. To the best of our knowledge, DiaLex is the first benchmark that can be used to assess the quality of Arabic word embeddings in the five dialects it covers.", |
| "cite_spans": [ |
| { |
| "start": 94, |
| "end": 117, |
| "text": "(Elrazzaz et al., 2017)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 591, |
| "end": 599, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To be able to use DiaLex to evaluate Arabic word embeddings, we generate a set of word analogy questions from the word pairs in DiaLex. A word analogy question is generated from two word pairs from a given relation. These questions have recently become the standard in intrinsic evaluation of word embeddings (Mikolov et al., 2013a; Gao et al., 2014; Schnabel et al., 2015) . To demonstrate the usefulness of DiaLex in evaluating Arabic word embeddings, we use it to evaluate a set of existing and new Arabic word embeddings. We conclude that both available and newly-developed word embedding models have moderate-to-serious coverage issues and are not sufficiently representative of the respective dialects under study. In addition to the benchmark of word pairs, our newlydeveloped dialectal Arabic word embeddings will also be publicly available. 2 Beyond evaluation of word embeddings, we envision DiaLex as a basis for creating multidialectal Arabic resources that can facilitate study of the semantics and syntax of Arabic dialects, for example for pedagogical applications (Mubarak et al., 2020) . More broadly, we hope DiaLex will contribute to efforts for integrating dialects in the Arabic language curriculum (Al-Batal, 2017). Di-aLex can also be used to complement a growing interest in contextual word embeddings (Peters et al., 2018) and self-supervised language models (Devlin et al., 2019) , including in Arabic (Antoun et al., 2020; Abdul-Mageed et al., 2020a; Lan et al., 2020a) . Extensions of DiaLex can also be valuable for NLP, for example, DiaLex can be easily translated into MSA, other Arabic dialects, English, or other languages. This extension can enable evaluation of word-level translation sys-tems, including in cross-lingual settings (Aldarmaki et al., 2018; Aldarmaki and Diab, 2019) . Our resources can also be used in comparisons against contextual embeddings (Peters et al., 2018) and embeddings acquired from language models such as BERT (Devlin et al., 2019) . For example, it can be used in evaluation settings with Arabic language models such as AraBert (Antoun et al., 2020) , GigaBERT (Lan et al., 2020b) , and the recently developed ARBERT and MAR-BERT (Abdul-Mageed et al., 2020a) . More generally, our efforts are motivated by the fact that the study of Arabic dialects and computational Arabic dialect processing is a nascent area with several existing gaps to fill (Bouamor et al., 2019; Abbes et al., 2020; Abdul-Mageed et al., 2020b , 2021 , 2020c .", |
| "cite_spans": [ |
| { |
| "start": 309, |
| "end": 332, |
| "text": "(Mikolov et al., 2013a;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 333, |
| "end": 350, |
| "text": "Gao et al., 2014;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 351, |
| "end": 373, |
| "text": "Schnabel et al., 2015)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 850, |
| "end": 851, |
| "text": "2", |
| "ref_id": null |
| }, |
| { |
| "start": 1080, |
| "end": 1102, |
| "text": "(Mubarak et al., 2020)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 1326, |
| "end": 1347, |
| "text": "(Peters et al., 2018)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 1384, |
| "end": 1405, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1428, |
| "end": 1449, |
| "text": "(Antoun et al., 2020;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1450, |
| "end": 1477, |
| "text": "Abdul-Mageed et al., 2020a;", |
| "ref_id": null |
| }, |
| { |
| "start": 1478, |
| "end": 1496, |
| "text": "Lan et al., 2020a)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 1766, |
| "end": 1790, |
| "text": "(Aldarmaki et al., 2018;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1791, |
| "end": 1816, |
| "text": "Aldarmaki and Diab, 2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1895, |
| "end": 1916, |
| "text": "(Peters et al., 2018)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 1975, |
| "end": 1996, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 2094, |
| "end": 2115, |
| "text": "(Antoun et al., 2020)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 2127, |
| "end": 2146, |
| "text": "(Lan et al., 2020b)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 2196, |
| "end": 2224, |
| "text": "(Abdul-Mageed et al., 2020a)", |
| "ref_id": null |
| }, |
| { |
| "start": 2412, |
| "end": 2434, |
| "text": "(Bouamor et al., 2019;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 2435, |
| "end": 2454, |
| "text": "Abbes et al., 2020;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 2455, |
| "end": 2481, |
| "text": "Abdul-Mageed et al., 2020b", |
| "ref_id": null |
| }, |
| { |
| "start": 2482, |
| "end": 2488, |
| "text": ", 2021", |
| "ref_id": null |
| }, |
| { |
| "start": 2489, |
| "end": 2496, |
| "text": ", 2020c", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of the paper is organized as follows. In Section 2, we describe how DiaLex was constructed. Section 3 offers our methodical generation of a testbank for evaluating word embeddings. In Section 4, we provide a case study for evaluating various word embedding models, some of which are newly developed by us. Section 5 is about related work. Section 6 is where we conclude and present future directions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "DiaLex consists of a set of word pairs in five different Arabic dialects and for six different semantic and syntactic relations, namely Male to Female, Singular to Plural, Singular to Dual, Antonym, Comparative, and Genitive to Past Tense. We chose only this set of relations as they are standard in previous literature. In addition, they are comprehensive enough to reflect dialect specificity. A good word embeddings model should thus have close representation of word pairs for each of these relations in the embeddings space.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Benchmark Construction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For each dialect, word pairs were manually generated by at least one native speaker of the dialect. Each person independently came up with the word pairs representing a given relation based on their knowledge of the dialect and while trying to include words that are typically representative and unique in that dialect. That is, to the best of our ability, the words were chosen so that they are frequently-used words in the dialect and are not the same as in MSA. One challenge we faced when generating the word pairs was orthographic variation. For example, consider the antonym of the word (\"behind\") in the Egyptian dialect. It can be written as or (\"in front of\") . We decided to include all variations of the same word in the benchmark. A second challenge was the need to present the relationship using more than one word. For instance, consider Algerian and the relationship dual, the word (\"pair\") is sometimes used to describe two items of something. So, for instance, for the word (\"hour\"), the dual can be either (\"pair of hours\") or (\"two hours\"). Again, we opted for including both variants in the benchmark. Overall, for each dialect and each relation, around 100 word pairs were generated. Table 1 shows the statistics of DiaLex's word pairs lists and Table 2 shows some example word pairs in DiaLex and their English and MSA translations. Overall, DiaLex consists of a total of 3, 070 word pairs distributed evenly among the dialects and the relations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1205, |
| "end": 1212, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 1267, |
| "end": 1274, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Benchmark Construction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Given the word pair lists in DiaLex, we generate a testbank consisting of 260,827 tuples. Each tuple consists of two word pairs (a, b) and (c, d) from the same relation and the same dialect. For each of our five dialects and for each of our six relations, we generate a tuple by combining two different word pairs from the same relation in the same dialect. Once tuples have been generated, they can be used as word analogy questions to evaluate different word embeddings as defined by Mikolov et al. (Mikolov et al., 2013a) . A word analogy question for a tuple consisting of two word pairs (a, b) and (c, d) can be formulated as follows: \"a to b is like c to ?\". Each such question will then be answered by calculating a target vector t = b \u2212 a + c.", |
| "cite_spans": [ |
| { |
| "start": 486, |
| "end": 524, |
| "text": "Mikolov et al. (Mikolov et al., 2013a)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Testbank for Evaluating Word Embeddings and Evaluation Metric", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We then calculate the cosine similarity between the target vector t and the vector representation of each word w in a given word embeddings V . Finally, we retrieve the most similar word w to t, i.e.,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Testbank for Evaluating Word Embeddings and Evaluation Metric", |
| "sec_num": "3" |
| }, |
| { |
| "text": "argmax", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Testbank for Evaluating Word Embeddings and Evaluation Metric", |
| "sec_num": "3" |
| }, |
| { |
| "text": "w\u2208V &w / \u2208{a,b,c} w\u2022t", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Testbank for Evaluating Word Embeddings and Evaluation Metric", |
| "sec_num": "3" |
| }, |
| { |
| "text": "||w||||t|| . If w = d (i.e., the same word) then we assume that the word embeddings V has answered the question correctly.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Testbank for Evaluating Word Embeddings and Evaluation Metric", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Moreover, we extend the traditional word analogy task by taking into consideration if the correct answer is among the top K, with K \u2208 {5, 10}, closest words in the embedding space to the target vector t, which allows us to more leniently evaluate the embeddings. This is particularly important in the case of Arabic since many forms of the same word exist, usually with additional prefixes or suffixes such as the equivalent of the article \"the\" or possessive determiners such as \"her\", \"his\", or \"their\". For example, consider one question which asks to is like to \" ?\", i.e., \"man\" to \"woman\" is like \"prince\" to \"?\", with the answer being \" \" or \"princess\". Now, if we rely only on the top-1 word and it happens to be \" \" which means \"for the princess\" in English, the question would be considered to be answered wrongly. To relax this, and ensure that different forms of the same word will not result in a mismatch, we use the top-5 and top-10 words for evaluation rather than just the top-1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Testbank for Evaluating Word Embeddings and Evaluation Metric", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Note that we consider a question to be answered wrongly if at least one of the words in the question are not present in the word embeddings. That is, we take into consideration the coverage of the embeddings as well (Gao et al., 2014) .", |
| "cite_spans": [ |
| { |
| "start": 216, |
| "end": 234, |
| "text": "(Gao et al., 2014)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Testbank for Evaluating Word Embeddings and Evaluation Metric", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Finally, we report the number of questions that were answered correctly over the total numbers of questions available. That is, assume the number of questions for a given dialect and a given relation is n, and assume that a given embeddings model M correctly answered m out of those n questions as explained above. Then, the accuracy of the model M will be m n .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Testbank for Evaluating Word Embeddings and Evaluation Metric", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this section, we demonstrate how DiaLex can be used to evaluate word embeddings across the different dialects it covers. Particularly, we evaluate two large word embeddings models based on Word2Vec (Mikolov et al., 2013b) released by Zahran et al. (2015) . One model is based on skip grams (Zah SG) and the other is a continuous bag-of-words (Zah CBOW). Both of these models have a vocabulary size of 626, 3435 words. We also create four CBOW Word2Vec models, all of which have 300 word vector embedding dimensions, as we describe next.", |
| "cite_spans": [ |
| { |
| "start": 201, |
| "end": 224, |
| "text": "(Mikolov et al., 2013b)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 237, |
| "end": 257, |
| "text": "Zahran et al. (2015)", |
| "ref_id": "BIBREF43" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation of Arabic Word Embeddings Using DiaLex", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The following are our four newly-developed dialectal Arabic word embedding models: Twitter-1B. Our first model was trained using a one billion in-house Arabic tweet collection. All tweets were crawled by putting a bounding box crawler around the whole Arab world. Since this collection is large, we only performed light preprocessing on it. This involved removing hashtags, URLs, and reducing consecutive repetitions of the same character into only 2. We then trained a CBOW Word2Vec model using the Python library gensim. We set the minimum word frequency at 100 and a window size of 5 words. This model has a vocabulary size of 929, 803 words. Twitter-250K-MC50. This model uses the same data as Twitter-1B, yet with stricter preprocessing. Namely, we normalize usually orthographically confused Arabic characters by converting Alif maksura to Ya, reducing all hamzated Alif to plain Alif, and removing all non-Arabic characters. We then only keep tweets with length \u2265 5 words. This gives us a total of 223, 387, 189 tweets (and hence the name Twitter-250K). We then use the same parameters as the 1B model, but we set the minimum count to 50 words (again, hence the name Twitter-250K-MC50 where MC50 meaning minimum count of 50). We acquire a model with a vocabulary size of 536, 846 words. Twitter-250K-MC100. This model is identical with the Twitter-250K-MC50 model, but uses a minimum count of 100 words when training Word2Vec. This model has a vocabulary size of 202, 690 words. Twitter-Seeds. We use all unigram entries in our benchmark to crawl Twitter, using the search API. In order to avoid overfitting to our benchmark, we randomly sample only 400 words from it for this process. This gives us about \u223c 3M tweets. We then crawl up to 3, 200 tweets from the timelines of all \u223c 300K users who have posted the initially collected 3M tweets. The resulting collection is at 214, 161, 138 tweets after strict cleaning and removal of all tweets of length < 5 words. To train an embedding model on this dataset, we use the same Word2Vec parameters as with Twitter-250K-MC50 (i.e., with a minimum count of 50 words). This model has a vocabulary size of 773, 311 words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Newly-Developed Word Embedding Models", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We evaluate the two models from Zahran et al.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "and our four models described above using DiaLex. Table 4 shows per-relation results evaluation of the Twitter-250K-MC100 model. We show allrelations results only for this model for space limi-tations, and we choose this model since it tends to perform well compared to other models. As Table 4 shows, the model works best on the LEB dialect (17.87 accuracy for top-10) and worst on TUN (12.14 accuracy for top-10). The table also shows that the sing-plural relationship is the one most challenging for the model. Clearly, the dual feature either involves (1) bigrams (in which case these unigram models do not work and hence the \"None\" values) or (2) different ways of expressing the same meaning of duality. For example, in the LEB pair -\"corridor-two corridors\", duality can be expressed also by the phrase , using the digit \"2\" instead of the dual suffix in . Overall, these results suggest that even our developed models are neither sufficiently powerful nor large enough (even with large vocabularies close to 1M words) to cover all the dialects. We also observe that models need to see enough contexts (words > 50 minimum count) to scale well. This calls for the development of more robust models with wider coverage and more frequent contexts. Our Twitter collection of 5M words can be used towards that goal, but we opted for not exploiting it for building a word embeddings model since this would be considered overfitting to our benchmark.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 50, |
| "end": 57, |
| "text": "Table 4", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The number of available Arabic word embeddings is increasing rapidly. Some of these are strictly trained using textual corpora written in MSA, while others were trained using dialectal data. We review the most popular embedding models we are aware of here.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Arabic Word Embeddings", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Zahran et al. (2015) built three models for Arabic word embeddings (CBOW, SKIP-G, and GloVe). To train these models, they used a large collection of MSA texts totaling \u223c 5.8B words. The sources used include Arabic Wikipedia, Arabic Gigaword (Parker et al., 2009) , Open Source Arabic Corpora (OSAC) (Saad and Ashour, 2010), OPUS (Tiedemann, 2012) , MultiUN (Chen and Eisele, 2012) , and a few others. Soliman et al. 2017proposed AraVec a set of Arabic word embedding models. It consist of six word embedding models built on top of three different Arabic content domains; Wikipedia Arabic, World Wide Web pages, and Tweets with more than 3.3 billion word. Both CBOW and SKIP-Gram architecture are investigated in this work. Abdul-Mageed et al. (2018) build an SKIP-G model using \u223c 234M tweets, with vector dimensions at 300. The authors, however, do not exploit their model in downstream tasks. Abu Farha and Magdy (2019) built two word embeddings models (CBOW and SKIP-Gram) exploiting 250M tweets. The authors used the models in the context of training the sentiment analysis system Mazajak. The dimensions of each embedding vector in the Mazajak models are at 300.", |
| "cite_spans": [ |
| { |
| "start": 241, |
| "end": 262, |
| "text": "(Parker et al., 2009)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 329, |
| "end": 346, |
| "text": "(Tiedemann, 2012)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 357, |
| "end": 380, |
| "text": "(Chen and Eisele, 2012)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 723, |
| "end": 749, |
| "text": "Abdul-Mageed et al. (2018)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Arabic Word Embeddings", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "More recently, El-Haj (2020) developed Habibi, a Multi Dialect Multi-National Arabic Song Lyrics Corpus which comprises more than 30, 000 Arabic song lyrics from 18 Arab countries and six Arabic dialects for singers. Habibi contains 500, 000 sentences (song verses) with more than 3.5 million words. Moreover, the authors provided a 300 dimension CBOW word embeddings of the corpus. Doughman et al. (2020) built a set of word embeddings learnt from three large Lebanese news archives, which collectively consist of 609, 386 scanned newspaper images and spanning a total of 151 years, ranging from 1933 till 2011. To train the word embeddings, Optical Character Recognition (OCR) was employed to transcribe the scanned news archives, and various archive-level as well as decade-level word embeddings were trained. In addition, models were also built using a mixture of Arabic and English data. For example, Lachraf et al. 2019presented AraEngVec an Arabic-English cross-lingual word embedding models. To train their bilingual models, they used a large dataset with more than 93 million pairs of Arabic-English parallel sentences (with more than 1.8 billion words) mainly extracted from the Open Parallel Corpus Project (OPUS) (Tiedemann, 2012) . In order to train the models, they have chosen CBOW and SKIP-Gram as an architecture. Indeed, they propose three methods for pre-processing the opus dataset: parallel sentences, word-level alignment and random shuffling. Both extrinsic and intrinsic evaluations for the different AraEngVec model variants. The extrinsic evaluation assesses the performance of models on the Arabic-English Cross-Language Semantic Textual Similarity (CL-STS) task (Nagoudi et al., 2018) , while the intrinsic evaluation is based on the Word Translation (WT) task.", |
| "cite_spans": [ |
| { |
| "start": 383, |
| "end": 405, |
| "text": "Doughman et al. (2020)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1225, |
| "end": 1242, |
| "text": "(Tiedemann, 2012)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 1690, |
| "end": 1712, |
| "text": "(Nagoudi et al., 2018)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Arabic Word Embeddings", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Some works have also investigated the utility of using morphological knowledge to enhance word embeddings. For example, Erdmann and Habash (2018) demonstrated that out-of-context rule-based knowledge of morphological structure can complement what word embeddings can learn about morphology from words' in-context behaviors. They quantified the value of leveraging subword information when learning embeddings and the further value of noise reduction techniques targeting the sparsity caused by complex morphology such as in the Arabic language case.", |
| "cite_spans": [ |
| { |
| "start": 120, |
| "end": 145, |
| "text": "Erdmann and Habash (2018)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Arabic Word Embeddings", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "El-Kishky et al. 2019tackled the problem of root extraction from words in the Semitic language family. They proposed a constrained sequenceto-sequence root extraction method. Furthermore, they demonstrated how one can leverage the root information alongside a simple slot-based morphological decomposition to improve upon word embedding representations as evaluated through word similarity, word analogy, and language modeling tasks. In this paper, we have demonstrated the effectiveness of our benchmark DiaLex in the evaluation of a chosen set of these available word embedding models and compared them to newlydeveloped ones by us as we explain in the next section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Arabic Word Embeddings", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "There is a wealth of research on evaluating unsupervised word embeddings, which can be broadly divided into intrinsic and extrinsic evaluations. Intrinsic evaluations mostly rely on word analogy questions and measure the similarity of words in the low-dimensional embedding space (Mikolov et al., 2013a; Gao et al., 2014; Schnabel et al., 2015) . Extrinsic evaluations assess the quality of the embeddings as features in models for other tasks, such as semantic role labeling and part-ofspeech tagging (Collobert et al., 2011) , or nounphrase chunking and sentiment analysis (Schnabel et al., 2015) . However, all of these tasks and benchmarks are built for English and thus cannot be used to assess the quality of Arabic word embeddings, which is the main focus here.", |
| "cite_spans": [ |
| { |
| "start": 280, |
| "end": 303, |
| "text": "(Mikolov et al., 2013a;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 304, |
| "end": 321, |
| "text": "Gao et al., 2014;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 322, |
| "end": 344, |
| "text": "Schnabel et al., 2015)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 502, |
| "end": 526, |
| "text": "(Collobert et al., 2011)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 575, |
| "end": 598, |
| "text": "(Schnabel et al., 2015)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embeddings Evaluation", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "To the best of our knowledge, only a handful of recent studies attempted evaluating Arabic word embeddings. Zahran et al. (2015) translated the English benchmark in (Mikolov et al., 2013a) and used it to evaluate different embedding techniques when applied on a large Arabic corpus. However, as the authors themselves point out, translating an English benchmark is not the best strategy to evaluate Arabic embeddings. Zahran et al. (2015) also consider extrinsic evaluation on two NLP tasks, namely query expansion for Information Retrieval and short answer grading. Dahou et al. (2016) used the analogy questions from Zahran et al. (2015) after correcting some Arabic spelling mistakes resulting from the translation and after adding new analogy questions to make up for the inadequacy of the English questions for the Arabic language. They also performed an extrinsic evaluation using sentiment analysis. Finally, Al-Rfou et al. (2013) generated word embeddings for 100 different languages, including Arabic, and evaluated the embeddings using part-of-speech tagging, however the evaluation was done only for a handful of European languages. Elrazzaz et al. (2017) built a benchmark in MSA that can be utilized to perform intrinsic evaluation of different word embeddings using word analogy questions. They then used the constructed benchmark to evaluate various Arabic word embeddings. They also performed extrinsic evaluation of these word embeddings using two NLP tasks, namely Document Classification and Named Entity Recognition. Salama et al. (2018) investigated enhancing Arabic word embeddings by incorporating morphological annotations to the embeddings model. They tuned the generated word vectors to their lemma forms using linear compositionality to generate lemma-based embeddings. To assess the effectiveness of their model, they used the benchmark built by Elrazzaz et al. (2017) . 
Taylor and Brychc\u00edn (2018b) demonstrated several ways to use morphological Arabic word analogies to examine the representation of complex words in semantic vector spaces. They presented a set of morphological relations, each of which can be used to generate many word analogies.", |
| "cite_spans": [ |
| { |
| "start": 165, |
| "end": 188, |
| "text": "(Mikolov et al., 2013a)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 567, |
| "end": 586, |
| "text": "Dahou et al. (2016)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1144, |
| "end": 1166, |
| "text": "Elrazzaz et al. (2017)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 1537, |
| "end": 1557, |
| "text": "Salama et al. (2018)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 1874, |
| "end": 1896, |
| "text": "Elrazzaz et al. (2017)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 1899, |
| "end": 1926, |
| "text": "Taylor and Brychc\u00edn (2018b)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embeddings Evaluation", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "El Bazi and Laachfoubi (2017) investigated the effect of stemming on Arabic word representations. They applied various stemmers on different word representations approaches, and conducted an extrinsic evaluation to assess the quality of these word vectors by evaluating their impact on the Named Entity Recognition task for Arabic. Taylor and Brychc\u00edn (2018a) provided a corpus of Arabic analogies focused on the morphological constructs which can participate in verb, noun and prepositional phrases. They conducted an examination of ten different semantic spaces to see which of them is most appropriate for this set of analogies, and they illustrated the use of the corpus to examine phrase-building. Barhoumi et al. (2020) proposed intrinsic and extrinsic methods to evaluate word embeddings for the specific task of Arabic sentiment analysis. For intrinsic evaluation, they proposed a new method that assesses what they define as the \"sentiment stability\" in the embedding space. For extrinsic evaluation, they relied on the performance of the word embeddings to be evaluated for the task of sentiment analysis. They also trained various word embeddings using different types of corpora (polar and non-polar) and evaluated them using their proposed methods. To the best of our knowledge, our proposed benchmark is the first benchmark developed in various Arabic dialects and that can be used to perform intrinsic evaluation of Arabic word embedding with respect to those dialects.", |
| "cite_spans": [ |
| { |
| "start": 3, |
| "end": 29, |
| "text": "Bazi and Laachfoubi (2017)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 703, |
| "end": 725, |
| "text": "Barhoumi et al. (2020)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embeddings Evaluation", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We described DiaLex, a benchmark for evaluating dialectal Arabic word embeddings. DiaLex comes in five major Arabic dialects, namely Algerian, Egyptian, Lebanese, Syrian, and Tunisian. Across these dialects, DiaLex offers a testbank of word pairs for six syntactic and semantic relations, namely male to female, singular to dual, singular to plural, antonym, comparative, and genitive to past tense. To demonstrate the utility of Di-aLex, we used it to evaluate a set of available and newly-developed Arabic word embedding models. Our evaluations are intended to showcase the utility of our new benchmark. DiaLex as well as the newly-developed word embeddings will be publicly available.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "DiaLex can be used to support integration of dialects in the Arabic language curriculum, and for the study of the syntax and semantics of Arabic dialects. It can also complement evaluations of contextual word embeddings. In the future, we plan to use DiaLex to for more extensive evaluation of all publicly-available Arabic word embedding models. We will also train a dialectal Arabic word embeddings model with a larger dataset and evaluate it using DiaLex. Finally, we will translate DiaLex into MSA and English to facilitate use of the resource for evaluating word translation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Our benchmark, evaluation code, and new word embedding models will be available at: https://github. com/UBC-NLP/dialex.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Daict: A dialectal Arabic irony corpus extracted from twitter", |
| "authors": [ |
| { |
| "first": "Ines", |
| "middle": [], |
| "last": "Abbes", |
| "suffix": "" |
| }, |
| { |
| "first": "Wajdi", |
| "middle": [], |
| "last": "Zaghouani", |
| "suffix": "" |
| }, |
| { |
| "first": "Omaima", |
| "middle": [], |
| "last": "El-Hardlo", |
| "suffix": "" |
| }, |
| { |
| "first": "Faten", |
| "middle": [], |
| "last": "Ashour", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "6265--6271", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ines Abbes, Wajdi Zaghouani, Omaima El-Hardlo, and Faten Ashour. 2020. Daict: A dialectal Arabic irony corpus extracted from twitter. In Proceedings of The 12th Language Resources and Evaluation Confer- ence, pages 6265-6271.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "You tweet what you speak: A city-level dataset of arabic dialects", |
| "authors": [ |
| { |
| "first": "Muhammad", |
| "middle": [], |
| "last": "Abdul-Mageed", |
| "suffix": "" |
| }, |
| { |
| "first": "Hassan", |
| "middle": [], |
| "last": "Alhuzali", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohamed", |
| "middle": [], |
| "last": "Elaraby", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "3653--3659", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muhammad Abdul-Mageed, Hassan Alhuzali, and Mo- hamed Elaraby. 2018. You tweet what you speak: A city-level dataset of arabic dialects. In LREC, pages 3653-3659.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "2020a. ARBERT & MARBERT: Deep bidirectional transformers for Arabic", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2101.01785" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muhammad Abdul-Mageed, AbdelRahim Elmadany, and El Moatez Billah Nagoudi. 2020a. ARBERT & MARBERT: Deep bidirectional transformers for Arabic. arXiv preprint arXiv:2101.01785.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Houda Bouamor, and Nizar Habash. 2020b. Nadi 2020: The first nuanced arabic dialect identification shared task", |
| "authors": [ |
| { |
| "first": "Muhammad", |
| "middle": [], |
| "last": "Abdul-Mageed", |
| "suffix": "" |
| }, |
| { |
| "first": "Chiyu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Proceedings of the Fifth Arabic Natural Language Processing Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "97--110", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muhammad Abdul-Mageed, Chiyu Zhang, Houda Bouamor, and Nizar Habash. 2020b. Nadi 2020: The first nuanced arabic dialect identification shared task. In Proceedings of the Fifth Arabic Natural Language Processing Workshop, pages 97-110.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Abdel-Rahim Elmadany, Houda Bouamor, and Nizar Habash. 2021. NADI 2021: The Second Nuanced Arabic Dialect Identification Shared Task", |
| "authors": [ |
| { |
| "first": "Muhammad", |
| "middle": [], |
| "last": "Abdul-Mageed", |
| "suffix": "" |
| }, |
| { |
| "first": "Chiyu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Proceedings of the Sixth Arabic Natural Language Processing Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muhammad Abdul-Mageed, Chiyu Zhang, Abdel- Rahim Elmadany, Houda Bouamor, and Nizar Habash. 2021. NADI 2021: The Second Nuanced Arabic Dialect Identification Shared Task. In Pro- ceedings of the Sixth Arabic Natural Language Pro- cessing Workshop (WANLP 2021).", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Microdialect identification in diaglossic and codeswitched environments", |
| "authors": [ |
| { |
| "first": "Muhammad", |
| "middle": [], |
| "last": "Abdul-Mageed", |
| "suffix": "" |
| }, |
| { |
| "first": "Chiyu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdel-Rahim", |
| "middle": [], |
| "last": "Elmadany", |
| "suffix": "" |
| }, |
| { |
| "first": "Lyle", |
| "middle": [], |
| "last": "Ungar", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "5855--5876", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muhammad Abdul-Mageed, Chiyu Zhang, Abdel- Rahim Elmadany, and Lyle Ungar. 2020c. Micro- dialect identification in diaglossic and code- switched environments. In Proceedings of the 2020 Conference on Empirical Methods in Natural Lan- guage Processing (EMNLP), pages 5855-5876.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Mazajak: An online Arabic sentiment analyser", |
| "authors": [ |
| { |
| "first": "Ibrahim", |
| "middle": [], |
| "last": "Abu Farha", |
| "suffix": "" |
| }, |
| { |
| "first": "Walid", |
| "middle": [], |
| "last": "Magdy", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Arabic Natural Language Processing Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "192--198", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-4621" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ibrahim Abu Farha and Walid Magdy. 2019. Mazajak: An online Arabic sentiment analyser. In Proceed- ings of the Fourth Arabic Natural Language Pro- cessing Workshop, pages 192-198, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Arabic as one language: Integrating dialect in the Arabic language curriculum", |
| "authors": [ |
| { |
| "first": "Mahmoud", |
| "middle": [], |
| "last": "Al-Batal", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mahmoud Al-Batal. 2017. Arabic as one language: In- tegrating dialect in the Arabic language curriculum. Georgetown University Press.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Polyglot: Distributed word representations for multilingual nlp", |
| "authors": [ |
| { |
| "first": "Rami", |
| "middle": [], |
| "last": "Al-Rfou", |
| "suffix": "" |
| }, |
| { |
| "first": "Bryan", |
| "middle": [], |
| "last": "Perozzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Skiena", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1307.1662" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rami Al-Rfou, Bryan Perozzi, and Steven Skiena. 2013. Polyglot: Distributed word represen- tations for multilingual nlp. arXiv preprint arXiv:1307.1662.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Contextaware cross-lingual mapping", |
| "authors": [ |
| { |
| "first": "Hanan", |
| "middle": [], |
| "last": "Aldarmaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1903.03243" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hanan Aldarmaki and Mona Diab. 2019. Context- aware cross-lingual mapping. arXiv preprint arXiv:1903.03243.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Unsupervised word mapping using structural similarities in monolingual embeddings", |
| "authors": [ |
| { |
| "first": "Hanan", |
| "middle": [], |
| "last": "Aldarmaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Mahesh", |
| "middle": [], |
| "last": "Mohan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "6", |
| "issue": "", |
| "pages": "185--196", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hanan Aldarmaki, Mahesh Mohan, and Mona Diab. 2018. Unsupervised word mapping using structural similarities in monolingual embeddings. Transac- tions of the Association for Computational Linguis- tics, 6:185-196.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Arabert: Transformer-based model for Arabic language understanding", |
| "authors": [ |
| { |
| "first": "Wissam", |
| "middle": [], |
| "last": "Antoun", |
| "suffix": "" |
| }, |
| { |
| "first": "Fady", |
| "middle": [], |
| "last": "Baly", |
| "suffix": "" |
| }, |
| { |
| "first": "Hazem", |
| "middle": [], |
| "last": "Hajj", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2003.00104" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wissam Antoun, Fady Baly, and Hazem Hajj. 2020. Arabert: Transformer-based model for Arabic language understanding. arXiv preprint arXiv:2003.00104.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Toward qualitative evaluation of embeddings for arabic sentiment analysis", |
| "authors": [ |
| { |
| "first": "Amira", |
| "middle": [], |
| "last": "Barhoumi", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathalie", |
| "middle": [], |
| "last": "Camelin", |
| "suffix": "" |
| }, |
| { |
| "first": "Chafik", |
| "middle": [], |
| "last": "Aloulou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yannick", |
| "middle": [], |
| "last": "Est\u00e8ve", |
| "suffix": "" |
| }, |
| { |
| "first": "Lamia Hadrich", |
| "middle": [], |
| "last": "Belguith", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "4955--4963", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amira Barhoumi, Nathalie Camelin, Chafik Aloulou, Yannick Est\u00e8ve, and Lamia Hadrich Belguith. 2020. Toward qualitative evaluation of embeddings for arabic sentiment analysis. In Proceedings of The 12th Language Resources and Evaluation Confer- ence, pages 4955-4963.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "The madar shared task on arabic finegrained dialect identification", |
| "authors": [ |
| { |
| "first": "Houda", |
| "middle": [], |
| "last": "Bouamor", |
| "suffix": "" |
| }, |
| { |
| "first": "Sabit", |
| "middle": [], |
| "last": "Hassan", |
| "suffix": "" |
| }, |
| { |
| "first": "Nizar", |
| "middle": [], |
| "last": "Habash", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Arabic Natural Language Processing Workshop (WANLP19)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Houda Bouamor, Sabit Hassan, and Nizar Habash. 2019. The madar shared task on arabic fine- grained dialect identification. In Proceedings of the Fourth Arabic Natural Language Processing Work- shop (WANLP19), Florence, Italy.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Multiun v2: Un documents with multilingual alignments", |
| "authors": [ |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Eisele", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "2500--2504", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu Chen and Andreas Eisele. 2012. Multiun v2: Un documents with multilingual alignments. In LREC, pages 2500-2504.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Natural language processing (almost) from scratch", |
| "authors": [ |
| { |
| "first": "Ronan", |
| "middle": [], |
| "last": "Collobert", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "L\u00e9on", |
| "middle": [], |
| "last": "Bottou", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Karlen", |
| "suffix": "" |
| }, |
| { |
| "first": "Koray", |
| "middle": [], |
| "last": "Kavukcuoglu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavel", |
| "middle": [], |
| "last": "Kuksa", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2493--2537", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel Kuksa. 2011. Natural language processing (almost) from scratch. The Journal of Machine Learning Re- search, 12:2493-2537.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Word embeddings and convolutional neural network for arabic sentiment classification", |
| "authors": [ |
| { |
| "first": "Abdelghani", |
| "middle": [], |
| "last": "Dahou", |
| "suffix": "" |
| }, |
| { |
| "first": "Shengwu", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Junwei", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2418--2427", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abdelghani Dahou, Shengwu Xiong, Junwei Zhou, Mohamed Houcine Haddoud, and Pengfei Duan. 2016. Word embeddings and convolutional neural network for arabic sentiment classification. In Inter- national Conference on Computational Linguistics, pages 2418-2427.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Time-aware word embeddings for three lebanese news archives", |
| "authors": [ |
| { |
| "first": "Jad", |
| "middle": [], |
| "last": "Doughman", |
| "suffix": "" |
| }, |
| { |
| "first": "Fatima", |
| "middle": [ |
| "Abu" |
| ], |
| "last": "Salem", |
| "suffix": "" |
| }, |
| { |
| "first": "Shady", |
| "middle": [], |
| "last": "Elbassuoni", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "4717--4725", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jad Doughman, Fatima Abu Salem, and Shady El- bassuoni. 2020. Time-aware word embeddings for three lebanese news archives. In Proceedings of The 12th Language Resources and Evaluation Confer- ence, pages 4717-4725.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Is stemming beneficial for learning better arabic word representations?", |
| "authors": [ |
| { |
| "first": "Ismail", |
| "middle": [ |
| "El" |
| ], |
| "last": "Bazi", |
| "suffix": "" |
| }, |
| { |
| "first": "Nabil", |
| "middle": [], |
| "last": "Laachfoubi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "First International Conference on Real Time Intelligent Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "508--517", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ismail El Bazi and Nabil Laachfoubi. 2017. Is stem- ming beneficial for learning better arabic word rep- resentations? In First International Conference on Real Time Intelligent Systems, pages 508-517. Springer.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Habibi -a multi dialect multi national Arabic song lyrics corpus", |
| "authors": [ |
| { |
| "first": "Mahmoud", |
| "middle": [], |
| "last": "El-Haj", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "1318--1326", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mahmoud El-Haj. 2020. Habibi -a multi dialect multi national Arabic song lyrics corpus. In Proceedings of The 12th Language Resources and Evaluation Conference, pages 1318-1326, Marseille, France. European Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Constrained sequence-to-sequence semitic root extraction for enriching word embeddings", |
| "authors": [ |
| { |
| "first": "Ahmed", |
| "middle": [], |
| "last": "El-Kishky", |
| "suffix": "" |
| }, |
| { |
| "first": "Xingyu", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "" |
| }, |
| { |
| "first": "Aseel", |
| "middle": [], |
| "last": "Addawood", |
| "suffix": "" |
| }, |
| { |
| "first": "Nahil", |
| "middle": [], |
| "last": "Sobh", |
| "suffix": "" |
| }, |
| { |
| "first": "Clare", |
| "middle": [], |
| "last": "Voss", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiawei", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Arabic Natural Language Processing Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "88--96", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ahmed El-Kishky, Xingyu Fu, Aseel Addawood, Nahil Sobh, Clare Voss, and Jiawei Han. 2019. Con- strained sequence-to-sequence semitic root extrac- tion for enriching word embeddings. In Proceedings of the Fourth Arabic Natural Language Processing Workshop, pages 88-96.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Methodical evaluation of arabic word embeddings", |
| "authors": [ |
| { |
| "first": "Mohammed", |
| "middle": [], |
| "last": "Elrazzaz", |
| "suffix": "" |
| }, |
| { |
| "first": "Shady", |
| "middle": [], |
| "last": "Elbassuoni", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "454--458", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammed Elrazzaz, Shady Elbassuoni, Khaled Sha- ban, and Chadi Helwe. 2017. Methodical evaluation of arabic word embeddings. In Proceedings of the 55th Annual Meeting of the Association for Compu- tational Linguistics (Volume 2: Short Papers), pages 454-458.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Complementary strategies for low resourced morphological modeling", |
| "authors": [ |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Erdmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Nizar", |
| "middle": [], |
| "last": "Habash", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Fifteenth Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
| "volume": "", |
| "issue": "", |
| "pages": "54--65", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander Erdmann and Nizar Habash. 2018. Comple- mentary strategies for low resourced morphological modeling. In Proceedings of the Fifteenth Workshop on Computational Research in Phonetics, Phonol- ogy, and Morphology, pages 54-65.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Wordrep: A benchmark for research on learning word representations", |
| "authors": [ |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiang", |
| "middle": [], |
| "last": "Bian", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1407.1640" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bin Gao, Jiang Bian, and Tie-Yan Liu. 2014. Wordrep: A benchmark for research on learning word repre- sentations. arXiv preprint arXiv:1407.1640.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "ArbEngVec : Arabic-English cross-lingual word embedding model", |
| "authors": [], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Arabic Natural Language Processing Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "40--48", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-4605" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raki Lachraf, El Moatez Billah Nagoudi, Youcef Ay- achi, Ahmed Abdelali, and Didier Schwab. 2019. ArbEngVec : Arabic-English cross-lingual word embedding model. In Proceedings of the Fourth Arabic Natural Language Processing Workshop, pages 40-48, Florence, Italy. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "An empirical study of pre-trained transformers for arabic information extraction", |
| "authors": [ |
| { |
| "first": "Wuwei", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4727--4734", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wuwei Lan, Yang Chen, Wei Xu, and Alan Ritter. 2020a. An empirical study of pre-trained transform- ers for arabic information extraction. In Proceed- ings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 4727-4734.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Gigabert: Zero-shot transfer learning from english to arabic", |
| "authors": [ |
| { |
| "first": "Wuwei", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of The 2020 Conference on Empirical Methods on Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wuwei Lan, Yang Chen, Wei Xu, and Alan Ritter. 2020b. Gigabert: Zero-shot transfer learning from english to arabic. In Proceedings of The 2020 Con- ference on Empirical Methods on Natural Language Processing (EMNLP).", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1301.3781" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jef- frey Dean. 2013a. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013b. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in neural information processing systems, pages 3111-3119.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Learning word embeddings efficiently with noise-contrastive estimation", |
| "authors": [ |
| { |
| "first": "Andriy", |
| "middle": [], |
| "last": "Mnih", |
| "suffix": "" |
| }, |
| { |
| "first": "Koray", |
| "middle": [], |
| "last": "Kavukcuoglu", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "2265--2273", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andriy Mnih and Koray Kavukcuoglu. 2013. Learning word embeddings efficiently with noise-contrastive estimation. In Advances in Neural Information Pro- cessing Systems, pages 2265-2273.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Arabic curriculum analysis", |
| "authors": [ |
| { |
| "first": "Hamdy", |
| "middle": [], |
| "last": "Mubarak", |
| "suffix": "" |
| }, |
| { |
| "first": "Shimaa", |
| "middle": [], |
| "last": "Amer", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmed", |
| "middle": [], |
| "last": "Abdelali", |
| "suffix": "" |
| }, |
| { |
| "first": "Kareem", |
| "middle": [], |
| "last": "Darwish", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "80--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hamdy Mubarak, Shimaa Amer, Ahmed Abdelali, and Kareem Darwish. 2020. Arabic curriculum analysis. In Proceedings of the 28th International Conference on Computational Linguistics: System Demonstra- tions, pages 80-86.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Word embedding-based approaches for measuring semantic similarity of arabic-english sentences", |
| "authors": [ |
| { |
| "first": "Billah", |
| "middle": [], |
| "last": "El Moatez", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00e9r\u00e9my", |
| "middle": [], |
| "last": "Nagoudi", |
| "suffix": "" |
| }, |
| { |
| "first": "Didier", |
| "middle": [], |
| "last": "Ferrero", |
| "suffix": "" |
| }, |
| { |
| "first": "Hadda", |
| "middle": [], |
| "last": "Schwab", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Cherroun", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Arabic Language Processing: From Theory to Practice", |
| "volume": "", |
| "issue": "", |
| "pages": "19--33", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "El Moatez Billah Nagoudi, J\u00e9r\u00e9my Ferrero, Di- dier Schwab, and Hadda Cherroun. 2018. Word embedding-based approaches for measuring seman- tic similarity of arabic-english sentences. In Ara- bic Language Processing: From Theory to Practice, pages 19-33, Cham. Springer International Publish- ing.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Arabic gigaword", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Parker", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Graff", |
| "suffix": "" |
| }, |
| { |
| "first": "Ke", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Junbo", |
| "middle": [], |
| "last": "Kong", |
| "suffix": "" |
| }, |
| { |
| "first": "Kazuaki", |
| "middle": [], |
| "last": "Maeda", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert Parker, David Graff, Ke Chen, Junbo Kong, and Kazuaki Maeda. 2009. Arabic gigaword.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Glove: Global Vectors for Word Representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global Vectors for Word Representation. In Proceedings of the 2014 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, Doha, Qatar. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), volume 1, pages 2227-2237.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Osac: Open source arabic corpora", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Motaz", |
| "suffix": "" |
| }, |
| { |
| "first": "Wesam", |
| "middle": [], |
| "last": "Saad", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ashour", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "6th ArchEng Int. Symposiums", |
| "volume": "10", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Motaz K Saad and Wesam Ashour. 2010. Osac: Open source arabic corpora. In 6th ArchEng Int. Sympo- siums, EEECS, volume 10.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Morphological word embedding for arabic", |
| "authors": [ |
| { |
| "first": "Rana", |
| "middle": [ |
| "Aref" |
| ], |
| "last": "Salama", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdou", |
| "middle": [], |
| "last": "Youssef", |
| "suffix": "" |
| }, |
| { |
| "first": "Aly", |
| "middle": [], |
| "last": "Fahmy", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Procedia computer science", |
| "volume": "142", |
| "issue": "", |
| "pages": "83--93", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rana Aref Salama, Abdou Youssef, and Aly Fahmy. 2018. Morphological word embedding for arabic. Procedia computer science, 142:83-93.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Evaluation methods for unsupervised word embeddings", |
| "authors": [ |
| { |
| "first": "Tobias", |
| "middle": [], |
| "last": "Schnabel", |
| "suffix": "" |
| }, |
| { |
| "first": "Igor", |
| "middle": [], |
| "last": "Labutov", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "Thorsten", |
| "middle": [], |
| "last": "Mimno", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Joachims", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "298--307", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tobias Schnabel, Igor Labutov, David M Mimno, and Thorsten Joachims. 2015. Evaluation methods for unsupervised word embeddings. In EMNLP, pages 298-307.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Aravec: A set of arabic word embedding models for use in arabic nlp", |
| "authors": [ |
| { |
| "first": "Kareem", |
| "middle": [], |
| "last": "Abu Bakr Soliman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Eissa", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Samhaa R El-Beltagy", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Procedia Computer Science", |
| "volume": "117", |
| "issue": "", |
| "pages": "256--265", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abu Bakr Soliman, Kareem Eissa, and Samhaa R El- Beltagy. 2017. Aravec: A set of arabic word embed- ding models for use in arabic nlp. Procedia Com- puter Science, 117:256-265.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Arabic word analogies and semantics of simple phrases", |
| "authors": [ |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Taylor", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom\u00e1\u0161", |
| "middle": [], |
| "last": "Brychc\u00edn", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "2018 2nd International Conference on Natural Language and Speech Processing (ICNLSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--6", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen Taylor and Tom\u00e1\u0161 Brychc\u00edn. 2018a. Arabic word analogies and semantics of simple phrases. In 2018 2nd International Conference on Natural Lan- guage and Speech Processing (ICNLSP), pages 1-6. IEEE.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "The representation of some phrases in arabic word semantic vector spaces", |
| "authors": [ |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Taylor", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom\u00e1\u0161", |
| "middle": [], |
| "last": "Brychc\u00edn", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Open Computer Science", |
| "volume": "8", |
| "issue": "1", |
| "pages": "182--193", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen Taylor and Tom\u00e1\u0161 Brychc\u00edn. 2018b. The rep- resentation of some phrases in arabic word semantic vector spaces. Open Computer Science, 8(1):182- 193.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Parallel data, tools and interfaces in opus", |
| "authors": [ |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "LREC", |
| "volume": "2012", |
| "issue": "", |
| "pages": "2214--2218", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J\u00f6rg Tiedemann. 2012. Parallel data, tools and inter- faces in opus. In LREC, volume 2012, pages 2214- 2218.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Word representations in vector space and their applications for arabic", |
| "authors": [ |
| { |
| "first": "Ahmed", |
| "middle": [], |
| "last": "Mohamed A Zahran", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Magooda", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Ashraf", |
| "suffix": "" |
| }, |
| { |
| "first": "Hazem", |
| "middle": [], |
| "last": "Mahgoub", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohsen", |
| "middle": [], |
| "last": "Raafat", |
| "suffix": "" |
| }, |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Rashwan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Atyia", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Conference on Intelligent Text Processing and Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "430--443", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohamed A Zahran, Ahmed Magooda, Ashraf Y Mah- goub, Hazem Raafat, Mohsen Rashwan, and Amir Atyia. 2015. Word representations in vector space and their applications for arabic. In International Conference on Intelligent Text Processing and Com- putational Linguistics, pages 430-443. Springer.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "text": "A map of the five Arab countries covered by DiaLex. The five countries cover different regions in the Arab world: two in the western region (Algeria and Tunisia), one in the middle (Egypt), and two in the eastern region (Lebanon and Syria).", |
| "uris": null |
| }, |
| "TABREF1": { |
| "content": "<table/>", |
| "text": "Statistics of DiaLex across different relations. (Genitive-pt= genitive-past tense).", |
| "html": null, |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "text": "", |
| "html": null, |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF5": { |
| "content": "<table><tr><td>our benchmark across the five dialects. In most cases, our</td></tr><tr><td>Twitter-250K-MC100 (Ours-MC-100) achieves the best per-</td></tr><tr><td>formance.</td></tr></table>", |
| "text": "Evaluation of six word embedding models using", |
| "html": null, |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF6": { |
| "content": "<table><tr><td colspan=\"5\">Dialect Relation K=1 K=5 K=10</td></tr><tr><td/><td colspan=\"4\">sing-dual None None None</td></tr><tr><td/><td>sing-pl</td><td colspan=\"3\">17.39 38.51 47.20</td></tr><tr><td/><td>gen-pt</td><td>0.00</td><td>2.52</td><td>2.52</td></tr><tr><td>ALG</td><td colspan=\"2\">antonym 0.83</td><td>5.19</td><td>7.91</td></tr><tr><td/><td>comp</td><td colspan=\"3\">None None None</td></tr><tr><td/><td colspan=\"4\">male-fem 10.00 35.00 35.00</td></tr><tr><td/><td colspan=\"2\">total acc 3.87</td><td colspan=\"2\">11.33 15.03</td></tr><tr><td/><td colspan=\"4\">sing-dual 13.89 27.78 36.11</td></tr><tr><td/><td>sing-pl</td><td colspan=\"3\">27.62 54.29 60.95</td></tr><tr><td/><td>gen-pt</td><td>3.40</td><td>8.85</td><td>13.55</td></tr><tr><td>EGY</td><td colspan=\"2\">antonym 3.02</td><td>9.51</td><td>13.71</td></tr><tr><td/><td>comp</td><td colspan=\"3\">None None None</td></tr><tr><td/><td colspan=\"2\">male-fem 5.00</td><td colspan=\"2\">15.00 20.00</td></tr><tr><td/><td colspan=\"2\">total acc 4.33</td><td colspan=\"2\">11.49 15.97</td></tr><tr><td/><td colspan=\"2\">sing-dual 0.00</td><td>0.00</td><td>0.00</td></tr><tr><td/><td>sing-pl</td><td>7.00</td><td colspan=\"2\">30.00 43.00</td></tr><tr><td/><td>gen-pt</td><td>0.46</td><td>3.72</td><td>4.80</td></tr><tr><td>SYR</td><td colspan=\"2\">antonym 0.40</td><td>2.81</td><td>4.81</td></tr><tr><td/><td>comp</td><td>3.83</td><td colspan=\"2\">10.71 14.49</td></tr><tr><td/><td colspan=\"2\">male-fem 6.89</td><td colspan=\"2\">16.70 23.17</td></tr><tr><td/><td colspan=\"2\">total acc 3.14</td><td>9.39</td><td>12.97</td></tr><tr><td/><td colspan=\"2\">sing-dual 0.00</td><td>2.04</td><td>6.12</td></tr><tr><td/><td>sing-pl</td><td colspan=\"3\">20.99 41.67 50.31</td></tr><tr><td/><td>gen-pt</td><td>1.66</td><td>4.92</td><td>8.36</td></tr><tr><td>LEB</td><td colspan=\"2\">antonym 0.43</td><td>5.18</td><td>9.31</td></tr><tr><td/><td>comp</td><td colspan=\"3\">13.14 26.72 
33.62</td></tr><tr><td/><td colspan=\"2\">male-fem 5.56</td><td colspan=\"2\">13.89 22.22</td></tr><tr><td/><td colspan=\"2\">total acc 5.29</td><td colspan=\"2\">12.90 17.87</td></tr><tr><td/><td colspan=\"2\">sing-dual 0.00</td><td>0.00</td><td>0.00</td></tr><tr><td/><td>sing-pl</td><td colspan=\"3\">11.44 23.20 30.39</td></tr><tr><td/><td>gen-pt</td><td>0.00</td><td>4.94</td><td>11.11</td></tr><tr><td>TUN</td><td colspan=\"2\">antonym 0.41</td><td>4.00</td><td>7.67</td></tr><tr><td/><td>comp</td><td colspan=\"3\">None None None</td></tr><tr><td/><td colspan=\"2\">male-fem 0.00</td><td>0.00</td><td>0.00</td></tr><tr><td/><td colspan=\"2\">total acc 2.48</td><td>7.68</td><td>12.14</td></tr></table>", |
| "text": "", |
| "html": null, |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF7": { |
| "content": "<table><tr><td>: Evaluation across all relations for our Twitter-MC-</td></tr><tr><td>100 model. Values shown as \"None\" are for relationships</td></tr><tr><td>where the model did not include any of the word pairs in</td></tr><tr><td>the question tuples in the model vocabulary. Zeros mean the</td></tr><tr><td>model includes the words in its vocabulary but no correct an-</td></tr><tr><td>swers were returned in the top-K.</td></tr><tr><td>sults across the different word relationships. For</td></tr><tr><td>top-1 performance, one or another of our mod-</td></tr><tr><td>els scores best. For top-5 results, our Twitter-</td></tr><tr><td>250K-MC100 (Ours-MC-100) acquires best per-</td></tr><tr><td>formance for most dialects. Exceptions are EGY</td></tr><tr><td>and TUN dialects. Ours-MC-100 also performs</td></tr><tr><td>best on all but TUN dialect. These results show the</td></tr><tr><td>necessity of developing dialectal resources for the</td></tr><tr><td>various varieties and that a model trained on large</td></tr><tr><td>MSA data such as that of Zahran et al. (2015) is</td></tr><tr><td>quite sub-optimal.</td></tr></table>", |
| "text": "", |
| "html": null, |
| "type_str": "table", |
| "num": null |
| } |
| } |
| } |
| } |