| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:13:30.022927Z" |
| }, |
| "title": "Investigating Code-Mixed Modern Standard Arabic-Egyptian to English Machine Translation", |
| "authors": [ |
| { |
| "first": "El", |
| "middle": [], |
| "last": "Moatez", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Natural Language Processing Lab The University of British Columbia", |
| "institution": "", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Billah", |
| "middle": [], |
| "last": "Nagoudi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Natural Language Processing Lab The University of British Columbia", |
| "institution": "", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Abdelrahim", |
| "middle": [], |
| "last": "Elmadany", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Natural Language Processing Lab The University of British Columbia", |
| "institution": "", |
| "location": {} |
| }, |
| "email": "a.elmadany@ubc.ca" |
| }, |
| { |
| "first": "Muhammad", |
| "middle": [], |
| "last": "Abdul-Mageed", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Natural Language Processing Lab The University of British Columbia", |
| "institution": "", |
| "location": {} |
| }, |
| "email": "muhammad.mageed@ubc.ca" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Recent progress in neural machine translation (NMT) has made it possible to translate successfully between monolingual language pairs where large parallel data exist, with pre-trained models improving performance even further. Although there exists work on translating in code-mixed settings (where one of the pairs includes text from two or more languages), it is still unclear what recent success in NMT and language modeling exactly means for translating code-mixed text. We investigate one such context, namely MT from code-mixed Modern Standard Arabic and Egyptian Arabic (MSAEA) into English. We develop models under different conditions, employing both (i) standard end-to-end sequence-to-sequence (S2S) Transformers trained from scratch and (ii) pre-trained S2S language models (LMs). We are able to acquire reasonable performance using only MSA-EN parallel data with S2S models trained from scratch. We also find LMs fine-tuned on data from various Arabic dialects to help the MSAEA-EN task. Our work is in the context of the Shared Task on Machine Translation in Code-Switching. Our best model achieves 25.72 BLEU, placing us first on the official shared task evaluation for MSAEA-EN.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Recent progress in neural machine translation (NMT) has made it possible to translate successfully between monolingual language pairs where large parallel data exist, with pre-trained models improving performance even further. Although there exists work on translating in code-mixed settings (where one of the pairs includes text from two or more languages), it is still unclear what recent success in NMT and language modeling exactly means for translating code-mixed text. We investigate one such context, namely MT from code-mixed Modern Standard Arabic and Egyptian Arabic (MSAEA) into English. We develop models under different conditions, employing both (i) standard end-to-end sequence-to-sequence (S2S) Transformers trained from scratch and (ii) pre-trained S2S language models (LMs). We are able to acquire reasonable performance using only MSA-EN parallel data with S2S models trained from scratch. We also find LMs fine-tuned on data from various Arabic dialects to help the MSAEA-EN task. Our work is in the context of the Shared Task on Machine Translation in Code-Switching. Our best model achieves 25.72 BLEU, placing us first on the official shared task evaluation for MSAEA-EN.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Recent year have witnessed fast progress in various areas of natural language processing (NLP), including machine translation (MT) where neural approaches have helped boost performance when translating between pairs with especially large amounts of parallel data. However, tasks involving a need to process data from different languages mixed together remain challenging for all NLP tasks. This phenomenon of using two or more languages simultaneously in speech or text is referred to as code-mixing (Gumperz, 1982) and is", |
| "cite_spans": [ |
| { |
| "start": 500, |
| "end": 515, |
| "text": "(Gumperz, 1982)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "(1) MSAEA .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "English Human I want hard work, guys.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "GMT I want a rigid job, Jadaan.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "I want a solid job, jadan.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "S2ST", |
| "sec_num": null |
| }, |
| { |
| "text": "(2) MSAEA .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "S2ST", |
| "sec_num": null |
| }, |
| { |
| "text": "The doctors said I can't walk normally again.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human", |
| "sec_num": null |
| }, |
| { |
| "text": "The doctors said that I was not a normal marginal again.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "GMT", |
| "sec_num": null |
| }, |
| { |
| "text": "S2ST Doctors said I wasn't a natural marginality again. Table 1 : Code-mixed Modern Standard Arabic-Egyptian Arabic (MSAEA) sentences with their English human translation, Google machine translation (GMT) 1 , and translation by a sequence-to-sequence Transformer model (S2ST) trained from scratch on 55M MSA-English parallel sentences. Green refers to good translations. Red refers to erroneous translation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 56, |
| "end": 63, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "GMT", |
| "sec_num": null |
| }, |
| { |
| "text": "prevalent in multilingual societies (Sitaram et al., 2019) . Code-mixing is challenging since the space of possibilities when processing mixed data is vast, but also because there is not usually sufficient codemixed resources to train models on. Nor is it clear how much code-mixing existing language models may have seen during pre-training, and so ability of these language models to transfer knowledge to downstream code-mixing tasks remain largely unexplored.", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 58, |
| "text": "(Sitaram et al., 2019)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "GMT", |
| "sec_num": null |
| }, |
| { |
| "text": "In this work, we investigate translation under a code-mixing scenario where sequences at source side are a combination of two varieties of the collection of languages referred to as Arabic. More specifically, we take as our objective translating between Modern Standard Arabic (MSA) mixed with Egyptian Arabic (EA) (source; collectively abbreviated here as MSAEA) into English (target). Table1 shows two examples of MSAEA sentences and their human and machine translations. We highlight problematic translations caused by nixing of Egyptian Arabic with MSA. Through work related to the shared task, we target the following three main research questions:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "GMT", |
| "sec_num": null |
| }, |
| { |
| "text": "1. How do models trained from scratch on purely MSA data fare on the code-mixed MSAEA data (i.e., the zero-shot EA setting)?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "GMT", |
| "sec_num": null |
| }, |
| { |
| "text": "2. How do existing language models perform under the code-mixed condition (i.e., MSAEA)?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "GMT", |
| "sec_num": null |
| }, |
| { |
| "text": "3. What impact, if any, does exploiting dialectal Arabic (DA) data (i.e., from a range of dialects) have on the MSAEA code-mixed MT context?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "GMT", |
| "sec_num": null |
| }, |
| { |
| "text": "Our main contributions in this work lie primarily in answering these three questions. We also develop powerful models for translating from MSAEA to English. The rest of the paper is organized as follows: Section 2 discusses related work. The shared task is described in Section 3. Section 4 describes external parallel data we exploit to build our models. Section 5 presents the proposed MT models. Section 6 presents our experiments, and our different settings. We provide evaluation on Dev data in Section 7 and official results in Section 8. We conclude in Section 9.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "GMT", |
| "sec_num": null |
| }, |
| { |
| "text": "A thread of research on code-mixed MT focuses on automatically generating synthetic code-mixed data to improve the downstream task. This includes attempts to generate linguistically-motivated sequences (Pratapa et al., 2018) . Some work leverages sequence-to-sequence (S2S) models (Winata et al., 2019) to generate code-mixing exploiting an external neural MT system, while others (Garg et al., 2018 ) use a recurrent neural network along with data generated by a sequence generative adversarial network (SeqGAN) and grammatical information such as from a part of speech tagger to generate code-mixed sequences. These methods have dependencies and can be costly to scale beyond one language pair.", |
| "cite_spans": [ |
| { |
| "start": 202, |
| "end": 224, |
| "text": "(Pratapa et al., 2018)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 281, |
| "end": 302, |
| "text": "(Winata et al., 2019)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 381, |
| "end": 399, |
| "text": "(Garg et al., 2018", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Arabic MT. For Arabic, some work has focused on translating between MSA and Arabic dialects. For instance, Zbib et al. (2012) studied the impact of combined dialectal and MSA data on dialect/MSA to English MT performance. Sajjad et al. (2013) uses MSA as a pivot language for translating Arabic dialects into English. Salloum et al. (2014) investigate the effect of sentence-level dialect identification and several linguistic features for MSA/dialect-English translation. Guellil et al. (2017) propose an neural machine translation (NMT) system for Arabic dialects using a vanilla recurrent neural networks (RNN) encoder-decoder model for translating Algerian Arabic written in a mixture of Arabizi and Arabic characters into MSA. Baniata et al. (2018) present an NMT system to translate Levantine (Jordanian, Syrian, and Palestinian) and Maghrebi (Algerian, Moroccan, Tunisia) to MSA, and MSA to English. Farhan et al. (2020) , propose unsupervised dialectal NMT, where the source dialect is not represented in training data. This last problem is referred to as zero-shot MT (Lample et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 107, |
| "end": 125, |
| "text": "Zbib et al. (2012)", |
| "ref_id": null |
| }, |
| { |
| "start": 222, |
| "end": 242, |
| "text": "Sajjad et al. (2013)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 318, |
| "end": 339, |
| "text": "Salloum et al. (2014)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 473, |
| "end": 494, |
| "text": "Guellil et al. (2017)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 907, |
| "end": 927, |
| "text": "Farhan et al. (2020)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1077, |
| "end": 1098, |
| "text": "(Lample et al., 2018)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "DA Arabic MT Resources. There are also efforts to develop dialectal Arabic MT resources. For example, Meftouh et al. (2015) present the Parallel Arabic Dialect Corpus (PADIC), 2 which is a multi-dialect corpus including MSA, Algerian, Tunisian, Palestinian, and Syrian. Recently, Sajjad et al. (2020a) also introduced AraBench, an evaluation suite for dialectal Arabic to English MT. AraBench consists of five publicly available datasets: Arabic-Dialect/English Parallel Text (APT) (Zbib et al., 2012) , Multi-dialectal Parallel Corpus of Arabic (MDC) (Bouamor et al., 2014) , MADAR Corpus (Bouamor et al., 2018) , Qatari-English speech corpus (Elmahdy et al., 2014) , and the English Bible translated into MSA. 3", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 123, |
| "text": "Meftouh et al. (2015)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 280, |
| "end": 301, |
| "text": "Sajjad et al. (2020a)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 482, |
| "end": 501, |
| "text": "(Zbib et al., 2012)", |
| "ref_id": null |
| }, |
| { |
| "start": 552, |
| "end": 574, |
| "text": "(Bouamor et al., 2014)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 590, |
| "end": 612, |
| "text": "(Bouamor et al., 2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 644, |
| "end": 666, |
| "text": "(Elmahdy et al., 2014)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The goal of the shared tasks on machine translation in code-switching settings 4 is to encourage building MT systems that translate a source sentence into a target sentence while one of the directions contains 58 an alternation between two languages (i.e., codeswitching). We note that, in the current paper, we employ the wider term code-mixing. The shared task involves two subtasks: In the current work, we focus on the unsupervised MT subtask only. More specifically, we build models exclusively for MSAEA to English. Our approach exploits external data to train a variety of models. We now describe these external datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Code-Switching Shared Task", |
| "sec_num": "3" |
| }, |
| { |
| "text": "4 Parallel Datasets", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Code-Switching Shared Task", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In order to develop Arabic MT models that can translate efficiently across different text domains, we make use of a large collection of parallel sentences extracted from the Open Parallel Corpus (OPUS) (Tiedemann, 2012) . OPUS contains more than 2.7 billion parallel sentences in 90 languages. To train our models, we extract more than \u223c 61M sentences MSA-English parallel sentences from the whole collection. Since OPUS can have noise and duplicate data, we clean this collection and remove duplicates before we use it. We now describe our quality assurance method for cleaning and deduplication of the data. Data Quality Assurance. To keep only high quality parallel sentences, we follow two steps:", |
| "cite_spans": [ |
| { |
| "start": 202, |
| "end": 219, |
| "text": "(Tiedemann, 2012)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MSA-English Data", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "1. We run a cross-lingual semantic similarity model (Yang et al., 2019) on each pair of sentences, keeping only sentences with a bilingual similarity score between 0.30 and 0.99. This allows us to filter out sentence pairs whose source and target are identical (i.e., similarity score = 1) and those that are not good translations of one another (i.e., those with a cross-lingual semantic similarity score < 0.3).", |
| "cite_spans": [ |
| { |
| "start": 52, |
| "end": 71, |
| "text": "(Yang et al., 2019)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MSA-English Data", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Total 61M Similarity \u2208 [0.3 -0.99] 5.7M N-gram deduplication (>0.75) 55.2M", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MSA-English Data", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "2. Observing some English sentences in the source data, we perform an analysis based on sub-string matching between source and target, using the word trigram sliding window method proposed by Barr\u00f3n-Cede\u00f1o and Rosso (2009) and used in Abdul-Mageed et al. (2021) to de-duplicate the data splits. In other words, we compare each sentence in the source side (i.e., MSA) to the target sentence (i.e., English). We then inspect all pairs of sentences that match higher than a given threshold, considering thresholds between 90% and 30%. We find that a threshold of > 75% safely guarantees completely distinct source and target pairs.", |
| "cite_spans": [ |
| { |
| "start": 192, |
| "end": 222, |
| "text": "Barr\u00f3n-Cede\u00f1o and Rosso (2009)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MSA-English Data", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "More details about the MSA-English OPUS dataset before and after our quality assurance, including deduplication, are provided in ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MSA-English Data", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Several recent works show that MT models trained on one dialect can be used to improve models targeting other dialects (Farhan et al., 2020; Sajjad et al., 2020b Bouamor et al. (2018) translate 10k more sentences for five selected cities: Beirut, Cairo, Doha, Cairo, Tunis, and Rabat. The MADAR dataset also has region-level categorization (i.e., Gulf, Levantine, Nile, and Maghrebi). In our work, we use only the Gulf, Levantine, and Nile (Egyptian) dialects, and exclude Maghrebi. 5 Qatari-English Speech Corpus. This parallel corpus comprises 14.7k Qatari-English sentences collected by Elmahdy et al. (2014) from talk-show programs and Qatari TV series. More details about all our parallel dialectal-English datasets are in Table 3 .", |
| "cite_spans": [ |
| { |
| "start": 119, |
| "end": 140, |
| "text": "(Farhan et al., 2020;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 141, |
| "end": 161, |
| "text": "Sajjad et al., 2020b", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 162, |
| "end": 183, |
| "text": "Bouamor et al. (2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 483, |
| "end": 484, |
| "text": "5", |
| "ref_id": null |
| }, |
| { |
| "start": 590, |
| "end": 611, |
| "text": "Elmahdy et al. (2014)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 728, |
| "end": 735, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dialectal Arabic-English Data", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Data Splits. For our experiments, we split the MSA and DA data as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Splits and Pre-Processing", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "MSA. We randomly pick 10k sentences for validation (MSA-Dev) from MSA parallel data (see Section 4.1) after cleaning, and we use the rest of this data (\u223c 55.14M) for training (MSA-Train). DA. For validation (DA-Dev), we randomly pick 6k sentences from the 38k Egyptian-English data provided by Zbib et al. (2012) . We then use the rest of the data (i.e., \u223c 250.7k) for training (DA-Train). Pre-Processing. Pre-processing is an important step for building any MT model as it can significantly affect end results (Oudah et al., 2019) . For all our models, we only perform light preprocessing in order to retain a faithful representation of the original (naturally occurring) text. We remove diacritics and replace URLs, user mentions, and hashtags with the generic string tokens URL, USER, and HASHTAG respectively. Our second step for pre-processing is specific to each type of models we train as we will explain in the respective sections.", |
| "cite_spans": [ |
| { |
| "start": 294, |
| "end": 312, |
| "text": "Zbib et al. (2012)", |
| "ref_id": null |
| }, |
| { |
| "start": 511, |
| "end": 531, |
| "text": "(Oudah et al., 2019)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Splits and Pre-Processing", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We train our models on the MSA-English parallel data described in section 4.1 on MSA-Train with a Transformer (Vaswani et al., 2017) model as implemented in Fairseq (Ott et al., 2019) . For that, we follow Ott et al. (2018) in using 6 blocks for each of the encoder and decoder parts. We use a learning rate of 0.25, a dropout of 0.3, and a batch size 4, 000 tokens. For the optimizer, we use Adam (Kingma and Ba, 2014) with beta coefficients of 0.9 and 0.99 which control an exponential decay rate of running averages, with a weight decay of 10 \u22124 . We also apply an inverse square-root learning rate scheduler with a value of 5e \u22124 and 4, 000 warmup updates. For the loss function, we use label smoothed cross entropy with a smoothing strength of 0.1. We run the Moses tokenizer (Koehn et al., 2007) on our input before passing data to the model. For vocabulary, we use a joint Byte-Pair Encoding (BPE) (Sennrich et al., 2015) vocabulary with 64K split operations for subword segmentation.", |
| "cite_spans": [ |
| { |
| "start": 110, |
| "end": 132, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": null |
| }, |
| { |
| "start": 165, |
| "end": 183, |
| "text": "(Ott et al., 2019)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 781, |
| "end": 801, |
| "text": "(Koehn et al., 2007)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 905, |
| "end": 928, |
| "text": "(Sennrich et al., 2015)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "From-Scratch Seq2Seq Models", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We also fine-tune two state-of-the-art pre-trained multlingual generative models, mT5 (Xue et al., 2020) and mBART (Liu et al., 2020) on DA-Train for 100 epochs. We use early stopping during finetuning and identify the best model on DA-Dev. We use the HuggingFace (Wolf et al., 2020) implemen-4 60 Source:", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 104, |
| "text": "(Xue et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 115, |
| "end": 133, |
| "text": "(Liu et al., 2020)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 264, |
| "end": 283, |
| "text": "(Wolf et al., 2020)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pre-Trained Seq2Seq Language Models", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": ".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pre-Trained Seq2Seq Language Models", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "we don't know for sure and the girls don't know finn .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "S2ST", |
| "sec_num": null |
| }, |
| { |
| "text": "mT5 we can't make sure and we don't know where the girls are mBART we don't know where to make sure and we don't know where the girls are Source: .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "S2ST", |
| "sec_num": null |
| }, |
| { |
| "text": "i want to know the brothers' official position on harassment of liberals and nejad al-barai, even the thugs, countries that are not followed by the president are using his authority and ordering their immediate arrest.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "S2ST", |
| "sec_num": null |
| }, |
| { |
| "text": "mT5 i want to know the situation of the official brothers from harassment of the silky and najad albarea and if these pants are not their president the president uses his power and order to arrest them immediately mBART i want to know the position of the official brothers from harassment in the army and najad al-bara'y, even if these are not theirs , the president should use his authority and order to arrest them immediately Source: \" user :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "S2ST", |
| "sec_num": null |
| }, |
| { |
| "text": ".\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "S2ST", |
| "sec_num": null |
| }, |
| { |
| "text": "user: there is a need for a lawyer to help the section, jadan sobhi and walid televonas closed .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "S2ST", |
| "sec_num": null |
| }, |
| { |
| "text": "mT5 user: we want a lawyer to go with us to the section , guys , sobhe and waleed their telephones are closed mBART \u00ab user : we want a lawyer to go with us to the section, oh good morning, and their telephones are closed. \u00bb", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "S2ST", |
| "sec_num": null |
| }, |
| { |
| "text": "they hold hearings in places where there are no courts, and what thrives on god's creation will enter without permission, because the accused will not prevent them and judge my absence! mT5 they have sessions in places that are not courts, and god doesn't allow people to enter without a permit, so that when they come and prevent them and rule me absence mBART they hold meetings in places where there is no courts, and god doesn't allow people to enter without a permit, so that when the accused come they stop them and rule them Table 4 : MSA-EA sentences with their English translations using our Models. S2ST: Sequence-to-sequence Transformer model trained from scratch. Data samples are extracted from the shared task Test data. Green refers to good translation. Red refers to problematic translation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 532, |
| "end": 539, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "S2ST", |
| "sec_num": null |
| }, |
| { |
| "text": "tation of each of these models, with the default settings for all hyper-parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "S2ST", |
| "sec_num": null |
| }, |
| { |
| "text": "In this section, we describe the different ways we fine-tune and evaluate our models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and Settings", |
| "sec_num": "6" |
| }, |
| { |
| "text": "First, we use S2ST model trained on MSA-English data exclusively to evaluate MSAEA code-mixed data . While we can refer to this setting as zero-shot, we note that it is not truly zero-shot in the strict sense of the word due to the code-mixed nature of the data (i.e., the data has a mixture of MSA and EA). Hence, we will refer to this setting as zero-shot EA.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Zero-Shot Setting", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Second, we further fine-tune the three models (i.e., S2ST, mT5, and mBART) on the DA data described in Section 4.2. While the downstream shared task data only involves EA mixed with MSA, we follow Farhan et al. (2020) and Sajjad et al. (2020b) in fine-tuning on different dialects when targeting a single downstream dialect (EA in our case). We will simply refer to this second setting as Fine-Tuned DA. 26.07 Table 5 : Results of models on DA-Dev data. S2ST: Sequence-to-sequence Transformer model trained from scratch. We note that in the zero-shot EA setting the S2ST model is trained on 55M bitext sentences.", |
| "cite_spans": [ |
| { |
| "start": 197, |
| "end": 217, |
| "text": "Farhan et al. (2020)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 222, |
| "end": 243, |
| "text": "Sajjad et al. (2020b)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 410, |
| "end": 417, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Fine-Tuning Setting", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "We report results of all our models under different settings in BLEU scores (Papineni et al., 2002) . In addition to evaluation on uncased data, we run a language modeling based truecaser (Lita et al., 2003) on the outputs of our different models. 6 Results presented in Table 5 show that S2ST achieves relatively low scores (between 8.54 and 12.57) on all settings. In comparison, both mBART and mT5 fine-tuned on DA-Train are able to translate MSAEA to English with BLEU scores of 23.80 and 24.70 respectively. We note that truecasing the output results in improving the results with an average of +2.55 BLEU points. 19.79 Table 7 : Results of our models on official Test data. Again, in the zero-shot EA setting the S2ST model is trained on 55M bitext sentences, Discussion. We inspect output translations from our models on Test data. We observe that even though S2ST performs better than the two language models on Test data, both of these models are especially able to translate Egyptian Arabic tokens such as in example (1) in Table 4 well. Again, Test data contain more MSA than DA as we explained earlier and hence the S2ST model (which is trained on 55M sentence pairs) outperforms each of the two language models. This analysis suggests that fine-tuning the language models on more MSA-ENG should result in better performance.", |
| "cite_spans": [ |
| { |
| "start": 76, |
| "end": 99, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 188, |
| "end": 207, |
| "text": "(Lita et al., 2003)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 248, |
| "end": 249, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 271, |
| "end": 278, |
| "text": "Table 5", |
| "ref_id": null |
| }, |
| { |
| "start": 625, |
| "end": 632, |
| "text": "Table 7", |
| "ref_id": "TABREF7" |
| }, |
| { |
| "start": 1034, |
| "end": 1041, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation on Dev Data", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Returning to our three main research questions, we can reach a number of conclusions. For RQ1, we observe that models trained from scratch on purely MSA data fare reasonably well on the codemixed MSAEA data (i.e., zero-shot EA setting). This is due to lexical overlap between MSA and EA. For RQ2, we also note that language models such as mT5 and mBART do well under the codemixed condition, more so than models trained from scratch when inference data involve more EA. This is the case even though these language models in our experiments are fine-tuned with significantly 62 less data (i.e., \u223c 250K pairs) than the from-scratch S2ST models (which are trained on 55M MSA + 250K DA pairs). For RQ3, our results show that training on data from various Arabic dialects helps translation in the MSAEA code-mixed condition. This is in line with previous research (Farhan et al., 2020) showing that exploiting data from various dialects can help downstream translation on a single dialect dialect in the zero-shot setting.", |
| "cite_spans": [ |
| { |
| "start": 859, |
| "end": 880, |
| "text": "(Farhan et al., 2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Official Shared Task (Test) Results", |
| "sec_num": "8" |
| }, |
| { |
| "text": "We described our contribution to the shared tasks on MT in code-switching. 7 Our models target the MSAEA to English task under the unsupervised condition. Our experiments show that training models on MSA data is useful for the MSAEAto-English task in the zero-shot EA setting. We also show the utility of pre-trained language models such as mT5 and mBART on the code-mixing task. Our models place first in the official shared task evaluation. In the future, we intend to apply our methods on other dialects of Arabic and investigate other methods such as backtranslation for improving overall performance.", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 76, |
| "text": "7", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "9" |
| }, |
| { |
| "text": "We use Google Translate API https://cloud. google.com/translate.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://sites.google.com/site/ torjmanepnr/6-corpus 3 The United Bible Societies https://www.bible.com 4 https://code-switching.github.io/ 2021.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We do not make use of the Maghrebi data due to the considerable linguistic differences between Maghrebi and the Egyptian dialect we target in this work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We were not able to report results based on truecasing in this paper, but we note that we will provide these results in the camera ready version of this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We gratefully acknowledge support from the Natural Sciences and Engineering Research Council of Canada, the Social Sciences and Humanities Research Council of Canada, Canadian Foundation for Innovation, Compute Canada (www.computecanada.ca) and UBC ARC-Sockeye (https://doi.org/10.14288/ SOCKEYE) and Penguin Computing POD\u2122 (pod.penguincomputing.com).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "ARBERT & MARBERT: Deep Bidirectional Transformers for Arabic", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2101.01785" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muhammad Abdul-Mageed, AbdelRahim Elmadany, and El Moatez Billah Nagoudi. 2020. ARBERT & MARBERT: Deep Bidirectional Transformers for Arabic. arXiv preprint arXiv:2101.01785.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Mega-cov: A billion-scale dataset of 100+ languages for covid-19", |
| "authors": [ |
| { |
| "first": "Muhammad", |
| "middle": [], |
| "last": "Abdul-Mageed", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdelrahim", |
| "middle": [], |
| "last": "Elmadany", |
| "suffix": "" |
| }, |
| { |
| "first": "Dinesh", |
| "middle": [], |
| "last": "Pabbi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kunal", |
| "middle": [], |
| "last": "Verma", |
| "suffix": "" |
| }, |
| { |
| "first": "Rannie", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume", |
| "volume": "", |
| "issue": "", |
| "pages": "3402--3420", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muhammad Abdul-Mageed, AbdelRahim Elmadany, Dinesh Pabbi, Kunal Verma, Rannie Lin, et al. 2021. Mega-cov: A billion-scale dataset of 100+ languages for covid-19. In Proceedings of the 16th Conference of the European Chapter of the Associ- ation for Computational Linguistics: Main Volume, pages 3402-3420.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A neural machine translation model for arabic dialects that utilizes multitask learning (mtl). Computational intelligence and neuroscience", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Laith", |
| "suffix": "" |
| }, |
| { |
| "first": "Seyoung", |
| "middle": [], |
| "last": "Baniata", |
| "suffix": "" |
| }, |
| { |
| "first": "Seong-Bae", |
| "middle": [], |
| "last": "Park", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Park", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laith H Baniata, Seyoung Park, and Seong-Bae Park. 2018. A neural machine translation model for arabic dialects that utilizes multitask learning (mtl). Com- putational intelligence and neuroscience, 2018.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "On automatic plagiarism detection based on n-grams comparison", |
| "authors": [ |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Barr\u00f3n-Cede\u00f1o", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "European conference on information retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "696--700", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alberto Barr\u00f3n-Cede\u00f1o and Paolo Rosso. 2009. On au- tomatic plagiarism detection based on n-grams com- parison. In European conference on information re- trieval, pages 696-700. Springer.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A multidialectal parallel corpus of arabic", |
| "authors": [ |
| { |
| "first": "Houda", |
| "middle": [], |
| "last": "Bouamor", |
| "suffix": "" |
| }, |
| { |
| "first": "Nizar", |
| "middle": [], |
| "last": "Habash", |
| "suffix": "" |
| }, |
| { |
| "first": "Kemal", |
| "middle": [], |
| "last": "Oflazer", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "1240--1245", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Houda Bouamor, Nizar Habash, and Kemal Oflazer. 2014. A multidialectal parallel corpus of arabic. In LREC, pages 1240-1245.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "The MADAR Arabic dialect corpus and lexicon", |
| "authors": [ |
| { |
| "first": "Houda", |
| "middle": [], |
| "last": "Bouamor", |
| "suffix": "" |
| }, |
| { |
| "first": "Nizar", |
| "middle": [], |
| "last": "Habash", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Salameh", |
| "suffix": "" |
| }, |
| { |
| "first": "Wajdi", |
| "middle": [], |
| "last": "Zaghouani", |
| "suffix": "" |
| }, |
| { |
| "first": "Owen", |
| "middle": [], |
| "last": "Rambow", |
| "suffix": "" |
| }, |
| { |
| "first": "Dana", |
| "middle": [], |
| "last": "Abdulrahim", |
| "suffix": "" |
| }, |
| { |
| "first": "Ossama", |
| "middle": [], |
| "last": "Obeid", |
| "suffix": "" |
| }, |
| { |
| "first": "Salam", |
| "middle": [], |
| "last": "Khalifa", |
| "suffix": "" |
| }, |
| { |
| "first": "Fadhl", |
| "middle": [], |
| "last": "Eryani", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Erdmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Kemal", |
| "middle": [], |
| "last": "Oflazer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Houda Bouamor, Nizar Habash, Mohammad Salameh, Wajdi Zaghouani, Owen Rambow, Dana Abdul- rahim, Ossama Obeid, Salam Khalifa, Fadhl Eryani, Alexander Erdmann, and Kemal Oflazer. 2018. The MADAR Arabic dialect corpus and lexicon. In Proceedings of the Eleventh International Confer- ence on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Re- sources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Development of a tv broadcasts speech recognition system for qatari arabic", |
| "authors": [ |
| { |
| "first": "Mohamed", |
| "middle": [], |
| "last": "Elmahdy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Hasegawa-Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiman", |
| "middle": [], |
| "last": "Mustafawi", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "3057--3061", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohamed Elmahdy, Mark Hasegawa-Johnson, and Eiman Mustafawi. 2014. Development of a tv broad- casts speech recognition system for qatari arabic. In LREC, pages 3057-3061.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Unsupervised dialectal neural machine translation", |
| "authors": [ |
| { |
| "first": "Wael", |
| "middle": [], |
| "last": "Farhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bashar", |
| "middle": [], |
| "last": "Talafha", |
| "suffix": "" |
| }, |
| { |
| "first": "Analle", |
| "middle": [], |
| "last": "Abuammar", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruba", |
| "middle": [], |
| "last": "Jaikat", |
| "suffix": "" |
| }, |
| { |
| "first": "Mahmoud", |
| "middle": [], |
| "last": "Al-Ayyoub", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmad", |
| "middle": [], |
| "last": "Bisher Tarakji", |
| "suffix": "" |
| }, |
| { |
| "first": "Anas", |
| "middle": [], |
| "last": "Toma", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Information Processing & Management", |
| "volume": "57", |
| "issue": "3", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wael Farhan, Bashar Talafha, Analle Abuammar, Ruba Jaikat, Mahmoud Al-Ayyoub, Ahmad Bisher Tarakji, and Anas Toma. 2020. Unsupervised dialec- tal neural machine translation. Information Process- ing & Management, 57(3):102181.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Code-switched language models using dual RNNs and same-source pretraining", |
| "authors": [ |
| { |
| "first": "Saurabh", |
| "middle": [], |
| "last": "Garg", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanmay", |
| "middle": [], |
| "last": "Parekh", |
| "suffix": "" |
| }, |
| { |
| "first": "Preethi", |
| "middle": [], |
| "last": "Jyothi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "3078--3083", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1346" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saurabh Garg, Tanmay Parekh, and Preethi Jyothi. 2018. Code-switched language models using dual RNNs and same-source pretraining. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3078-3083, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Neural vs statistical translation of algerian arabic dialect written with arabizi and arabic letter", |
| "authors": [ |
| { |
| "first": "Imane", |
| "middle": [], |
| "last": "Guellil", |
| "suffix": "" |
| }, |
| { |
| "first": "Faical", |
| "middle": [], |
| "last": "Azouaou", |
| "suffix": "" |
| }, |
| { |
| "first": "Mourad", |
| "middle": [], |
| "last": "Abbas", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "The 31st Pacific Asia Conference on Language, Information and Computation PACLIC", |
| "volume": "31", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Imane Guellil, Faical Azouaou, and Mourad Abbas. 2017. Neural vs statistical translation of algerian arabic dialect written with arabizi and arabic letter. In The 31st Pacific Asia Conference on Language, Information and Computation PACLIC, volume 31, page 2017.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Discourse Strategies. Studies in Interactional Sociolinguistics", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "J" |
| ], |
| "last": "Gumperz", |
| "suffix": "" |
| } |
| ], |
| "year": 1982, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1017/CBO9780511611834" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "John J. Gumperz. 1982. Discourse Strategies. Studies in Interactional Sociolinguistics. Cambridge Univer- sity Press.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.6980" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Open source toolkit for statistical machine translation: Factored translation models and confusion network decoding", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcello", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| }, |
| { |
| "first": "Wade", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicola", |
| "middle": [], |
| "last": "Bertoldi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ondrej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Brooke", |
| "middle": [], |
| "last": "Cowan", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Zens", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Final Report of the Johns Hopkins", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn, Marcello Federico, Wade Shen, Nicola Bertoldi, Ondrej Bojar, Chris Callison-Burch, Brooke Cowan, Chris Dyer, Hieu Hoang, Richard Zens, et al. 2007. Open source toolkit for statisti- cal machine translation: Factored translation models and confusion network decoding. In Final Report of the Johns Hopkins 2006 Summer Workshop.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Phrase-based & neural unsupervised machine translation", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1804.07755" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample, Myle Ott, Alexis Conneau, Lu- dovic Denoyer, and Marc'Aurelio Ranzato. 2018. Phrase-based & neural unsupervised machine trans- lation. arXiv preprint arXiv:1804.07755.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Truecasing", |
| "authors": [ |
| { |
| "first": "Lucian", |
| "middle": [], |
| "last": "Vlad Lita", |
| "suffix": "" |
| }, |
| { |
| "first": "Abe", |
| "middle": [], |
| "last": "Ittycheriah", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Nanda", |
| "middle": [], |
| "last": "Kambhatla", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 41st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "152--159", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lucian Vlad Lita, Abe Ittycheriah, Salim Roukos, and Nanda Kambhatla. 2003. Truecasing. In Proceed- ings of the 41st Annual Meeting of the Association for Computational Linguistics, pages 152-159.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Multilingual denoising pre-training for neural machine translation", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiatao", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Xian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Marjan", |
| "middle": [], |
| "last": "Ghazvininejad", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "8", |
| "issue": "", |
| "pages": "726--742", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, and Luke Zettlemoyer. 2020. Multilingual denoising pre-training for neural machine translation. Transac- tions of the Association for Computational Linguis- tics, 8:726-742.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Machine translation experiments on PADIC: A parallel Arabic DIalect corpus", |
| "authors": [ |
| { |
| "first": "Karima", |
| "middle": [], |
| "last": "Meftouh", |
| "suffix": "" |
| }, |
| { |
| "first": "Salima", |
| "middle": [], |
| "last": "Harrat", |
| "suffix": "" |
| }, |
| { |
| "first": "Salma", |
| "middle": [], |
| "last": "Jamoussi", |
| "suffix": "" |
| }, |
| { |
| "first": "Mourad", |
| "middle": [], |
| "last": "Abbas", |
| "suffix": "" |
| }, |
| { |
| "first": "Kamel", |
| "middle": [], |
| "last": "Smaili", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 29th Pacific Asia Conference on Language, Information and Computation", |
| "volume": "", |
| "issue": "", |
| "pages": "26--34", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karima Meftouh, Salima Harrat, Salma Jamoussi, Mourad Abbas, and Kamel Smaili. 2015. Machine translation experiments on PADIC: A parallel Ara- bic DIalect corpus. In Proceedings of the 29th Pa- cific Asia Conference on Language, Information and Computation, pages 26-34, Shanghai, China.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "fairseq: A fast, extensible toolkit for sequence modeling", |
| "authors": [ |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexei", |
| "middle": [], |
| "last": "Baevski", |
| "suffix": "" |
| }, |
| { |
| "first": "Angela", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1904.01038" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, extensi- ble toolkit for sequence modeling. arXiv preprint arXiv:1904.01038.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Scaling neural machine translation", |
| "authors": [ |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1806.00187" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Myle Ott, Sergey Edunov, David Grangier, and Michael Auli. 2018. Scaling neural machine trans- lation. arXiv preprint arXiv:1806.00187.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "The impact of preprocessing on arabic-english statistical and neural machine translation", |
| "authors": [ |
| { |
| "first": "Mai", |
| "middle": [], |
| "last": "Oudah", |
| "suffix": "" |
| }, |
| { |
| "first": "Amjad", |
| "middle": [], |
| "last": "Almahairi", |
| "suffix": "" |
| }, |
| { |
| "first": "Nizar", |
| "middle": [], |
| "last": "Habash", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.11751" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mai Oudah, Amjad Almahairi, and Nizar Habash. 2019. The impact of preprocessing on arabic-english statis- tical and neural machine translation. arXiv preprint arXiv:1906.11751.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th annual meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th annual meeting of the Association for Compu- tational Linguistics, pages 311-318.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Language modeling for code-mixing: The role of linguistic theory based synthetic data", |
| "authors": [ |
| { |
| "first": "Adithya", |
| "middle": [], |
| "last": "Pratapa", |
| "suffix": "" |
| }, |
| { |
| "first": "Gayatri", |
| "middle": [], |
| "last": "Bhat", |
| "suffix": "" |
| }, |
| { |
| "first": "Monojit", |
| "middle": [], |
| "last": "Choudhury", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunayana", |
| "middle": [], |
| "last": "Sitaram", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandipan", |
| "middle": [], |
| "last": "Dandapat", |
| "suffix": "" |
| }, |
| { |
| "first": "Kalika", |
| "middle": [], |
| "last": "Bali", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1543--1553", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-1143" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adithya Pratapa, Gayatri Bhat, Monojit Choudhury, Sunayana Sitaram, Sandipan Dandapat, and Kalika Bali. 2018. Language modeling for code-mixing: The role of linguistic theory based synthetic data. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 1543-1553, Melbourne, Aus- tralia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Arabench: Benchmarking dialectal arabic-english machine translation", |
| "authors": [ |
| { |
| "first": "Hassan", |
| "middle": [], |
| "last": "Sajjad", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmed", |
| "middle": [], |
| "last": "Abdelali", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadir", |
| "middle": [], |
| "last": "Durrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Fahim", |
| "middle": [], |
| "last": "Dalvi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "5094--5107", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hassan Sajjad, Ahmed Abdelali, Nadir Durrani, and Fahim Dalvi. 2020a. Arabench: Benchmarking di- alectal arabic-english machine translation. In Pro- ceedings of the 28th International Conference on Computational Linguistics, pages 5094-5107.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "AraBench: Benchmarking dialectal Arabic-English machine translation", |
| "authors": [ |
| { |
| "first": "Hassan", |
| "middle": [], |
| "last": "Sajjad", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmed", |
| "middle": [], |
| "last": "Abdelali", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadir", |
| "middle": [], |
| "last": "Durrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Fahim", |
| "middle": [], |
| "last": "Dalvi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "5094--5107", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.coling-main.447" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hassan Sajjad, Ahmed Abdelali, Nadir Durrani, and Fahim Dalvi. 2020b. AraBench: Benchmarking dialectal Arabic-English machine translation. In Proceedings of the 28th International Conference on Computational Linguistics, pages 5094-5107, Barcelona, Spain (Online). International Committee on Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Translating dialectal arabic to english", |
| "authors": [ |
| { |
| "first": "Hassan", |
| "middle": [], |
| "last": "Sajjad", |
| "suffix": "" |
| }, |
| { |
| "first": "Kareem", |
| "middle": [], |
| "last": "Darwish", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "1--6", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hassan Sajjad, Kareem Darwish, and Yonatan Be- linkov. 2013. Translating dialectal arabic to english. In Proceedings of the 51st Annual Meeting of the As- sociation for Computational Linguistics (Volume 2: Short Papers), pages 1-6.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Sentence level dialect identification for machine translation system selection", |
| "authors": [ |
| { |
| "first": "Wael", |
| "middle": [], |
| "last": "Salloum", |
| "suffix": "" |
| }, |
| { |
| "first": "Heba", |
| "middle": [], |
| "last": "Elfardy", |
| "suffix": "" |
| }, |
| { |
| "first": "Linda", |
| "middle": [], |
| "last": "Alamir-Salloum", |
| "suffix": "" |
| }, |
| { |
| "first": "Nizar", |
| "middle": [], |
| "last": "Habash", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "772--778", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wael Salloum, Heba Elfardy, Linda Alamir-Salloum, Nizar Habash, and Mona Diab. 2014. Sentence level dialect identification for machine translation system selection. In Proceedings of the 52nd Annual Meet- ing of the Association for Computational Linguistics (Volume 2: Short Papers), pages 772-778.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1508.07909" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2015. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "A survey of code-switched speech and language processing", |
| "authors": [ |
| { |
| "first": "Sunayana", |
| "middle": [], |
| "last": "Sitaram", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Khyathi Raghavi Chandu", |
| "suffix": "" |
| }, |
| { |
| "first": "Krishna", |
| "middle": [], |
| "last": "Sai", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "W" |
| ], |
| "last": "Rallabandi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Black", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sunayana Sitaram, Khyathi Raghavi Chandu, Sai Kr- ishna Rallabandi, and Alan W. Black. 2019. A sur- vey of code-switched speech and language process- ing. CoRR, abs/1904.00784.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Parallel data, tools and interfaces in opus", |
| "authors": [ |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "2012", |
| "issue": "", |
| "pages": "2214--2218", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J\u00f6rg Tiedemann. 2012. Parallel data, tools and inter- faces in opus. 2012:2214-2218.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Code-switched language models using neural based synthetic data from parallel sentences", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Genta Indra Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Chien-Sheng", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "271--280", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K19-1026" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Genta Indra Winata, Andrea Madotto, Chien-Sheng Wu, and Pascale Fung. 2019. Code-switched lan- guage models using neural based synthetic data from parallel sentences. In Proceedings of the 23rd Con- ference on Computational Natural Language Learn- ing (CoNLL), pages 271-280, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
"title": "Transformers: State-of-the-art natural language processing",
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Funtowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Davison", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Shleifer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "38--45", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Julien Chaumond, Lysandre Debut, Vic- tor Sanh, Clement Delangue, Anthony Moi, Pier- ric Cistac, Morgan Funtowicz, Joe Davison, Sam Shleifer, et al. 2020. Transformers: State-of-the- art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Nat- ural Language Processing: System Demonstrations, pages 38-45.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Multilingual universal sentence encoder for semantic retrieval", |
| "authors": [ |
| { |
| "first": "Yinfei", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Cer", |
| "suffix": "" |
| }, |
| { |
| "first": "Amin", |
| "middle": [], |
| "last": "Ahmad", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandy", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Jax", |
| "middle": [], |
| "last": "Law", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [], |
| "last": "Constant", |
| "suffix": "" |
| }, |
| { |
| "first": "Gustavo", |
| "middle": [], |
| "last": "Hernandez Abrego", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Tar", |
| "suffix": "" |
| }, |
| { |
| "first": "Yun-Hsuan", |
| "middle": [], |
| "last": "Sung", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.04307" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinfei Yang, Daniel Cer, Amin Ahmad, Mandy Guo, Jax Law, Noah Constant, Gustavo Hernan- dez Abrego, Steve Yuan, Chris Tar, Yun-Hsuan Sung, et al. 2019. Multilingual universal sen- tence encoder for semantic retrieval. arXiv preprint arXiv:1907.04307.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Machine translation of arabic dialects", |
| "authors": [], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 conference of the north american chapter of the association for computational linguistics: Human language technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "49--59", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "2012. Machine translation of arabic dialects. In Pro- ceedings of the 2012 conference of the north ameri- can chapter of the association for computational lin- guistics: Human language technologies, pages 49- 59.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "html": null, |
| "type_str": "table", |
| "text": "1. Supervised MT. For supervised MT, gold data are provided to participants for training and evaluating models that take English as input and generate Hinglish sequences.", |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF2": { |
| "html": null, |
| "type_str": "table", |
| "text": "Parallel datasets extracted from OPUS (Tiedemann, 2012). We remove duplicate and identical pairs, keeping only high quality translations.", |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF3": { |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "num": null, |
| "content": "<table><tr><td>59</td></tr></table>" |
| }, |
| "TABREF4": { |
| "html": null, |
| "type_str": "table", |
"text": "Our parallel DA-English datasets. Gulf comprises data from Bouamor et al. (2018), Elmahdy et al. (2014), and Zbib et al. (2012).",
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF5": { |
| "html": null, |
| "type_str": "table", |
| "text": "MADAR Bouamor et al. (2018) is a commissioned dataset where 26 Arabic native speakers were tasked to translate 2k English sentences each into their own native dialect. In addition,", |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF7": { |
| "html": null, |
| "type_str": "table", |
"text": "shows results of all our MT models with different settings on the official shared task Test set. We observe that the Transformer model in the zero shot EA setting (a model that does not see Egyptian Arabic data) was able to translate MSAEA to English with 21.34 BLEU. As expected, fine-tuning all the models on DA-Train improves results across all models and leads to the best BLEU score of 25.72% with the S2ST model. Comparing performance of the S2ST model on Dev and Test data, we observe that Test data results are better. This suggests that Test data comprises more MSA than EA sequences. To test this hypothesis, we run a binary MSA-DA classifier Abdul-Mageed et al. (2020) on both the Dev and Test data to acquire MSA and DA distributions on each",
| "num": null, |
| "content": "<table><tr><td>Dataset</td><td>#Size</td><td>MSA</td><td>DA</td></tr><tr><td>DA-Dev</td><td colspan=\"3\">6, 164 18.36% 81.64%</td></tr><tr><td colspan=\"4\">Official Test 6, 500 72.31% 27.69%</td></tr></table>" |
| }, |
| "TABREF8": { |
| "html": null, |
| "type_str": "table", |
"text": "The data distribution (MSA Vs DA) in the DA-Dev and the official Test set. dataset. Results of this analysis, shown in Table 6, confirm our hypothesis about Test data involving significantly more MSA (i.e., 72.31%) compared to Dev data.",
| "num": null, |
"content": "<table><tr><td>Model</td><td>Setting</td><td>BLEU</td></tr><tr><td/><td>Zero Shot EA</td><td>21.34</td></tr><tr><td>S2ST</td><td>Fine-tuned DA Zero Shot EA (true-cased)</td><td>22.51 23.68</td></tr><tr><td/><td>Fine-tuned DA (true-cased)</td><td>25.72</td></tr><tr><td>mT5</td><td>Fine-tuned DA Fine-tuned DA (true-cased)</td><td>16.41 18.80</td></tr><tr><td>mBART</td><td>Fine-tuned DA Fine-tuned DA (true-cased)</td><td>17.17</td></tr></table>"
| } |
| } |
| } |
| } |