| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:13:47.512775Z" |
| }, |
| "title": "Can You Traducir This? Machine Translation for Code-Switched Input", |
| "authors": [ |
| { |
| "first": "Jitao", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "LISN", |
| "location": { |
| "settlement": "Orsay", |
| "country": "France" |
| } |
| }, |
| "email": "jitao.xu@limsi.fr" |
| }, |
| { |
| "first": "Fran\u00e7ois", |
| "middle": [], |
| "last": "Yvon", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "-Saclay, & CNRS, LISN", |
| "institution": "Univ. Paris", |
| "location": { |
| "settlement": "Orsay", |
| "country": "France" |
| } |
| }, |
| "email": "francois.yvon@limsi.fr" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Code-Switching (CSW) is a common phenomenon that occurs in multilingual geographic or social contexts, which raises challenging problems for natural language processing tools. We focus here on Machine Translation (MT) of CSW texts, where we aim to simultaneously disentangle and translate the two mixed languages. Due to the lack of actual translated CSW data, we generate artificial training data from regular parallel texts. Experiments show this training strategy yields MT systems that surpass multilingual systems for code-switched texts. These results are confirmed in an alternative task aimed at providing contextual translations for a L2 writing assistant.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Code-Switching (CSW) is a common phenomenon that occurs in multilingual geographic or social contexts, which raises challenging problems for natural language processing tools. We focus here on Machine Translation (MT) of CSW texts, where we aim to simultaneously disentangle and translate the two mixed languages. Due to the lack of actual translated CSW data, we generate artificial training data from regular parallel texts. Experiments show this training strategy yields MT systems that surpass multilingual systems for code-switched texts. These results are confirmed in an alternative task aimed at providing contextual translations for a L2 writing assistant.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Code-Switching (CSW) denotes the alternation of two languages within a single utterance (Poplack, 1980; Sitaram et al., 2019) . It is a common communicative phenomenon that occurs in multilingual communities during spoken and written interactions. CSW is a well studied phenomenon in linguistic circles and has given rise to a number of theories regarding the structure of mixed language fragments (Poplack, 1978; Pfaff, 1979; Poplack, 1980; Belazi et al., 1994; Myers-Scotton, 1997) . The Matrix Language Frame (MLF) theory (Myers-Scotton, 1997) defines the concept of matrix and embedded languages where the matrix language is the main language that the sentence structure should conform to and notably provides the syntactic morphemes, while the influence of the embedded language is lesser and is mostly manifested in the insertion of content morphemes.", |
| "cite_spans": [ |
| { |
| "start": 88, |
| "end": 103, |
| "text": "(Poplack, 1980;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 104, |
| "end": 125, |
| "text": "Sitaram et al., 2019)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 398, |
| "end": 413, |
| "text": "(Poplack, 1978;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 414, |
| "end": 426, |
| "text": "Pfaff, 1979;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 427, |
| "end": 441, |
| "text": "Poplack, 1980;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 442, |
| "end": 462, |
| "text": "Belazi et al., 1994;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 463, |
| "end": 483, |
| "text": "Myers-Scotton, 1997)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 525, |
| "end": 546, |
| "text": "(Myers-Scotton, 1997)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rise of social media and user-generated content has made written instances of code-switched language more visible. It is estimated that as much as 17% of Indian Facebook posts (Bali et al., 2014) and 3.5% of all tweets (Rijhwani et al., 2017) are code-switched. This phenomenon is also becoming more pervasive in short text messages, chats, blogs, and the like (Samih et al., 2016) . Code-switching however remains understudied in natural language processing (NLP) , and most work to date has focused on token-level language identification (LID) (Samih et al., 2016 ) and on language models for Automatic Speech Recognition (Winata et al., 2019) . More tasks are being considered lately, such as Named Entity Recognition (Aguilar et al., 2018) , Part-of-Speech tagging (Ball and Garrette, 2018) or Sentiment Analysis (Patwa et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 180, |
| "end": 199, |
| "text": "(Bali et al., 2014)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 223, |
| "end": 246, |
| "text": "(Rijhwani et al., 2017)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 365, |
| "end": 385, |
| "text": "(Samih et al., 2016)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 550, |
| "end": 569, |
| "text": "(Samih et al., 2016", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 616, |
| "end": 649, |
| "text": "Recognition (Winata et al., 2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 725, |
| "end": 747, |
| "text": "(Aguilar et al., 2018)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 773, |
| "end": 798, |
| "text": "(Ball and Garrette, 2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 821, |
| "end": 841, |
| "text": "(Patwa et al., 2020)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We focus here on another task for CSW texts: Machine Translation (MT). The advent of Neural Machine Translation (NMT) technologies (Bahdanau et al., 2015; Vaswani et al., 2017) has made it possible to design multilingual models capable of translating from multiple source languages into multiple target languages (Firat et al., 2016; Johnson et al., 2017) , where however both the input and output are monolingual. We study here the ability of such architectures to translate fragments freely mixing a \"matrix\" and an \"embedded\" language into monolingual utterances.", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 154, |
| "text": "(Bahdanau et al., 2015;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 155, |
| "end": 176, |
| "text": "Vaswani et al., 2017)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 313, |
| "end": 333, |
| "text": "(Firat et al., 2016;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 334, |
| "end": 355, |
| "text": "Johnson et al., 2017)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our main contribution is to show that for the two pairs of languages considered (French-English and Spanish-English): (a) translation of CSW texts is almost as good as the translation of monolingual texts -a performance that bilingual systems are unable to match; (b) such results can be obtained by training solely with artificial data; (c) CSW translation systems achieve a near deterministic ability to recopy in the output target words found in the input, suggesting that they are endowed with some language identification abilities. Using these models, we are also able to obtain competitive results on the SemEval 2014 Task 5: L2 Writing Assistant, which we see as one potential application area of CSW translation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2 Building translation systems for code-switched data", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Parallel corpora with natural CSW data are very scarce (Menacer et al., 2019) and, similar to Song et al. (2019a), we generate artificial CSW parallel sentences from regular translation data. We first compute word alignments between parallel sentences using fast align 1 (Dyer et al., 2013) . We then extract so-called minimal alignment units following the approach of Crego et al. (2005) : these correspond to small bilingual phrase pairs (e, f ) extracted from (symmetrized) word alignments such that all alignment links outgoing from words in e reach a word in f , and vice-versa.", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 77, |
| "text": "(Menacer et al., 2019)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 271, |
| "end": 290, |
| "text": "(Dyer et al., 2013)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 369, |
| "end": 388, |
| "text": "Crego et al. (2005)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Code-switched data generation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "For each pair of parallel sentence, we first randomly select the matrix language; 2 then the number of replacements r to appear in a derived CSW sentence with an exponential distribution as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Code-switched data generation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "P (r = k) = 1 2 k+1 \u2200k = 1, . . . , rep (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Code-switched data generation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where rep is a predefined maximum number of replacements. We also make sure that the number of replacements does not exceed half of either the original source or target sentences length, adjusting the actual number of replacements as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Code-switched data generation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "n = min( S 2 , T 2 , r)", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Code-switched data generation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where S and T are respectively the length of the source and target sentences. We finally choose uniformly at random r alignment units and replace these fragments in the matrix language by their counterpart in the embedded language. Figure 1 displays examples of generated CSW sentences.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 232, |
| "end": 240, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Code-switched data generation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We use WMT data for CSW data generation and for training MT systems. We discard sentences which do not possess the correct language by using the fasttext LID model 3 (Bojanowski et al., 2017) . We use Moses tools (Koehn et al., 2007) to normalize punctuations, remove non-printing characters and discard sentence pairs with a source / target ratio higher than 1.5, with a maximum sentence length of 250. We tokenize all WMT data using Moses tokenizer. 4 Our procedure for artificial CSW data generation uses WMT13 En-Es parallel data with 14.5M sentences. For En-Fr, we use all WMT14 parallel data, for a grand total of 33.9M sentences. Our development sets are respectively newstest2011 and newstest2012 for En-Es, and newstest2012 and newstest2013 as development sets for En-Fr; the corresponding test sets are newstest2013 (En-Es) and newstest2014 (En-Fr).", |
| "cite_spans": [ |
| { |
| "start": 166, |
| "end": 191, |
| "text": "(Bojanowski et al., 2017)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 213, |
| "end": 233, |
| "text": "(Koehn et al., 2007)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 452, |
| "end": 453, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data preparation", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "We use the fairseq 5 (Ott et al., 2019) implementation of Transformer base (Vaswani et al., 2017) for our models with a hidden size of 512 and a feedforward size of 2048. We optimize with Adam, set up with an initial learning rate of 0.0007 and an inverse square root weight decay schedule, as well as 4000 warmup steps. All models were trained with mixed precision and a batch size of 8192 tokens for 300k iterations on 4 V100 GPUs. For each language pair, we use a shared source-target inventory built with Byte Pair Encoding (BPE) of 32K merge operations, using the implementation published by Sennrich et al. (2016) . 6 Note that we do not share the embedding matrices. Our experiments with sharing the decoder's input and output embeddings or sharing all encoder+decoder embeddings did not yield further gains. We compare three settings for Code-Switch models:", |
| "cite_spans": [ |
| { |
| "start": 21, |
| "end": 39, |
| "text": "(Ott et al., 2019)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 75, |
| "end": 97, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 597, |
| "end": 619, |
| "text": "Sennrich et al. (2016)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 622, |
| "end": 623, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Machine Translation systems", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "\u2022 the base-csw setting, where we train two separate systems, one translating CSW into English, and the other translating CSW into Spanish or French.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Machine Translation systems", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "\u2022 the multi-csw setting, where we train one model able to generate either pure matrix or embedded language in the output. To this", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Machine Translation systems", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "In Oregon , planners are experimenting with giving drivers different choices . r = 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matrix", |
| "sec_num": null |
| }, |
| { |
| "text": "Dans Oregon , planners are experimenting with giving drivers different choices . r = 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matrix", |
| "sec_num": null |
| }, |
| { |
| "text": "Dans Oregon , les planificateurs are experimenting with giving drivers different choices . r = 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matrix", |
| "sec_num": null |
| }, |
| { |
| "text": "Dans Oregon , les planificateurs are experimenting en offrant aux drivers different choices . Embedded Dans l'Or\u00e9gon, les planificateurs tentent l'exp\u00e9rience en offrant aux automobilistes diff\u00e9rents choix. Figure 1 : Examples of generated CSW sentences when taking English as the matrix language and varying the number r of replacements of embedded French segments (in boldface).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 206, |
| "end": 214, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Matrix", |
| "sec_num": null |
| }, |
| { |
| "text": "end, similar to a multilingual NMT model (Johnson et al., 2017) , we add a tag at the beginning of each CSW sentence to specify the desired target language. Taking En-Fr as an example, we add a <EN> tag for CSW-En and a <FR> tag for CSW-Fr. We use the combination of CSW-En and CSW-Fr data for training, which implies that each source side (CSW sentence) is duplicated in the training data, once for each possible output.", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 63, |
| "text": "(Johnson et al., 2017)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matrix", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 the joint-csw setting, which extends multi-csw by using one encoder and two separate decoders and training the two output languages simultaneously with a combined loss function: for each training (CSW) instance, the loss function sums the two prediction terms for the embedded and the matrix language. The training data remains the same.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matrix", |
| "sec_num": null |
| }, |
| { |
| "text": "Note that all our Code-Switch systems also have the ability to translate monolingual source data, in either direction. For comparison purposes, we also use our parallel data to train two baselines: (a) regular NMT systems for the considered language pairs (base), similar to base-csw; (b) bilingual NMT systems, capable of translating from and into both two languages (bilingual). The selection of the desired target language relies on the same tagging mechanism as multi-csw, which means that both types of models see exactly the same examples. All resulting baseline Transformer models have the exact same hyperparameters and use the same training scheme as Code-Switch. Performance is computed with SacreBLEU (Post, 2018) and METEOR (Denkowski and Lavie, 2014) .", |
| "cite_spans": [ |
| { |
| "start": 712, |
| "end": 724, |
| "text": "(Post, 2018)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 736, |
| "end": 763, |
| "text": "(Denkowski and Lavie, 2014)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matrix", |
| "sec_num": null |
| }, |
| { |
| "text": "We run tests using artificial CSW datasets, as mentioned in Section 2.2, as well as on the original test sets, in order to evaluate our models' ability to translate both CSW and monolingual sentences. Results are in Table 1 where we also separately report scores for the 'Matrix' and 'Embedded' part of the test sets. As is obvious on the copy line, the 'Embedded' part contains mostly source language, and corresponds to an actual translation task whereas the 'Matrix' part mostly contains target words on the source side, and is much easier to translate.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 216, |
| "end": 223, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Machine translation experiments 3.1 Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "On the left part of this table, we see that the baseline systems, either with two (base) or one single (bilingual) model(s), do better on monolingual test sets than their counterparts trained on CSW data (respectively base-csw and multi-csw). For both language pairs, the observed differences are in the range of 1-1.5 BLEU points. Conversely, when translating CSW sentences, * -csw models perform significantly better than the corresponding baselines models, which have never seen CSW in the source.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Machine translation experiments 3.1 Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Moreover, we note the marked differences between BLEU scores obtained by these models when the matrix language for the CSW source is the target and when the embedded language is the target. In the former case, translation is near perfect; in the latter case they nonetheless use the little information available to improve over the monolingual scores (about 1-1.5 BLEU points), nearly matching the performance of the baseline systems. This is illustrated for Fr-En, for which joint-csw improved from 33.7 to 35.0; in the same condition, the bilingual system only improves by 0.1 point.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Machine translation experiments 3.1 Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Among the three Code-Switch models, multi-csw is the weakest, while the other two achieve comparable performance. Interestingly, with joint training (joint-csw), we can recover with one single system the performance of the two separate systems used in the base-csw condition. On the monolingual tests, this system also matches the performance of the multilingual baseline (bilingual), which makes it overall our best contender of the lot. Table 1 : Translating monolingual newstest data and artificial csw-newstest data for two language pairs where performance is measured via the BLEU (B) and METEOR (M) scores. We also report a trivial baseline that just recopies the source text. Small numbers contain BLEU scores computed separately when the target language is the embedded language (left) and the matrix language (right). For the monolingual tests (left part), these correspond to scores computed on the same sentences that are also included in the CSW tests.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 439, |
| "end": 446, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Machine translation experiments 3.1 Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In order to better study the effect of mixing languages, we modify the synthetic data generation method to keep one language as the matrix language, in which segments are incrementally replaced by translations of the embedded language. We relax the constraint on the maximum number of replacements and generate new test sets with an increasing number of replacements, ranging from 1 to 20, resulting in 20 7 versions of the CSW test sets (in each direction). In Figure 2 , we plot the BLEU scores of both source CSW sentences and their translations for En-Fr language pair, using each language as the matrix language, to visualize the impact of progressively introducing more target fragments into the source.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 462, |
| "end": 470, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Code-Switching effect", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "The same behavior is observed for both language pairs and directions: on average, inserting random target fragments boosts the translation performance, with a larger payoff for the first few target segments. There exists an important gap for the output BLEU scores when CSW source sentences with different matrix languages reach the same (input) BLEU scores. Even though we generate a large number of replacements, the basic grammar structure of the matrix language is still maintained. Therefore, taking the target language as matrix gives the model a pre-translated sentence structure that is much easier to reproduce.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Code-Switching effect", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "A second question concerns the ability of the translation system to identify target fragments in the source and to copy them in the target, even though these fragments are indistinguishable from genuine source segments. We use labels computed Figure 2 : Evolution of the BLEU score of source CSW data and their target translation for En-Fr. (a) Direction CSW-En. The solid curve takes Fr as the matrix language, where we progressively inject more En segments; for the dash dot curve, En is the matrix language, with a growing number of Fr segments. (b) Direction CSW-Fr. Note that the target BLEU is always much higher than the source BLEU, with about a 20 points difference. The gap between the dash dot and solid curves is due to the basic sentence structure of the matrix language (see Section 3.2.1).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 243, |
| "end": 251, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Implicit LID in translation", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "As dash dot curves represent insertion in the reference target sentence, the corresponding BLEU score is always higher than the solid curve and actually reaches 100 (in the absence of any embedded language).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implicit LID in translation", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "during the CSW generation procedure to sort out pre-translated (target) segments from actual source segments to be translated. For instance, when translating into French, only tokens with a label eng, denoting English, are expected to be translated. All other tokens correspond to French words are expected to be copied. As reported in Table 2 , our translation models are able to copy almost all pre-translated tokens for both language pairs and directions.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 336, |
| "end": 343, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Implicit LID in translation", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "Refining the analysis, we also study whether the relative order of target words changes, or is preserved, during the translation. Table 3 reports the percentage of exact and switched-order copies. We observe again large differences with respect to the position of the matrix language. When the matrix language is the target language, the model always preserves the observed token order since it indicates a correct sentence structure for the hypothesis. When translating into the embedded language, we observe a larger number of word order changes: in this case, inserted target segments may not appear in their correct order in the CSW sentence, an issue that the model tries to fix. An example of this is in Figure 3 , where we observe a swap between the input (\"diff\u00e9rent choix\") and output (\"choix diff\u00e9rent\") word orders.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 130, |
| "end": 137, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 710, |
| "end": 718, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Implicit LID in translation", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "Conversely, it is also interesting to look at the proportion of mixed language generated on the tar- Table 2 : Analyzing the recopy of tokens on csw-newstest2014 for En-Fr and csw-newstest2013 for En-Es. We report the number of (pre-translated) tokens that should be copied, and the corresponding ratios. get side. Recall that in our training, the source is mixed-language, while the target is always monolingual. We use an in-house token-level language identification (LID) model to identify the language of output tokens and to detect the CSW rate on the target side. As indicated in Table 2 , our models generate almost pure monolingual translations, with a very low rate of CSW text. CSW-translation models thus seem to perform some language identification, as they almost perfectly sort out target language tokens (which are almost always copied) from the source language tokens (which are always translated). A last issue concerns morphological errors: when inserting foreign words into a matrix source, one cannot expect to always also introduce the right inflection marks, some of which can only be determined once the target context is known. Another interesting phenomenon, that we do not simulate here, is when the embedded (target) lemma is adapted bears a morphological mark that only exist in the matrix language, which means that two linguistic systems are mixed within the same word, thereby posing more extreme difficulties for MT (Manandise and Gdaniec, 2011) .", |
| "cite_spans": [ |
| { |
| "start": 1448, |
| "end": 1477, |
| "text": "(Manandise and Gdaniec, 2011)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 101, |
| "end": 108, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 586, |
| "end": 593, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Implicit LID in translation", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "To illustrate the ability to correct grammar errors in input fragments, we manually noise a CSW sentence and display its translation in Figure 3 . Where the input just contains the lemma of the French word \"tenter\" (to try), the model inserts a modal \"doivent\" to fix the context. Another illustration is for the adjective \"diff\u00e9rent\" which is moved into post-nominal position, and for which an article (\"un\") is inserted. This indicates that the model not only copies what already exists but also tends to adjust translations whenever necessary.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 136, |
| "end": 144, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Implicit LID in translation", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "In this section, we evaluate CSW translation for the SemEval 2014 Task 5: L2 Writing Assistant (van Gompel et al., 2014) , which can be handled as an MT task from mixed data.", |
| "cite_spans": [ |
| { |
| "start": 95, |
| "end": 120, |
| "text": "(van Gompel et al., 2014)", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Computing translations in context", |
| "sec_num": "4" |
| }, |
| { |
| "text": "This task consists in translating L1 fragments in an L2 context, where the test set design is such that there is exactly one L1 insert in each utterance. We evaluated on two L1-L2 pairs: English-Spanish and French-English, and list below example test segments provided by the organizers for these pairs of languages (the insert and reference segments are in boldface):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Input (L1=English,L2=Spanish): \"Todo ello, in accordance con los principios que siempre-hemos apoyado.\" Output: \"Todo ello, de conformidad con los principios que siempre hemos apoy-ado.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Input (L1=French,L2=English): \"I rentre\u00e0 la maison because I am tired.\" Output: \"I return home because I am tired.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The official metric for the SemEval evaluation is a word-based accuracy of the translations of the L1 fragment, which means that the L2 context of each sentence is not taken into account in scoring. Since our systems are full-fledged NMT systems, their output may not contain the reference L2 prefix and suffix. Therefore, two options are explored to compute these scores. The first is to post-process the output HYP and align it with the L2 reference context in REF. This alignment allows us to only score the relevant fragment in HYP. We refer to this option as free-dec.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The second option is to ensure that the L2 context will be present in the output translation. To this end, we use the force decoding mode of fairseq, implementing the methods of Post and Vilar (2018) ; Hu et al. (2019) . We explored two different ways to express the L2 context as decoding constraints. The first turns every token in the L2 context as a separate constraint (token-cst). Continuing the previous example, \"I, because, I, am, tired.\" yield 5 constraints. The second uses the prefix and suffix of the L2 context as two multi-word constraints (presuf-cst). In this case, \"I\" and \"because I am tired.\" yield just 2 constraints. In both cases, constraints are required to be present in the prescribed order in the output.", |
| "cite_spans": [ |
| { |
| "start": 178, |
| "end": 199, |
| "text": "Post and Vilar (2018)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 202, |
| "end": 218, |
| "text": "Hu et al. (2019)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Scores are computed with the SemEval evaluation tool, 8 which enables a comparison with other submissions for this task. Results are in Table 4 and 5. For En-Es, our CSW translator outperforms the best system in the official evaluation (van Gompel et al., 2014) . Note that this model is not specifically designed nor tuned in any way for the SemEval task. For Fr-En, our system achieves better performance than the fourth best participating system, with a clear gap with respect to the top results. In both cases, constraint decoding hurts performance: given that the automatic copy of target segments is already nearly perfect, introducing more constraints during",
| "cite_spans": [ |
| { |
| "start": 236, |
| "end": 261, |
| "text": "(van Gompel et al., 2014)", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 136, |
| "end": 143, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In Oregon , planners are experimenting with giving drivers different choices. Fr Dans l'Or\u00e9gon, les planificateurs tentent l'exp\u00e9rience en offrant aux automobilistes diff\u00e9rents choix. CSW In l'Oregon , planners tentent l' exp\u00e9rience with giving automobilistes diff\u00e9rents choix.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "En", |
| "sec_num": null |
| }, |
| { |
| "text": "Dans l'Or\u00e9gon , les planificateurs tentent l'exp\u00e9rience de donner aux automobilistes diff\u00e9rents choix. Noisy CSW In l' Oregon , planners tenter l'exp\u00e9rience with giving automobilist diff\u00e9rent choix.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hyp", |
| "sec_num": null |
| }, |
| { |
| "text": "Hyp Dans l'Or\u00e9gon , les planificateurs doivent tenter l'exp\u00e9rience de donner \u00e0 l' automobiliste un choix diff\u00e9rent. the search has here a clear detrimental effect for this task. To better study the performance gap between these language pairs, we additionally score the development and test data with BLEU and METEOR. Results in Table 6 show that for these metrics, we achieve performance that is in the same ballpark for the two language pairs, suggesting that the observed difference in the SemEval metric is likely due to a mismatch between references and system outputs. The official metric is a word accuracy which may exclude acceptable translations by exact token match.",
| "cite_spans": [],
| "ref_spans": [
| {
| "start": 329,
| "end": 336,
| "text": "Table 6",
| "ref_id": null
| },
| ], |
| "eq_spans": [], |
| "section": "Hyp", |
| "sec_num": null |
| }, |
| { |
| "text": "Research in the area of NLP for CSW has mostly focused on CSW Language Modeling, especially for Automatic Speech Recognition (Pratapa et al., 2018; Garg et al., 2018; Gonen and Goldberg, Table 6 : Results of other metrics on SemEval data. METEOR scores for the Fr-En SemEval test are much worse than for En-Es. This is mostly due to the high \"fragmentation penalty\" computed by METEOR for English; the corresponding average F mean is about 0.99, showing that translations are mostly correct. Winata et al., 2019; Lee and Li, 2020) . Evaluation tasks, benchmarks have also been prepared for LID in user generated CSW content (Zubiaga et al., 2016; Molina et al., 2016) , Named Entity Recognition (Aguilar et al., 2018) , Part-of-Speech tagging (Ball and Garrette, 2018; Khanuja et al., 2020) and Sentiment Analysis (Patwa et al., 2020) . CSW was also found useful in foreign language teaching: Renduchintala et al. (2019a,b) showed that replacing words by their counterparts in foreign language helps to learn foreign language vocabulary.", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 147, |
| "text": "(Pratapa et al., 2018;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 148, |
| "end": 166, |
| "text": "Garg et al., 2018;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 167, |
| "end": 186, |
| "text": "Gonen and Goldberg,", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 492, |
| "end": 512, |
| "text": "Winata et al., 2019;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 513, |
| "end": 530, |
| "text": "Lee and Li, 2020)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 624, |
| "end": 646, |
| "text": "(Zubiaga et al., 2016;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 647, |
| "end": 667, |
| "text": "Molina et al., 2016)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 695, |
| "end": 717, |
| "text": "(Aguilar et al., 2018)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 743, |
| "end": 768, |
| "text": "(Ball and Garrette, 2018;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 769, |
| "end": 790, |
| "text": "Khanuja et al., 2020)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 814, |
| "end": 834, |
| "text": "(Patwa et al., 2020)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 893, |
| "end": 923, |
| "text": "Renduchintala et al. (2019a,b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 187, |
| "end": 194, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Regarding MT, most past work has focused on using artificial CSW data to help conventional translation systems. Huang and Yates (2014) used CSW corpus to improve word alignment and statistical MT. Dinu et al. (2019) experienced replacing and concatenating source terminology constraints by the corresponding translation(s) to boost the accuracy of term translations. Song et al. (2019a) shared the same idea by replacing phrases with prespecified translation to perform \"soft\" constraint decoding. A different line of research is in (Bulte and Tezcan, 2019; Pham et al., 2020) , who explore ways to combine a source sentence with similar translations extracted from translation memories. Yang et al. (2020) also pretrained translation models by predicting original source segments from generated CSW sentences and claimed better results compared to other pre-training methods (Conneau and Lample, 2019; Song et al., 2019b) . Nevertheless, there barely exists work aimed at translating CSW sentences. Johnson et al. (2017) mentioned using a multilingual NMT system to translate CSW sentence to a third target language by showing only one example. To the best of our knowledge, only one parallel Arabic-English CSW corpus was specifically released for MT applications (Menacer et al., 2019) . This CSW data was extracted from the UN data with Arabic as the matrix language: while translations into English were readily available, the purely Arabic side of the corpus was obtained using Google Translate to fill the missing Arabic bits.", |
| "cite_spans": [ |
| { |
| "start": 112, |
| "end": 134, |
| "text": "Huang and Yates (2014)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 197, |
| "end": 215, |
| "text": "Dinu et al. (2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 367, |
| "end": 386, |
| "text": "Song et al. (2019a)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 533, |
| "end": 557, |
| "text": "(Bulte and Tezcan, 2019;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 558, |
| "end": 576, |
| "text": "Pham et al., 2020)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 688, |
| "end": 706, |
| "text": "Yang et al. (2020)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 876, |
| "end": 902, |
| "text": "(Conneau and Lample, 2019;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 903, |
| "end": 922, |
| "text": "Song et al., 2019b)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 1000, |
| "end": 1021, |
| "text": "Johnson et al. (2017)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1266, |
| "end": 1288, |
| "text": "(Menacer et al., 2019)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this study, we present a data augmentation method to generate artificial CSW data. We have shown that the artificial data generated could be used to train NMT systems to translate both monolingual and CSW sentences (in one or even two different languages). With joint training of the two languages, we were able to build systems that were as good as a baseline bilingual system on monolingual texts, and much better for CSW texts. Our system does not need any explicit language identification and almost perfectly sorts out source tokens from target tokens in a CSW utterance. Another interesting feature of our system is that it always outputs monolingual translations. We finally report state-of-the-art results for the SemEval L2 Writing Assistant task for Es-En, while the related results for Fr-En are still somewhat lagging behind the best scores.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and outlook", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In the future, we would like to generate more realistic CSW data from monolingual sentences using a translation model. We also plan to explore ways to translate CSW texts simultaneously into both languages, so that the two decoding processes can mutually influence one another: in a first step in that direction, we have shown that training with a joint loss was actually beneficial for the translation into the two languages. Another line of research would be to continue experimenting with realistic language data, also containing other phenomena such as morphological binding. Finally, we also intend to study the somewhat more realistic condition where a mixture of languages A and B is translated into language C; we believe that the artificial CSW generation methods developed in our work would also be effective for this task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and outlook", |
| "sec_num": "6" |
| }, |
| { |
| "text": "This work was granted access to the HPC resources of IDRIS under the allocation 2021-[AD011011580R1] made by GENCI. The authors wish to thank Josep Crego for his comments on an earlier version of this work. We also would like to thank the anonymous reviewers for their valuable suggestions. The first author is partly funded by Systran and by a grant from R\u00e9gion Ile-de-France.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "7" |
| }, |
| { |
| "text": "https://github.com/clab/fast_align 2 Note that we abuse here the terms \"matrix\" and \"embedded\" language, as we do not attempt to generate linguistically realistic CSW data matching the constraints of the MLF theory. We use these terms in a much looser sense where the sentence in the \"matrix\" language is the one that receives arbitrary insertions from the \"embedded\" language. This means that our artificial CSW sentences will contain insertions of unconstrained fragments containing both content and function words, which the theory would generally consider ungrammatical.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://dl.fbaipublicfiles.com/ fasttext/supervised-models/lid.176.bin 4 https://github.com/moses-smt/ mosesdecoder 5 https://github.com/pytorch/fairseq 6 https://github.com/rsennrich/ subword-nmt.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For sentences that could not accommodate 20 replacements, we performed as many replacements as possible.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/proycon/ semeval2014task5", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Named entity recognition on code-switched data: Overview of the CALCS 2018 shared task", |
| "authors": [ |
| { |
| "first": "Gustavo", |
| "middle": [], |
| "last": "Aguilar", |
| "suffix": "" |
| }, |
| { |
| "first": "Fahad", |
| "middle": [], |
| "last": "Alghamdi", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Soto", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| }, |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Hirschberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Thamar", |
| "middle": [], |
| "last": "Solorio", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Workshop on Computational Approaches to Linguistic Code-Switching", |
| "volume": "", |
| "issue": "", |
| "pages": "138--147", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-3219" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gustavo Aguilar, Fahad AlGhamdi, Victor Soto, Mona Diab, Julia Hirschberg, and Thamar Solorio. 2018. Named entity recognition on code-switched data: Overview of the CALCS 2018 shared task. In Proceedings of the Third Workshop on Compu- tational Approaches to Linguistic Code-Switching, pages 138-147, Melbourne, Australia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "LinCE: A Centralized Benchmark for Linguistic Code-switching Evaluation", |
| "authors": [ |
| { |
| "first": "Gustavo", |
| "middle": [], |
| "last": "Aguilar", |
| "suffix": "" |
| }, |
| { |
| "first": "Sudipta", |
| "middle": [], |
| "last": "Kar", |
| "suffix": "" |
| }, |
| { |
| "first": "Thamar", |
| "middle": [], |
| "last": "Solorio", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "1803--1813", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gustavo Aguilar, Sudipta Kar, and Thamar Solorio. 2020. LinCE: A Centralized Benchmark for Lin- guistic Code-switching Evaluation. In Proceedings of The 12th Language Resources and Evaluation Conference, pages 1803-1813, Marseille, France. European Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "From English to code-switching: Transfer learning with strong morphological clues", |
| "authors": [ |
| { |
| "first": "Gustavo", |
| "middle": [], |
| "last": "Aguilar", |
| "suffix": "" |
| }, |
| { |
| "first": "Thamar", |
| "middle": [], |
| "last": "Solorio", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "8033--8044", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.716" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gustavo Aguilar and Thamar Solorio. 2020. From English to code-switching: Transfer learning with strong morphological clues. In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 8033-8044, Online. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "3rd International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In 3rd Inter- national Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "I am borrowing ya mixing ?\" an analysis of English-Hindi code mixing in Facebook", |
| "authors": [ |
| { |
| "first": "Kalika", |
| "middle": [], |
| "last": "Bali", |
| "suffix": "" |
| }, |
| { |
| "first": "Jatin", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Monojit", |
| "middle": [], |
| "last": "Choudhury", |
| "suffix": "" |
| }, |
| { |
| "first": "Yogarshi", |
| "middle": [], |
| "last": "Vyas", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the First Workshop on Computational Approaches to Code Switching", |
| "volume": "", |
| "issue": "", |
| "pages": "116--126", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/W14-3914" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kalika Bali, Jatin Sharma, Monojit Choudhury, and Yo- garshi Vyas. 2014. \"I am borrowing ya mixing ?\" an analysis of English-Hindi code mixing in Facebook. In Proceedings of the First Workshop on Computa- tional Approaches to Code Switching, pages 116- 126, Doha, Qatar. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Part-of-speech tagging for code-switched, transliterated texts without explicit language identification", |
| "authors": [ |
| { |
| "first": "Kelsey", |
| "middle": [], |
| "last": "Ball", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Garrette", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "3084--3089", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1347" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kelsey Ball and Dan Garrette. 2018. Part-of-speech tagging for code-switched, transliterated texts with- out explicit language identification. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3084-3089, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Code switching and x-bar theory: The functional head constraint", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hedi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Belazi", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Edward", |
| "suffix": "" |
| }, |
| { |
| "first": "Almeida Jacqueline", |
| "middle": [], |
| "last": "Rubin", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Toribio", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Linguistic inquiry", |
| "volume": "", |
| "issue": "", |
| "pages": "221--237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hedi M Belazi, Edward J Rubin, and Almeida Jacque- line Toribio. 1994. Code switching and x-bar theory: The functional head constraint. Linguistic inquiry, pages 221-237.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Enriching word vectors with subword information", |
| "authors": [ |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "5", |
| "issue": "", |
| "pages": "135--146", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00051" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Neural fuzzy repair: Integrating fuzzy matches into neural machine translation", |
| "authors": [ |
| { |
| "first": "Bram", |
| "middle": [], |
| "last": "Bulte", |
| "suffix": "" |
| }, |
| { |
| "first": "Arda", |
| "middle": [], |
| "last": "Tezcan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1800--1809", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1175" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bram Bulte and Arda Tezcan. 2019. Neural fuzzy re- pair: Integrating fuzzy matches into neural machine translation. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 1800-1809, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Crosslingual language model pretraining", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "32", |
| "issue": "", |
| "pages": "7059--7069", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau and Guillaume Lample. 2019. Cross- lingual language model pretraining. In Advances in Neural Information Processing Systems, volume 32, pages 7059-7069. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Reordered search, and tuple unfolding for Ngram-based SMT", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Josep", |
| "suffix": "" |
| }, |
| { |
| "first": "Jos\u00e9", |
| "middle": [ |
| "B" |
| ], |
| "last": "Crego", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mari\u00f1o", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the MT Summit X", |
| "volume": "", |
| "issue": "", |
| "pages": "283--289", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Josep M. Crego, Jos\u00e9 B. Mari\u00f1o, and Adri\u00e0 De Gis- pert. 2005. Reordered search, and tuple unfolding for Ngram-based SMT. In In Proceedings of the MT Summit X, pages 283-289.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Meteor universal: Language specific translation evaluation for any target language", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Denkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the Ninth Workshop on Statistical Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "376--380", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/W14-3348" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Denkowski and Alon Lavie. 2014. Meteor uni- versal: Language specific translation evaluation for any target language. In Proceedings of the Ninth Workshop on Statistical Machine Translation, pages 376-380, Baltimore, Maryland, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Training neural machine translation to apply terminology constraints", |
| "authors": [ |
| { |
| "first": "Georgiana", |
| "middle": [], |
| "last": "Dinu", |
| "suffix": "" |
| }, |
| { |
| "first": "Prashant", |
| "middle": [], |
| "last": "Mathur", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcello", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| }, |
| { |
| "first": "Yaser", |
| "middle": [], |
| "last": "Al-Onaizan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3063--3068", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1294" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Georgiana Dinu, Prashant Mathur, Marcello Federico, and Yaser Al-Onaizan. 2019. Training neural ma- chine translation to apply terminology constraints. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3063-3068, Florence, Italy. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A simple, fast, and effective reparameterization of IBM model 2", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Chahuneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "644--648", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Dyer, Victor Chahuneau, and Noah A. Smith. 2013. A simple, fast, and effective reparameter- ization of IBM model 2. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, pages 644-648, At- lanta, Georgia. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Multi-way, multilingual neural machine translation with a shared attention mechanism", |
| "authors": [ |
| { |
| "first": "Orhan", |
| "middle": [], |
| "last": "Firat", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "866--875", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N16-1101" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Orhan Firat, Kyunghyun Cho, and Yoshua Bengio. 2016. Multi-way, multilingual neural machine trans- lation with a shared attention mechanism. In Pro- ceedings of the 2016 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 866-875. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Code-switched language models using dual RNNs and same-source pretraining", |
| "authors": [ |
| { |
| "first": "Saurabh", |
| "middle": [], |
| "last": "Garg", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanmay", |
| "middle": [], |
| "last": "Parekh", |
| "suffix": "" |
| }, |
| { |
| "first": "Preethi", |
| "middle": [], |
| "last": "Jyothi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "3078--3083", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1346" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saurabh Garg, Tanmay Parekh, and Preethi Jyothi. 2018. Code-switched language models using dual RNNs and same-source pretraining. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3078-3083, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Language modeling for code-switching: Evaluation, integration of monolingual data, and discriminative training", |
| "authors": [ |
| { |
| "first": "Hila", |
| "middle": [], |
| "last": "Gonen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4175--4185", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1427" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hila Gonen and Yoav Goldberg. 2019. Language mod- eling for code-switching: Evaluation, integration of monolingual data, and discriminative training. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 4175- 4185, Hong Kong, China. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Improved lexically constrained decoding for translation and monolingual rewriting", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "Edward" |
| ], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Huda", |
| "middle": [], |
| "last": "Khayrallah", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Culkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Tongfei", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "839--850", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1090" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Edward Hu, Huda Khayrallah, Ryan Culkin, Patrick Xia, Tongfei Chen, Matt Post, and Benjamin Van Durme. 2019. Improved lexically constrained decoding for translation and monolingual rewriting. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 839-850, Minneapolis, Minnesota. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Improving word alignment using linguistic code switching data", |
| "authors": [ |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Yates", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/E14-1001" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fei Huang and Alexander Yates. 2014. Improving word alignment using linguistic code switching data. In Proceedings of the 14th Conference of the Euro- pean Chapter of the Association for Computational Linguistics, pages 1-9, Gothenburg, Sweden. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Google's multilingual neural machine translation system: Enabling zero-shot translation", |
| "authors": [ |
| { |
| "first": "Melvin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Thorat", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernanda", |
| "middle": [], |
| "last": "Vi\u00e9gas", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Wattenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Macduff", |
| "middle": [], |
| "last": "Hughes", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "5", |
| "issue": "", |
| "pages": "339--351", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00065" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Melvin Johnson, Mike Schuster, Quoc V. Le, Maxim Krikun, Yonghui Wu, Zhifeng Chen, Nikhil Thorat, Fernanda Vi\u00e9gas, Martin Wattenberg, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2017. Google's multilingual neural machine translation system: En- abling zero-shot translation. Transactions of the As- sociation for Computational Linguistics, 5:339-351.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "GLUECoS: An evaluation benchmark for code-switched NLP", |
| "authors": [ |
| { |
| "first": "Simran", |
| "middle": [], |
| "last": "Khanuja", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandipan", |
| "middle": [], |
| "last": "Dandapat", |
| "suffix": "" |
| }, |
| { |
| "first": "Anirudh", |
| "middle": [], |
| "last": "Srinivasan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunayana", |
| "middle": [], |
| "last": "Sitaram", |
| "suffix": "" |
| }, |
| { |
| "first": "Monojit", |
| "middle": [], |
| "last": "Choudhury", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3575--3585", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.329" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simran Khanuja, Sandipan Dandapat, Anirudh Srini- vasan, Sunayana Sitaram, and Monojit Choudhury. 2020. GLUECoS: An evaluation benchmark for code-switched NLP. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 3575-3585, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Moses: Open source toolkit for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcello", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicola", |
| "middle": [], |
| "last": "Bertoldi", |
| "suffix": "" |
| }, |
| { |
| "first": "Brooke", |
| "middle": [], |
| "last": "Cowan", |
| "suffix": "" |
| }, |
| { |
| "first": "Wade", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Christine", |
| "middle": [], |
| "last": "Moran", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Zens", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Constantin", |
| "suffix": "" |
| }, |
| { |
| "first": "Evan", |
| "middle": [], |
| "last": "Herbst", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics Companion Volume Proceedings of the Demo and Poster Sessions", |
| "volume": "", |
| "issue": "", |
| "pages": "177--180", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ond\u0159ej Bojar, Alexandra Constantin, and Evan Herbst. 2007. Moses: Open source toolkit for statistical machine translation. In Proceedings of the 45th Annual Meeting of the As- sociation for Computational Linguistics Companion Volume Proceedings of the Demo and Poster Ses- sions, pages 177-180, Prague, Czech Republic. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Modeling code-switch languages using bilingual parallel corpus", |
| "authors": [ |
| { |
| "first": "Grandee", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Haizhou", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "860--870", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.80" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Grandee Lee and Haizhou Li. 2020. Modeling code- switch languages using bilingual parallel corpus. In Proceedings of the 58th Annual Meeting of the As- sociation for Computational Linguistics, pages 860- 870, Online. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Morphology to the rescue redux: Resolving borrowings and code-mixing in machine translation", |
| "authors": [ |
| { |
| "first": "Esm\u00e9", |
| "middle": [], |
| "last": "Manandise", |
| "suffix": "" |
| }, |
| { |
| "first": "Claudia", |
| "middle": [], |
| "last": "Gdaniec", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Systems and Frameworks for Computational Morphology", |
| "volume": "", |
| "issue": "", |
| "pages": "86--97", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Esm\u00e9 Manandise and Claudia Gdaniec. 2011. Mor- phology to the rescue redux: Resolving borrow- ings and code-mixing in machine translation. In Systems and Frameworks for Computational Mor- phology, pages 86-97, Berlin, Heidelberg. Springer Berlin Heidelberg.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Machine Translation on a parallel Code-Switched Corpus", |
| "authors": [ |
| { |
| "first": "Mohamed", |
| "middle": [], |
| "last": "Menacer", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Langlois", |
| "suffix": "" |
| }, |
| { |
| "first": "Denis", |
| "middle": [], |
| "last": "Jouvet", |
| "suffix": "" |
| }, |
| { |
| "first": "Dominique", |
| "middle": [], |
| "last": "Fohr", |
| "suffix": "" |
| }, |
| { |
| "first": "Odile", |
| "middle": [], |
| "last": "Mella", |
| "suffix": "" |
| }, |
| { |
| "first": "Kamel", |
| "middle": [], |
| "last": "Sma\u00efli", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Canadian AI 2019 - 32nd Conference on Canadian Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohamed Menacer, David Langlois, Denis Jouvet, Do- minique Fohr, Odile Mella, and Kamel Sma\u00efli. 2019. Machine Translation on a parallel Code-Switched Corpus. In Canadian AI 2019 -32nd Conference on Canadian Artificial Intelligence, Lecture Notes in Artificial Intelligence, Ontario, Canada.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Overview for the second shared task on language identification in code-switched data", |
| "authors": [ |
| { |
| "first": "Giovanni", |
| "middle": [], |
| "last": "Molina", |
| "suffix": "" |
| }, |
| { |
| "first": "Fahad", |
| "middle": [], |
| "last": "Alghamdi", |
| "suffix": "" |
| }, |
| { |
| "first": "Mahmoud", |
| "middle": [], |
| "last": "Ghoneim", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdelati", |
| "middle": [], |
| "last": "Hawwari", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Rey-Villamizar", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| }, |
| { |
| "first": "Thamar", |
| "middle": [], |
| "last": "Solorio", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Second Workshop on Computational Approaches to Code Switching", |
| "volume": "", |
| "issue": "", |
| "pages": "40--49", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W16-5805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Giovanni Molina, Fahad AlGhamdi, Mahmoud Ghoneim, Abdelati Hawwari, Nicolas Rey- Villamizar, Mona Diab, and Thamar Solorio. 2016. Overview for the second shared task on language identification in code-switched data. In Proceedings of the Second Workshop on Computa- tional Approaches to Code Switching, pages 40-49, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Duelling languages: Grammatical structure in codeswitching", |
| "authors": [ |
| { |
| "first": "Carol", |
| "middle": [], |
| "last": "Myers-Scotton", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carol Myers-Scotton. 1997. Duelling languages: Grammatical structure in codeswitching. Oxford University Press.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "fairseq: A fast, extensible toolkit for sequence modeling", |
| "authors": [ |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexei", |
| "middle": [], |
| "last": "Baevski", |
| "suffix": "" |
| }, |
| { |
| "first": "Angela", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)", |
| "volume": "", |
| "issue": "", |
| "pages": "48--53", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-4009" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, extensible toolkit for sequence modeling. In Proceedings of the 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics (Demonstrations), pages 48-53, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "SemEval-2020 task 9: Overview of sentiment analysis of code-mixed tweets", |
| "authors": [ |
| { |
| "first": "Parth", |
| "middle": [], |
| "last": "Patwa", |
| "suffix": "" |
| }, |
| { |
| "first": "Gustavo", |
| "middle": [], |
| "last": "Aguilar", |
| "suffix": "" |
| }, |
| { |
| "first": "Sudipta", |
| "middle": [], |
| "last": "Kar", |
| "suffix": "" |
| }, |
| { |
| "first": "Suraj", |
| "middle": [], |
| "last": "Pandey", |
| "suffix": "" |
| }, |
| { |
| "first": "Pykl", |
| "middle": [], |
| "last": "Srinivas", |
| "suffix": "" |
| }, |
| { |
| "first": "Bj\u00f6rn", |
| "middle": [], |
| "last": "Gamb\u00e4ck", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanmoy", |
| "middle": [], |
| "last": "Chakraborty", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fourteenth Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "774--790", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Parth Patwa, Gustavo Aguilar, Sudipta Kar, Suraj Pandey, Srinivas PYKL, Bj\u00f6rn Gamb\u00e4ck, Tanmoy Chakraborty, Thamar Solorio, and Amitava Das. 2020. SemEval-2020 task 9: Overview of senti- ment analysis of code-mixed tweets. In Proceed- ings of the Fourteenth Workshop on Semantic Eval- uation, pages 774-790, Barcelona (online). Interna- tional Committee for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Constraints on language mixing: Intrasentential code-switching and borrowing in Spanish/English. Language", |
| "authors": [ |
| { |
| "first": "Carol", |
| "middle": [ |
| "W" |
| ], |
| "last": "Pfaff", |
| "suffix": "" |
| } |
| ], |
| "year": 1979, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "291--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carol W Pfaff. 1979. Constraints on language mix- ing: Intrasentential code-switching and borrowing in Spanish/English. Language, pages 291-318.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Priming neural machine translation", |
| "authors": [ |
| { |
| "first": "Minh", |
| "middle": [ |
| "Quang" |
| ], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Jitao", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Josep", |
| "middle": [], |
| "last": "Crego", |
| "suffix": "" |
| }, |
| { |
| "first": "Fran\u00e7ois", |
| "middle": [], |
| "last": "Yvon", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Senellart", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fifth Conference on Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "462--473", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minh Quang Pham, Jitao Xu, Josep Crego, Fran\u00e7ois Yvon, and Jean Senellart. 2020. Priming neural ma- chine translation. In Proceedings of the Fifth Confer- ence on Machine Translation, pages 462-473, On- line. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Syntactic structure and social function of code-switching", |
| "authors": [ |
| { |
| "first": "Shana", |
| "middle": [], |
| "last": "Poplack", |
| "suffix": "" |
| } |
| ], |
| "year": 1978, |
| "venue": "Centro de Estudios Puertorrique\u00f1os", |
| "volume": "2", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shana Poplack. 1978. Syntactic structure and social function of code-switching, volume 2. Centro de Estudios Puertorrique\u00f1os, City University of New York.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Sometimes I'll start a sentence in Spanish y termino en espa\u00f1ol: toward a typology of code-switching", |
| "authors": [ |
| { |
| "first": "Shana", |
| "middle": [], |
| "last": "Poplack", |
| "suffix": "" |
| } |
| ], |
| "year": 1980, |
| "venue": "Linguistics", |
| "volume": "18", |
| "issue": "", |
| "pages": "581--618", |
| "other_ids": { |
| "DOI": [ |
| "10.1515/ling.1980.18.7-8.581" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shana Poplack. 1980. Sometimes i'll start a sentence in spanish y termino en espa\u00d1ol: toward a typology of code-switching 1. Linguistics, 18:581-618.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "A call for clarity in reporting BLEU scores", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "186--191", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-6319" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186- 191, Brussels, Belgium. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Fast lexically constrained decoding with dynamic beam allocation for neural machine translation", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Vilar", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1314--1324", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1119" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Post and David Vilar. 2018. Fast lexically con- strained decoding with dynamic beam allocation for neural machine translation. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 1 (Long Pa- pers), pages 1314-1324, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Language modeling for code-mixing: The role of linguistic theory based synthetic data", |
| "authors": [ |
| { |
| "first": "Adithya", |
| "middle": [], |
| "last": "Pratapa", |
| "suffix": "" |
| }, |
| { |
| "first": "Gayatri", |
| "middle": [], |
| "last": "Bhat", |
| "suffix": "" |
| }, |
| { |
| "first": "Monojit", |
| "middle": [], |
| "last": "Choudhury", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunayana", |
| "middle": [], |
| "last": "Sitaram", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandipan", |
| "middle": [], |
| "last": "Dandapat", |
| "suffix": "" |
| }, |
| { |
| "first": "Kalika", |
| "middle": [], |
| "last": "Bali", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1543--1553", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-1143" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adithya Pratapa, Gayatri Bhat, Monojit Choudhury, Sunayana Sitaram, Sandipan Dandapat, and Kalika Bali. 2018. Language modeling for code-mixing: The role of linguistic theory based synthetic data. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 1543-1553, Melbourne, Aus- tralia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Simple construction of mixed-language texts for vocabulary learning", |
| "authors": [ |
| { |
| "first": "Adithya", |
| "middle": [], |
| "last": "Renduchintala", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Eisner", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "369--379", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-4439" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adithya Renduchintala, Philipp Koehn, and Jason Eis- ner. 2019a. Simple construction of mixed-language texts for vocabulary learning. In Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 369-379, Florence, Italy. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Spelling-aware construction of macaronic texts for teaching foreign-language vocabulary", |
| "authors": [ |
| { |
| "first": "Adithya", |
| "middle": [], |
| "last": "Renduchintala", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Eisner", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "6438--6443", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1679" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adithya Renduchintala, Philipp Koehn, and Jason Eis- ner. 2019b. Spelling-aware construction of maca- ronic texts for teaching foreign-language vocabulary. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 6438- 6443, Hong Kong, China. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Estimating code-switching on Twitter with a novel generalized word-level language detection technique", |
| "authors": [ |
| { |
| "first": "Shruti", |
| "middle": [], |
| "last": "Rijhwani", |
| "suffix": "" |
| }, |
| { |
| "first": "Royal", |
| "middle": [], |
| "last": "Sequiera", |
| "suffix": "" |
| }, |
| { |
| "first": "Monojit", |
| "middle": [], |
| "last": "Choudhury", |
| "suffix": "" |
| }, |
| { |
| "first": "Kalika", |
| "middle": [], |
| "last": "Bali", |
| "suffix": "" |
| }, |
| { |
| "first": "Chandra Shekhar", |
| "middle": [], |
| "last": "Maddila", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1971--1982", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1180" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shruti Rijhwani, Royal Sequiera, Monojit Choud- hury, Kalika Bali, and Chandra Shekhar Maddila. 2017. Estimating code-switching on Twitter with a novel generalized word-level language detection technique. In Proceedings of the 55th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1971-1982, Van- couver, Canada. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Multilingual code-switching identification via LSTM recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Younes", |
| "middle": [], |
| "last": "Samih", |
| "suffix": "" |
| }, |
| { |
| "first": "Suraj", |
| "middle": [], |
| "last": "Maharjan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammed", |
| "middle": [], |
| "last": "Attia", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Kallmeyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Thamar", |
| "middle": [], |
| "last": "Solorio", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Second Workshop on Computational Approaches to Code Switching", |
| "volume": "", |
| "issue": "", |
| "pages": "50--59", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W16-5806" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Younes Samih, Suraj Maharjan, Mohammed Attia, Laura Kallmeyer, and Thamar Solorio. 2016. Multi- lingual code-switching identification via LSTM re- current neural networks. In Proceedings of the Second Workshop on Computational Approaches to Code Switching, pages 50-59, Austin, Texas. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1715--1725", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715- 1725, Berlin, Germany. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "A survey of code-switched speech and language processing", |
| "authors": [ |
| { |
| "first": "Sunayana", |
| "middle": [], |
| "last": "Sitaram", |
| "suffix": "" |
| }, |
| { |
| "first": "Khyathi", |
| "middle": [ |
| "Raghavi" |
| ], |
| "last": "Chandu", |
| "suffix": "" |
| }, |
| { |
| "first": "Sai", |
| "middle": [ |
| "Krishna" |
| ], |
| "last": "Rallabandi", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sunayana Sitaram, Khyathi Raghavi Chandu, Sai Kr- ishna Rallabandi, and Alan W. Black. 2019. A sur- vey of code-switched speech and language process- ing. CoRR, abs/1904.00784.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Code-switching for enhancing NMT with pre-specified translation", |
| "authors": [ |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Heng", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Weihua", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| }, |
| { |
| "first": "Kun", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "449--459", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1044" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Song, Yue Zhang, Heng Yu, Weihua Luo, Kun Wang, and Min Zhang. 2019a. Code-switching for enhancing NMT with pre-specified translation. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 449-459, Minneapolis, Minnesota. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Mass: Masked sequence to sequence pre-training for language generation", |
| "authors": [ |
| { |
| "first": "Kaitao", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ICML, volume 97 of Proceedings of Machine Learning Research", |
| "volume": "", |
| "issue": "", |
| "pages": "5926--5936", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, and Tie- Yan Liu. 2019b. Mass: Masked sequence to se- quence pre-training for language generation. In ICML, volume 97 of Proceedings of Machine Learn- ing Research, pages 5926-5936. PMLR.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "SemEval 2014 task 5 - L2 writing assistant", |
| "authors": [ |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "van Gompel", |
| "suffix": "" |
| }, |
| { |
| "first": "Iris", |
| "middle": [], |
| "last": "Hendrickx", |
| "suffix": "" |
| }, |
| { |
| "first": "Antal", |
| "middle": [], |
| "last": "van den Bosch", |
| "suffix": "" |
| }, |
| { |
| "first": "Els", |
| "middle": [], |
| "last": "Lefever", |
| "suffix": "" |
| }, |
| { |
| "first": "V\u00e9ronique", |
| "middle": [], |
| "last": "Hoste", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 8th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "36--44", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/S14-2005" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maarten van Gompel, Iris Hendrickx, Antal van den Bosch, Els Lefever, and V\u00e9ronique Hoste. 2014. Se- mEval 2014 task 5 -L2 writing assistant. In Pro- ceedings of the 8th International Workshop on Se- mantic Evaluation (SemEval 2014), pages 36-44, Dublin, Ireland. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "30", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Code-switched language models using neural based synthetic data from parallel sentences", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Genta Indra Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Chien-Sheng", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "271--280", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K19-1026" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Genta Indra Winata, Andrea Madotto, Chien-Sheng Wu, and Pascale Fung. 2019. Code-switched lan- guage models using neural based synthetic data from parallel sentences. In Proceedings of the 23rd Con- ference on Computational Natural Language Learn- ing (CoNLL), pages 271-280, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Boosting neural machine translation with similar translations", |
| "authors": [ |
| { |
| "first": "Jitao", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Josep", |
| "middle": [], |
| "last": "Crego", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Senellart", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1580--1590", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.144" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jitao Xu, Josep Crego, and Jean Senellart. 2020. Boost- ing neural machine translation with similar trans- lations. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 1580-1590, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "CSP: Code-switching pre-training for neural machine translation", |
| "authors": [ |
| { |
| "first": "Zhen", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bojie", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ambyera", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Shen", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Ju", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "2624--2636", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhen Yang, Bojie Hu, Ambyera Han, Shen Huang, and Qi Ju. 2020. CSP: Code-switching pre-training for neural machine translation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2624-2636, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "TweetLID: a benchmark for tweet language identification. Language Resources and Evaluation", |
| "authors": [ |
| { |
| "first": "Arkaitz", |
| "middle": [], |
| "last": "Zubiaga", |
| "suffix": "" |
| }, |
| { |
| "first": "Pablo", |
| "middle": [], |
| "last": "I\u00f1aki San Vicente", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gamallo", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "50", |
| "issue": "", |
| "pages": "729--766", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arkaitz Zubiaga, I\u00f1aki San Vicente, Pablo Gamallo, Jos\u00e9 Ramom Pichel, I\u00f1aki Alegria, Nora Aranberri, Aitzol Ezeiza, and V\u00edctor Fresno. 2016. TweetLID: a benchmark for tweet language identification. Lan- guage Resources and Evaluation, 50(4):729-766.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "num": null, |
| "uris": null, |
| "text": "A noisy Code-Switched sentence with French as both the matrix and target language.", |
| "type_str": "figure" |
| }, |
| "TABREF3": { |
| "text": "", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>: Percentage of sentences for which all target</td></tr><tr><td>words have been exactly copied without and with or-</td></tr><tr><td>der changes, for csw-newstest2014 (En-Fr) and</td></tr><tr><td>csw-newstest2013 (En-Es). We separately re-</td></tr><tr><td>port numbers for the case where the foreign language</td></tr><tr><td>(French or Spanish) is the embedded (Mat En) or ma-</td></tr><tr><td>trix (Mat For) language.</td></tr></table>" |
| }, |
| "TABREF5": { |
| "text": "Results of SemEval 2014 Task 5 for En-Es.", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td/><td colspan=\"3\">Accuracy Word Accuracy Recall</td></tr><tr><td>UEdin-run1</td><td>0.733</td><td>0.824</td><td>1.0</td></tr><tr><td>UEdin-run2</td><td>0.731</td><td>0.821</td><td>1.0</td></tr><tr><td>UEdin-run3</td><td>0.723</td><td>0.816</td><td>1.0</td></tr><tr><td>CNRC-run1</td><td>0.556</td><td>0.694</td><td>1.0</td></tr><tr><td>multi-csw</td><td/><td/><td/></tr><tr><td>free-dec</td><td>0.554</td><td>0.685</td><td>0.996</td></tr><tr><td>token-cst</td><td>0.531</td><td>0.665</td><td>0.990</td></tr><tr><td>presuf-cst</td><td>0.519</td><td>0.658</td><td>0.982</td></tr><tr><td>joint-csw</td><td/><td/><td/></tr><tr><td>free-dec</td><td>0.626</td><td>0.744</td><td>0.994</td></tr></table>" |
| }, |
| "TABREF6": { |
| "text": "Results of SemEval 2014 Task 5 for Fr-En.", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |