| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:13:49.951205Z" |
| }, |
| "title": "Language Models are Few-shot Multilingual Learners", |
| "authors": [ |
| { |
| "first": "Genta", |
| "middle": [], |
| "last": "Indra Winata", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Hong Kong University of Science and Technology", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Hong Kong University of Science and Technology", |
| "location": {} |
| }, |
| "email": "amadotto@connect.ust.hk" |
| }, |
| { |
| "first": "Zhaojiang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Hong Kong University of Science and Technology", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Rosanne", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Yosinski", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Hong Kong University of Science and Technology", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Google", |
| "middle": [], |
| "last": "Brain", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "General-purpose language models have demonstrated impressive capabilities, performing on par with state-of-the-art approaches on a range of downstream natural language processing (NLP) tasks and benchmarks when inferring instructions from very few examples. Here, we evaluate the multilingual skills of the GPT and T5 models in conducting multi-class classification on non-English languages without any parameter updates. We show that, given a few English examples as context, pre-trained language models can predict not only English test samples but also non-English ones. Finally, we find the in-context few-shot cross-lingual prediction results of language models are significantly better than random prediction, and they are competitive compared to the existing state-of-the-art cross-lingual models and translation models.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "General-purpose language models have demonstrated impressive capabilities, performing on par with state-of-the-art approaches on a range of downstream natural language processing (NLP) tasks and benchmarks when inferring instructions from very few examples. Here, we evaluate the multilingual skills of the GPT and T5 models in conducting multi-class classification on non-English languages without any parameter updates. We show that, given a few English examples as context, pre-trained language models can predict not only English test samples but also non-English ones. Finally, we find the in-context few-shot cross-lingual prediction results of language models are significantly better than random prediction, and they are competitive compared to the existing state-of-the-art cross-lingual models and translation models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The progress in language model (LM) pretraining (Peters et al., 2018; Devlin et al., 2019; Radford et al., 2019; Yang et al., 2019; Liu et al., 2019a; Brown et al., 2020; Lewis et al., 2020; Raffel et al., 2020; Gao et al., 2020a) has led to the possibility of conducting few-shot learning, that is, learning a new task using a small number of examples without any further training or gradient computation. Few-shot learning alleviates the cost for extensive labeled data, which is beneficial since collecting high-quality labeled data is resource-intensive and expensive. It also reduces the cost for model fine-tuning, which requires tremendous GPU or TPU resources. Fewshot learning can be seen as a one-for-all plugand-play computational model that can be applied to various natural language tasks, from sentiment analysis for text classification to story generation, provided only a small context (Brown et al., 2020) . Figure 1 : Accuracy vs. model size on English-Spanish MNLU dataset. Cross-lingual in-context learning with LMs (i.e., context with few English examples tested on Spanish sentences) performs as well as models trained in cross-lingual setting and translation baselines.", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 69, |
| "text": "(Peters et al., 2018;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 70, |
| "end": 90, |
| "text": "Devlin et al., 2019;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 91, |
| "end": 112, |
| "text": "Radford et al., 2019;", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 113, |
| "end": 131, |
| "text": "Yang et al., 2019;", |
| "ref_id": "BIBREF68" |
| }, |
| { |
| "start": 132, |
| "end": 150, |
| "text": "Liu et al., 2019a;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 151, |
| "end": 170, |
| "text": "Brown et al., 2020;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 171, |
| "end": 190, |
| "text": "Lewis et al., 2020;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 191, |
| "end": 211, |
| "text": "Raffel et al., 2020;", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 212, |
| "end": 230, |
| "text": "Gao et al., 2020a)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 902, |
| "end": 922, |
| "text": "(Brown et al., 2020)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 925, |
| "end": 933, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The idea of few-shot learning is also relevant to address the low-resource issue in non-English languages. Few-shot learning has been applied to NLP tasks (Brown et al., 2020; Lu et al., 2021; Perez et al., 2021; Liu et al., 2021a,b; Cahyawijaya et al., 2021a) . Common approaches to solve the low-resource issue are to pretrain models with self-supervised learning using unlabelled monolingual text data collected from various resources available online (Wilie et al., 2020; Le et al., 2020; Eddine et al., 2020; Nguyen and Nguyen, 2020; Scheible et al., 2020; Bhattacharjee et al., 2021; Cahyawijaya et al., 2021b; Park et al., 2021) and then apply pre-training on the source language and fine-tune on the target languages (Schuster et al., 2019; Lin et al., 2019; Winata et al., 2019 Pfeiffer et al., 2020; Zheng et al., 2021; Lin et al., 2021b) . Conversely, the few-shot learning does not need any training from the source and target languages. Figure 1 shows how it is possible to utilize pre-trained models on non-English languages, such as Spanish, as the performance is not random, Figure 2 : Example of the inference and query generation on the few-shot learning, where the source language and target language are German and English, respectively. and the performance increases as the models are given more samples. We conjecture that pre-trained models may be able to adapt to languages that are similar to English. However, for many language tasks, it is difficult to collect a large supervised training dataset as language experts (e.g., linguists or native speakers) are required to annotate the data.", |
| "cite_spans": [ |
| { |
| "start": 155, |
| "end": 175, |
| "text": "(Brown et al., 2020;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 176, |
| "end": 192, |
| "text": "Lu et al., 2021;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 193, |
| "end": 212, |
| "text": "Perez et al., 2021;", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 213, |
| "end": 233, |
| "text": "Liu et al., 2021a,b;", |
| "ref_id": null |
| }, |
| { |
| "start": 234, |
| "end": 260, |
| "text": "Cahyawijaya et al., 2021a)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 455, |
| "end": 475, |
| "text": "(Wilie et al., 2020;", |
| "ref_id": "BIBREF59" |
| }, |
| { |
| "start": 476, |
| "end": 492, |
| "text": "Le et al., 2020;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 493, |
| "end": 513, |
| "text": "Eddine et al., 2020;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 514, |
| "end": 538, |
| "text": "Nguyen and Nguyen, 2020;", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 539, |
| "end": 561, |
| "text": "Scheible et al., 2020;", |
| "ref_id": "BIBREF55" |
| }, |
| { |
| "start": 562, |
| "end": 589, |
| "text": "Bhattacharjee et al., 2021;", |
| "ref_id": null |
| }, |
| { |
| "start": 590, |
| "end": 616, |
| "text": "Cahyawijaya et al., 2021b;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 617, |
| "end": 635, |
| "text": "Park et al., 2021)", |
| "ref_id": null |
| }, |
| { |
| "start": 725, |
| "end": 748, |
| "text": "(Schuster et al., 2019;", |
| "ref_id": "BIBREF57" |
| }, |
| { |
| "start": 749, |
| "end": 766, |
| "text": "Lin et al., 2019;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 767, |
| "end": 786, |
| "text": "Winata et al., 2019", |
| "ref_id": "BIBREF63" |
| }, |
| { |
| "start": 787, |
| "end": 809, |
| "text": "Pfeiffer et al., 2020;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 810, |
| "end": 829, |
| "text": "Zheng et al., 2021;", |
| "ref_id": null |
| }, |
| { |
| "start": 830, |
| "end": 848, |
| "text": "Lin et al., 2021b)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 950, |
| "end": 958, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 1091, |
| "end": 1099, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Another line of work is to apply cross-lingual transfer on English with the same task as the target languages (Ponti et al., 2018; Artetxe and Schwenk, 2019; Liu et al., 2019b; Lauscher et al., 2020; Liu et al., , 2021c . However, such methods still need to apply a fine-tuning step to update the model for fast adaptation, which can be challenging for large pre-trained modelssome models require substantial memory capacity -since the models have to be trained on highperforming machines. Different from the aforementioned method, in-context learning using an LM does not allow any parameter updates. Thus, the process does not need to compute and store the gradients for backward propagation.", |
| "cite_spans": [ |
| { |
| "start": 110, |
| "end": 130, |
| "text": "(Ponti et al., 2018;", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 131, |
| "end": 157, |
| "text": "Artetxe and Schwenk, 2019;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 158, |
| "end": 176, |
| "text": "Liu et al., 2019b;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 177, |
| "end": 199, |
| "text": "Lauscher et al., 2020;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 200, |
| "end": 219, |
| "text": "Liu et al., , 2021c", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we investigate the practicality of applying few-shot learning in the multilingual setting for four languages, English, French, German, and Spanish, on natural language understanding intent prediction tasks using publicly available LMs that are mainly trained on English data. We show that, given a few English examples as context, pretrained LMs can predict not only English test sam-ples, but also non-English ones ( Figure 2 ). To the best of our knowledge, no existing works have studied these tasks in multilingual settings. We conjecture that the English LMs can still produce good results on languages that are closely related to English. We construct the inference for the multi-class prediction setup by extending the idea from of applying multiple binary predictions on each class. Instead of guiding the model to generate true or false like in their work, which is not consistent and sometimes generates other words -, we introduce maximum confidence prediction. This method considers the confidence of predicting a certain label to provide a prediction. We design this as a multiple-choice task in which the confidence of the prediction for all possible classes is compared. Each class's confidence score is computed by normalizing the logits of generating the next boolean token given the prompt as the context. This method is considered to be more scalable than the simple k-way few-shot learning, where we need to put all data in a single prompt, since we only have a fixed maximum sequence length and, in the deployment, each forward step can be run in parallel to speed up the process. To increase the difficulty of the challenge, we also propose a cross-lingual task, where the context and query are in different languages.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 432, |
| "end": 440, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Overall, we find that conditional generative LMs, such as the GPT-2 (Radford et al., 2019) , GPT NEO models (Gao et al., 2020a) , and T5 models (Raffel et al., 2020) have the capability to predict non-English languages, and adding more shots and using larger models achieves a substantial increment in performance, making it significantly better than random, which indicates the models are able to understand the prompt. We only focus on GPT and T5 models. T5 models do not perform as well as GPT models, which might be caused by the pre-training strategy. Experimental results in the cross-lingual setting demonstrate that pre-trained LMs make correct predictions. To summarize, our contributions are as follows:", |
| "cite_spans": [ |
| { |
| "start": 68, |
| "end": 90, |
| "text": "(Radford et al., 2019)", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 108, |
| "end": 127, |
| "text": "(Gao et al., 2020a)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 144, |
| "end": 165, |
| "text": "(Raffel et al., 2020)", |
| "ref_id": "BIBREF54" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We study few-shot learning in the multilingual setting on four languages without any gradient updates. We use the publicly available GPT and T5 LMs, and compare the results to those from the zero-shot and fine-tuning approaches.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We propose a simple and straightforward approach to perform few-shot learning on multiclass classification by applying binary prediction and considering the confidence of predicting the boolean tokens.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We display the zero-shot, one-shot, and manyshot proficiency of the LMs in the crosslingual setting when the language of the prompt is different from the target language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "First, we briefly define the notation of the input and output of the task, and then we introduce our method to design prompts for few-shot in-context learning. 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Few-shot Multilingual Learners", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Let us define D as the distribution over the dataset and P as the prompt that we use as the input of the LM \u03b8. The prompt P = [D pos , D neg , Q] is a concatenation of few-shot samples: positive samples D pos , negative samples D neg , and the query Q, where D pos , D neg \u223c D. D pos is a sample with a label that is the same as the query, and D neg is a sample that is taken from the dataset D with a label other than the query. \u03b8 takes P as the input of the model, and the LM generates a word y. We define the task T s\u2192t , where s is the source language and t is the target language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Notation and Tasks", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In this paper, we focus on the intent detection task in the monolingual and cross-lingual settings. In the monolingual setting, the source language is the same as the target language, and in the crosslingual setting, we take the source language as different from the target language (s = t). We design our task as a multiple-choice problem, in which each sample has a label l \u2208 L, where L is the set of possible labels. We predict the boolean (true or false) for each sample and take the highest prediction confidence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Notation and Tasks", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We define the task by designing prompts to perform few-shot learning. We design our task as a binary classification for multi-class prediction by following . The idea is to guide the model to predict the boolean tokens, true and false. We examine the usage of two types of LMs, GPT and T5 models, and we construct prompts specific to each model. We use a specific way to probe the LMs to perform the few-shot prediction since they are trained with different learning objectives. Table 1 shows the format of the prefix we use for the GPT and T5 models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 479, |
| "end": 486, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Prompt Generation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "X i is one of the Model Prompt GPT [SAMPLES] Q \u2192 T5 [SAMPLES] Q \u2192 [MASK] [SAMPLES] Format Example X 1 \u2192 true\\n zeige mir meine wecker=>get_alarm=true\\n X * 1 \u2192 false\\n entferne alle wecker=>get_alarm=false\\n \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 X k \u2192 true\\n", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Prompt Generation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "kann ich meine wecker sehen?=>get_alarm=true\\n X * k \u2192 false\\n keinen sound bitte=>get_alarm=false\\n few-shot samples, and X * i is the sample from other classes. For the GPT models, we only input the prefix by concatenating positive and negative samples with the query. Specifically for the T5 models, we add an additional token after the query and let the model predict that particular token during the generation step. Figure 2 shows an example of how we generate the prompt in k-shot settings. We create L prompts and apply L forward steps for each sample. For each prompt, k positive and negative samples are randomly drawn from the dataset. It is worthwhile to note that the sampling method is similar to k-way few-shot learning, but the samples are not merged into a single prompt. We do this because we want to give more shots as the prompt to the LMs as they have a limitation on the number of tokens they can accept as input (1,024 tokens in GPT-2 XL and 2,048 tokens in GPT NEO ). We add a special token \\n as a separator between each sample, as shown in Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 422, |
| "end": 430, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 1066, |
| "end": 1073, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Prompt Generation", |
| "sec_num": "2.2" |
| }, |
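| { |
| "text": "As a minimal illustrative sketch (the names build_prompt, dataset, and query, as well as the example query, are our own assumptions and not taken from the released code), a k-shot binary prompt for one candidate label in the utterance=>label=true/false format of Table 1 could be assembled as follows:\n\nimport random\n\ndef build_prompt(label, k, dataset, query, model_type='gpt'):\n    # dataset: list of (utterance, intent) pairs.\n    # Positive samples share the candidate label; negatives come from other intents.\n    positives = [u for u, l in dataset if l == label]\n    negatives = [u for u, l in dataset if l != label]\n    lines = []\n    for pos, neg in zip(random.sample(positives, k), random.sample(negatives, k)):\n        lines.append(f'{pos}=>{label}=true\\n')   # X_i -> true\n        lines.append(f'{neg}=>{label}=false\\n')  # X*_i -> false\n    # The query is left unanswered; the LM must generate the boolean token.\n    query_line = f'{query}=>{label}='\n    if model_type == 't5':\n        query_line += '[MASK]'  # T5 predicts this extra token (see Table 1)\n    return ''.join(lines) + query_line\n\n# Illustrative usage with the German examples from Table 1.\ndata = [('zeige mir meine wecker', 'get_alarm'),\n        ('kann ich meine wecker sehen?', 'get_alarm'),\n        ('entferne alle wecker', 'delete_alarm'),\n        ('keinen sound bitte', 'mute_sound')]\nprint(build_prompt('get_alarm', 2, data, 'wecker anzeigen'))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Prompt Generation", |
| "sec_num": "2.2" |
| }, |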
| { |
| "text": "To get the final prediction of each sample, first, we compute the score of predicting the next boolean (true or false 2 ) given the prompt X i for label i: P \u03b8 (y = true|X i ) and P \u03b8 (y = false|X i ) from the prediction distribution. Then, we normalize the score to get the probability of generating the true token to measure how much confidence the LM has to predict label i. We collect all the confidence scores over all label options and choose the highest confidence score among them, as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Maximum Confidence Prediction", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "MC(X, L) = argmax i\u2208L P \u03b8 (y = true|X i ) b P \u03b8 (y = b|X i )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Maximum Confidence Prediction", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": ", 1where b \u2208 {true, false}. We take the label with the highest confidence score as MC(X, L).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Maximum Confidence Prediction", |
| "sec_num": "2.3" |
| }, |
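| { |
| "text": "A minimal sketch of Eq. (1) with the Hugging Face transformers library and a GPT-2 checkpoint is shown below. The function max_confidence and its dictionary-of-prompts interface are illustrative assumptions rather than the released implementation, and whether the boolean token requires a leading space depends on the tokenizer and the prompt format.\n\nimport torch\nfrom transformers import GPT2LMHeadModel, GPT2Tokenizer\n\n# Score each per-label prompt by the normalized probability of generating\n# 'true' as the next token (Eq. 1), then take the label with the highest score.\ntokenizer = GPT2Tokenizer.from_pretrained('gpt2')  # any GPT-2 checkpoint\nmodel = GPT2LMHeadModel.from_pretrained('gpt2').eval()\ntrue_id = tokenizer.encode('true')[0]    # first sub-token of 'true' (see footnote 2)\nfalse_id = tokenizer.encode('false')[0]  # first sub-token of 'false'\n\ndef max_confidence(prompts_per_label):\n    # prompts_per_label: {label: prompt string ending with '...=>label='}\n    scores = {}\n    with torch.no_grad():\n        for label, prompt in prompts_per_label.items():\n            ids = tokenizer(prompt, return_tensors='pt').input_ids\n            next_token_logits = model(ids).logits[0, -1]\n            probs = torch.softmax(next_token_logits, dim=-1)\n            p_true, p_false = probs[true_id], probs[false_id]\n            scores[label] = (p_true / (p_true + p_false)).item()\n    return max(scores, key=scores.get), scores", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Maximum Confidence Prediction", |
| "sec_num": "2.3" |
| }, |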
| { |
| "text": "For in-context learning, choosing the order of samples is essential (Lu et al., 2021) . Here, we examine the impact of the order of the samples. We construct the probing set in two ways: (1) shuffle the few-shot samples and measure the variance in performance after changing their order, and (2) arrange the positive samples before the negative samples. We find that the latter works well, specifically on the T5 models.", |
| "cite_spans": [ |
| { |
| "start": 68, |
| "end": 85, |
| "text": "(Lu et al., 2021)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Choices of Samples", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "In this work, we compare the few-shot learning performance with other common approaches: zeroshot, zero-shot cross-task, and fine-tuning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3" |
| }, |
| { |
| "text": "One way to solve zero-shot prediction is by using entailment models to calculate the entailment score between sequences and labels. Given a pretrained LM \u03c8 with an entailment head, a set of hypotheses H, and possible labels L, the model accepts two inputs, the hypothesis h \u2208 H and label l \u2208 L, and generates the entailment score given any combinations of the hypothesis and label P \u03c8 (y = entail|h, l):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Zero-shot Cross-Task", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "ES(H, L) = argmax h,l\u2208{H,L} P \u03c8 (y = entail|h, l). (2) 3.2 Zero-shot In-Context Learning", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Zero-shot Cross-Task", |
| "sec_num": "3.1" |
| }, |
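| { |
| "text": "A minimal sketch of this zero-shot cross-task baseline using the Hugging Face zero-shot-classification pipeline is shown below. The checkpoint is the XNLI-fine-tuned XLM-R model referenced in the footnotes, while the candidate intents and the hypothesis template are illustrative assumptions.\n\nfrom transformers import pipeline\n\n# The NLI model scores entailment between the utterance (premise) and each\n# candidate intent phrased as a hypothesis; the top-scoring label is chosen.\nclassifier = pipeline('zero-shot-classification',\n                      model='joeddav/xlm-roberta-large-xnli')\n\nlabels = ['get_alarm', 'delete_alarm', 'play_music']  # illustrative intents\nresult = classifier('zeige mir meine wecker',\n                    candidate_labels=labels,\n                    hypothesis_template='The intent is {}.')\nprint(result['labels'][0], result['scores'][0])  # best label and its score", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Zero-shot Cross-Task", |
| "sec_num": "3.1" |
| }, |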
| { |
| "text": "This approach is very similar to our few-shot approach. It does not need any samples, and the model is only given natural language instruction. However, instead of using the prompt like in the few-shot setting, we can set up the prompt in a question-and-answer (Q&A) format as follows: Q: Is '<INTENT>' the intent of '<TEXT>'? A:. (3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Zero-shot Cross-Task", |
| "sec_num": "3.1" |
| }, |
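| { |
| "text": "As a small illustrative sketch (the helper name qa_prompt is ours, not from the released code), the Q&A prompt in (3) can be instantiated and then scored with the same true/false confidence normalization as in Section 2.3:\n\ndef qa_prompt(intent, text):\n    # Q&A-style zero-shot prompt (Eq. 3).\n    return f\"Q: Is '{intent}' the intent of '{text}'? A:\"\n\nprint(qa_prompt('get_alarm', 'zeige mir meine wecker'))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Zero-shot In-Context Learning", |
| "sec_num": "3.2" |
| }, |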
| { |
| "text": "Fine-tuning is the most common approach to updating a pre-trained model's weights when training with a labeled dataset. The advantage of this approach is strong performance since we give supervised signals with the correct labels to the model. For fine-tuning, we use the same sets of few-shot samples as in the in-context learning. In Section 4.2, we provide the hyper-parameters used in the experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fine-tuning", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We use an English natural language understanding (NLU) dataset, SNIPS (Coucke et al., 2018) , and two multilingual NLU datasets, MTOP and Multilingual NLU (MultiNLU) (Schuster et al., 2019). MTOP includes four languages, English (en), French (fr), German (de), and Spanish (es), and Multilingual NLU includes two languages, English (en) and Spanish (es). We measure the model performance by calculating the average and standard deviation of the accuracy with three runs.", |
| "cite_spans": [ |
| { |
| "start": 70, |
| "end": 91, |
| "text": "(Coucke et al., 2018)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Metrics", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We set up the experiment in two settings: monolingual and cross-lingual. In the monolingual setting, we test the ability of the model to conduct few-shot in-context learning on four languages: English (en), French (fr), German (de), and Spanish (es). In the cross-lingual setting, we test its ability to predict a query from a non-English language with the English context (en\u2192XX). In the few-shot in-context learning, we use k-way-few-shot classification, taking k samples. For each model, we take k \u2208 [0, 5, K], where K \u2264 40 is the largest number of few-shot samples that can be passed to the model as input and is divisible by 10 without exceeding the maximum input token limit. We utilize an NVIDIA Tesla V100 16GB GPU to run the inference so that the model is ensured to fit in a single GPU, and we use 16-bit precision.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Settings", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Model details We run experiments on a variety of publicly available models: 3 four sizes of GPT-2 models (0.1B, 0.3B, 0.8B and 1.6B), three sizes of GPT NEO models (1.3B, 2.7B, and 6B), and two sizes of T5 models (0.8B and 3B). Table 3 shows the details of each pre-trained model.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 228, |
| "end": 235, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment Settings", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We use the same sets of few-shot samples for the baselines. We run fine-tuning on the pre-trained models mBERT (Devlin et al., 2019) and XLM-R (Conneau et al., 2020) , and also compare our models with the zero-shot cross-task models using pre-trained models XLM-R, fine-tuned on XNLI (Conneau et al., 2018) , and BART, fine-tuned on MNLI ; 4 a random baseline; and state-of-the-art results reported on each dataset. For the finetuning, we use a learning rate of 5e-5 with a decay of 0.9 for every epoch, and a batch size of 32. We apply an early stopping after 5 epochs without any improvement on the validation set. Fine-tuning (all-shot on source language, zero-shot on target language)", |
| "cite_spans": [ |
| { |
| "start": 111, |
| "end": 132, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 143, |
| "end": 165, |
| "text": "(Conneau et al., 2020)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 284, |
| "end": 306, |
| "text": "(Conneau et al., 2018)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": null |
| }, |
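| { |
| "text": "A hedged sketch of this fine-tuning configuration is given below. Only the stated hyper-parameters (learning rate 5e-5 decayed by 0.9 every epoch, batch size 32, early stopping after 5 epochs without validation improvement) come from the text; the optimizer choice, the mBERT checkpoint name, and the finetune/evaluate interfaces are assumptions.\n\nimport torch\nfrom transformers import AutoModelForSequenceClassification\n\ndef finetune(num_labels, train_loader, valid_loader, evaluate, max_epochs=50):\n    # train_loader yields batches of 32 tokenized examples that include 'labels'.\n    model = AutoModelForSequenceClassification.from_pretrained(\n        'bert-base-multilingual-cased', num_labels=num_labels)\n    optimizer = torch.optim.Adam(model.parameters(), lr=5e-5)\n    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)\n    best_acc, bad_epochs = 0.0, 0\n    for _ in range(max_epochs):\n        model.train()\n        for batch in train_loader:\n            optimizer.zero_grad()\n            loss = model(**batch).loss\n            loss.backward()\n            optimizer.step()\n        scheduler.step()                  # multiply the learning rate by 0.9\n        acc = evaluate(model, valid_loader)\n        if acc > best_acc:\n            best_acc, bad_epochs = acc, 0\n        else:\n            bad_epochs += 1\n            if bad_epochs >= 5:           # early stopping patience\n                break\n    return model, best_acc", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": null |
| }, |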
| { |
| "text": "Seq2Seq w/ CRISS 36.10 48.60 46.60 -Seq2Seq w/ XLM-R 42.30 50.30 43.90 -NLM (Liu et al., 2021d) 54.91 59.99 58.16 -X2Parser (Liu et al., 2021d) 56 Tables 2 and 4 show the results in the monolingual and cross-lingual settings, respectively. The tables show that the performance improvement is highly related to the size of the pre-trained model, and the performance gap between the fully trained stateof-the-art model and the few-shot learning models decreases when we use larger models, indicating the usefulness of utilizing models of bigger sizes. The performance of the models with few-shot learning is considered promising as they are not trained at all, and the best model's performance gap with the fine-tuned model is less than 10%.", |
| "cite_spans": [ |
| { |
| "start": 76, |
| "end": 95, |
| "text": "(Liu et al., 2021d)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 124, |
| "end": 143, |
| "text": "(Liu et al., 2021d)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 147, |
| "end": 161, |
| "text": "Tables 2 and 4", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "Few-shot vs. Fine-tuning. Comparing the performance of generative models to fine-tuning, it is clear that we can achieve higher accuracy without any training. However, in this experiment, we acknowledge GPT and T5 models we use for incontext learning are larger than the models we finetune, and few-shot learning is much more efficient since the models are not required to store the intermediate memory. In terms of inference speed, the few-shot models require more time to run an inference step, which may cause a bottleneck when the number of few-shot samples is relatively large. This is the limitation of this method, and reducing the inference time is an open research area to improve the efficiency of in-context learning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "Zero-shot cross-task baselines. Surprisingly, the zero-shot cross-task models are able to predict the samples much better than the random baseline, particularly on English tasks. Overall, the XLM-R LARGE model performs better than the BART LARGE models in all tasks except SNIPS.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "GPT vs. T5 models. In general, the GPT models outperform the T5 models in all language pairs and datasets in a head-to-head comparison: Both GPT-2 LARGE and T5 LARGE have a similar number of parameters (0.8B), but they have a significant performance difference. A similar pattern can also be observed on larger models, such as GPT NEO 2.7B and T5 3B 3B. Although the T5 models perform worse than the GPT models, they do not have a maximum token size for the input, as the GPT models do, which is one of the advantages of using them. On the other hand, we find that changing the sample order tremendously affects the performance of the T5 models. As shown in Tables 2 and 4 , the performance increases substantially when we sort the few-shot samples based on their label (i.e., first all positive and then all negative examples). Conversely, the GPT models suffer loss in performance. Thus, we can make the conclusion that changing the sample order may produce high variance in the results, as also shown in (Lu et al., 2021) .", |
| "cite_spans": [ |
| { |
| "start": 1007, |
| "end": 1024, |
| "text": "(Lu et al., 2021)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 658, |
| "end": 672, |
| "text": "Tables 2 and 4", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "Effectiveness on non-English languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "Based on the results, the performance of the models is lower in the non-English languages than in English. These results are expected since the pre-trained models are mostly trained on English data. However, the differences in performance are marginal. This finding may indicate that our few-shot learning method can be effectively utilized for languages that are in the same language family as English, such as French, German, and Spanish, but this will require further investigation in the future.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "Cross-lingual results. Based on the results in Table 4, we can see that the generative models are able to use the context from English to predict the sample in non-English languages. The crosslingual setting is considered harder than the monolingual one since the models need to contextualize and understand the source and target languages to predict the test samples correctly. In general, the trend of the results in the cross-lingual setting is similar to the monolingual setting. In the MTOP dataset, we find that the models generally achieve higher performance for en\u2192es than for the other two target languages (de and fr). In MultiNLU, our GPT NEO-J closes the gap with the existing stateof-the-art baseline with fine-tuning from underperforming it only by a close margin of around 4.2%, and the GPT NEO-J performance is only less than 3% worse than that of the Translate-Train model. These results show a promising new direction in the zero-shot cross-lingual research that can be applied to other datasets and language pairs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "To further understand how much data we need for the in-context learning, we conduct experiments with different numbers of few-shot samples, including zero-shot experiments on the MTOP and MultiNLU datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ablation Study", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "MTOP dataset. Figures 3, 4 , 5, and 6 illustrate the results with different numbers of samples on the MTOP dataset in the monolingual setting. We show a different set of k-shot results for each model according to the maximum samples that can be used in the model as input. The results consistently improved as the number of shots increases. Interestingly, the QA style's zero-shot strategy can outperform random prediction only on two or three models in each language, and the others are worse. The fine-tuning results on MTOP are thus far worse than those of few-shot learning.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 14, |
| "end": 26, |
| "text": "Figures 3, 4", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ablation Study", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "MultiNLU dataset. Figures 7 and 8 illustrate the results with different numbers of samples on the MultiNLU dataset in the monolingual setting. The results on MultiNLU for the models with finetuning are closer to those of few-shot learning than those on the MTOP dataset. The reason may be the number of labels that the MTOP dataset has compared to MultiNLU. As a result, the zero-shot performance on the GPT models is sometimes worse than that of the random baseline.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 18, |
| "end": 33, |
| "text": "Figures 7 and 8", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ablation Study", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "6 Related Work", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ablation Study", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Recent work on few-shot in-context learning uses LMs to solve NLP tasks (Petroni et al., 2019; Brown et al., 2020; Gao et al., 2020b; Zhao et al., 2021; Schick and Sch\u00fctze, 2021; Lin et al., 2021a) . In this approach, we select the appropriate prompts to trigger the LMs to behave so that they can predict the desired output (Liu et al., 2021b) . However, the prompts have to be engineered to allow the LM to generate a text appropriate to solve the task. Learning to calibrate the few-shot results is also essential to reduce the model's performance variance (Zhao et al., 2021) , and the selection criteria in choosing the prompts are also important (Perez et al., 2021) . In another stream of work, ; Li and Liang (2021) proposed an automated method to create prompts for a diverse set of tasks by gradient-based tuning instead of manually searching for a good prompt. Using such a method, may allow us to find an optimal prompt easier, it is very difficult to discover the optimal prompts for complicated natural language processing tasks, such as semantic parsing (Liu et al., 2021b) . ", |
| "cite_spans": [ |
| { |
| "start": 72, |
| "end": 94, |
| "text": "(Petroni et al., 2019;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 95, |
| "end": 114, |
| "text": "Brown et al., 2020;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 115, |
| "end": 133, |
| "text": "Gao et al., 2020b;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 134, |
| "end": 152, |
| "text": "Zhao et al., 2021;", |
| "ref_id": "BIBREF69" |
| }, |
| { |
| "start": 153, |
| "end": 178, |
| "text": "Schick and Sch\u00fctze, 2021;", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 179, |
| "end": 197, |
| "text": "Lin et al., 2021a)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 325, |
| "end": 344, |
| "text": "(Liu et al., 2021b)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 560, |
| "end": 579, |
| "text": "(Zhao et al., 2021)", |
| "ref_id": "BIBREF69" |
| }, |
| { |
| "start": 652, |
| "end": 672, |
| "text": "(Perez et al., 2021)", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 704, |
| "end": 723, |
| "text": "Li and Liang (2021)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 1069, |
| "end": 1088, |
| "text": "(Liu et al., 2021b)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Few-shot In-Context Learning", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Recent advances in pre-trained LMs have been focused on building pre-trained encoders, such as BERT (Devlin et al., 2019) , RoBERTa (Liu et al., 2019a) , ELMO (Peters et al., 2018) , ULM-FiT (Howard and Ruder, 2018) , ELECTRA (Clark et al., 2019) , XLM (Conneau and Lample, 2019) , and XLM-R (Conneau et al., 2020; Goyal et al., 2021) , decoder-only models, such as GPT models (Radford et al., 2019; Brown et al., 2020) and encoder-decoder models, such as T5 (Raffel et al., 2020) , BART (Lewis et al., 2020) , and their mul-tilingual versions, mT5 (Xue et al., 2021) and mBART . Pre-trained encoders have been used to improve the contextualized representations of multilingual systems in various NLP tasks, for example, dialogue systems (Liu et al., , 2021d , code-switching sequence labeling (Aguilar et al., 2020; Winata, 2021) , and multilingual speech recognition (Datta et al., 2020; . Meanwhile, the pre-trained encoder-decoder models, have been used for various sequence generation tasks, such as summarization (Raffel et al., 2020) , conversational agents (Lin et al., 2020b,a; Wu and Xiong, 2020; Hosseini-Asl et al., 2020; Lin et al., 2021b) , and knowledge grounding (Chen et al., 2020; Zhao et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 121, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 132, |
| "end": 151, |
| "text": "(Liu et al., 2019a)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 159, |
| "end": 180, |
| "text": "(Peters et al., 2018)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 191, |
| "end": 215, |
| "text": "(Howard and Ruder, 2018)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 226, |
| "end": 246, |
| "text": "(Clark et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 253, |
| "end": 279, |
| "text": "(Conneau and Lample, 2019)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 292, |
| "end": 314, |
| "text": "(Conneau et al., 2020;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 315, |
| "end": 334, |
| "text": "Goyal et al., 2021)", |
| "ref_id": null |
| }, |
| { |
| "start": 377, |
| "end": 399, |
| "text": "(Radford et al., 2019;", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 400, |
| "end": 419, |
| "text": "Brown et al., 2020)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 459, |
| "end": 480, |
| "text": "(Raffel et al., 2020)", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 488, |
| "end": 508, |
| "text": "(Lewis et al., 2020)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 549, |
| "end": 567, |
| "text": "(Xue et al., 2021)", |
| "ref_id": null |
| }, |
| { |
| "start": 738, |
| "end": 758, |
| "text": "(Liu et al., , 2021d", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 794, |
| "end": 816, |
| "text": "(Aguilar et al., 2020;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 817, |
| "end": 830, |
| "text": "Winata, 2021)", |
| "ref_id": "BIBREF61" |
| }, |
| { |
| "start": 869, |
| "end": 889, |
| "text": "(Datta et al., 2020;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 1019, |
| "end": 1040, |
| "text": "(Raffel et al., 2020)", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 1065, |
| "end": 1086, |
| "text": "(Lin et al., 2020b,a;", |
| "ref_id": null |
| }, |
| { |
| "start": 1087, |
| "end": 1106, |
| "text": "Wu and Xiong, 2020;", |
| "ref_id": "BIBREF65" |
| }, |
| { |
| "start": 1107, |
| "end": 1133, |
| "text": "Hosseini-Asl et al., 2020;", |
| "ref_id": null |
| }, |
| { |
| "start": 1134, |
| "end": 1152, |
| "text": "Lin et al., 2021b)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 1179, |
| "end": 1198, |
| "text": "(Chen et al., 2020;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1199, |
| "end": 1217, |
| "text": "Zhao et al., 2020)", |
| "ref_id": "BIBREF70" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pre-trained Language Models", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "More Languages In this paper, we explored only cross-lingual transfer learning from and to Latinbased language (e.g., English to Spanish / French / German). Extending our approach to non-Latin languages (e.g., Thai, Chinese, etc.) is challenging for two reasons: 1) we are currently using English tokenizers which are known to fails, or they assign UNK tokens when prompt with non-Latin characters, and 2) a possible little, or absent, the named entity overlap between the source and target language, which could make the English prompt completely irrelevant. The latter suggests an interesting future work, where we could study the correlation between performance and word (or token) overlapping of the source (en) and the target language samples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Limitation and Future Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "More Datasets and Models Intent recognition is an important task, especially in multiple language scenarios. In future work, we plan to include the missing languages of MTOP and MultiNLU, and to add more languages from the MultiATIS++ (Xu et al., 2020) which consists of a total of 9 languages, that is, English, Spanish, German, French, Portuguese, Chinese, Japanese, Hindi, and Turkish. Moreover, to cope with the tokenization issues, we would like to explore multilingual LMs such as MT5 (Xue et al., 2021) .", |
| "cite_spans": [ |
| { |
| "start": 235, |
| "end": 252, |
| "text": "(Xu et al., 2020)", |
| "ref_id": "BIBREF70" |
| }, |
| { |
| "start": 491, |
| "end": 509, |
| "text": "(Xue et al., 2021)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Limitation and Future Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "This paper demonstrates the multilingual skills of pre-trained LMs, GPT and T5, in conducting incontext learning without parameter updates. This work is our initial attempt to show the effectiveness of in-context learning in the multilingual and crosslingual setting. It covers four different languages and explores the possibility of conducting efficient inference on low-resource tasks. We find that LMs can predict samples correctly, significantly better than the random prediction, in cross-lingual tasks with no training examples of the target languages. We would like to investigate further the applicability of this method to other tasks and languages in future work. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "The code is released at https://github.com/ gentaiscool/few-shot-lm.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Notice that some tokenizers (e.g., T5) splits \"true\" in two sub-tokens. We compute the score of the first sub-token only since it is significantly different for the two label (i.e. \"tr\" and \"fal\").", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The models except GPTNEO-J are taken from https://huggingface.co/. The GPTNEO-J model is taken from https://github.com/kingoflolz/ mesh-transformer-jax/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The XLM-R model fine-tuned with XNLI data can be accessed at https://huggingface.co/joeddav/ xlm-roberta-large-xnli. The BART model finetuned with MNLI data can be accessed at https:// huggingface.co/facebook/bart-large-mnli", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We want to thank Bryan Wilie and Samuel Cahyawijaya for their support in accessing the cloud service. We also sincerely thank Zihan Liu and ML Collective members for helping with the discussion about this project. Finally, we want to thanks the reviewer of the paper for their meaningful comments and suggestions. Given the short time for the camera-ready, we tried our best to improve the final version of the paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgment", |
| "sec_num": null |
| }, |
| { |
| "text": "This appendix shows the results on few-shot monolingual and cross-lingual settings on SNIPS, MTOP, and multilingual NLU datasets over a different number of samples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Full k-shot Results", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Char2subword: Extending the subword embedding space from pre-trained models using robust character compositionality", |
| "authors": [ |
| { |
| "first": "Gustavo", |
| "middle": [], |
| "last": "Aguilar", |
| "suffix": "" |
| }, |
| { |
| "first": "Bryan", |
| "middle": [], |
| "last": "Mccann", |
| "suffix": "" |
| }, |
| { |
| "first": "Tong", |
| "middle": [], |
| "last": "Niu", |
| "suffix": "" |
| }, |
| { |
| "first": "Nazneen", |
| "middle": [], |
| "last": "Rajani", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Keskar", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Solorio", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gustavo Aguilar, Bryan McCann, Tong Niu, Nazneen Rajani, N. Keskar, and T. Solorio. 2020. Char2subword: Extending the subword em- bedding space from pre-trained models using robust character compositionality. ArXiv, abs/2010.12730.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Massively multilingual sentence embeddings for zeroshot cross-lingual transfer and beyond", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "7", |
| "issue": "", |
| "pages": "597--610", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe and Holger Schwenk. 2019. Mas- sively multilingual sentence embeddings for zero- shot cross-lingual transfer and beyond. Transac- tions of the Association for Computational Linguis- tics, 7:597-610.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Anindya Iqbal, and Rifat Shahriyar. 2021. Banglabert: Combating embedding barrier for low-resource language understanding", |
| "authors": [ |
| { |
| "first": "Abhik", |
| "middle": [], |
| "last": "Bhattacharjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Tahmid", |
| "middle": [], |
| "last": "Hasan", |
| "suffix": "" |
| }, |
| { |
| "first": "Kazi", |
| "middle": [], |
| "last": "Samin", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Rahman", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2101.00204" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abhik Bhattacharjee, Tahmid Hasan, Kazi Samin, M Sohel Rahman, Anindya Iqbal, and Rifat Shahri- yar. 2021. Banglabert: Combating embedding bar- rier for low-resource language understanding. arXiv preprint arXiv:2101.00204.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Language models are few-shot learners", |
| "authors": [ |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Tom B Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "Nick", |
| "middle": [], |
| "last": "Mann", |
| "suffix": "" |
| }, |
| { |
| "first": "Melanie", |
| "middle": [], |
| "last": "Ryder", |
| "suffix": "" |
| }, |
| { |
| "first": "Jared", |
| "middle": [], |
| "last": "Subbiah", |
| "suffix": "" |
| }, |
| { |
| "first": "Prafulla", |
| "middle": [], |
| "last": "Kaplan", |
| "suffix": "" |
| }, |
| { |
| "first": "Arvind", |
| "middle": [], |
| "last": "Dhariwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Neelakantan", |
| "suffix": "" |
| }, |
| { |
| "first": "Girish", |
| "middle": [], |
| "last": "Shyam", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanda", |
| "middle": [], |
| "last": "Sastry", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Askell", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2005.14165" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. arXiv preprint arXiv:2005.14165.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Greenformer: Factorization toolkit for efficient deep neural networks", |
| "authors": [ |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Cahyawijaya", |
| "suffix": "" |
| }, |
| { |
| "first": "Holy", |
| "middle": [], |
| "last": "Genta Indra Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Bryan", |
| "middle": [], |
| "last": "Lovenia", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenliang", |
| "middle": [], |
| "last": "Wilie", |
| "suffix": "" |
| }, |
| { |
| "first": "Etsuko", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Ishii", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel Cahyawijaya, Genta Indra Winata, Holy Love- nia, Bryan Wilie, Wenliang Dai, Etsuko Ishii, and Pascale Fung. 2021a. Greenformer: Factorization toolkit for efficient deep neural networks.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Indonlg: Benchmark and resources for evaluating indonesian natural language generation", |
| "authors": [ |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Cahyawijaya", |
| "suffix": "" |
| }, |
| { |
| "first": "Bryan", |
| "middle": [], |
| "last": "Genta Indra Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Karissa", |
| "middle": [], |
| "last": "Wilie", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaohong", |
| "middle": [], |
| "last": "Vincentio", |
| "suffix": "" |
| }, |
| { |
| "first": "Adhiguna", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Kuncoro", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| }, |
| { |
| "first": "Syafri", |
| "middle": [], |
| "last": "Zhi Yuan Lim", |
| "suffix": "" |
| }, |
| { |
| "first": "Masayu", |
| "middle": [], |
| "last": "Bahar", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Leylia Khodra", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2104.08200" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel Cahyawijaya, Genta Indra Winata, Bryan Wilie, Karissa Vincentio, Xiaohong Li, Adhiguna Kuncoro, Sebastian Ruder, Zhi Yuan Lim, Syafri Ba- har, Masayu Leylia Khodra, et al. 2021b. Indonlg: Benchmark and resources for evaluating indone- sian natural language generation. arXiv preprint arXiv:2104.08200.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Zero-shot cross-lingual transfer of neural machine translation with multilingual pretrained encoders", |
| "authors": [ |
| { |
| "first": "Guanhua", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuming", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Yun", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongdong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jia", |
| "middle": [], |
| "last": "Pan", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenping", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2104.08757" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guanhua Chen, Shuming Ma, Yun Chen, Li Dong, Dongdong Zhang, Jia Pan, Wenping Wang, and Furu Wei. 2021. Zero-shot cross-lingual transfer of neu- ral machine translation with multilingual pretrained encoders. arXiv preprint arXiv:2104.08757.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "KGPT: Knowledge-grounded pretraining for data-to-text generation", |
| "authors": [ |
| { |
| "first": "Wenhu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Xifeng", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "Yang" |
| ], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "8635--8648", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.697" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenhu Chen, Yu Su, Xifeng Yan, and William Yang Wang. 2020. KGPT: Knowledge-grounded pre- training for data-to-text generation. In Proceed- ings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8635-8648, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Electra: Pre-training text encoders as discriminators rather than generators", |
| "authors": [ |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kevin Clark, Minh-Thang Luong, Quoc V Le, and Christopher D Manning. 2019. Electra: Pre-training text encoders as discriminators rather than genera- tors. In International Conference on Learning Rep- resentations.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Unsupervised cross-lingual representation learning at scale", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kartikay", |
| "middle": [], |
| "last": "Khandelwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Wenzek", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00c9douard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "8440--8451", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, \u00c9douard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 8440- 8451.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Cross-lingual language model pretraining", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "32", |
| "issue": "", |
| "pages": "7059--7069", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau and Guillaume Lample. 2019. Cross- lingual language model pretraining. Advances in Neural Information Processing Systems, 32:7059- 7069.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Xnli: Evaluating crosslingual sentence representations", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruty", |
| "middle": [], |
| "last": "Rinott", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2475--2485", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Ruty Rinott, Guillaume Lample, Ad- ina Williams, Samuel Bowman, Holger Schwenk, and Veselin Stoyanov. 2018. Xnli: Evaluating cross- lingual sentence representations. In Proceedings of the 2018 Conference on Empirical Methods in Natu- ral Language Processing, pages 2475-2485.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Snips voice platform: an embedded spoken language understanding system for private-by-design voice interfaces", |
| "authors": [ |
| { |
| "first": "Alice", |
| "middle": [], |
| "last": "Coucke", |
| "suffix": "" |
| }, |
| { |
| "first": "Alaa", |
| "middle": [], |
| "last": "Saade", |
| "suffix": "" |
| }, |
| { |
| "first": "Adrien", |
| "middle": [], |
| "last": "Ball", |
| "suffix": "" |
| }, |
| { |
| "first": "Th\u00e9odore", |
| "middle": [], |
| "last": "Bluche", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandre", |
| "middle": [], |
| "last": "Caulier", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Leroy", |
| "suffix": "" |
| }, |
| { |
| "first": "Cl\u00e9ment", |
| "middle": [], |
| "last": "Doumouro", |
| "suffix": "" |
| }, |
| { |
| "first": "Thibault", |
| "middle": [], |
| "last": "Gisselbrecht", |
| "suffix": "" |
| }, |
| { |
| "first": "Francesco", |
| "middle": [], |
| "last": "Caltagirone", |
| "suffix": "" |
| }, |
| { |
| "first": "Thibaut", |
| "middle": [], |
| "last": "Lavril", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1805.10190" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alice Coucke, Alaa Saade, Adrien Ball, Th\u00e9odore Bluche, Alexandre Caulier, David Leroy, Cl\u00e9ment Doumouro, Thibault Gisselbrecht, Francesco Calta- girone, Thibaut Lavril, et al. 2018. Snips voice plat- form: an embedded spoken language understanding system for private-by-design voice interfaces. arXiv preprint arXiv:1805.10190.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Language-agnostic multilingual modeling", |
| "authors": [ |
| { |
| "first": "Arindrima", |
| "middle": [], |
| "last": "Datta", |
| "suffix": "" |
| }, |
| { |
| "first": "Bhuvana", |
| "middle": [], |
| "last": "Ramabhadran", |
| "suffix": "" |
| }, |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Emond", |
| "suffix": "" |
| }, |
| { |
| "first": "Anjuli", |
| "middle": [], |
| "last": "Kannan", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Roark", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "8239--8243", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arindrima Datta, Bhuvana Ramabhadran, Jesse Emond, Anjuli Kannan, and Brian Roark. 2020. Language-agnostic multilingual modeling. In ICASSP 2020-2020 IEEE International Confer- ence on Acoustics, Speech and Signal Processing (ICASSP), pages 8239-8243. IEEE.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Barthez: a skilled pretrained french sequence-to-sequence model", |
| "authors": [ |
| { |
| "first": "Moussa", |
| "middle": [ |
| "Kamal" |
| ], |
| "last": "Eddine", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [ |
| "J-P" |
| ], |
| "last": "Tixier", |
| "suffix": "" |
| }, |
| { |
| "first": "Michalis", |
| "middle": [], |
| "last": "Vazirgiannis", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2010.12321" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Moussa Kamal Eddine, Antoine J-P Tixier, and Michalis Vazirgiannis. 2020. Barthez: a skilled pre- trained french sequence-to-sequence model. arXiv preprint arXiv:2010.12321.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "The pile: An 800gb dataset of diverse text for language modeling", |
| "authors": [ |
| { |
| "first": "Leo", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Stella", |
| "middle": [], |
| "last": "Biderman", |
| "suffix": "" |
| }, |
| { |
| "first": "Sid", |
| "middle": [], |
| "last": "Black", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurence", |
| "middle": [], |
| "last": "Golding", |
| "suffix": "" |
| }, |
| { |
| "first": "Travis", |
| "middle": [], |
| "last": "Hoppe", |
| "suffix": "" |
| }, |
| { |
| "first": "Charles", |
| "middle": [], |
| "last": "Foster", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Phang", |
| "suffix": "" |
| }, |
| { |
| "first": "Horace", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Anish", |
| "middle": [], |
| "last": "Thite", |
| "suffix": "" |
| }, |
| { |
| "first": "Noa", |
| "middle": [], |
| "last": "Nabeshima", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2101.00027" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Leo Gao, Stella Biderman, Sid Black, Laurence Gold- ing, Travis Hoppe, Charles Foster, Jason Phang, Ho- race He, Anish Thite, Noa Nabeshima, et al. 2020a. The pile: An 800gb dataset of diverse text for lan- guage modeling. arXiv preprint arXiv:2101.00027.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Making pre-trained language models better few-shot learners", |
| "authors": [ |
| { |
| "first": "Tianyu", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Fisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2012.15723" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tianyu Gao, Adam Fisch, and Danqi Chen. 2020b. Making pre-trained language models better few-shot learners. arXiv preprint arXiv:2012.15723.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Larger-scale transformers for multilingual masked language modeling", |
| "authors": [ |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Giri", |
| "middle": [], |
| "last": "Anantharaman", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2105.00572" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Naman Goyal, Jingfei Du, Myle Ott, Giri Ananthara- man, and Alexis Conneau. 2021. Larger-scale trans- formers for multilingual masked language modeling. arXiv preprint arXiv:2105.00572.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "A simple language model for task-oriented dialogue", |
| "authors": [ |
| { |
| "first": "Ehsan", |
| "middle": [], |
| "last": "Hosseini-Asl", |
| "suffix": "" |
| }, |
| { |
| "first": "Bryan", |
| "middle": [], |
| "last": "Mccann", |
| "suffix": "" |
| }, |
| { |
| "first": "Chien-Sheng", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Semih", |
| "middle": [], |
| "last": "Yavuz", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2005.00796" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ehsan Hosseini-Asl, Bryan McCann, Chien-Sheng Wu, Semih Yavuz, and Richard Socher. 2020. A simple language model for task-oriented dialogue. arXiv preprint arXiv:2005.00796.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Universal language model fine-tuning for text classification", |
| "authors": [ |
| { |
| "first": "Jeremy", |
| "middle": [], |
| "last": "Howard", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "328--339", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-1031" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeremy Howard and Sebastian Ruder. 2018. Universal language model fine-tuning for text classification. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 328-339, Melbourne, Australia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "From zero to hero: On the limitations of zero-shot language transfer with multilingual transformers", |
| "authors": [ |
| { |
| "first": "Anne", |
| "middle": [], |
| "last": "Lauscher", |
| "suffix": "" |
| }, |
| { |
| "first": "Vinit", |
| "middle": [], |
| "last": "Ravishankar", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4483--4499", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anne Lauscher, Vinit Ravishankar, Ivan Vuli\u0107, and Goran Glava\u0161. 2020. From zero to hero: On the lim- itations of zero-shot language transfer with multilin- gual transformers. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 4483-4499.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Flaubert: Unsupervised language model pre-training for french", |
| "authors": [ |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Vial", |
| "suffix": "" |
| }, |
| { |
| "first": "Jibril", |
| "middle": [], |
| "last": "Frej", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Segonne", |
| "suffix": "" |
| }, |
| { |
| "first": "Maximin", |
| "middle": [], |
| "last": "Coavoux", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Lecouteux", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandre", |
| "middle": [], |
| "last": "Allauzen", |
| "suffix": "" |
| }, |
| { |
| "first": "Benoit", |
| "middle": [], |
| "last": "Crabb\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurent", |
| "middle": [], |
| "last": "Besacier", |
| "suffix": "" |
| }, |
| { |
| "first": "Didier", |
| "middle": [], |
| "last": "Schwab", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "2479--2490", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hang Le, Lo\u00efc Vial, Jibril Frej, Vincent Segonne, Max- imin Coavoux, Benjamin Lecouteux, Alexandre Al- lauzen, Benoit Crabb\u00e9, Laurent Besacier, and Didier Schwab. 2020. Flaubert: Unsupervised language model pre-training for french. In Proceedings of the 12th Language Resources and Evaluation Con- ference, pages 2479-2490.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Kr-bert: A small-scale korean-specific language model", |
| "authors": [ |
| { |
| "first": "Sangah", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Hansol", |
| "middle": [], |
| "last": "Jang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yunmee", |
| "middle": [], |
| "last": "Baik", |
| "suffix": "" |
| }, |
| { |
| "first": "Suzi", |
| "middle": [], |
| "last": "Park", |
| "suffix": "" |
| }, |
| { |
| "first": "Hyopil", |
| "middle": [], |
| "last": "Shin", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2008.03979" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sangah Lee, Hansol Jang, Yunmee Baik, Suzi Park, and Hyopil Shin. 2020. Kr-bert: A small-scale korean-specific language model. arXiv preprint arXiv:2008.03979.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Bart: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Marjan", |
| "middle": [], |
| "last": "Ghazvininejad", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdelrahman", |
| "middle": [], |
| "last": "Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "7871--7880", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Mar- jan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. Bart: Denoising sequence-to-sequence pre- training for natural language generation, translation, and comprehension. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 7871-7880.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Mtop: A comprehensive multilingual task-oriented semantic parsing benchmark", |
| "authors": [ |
| { |
| "first": "Haoran", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Abhinav", |
| "middle": [], |
| "last": "Arora", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuohui", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Anchit", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Sonal", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Yashar", |
| "middle": [], |
| "last": "Mehdad", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume", |
| "volume": "", |
| "issue": "", |
| "pages": "2950--2962", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Haoran Li, Abhinav Arora, Shuohui Chen, Anchit Gupta, Sonal Gupta, and Yashar Mehdad. 2021. Mtop: A comprehensive multilingual task-oriented semantic parsing benchmark. In Proceedings of the 16th Conference of the European Chapter of the As- sociation for Computational Linguistics: Main Vol- ume, pages 2950-2962.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Prefix-tuning: Optimizing continuous prompts for generation", |
| "authors": [ |
| { |
| "first": "Xiang", |
| "middle": [ |
| "Lisa" |
| ], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2101.00190" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiang Lisa Li and Percy Liang. 2021. Prefix- tuning: Optimizing continuous prompts for genera- tion. arXiv preprint arXiv:2101.00190.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Choosing transfer languages for cross-lingual learning", |
| "authors": [ |
| { |
| "first": "Yu-Hsiang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Chian-Yu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Zirui", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuyan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mengzhou", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Shruti", |
| "middle": [], |
| "last": "Rijhwani", |
| "suffix": "" |
| }, |
| { |
| "first": "Junxian", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhisong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuezhe", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3125--3135", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu-Hsiang Lin, Chian-Yu Chen, Jean Lee, Zirui Li, Yuyan Zhang, Mengzhou Xia, Shruti Rijhwani, Junxian He, Zhisong Zhang, Xuezhe Ma, et al. 2019. Choosing transfer languages for cross-lingual learn- ing. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3125-3135.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Leveraging slot descriptions for zero-shot cross-domain dialogue state tracking", |
| "authors": [ |
| { |
| "first": "Zhaojiang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Seungwhan", |
| "middle": [], |
| "last": "Moon", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [ |
| "A" |
| ], |
| "last": "Crook", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhenpeng", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiguang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhou", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Eunjoon", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajen", |
| "middle": [], |
| "last": "Subba", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "5640--5648", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhaojiang Lin, Bing Liu, Seungwhan Moon, Paul A Crook, Zhenpeng Zhou, Zhiguang Wang, Zhou Yu, Andrea Madotto, Eunjoon Cho, and Rajen Subba. 2021a. Leveraging slot descriptions for zero-shot cross-domain dialogue statetracking. In Proceed- ings of the 2021 Conference of the North Ameri- can Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5640-5648.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Xpersona: Evaluating multilingual personalized chatbot", |
| "authors": [ |
| { |
| "first": "Zhaojiang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Genta", |
| "middle": [], |
| "last": "Indra Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Cahyawijaya", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Bang", |
| "suffix": "" |
| }, |
| { |
| "first": "Etsuko", |
| "middle": [], |
| "last": "Ishii", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2003.07568" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhaojiang Lin, Zihan Liu, Genta Indra Winata, Samuel Cahyawijaya, Andrea Madotto, Yejin Bang, Etsuko Ishii, and Pascale Fung. 2020a. Xpersona: Eval- uating multilingual personalized chatbot. arXiv preprint arXiv:2003.07568.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Bitod: A bilingual multi-domain dataset for task-oriented dialogue modeling", |
| "authors": [ |
| { |
| "first": "Zhaojiang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Genta", |
| "middle": [], |
| "last": "Indra Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Feijun", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuxiang", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhaojiang Lin, Andrea Madotto, Genta Indra Winata, Peng Xu, Feijun Jiang, Yuxiang Hu, Chen Shi, and Pascale Fung. 2021b. Bitod: A bilingual multi- domain dataset for task-oriented dialogue modeling. arXiv e-prints, pages arXiv-2106.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Mintl: Minimalist transfer learning for task-oriented dialogue systems", |
| "authors": [ |
| { |
| "first": "Zhaojiang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Genta", |
| "middle": [], |
| "last": "Indra Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3391--3405", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhaojiang Lin, Andrea Madotto, Genta Indra Winata, and Pascale Fung. 2020b. Mintl: Minimalist trans- fer learning for task-oriented dialogue systems. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 3391-3405.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "What makes good in-context examples for gpt-3?", |
| "authors": [ |
| { |
| "first": "Jiachang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Dinghan", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yizhe", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| }, |
| { |
| "first": "Lawrence", |
| "middle": [], |
| "last": "Carin", |
| "suffix": "" |
| }, |
| { |
| "first": "Weizhu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2101.06804" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiachang Liu, Dinghan Shen, Yizhe Zhang, Bill Dolan, Lawrence Carin, and Weizhu Chen. 2021a. What makes good in-context examples for gpt-3? arXiv preprint arXiv:2101.06804.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Pre-train, prompt, and predict: A systematic survey of prompting methods in natural language processing", |
| "authors": [ |
| { |
| "first": "Pengfei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Weizhe", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinlan", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengbao", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hiroaki", |
| "middle": [], |
| "last": "Hayashi", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2107.13586" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pengfei Liu, Weizhe Yuan, Jinlan Fu, Zhengbao Jiang, Hiroaki Hayashi, and Graham Neubig. 2021b. Pre- train, prompt, and predict: A systematic survey of prompting methods in natural language processing. arXiv preprint arXiv:2107.13586.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Multilingual denoising pre-training for neural machine translation", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiatao", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Xian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Marjan", |
| "middle": [], |
| "last": "Ghazvininejad", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "8", |
| "issue": "", |
| "pages": "726--742", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, and Luke Zettlemoyer. 2020a. Multilingual denoising pre-training for neural machine translation. Transac- tions of the Association for Computational Linguis- tics, 8:726-742.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Roberta: A robustly optimized bert pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.11692" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019a. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Zero-shot cross-lingual dialogue systems with transferable latent variables", |
| "authors": [ |
| { |
| "first": "Zihan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamin", |
| "middle": [], |
| "last": "Shin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Genta", |
| "middle": [], |
| "last": "Indra Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1297--1303", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zihan Liu, Jamin Shin, Yan Xu, Genta Indra Winata, Peng Xu, Andrea Madotto, and Pascale Fung. 2019b. Zero-shot cross-lingual dialogue systems with trans- ferable latent variables. In Proceedings of the 2019 Conference on Empirical Methods in Natu- ral Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 1297-1303.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Attention-informed mixed-language training for zero-shot cross-lingual task-oriented dialogue systems", |
| "authors": [ |
| { |
| "first": "Zihan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Genta", |
| "middle": [], |
| "last": "Indra Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhaojiang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "34", |
| "issue": "", |
| "pages": "8433--8440", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zihan Liu, Genta Indra Winata, Zhaojiang Lin, Peng Xu, and Pascale Fung. 2020b. Attention-informed mixed-language training for zero-shot cross-lingual task-oriented dialogue systems. In Proceedings of the AAAI Conference on Artificial Intelligence, vol- ume 34, pages 8433-8440.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "X2parser: Cross-lingual and cross-domain framework for task-oriented compositional semantic parsing", |
| "authors": [ |
| { |
| "first": "Zihan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Genta", |
| "middle": [], |
| "last": "Indra Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2106.03777" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zihan Liu, Genta Indra Winata, Peng Xu, and Pascale Fung. 2021c. X2parser: Cross-lingual and cross- domain framework for task-oriented compositional semantic parsing. arXiv preprint arXiv:2106.03777.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "X2Parser: Cross-lingual and cross-domain framework for task-oriented compositional semantic parsing", |
| "authors": [ |
| { |
| "first": "Zihan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Genta", |
| "middle": [], |
| "last": "Indra Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 6th Workshop on Representation Learning for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "112--127", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2021.repl4nlp-1.13" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zihan Liu, Genta Indra Winata, Peng Xu, and Pas- cale Fung. 2021d. X2Parser: Cross-lingual and cross-domain framework for task-oriented compo- sitional semantic parsing. In Proceedings of the 6th Workshop on Representation Learning for NLP (RepL4NLP-2021), pages 112-127, Online. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Fantastically ordered prompts and where to find them: Overcoming few-shot prompt order sensitivity", |
| "authors": [ |
| { |
| "first": "Yao", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Bartolo", |
| "suffix": "" |
| }, |
| { |
| "first": "Alastair", |
| "middle": [], |
| "last": "Moore", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| }, |
| { |
| "first": "Pontus", |
| "middle": [], |
| "last": "Stenetorp", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2104.08786" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yao Lu, Max Bartolo, Alastair Moore, Sebastian Riedel, and Pontus Stenetorp. 2021. Fantastically ordered prompts and where to find them: Overcom- ing few-shot prompt order sensitivity. arXiv preprint arXiv:2104.08786.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Learning knowledge bases with parameters for task-oriented dialogue systems", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Cahyawijaya", |
| "suffix": "" |
| }, |
| { |
| "first": "Genta", |
| "middle": [], |
| "last": "Indra Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhaojiang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings", |
| "volume": "", |
| "issue": "", |
| "pages": "2372--2394", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrea Madotto, Samuel Cahyawijaya, Genta Indra Winata, Yan Xu, Zihan Liu, Zhaojiang Lin, and Pas- cale Fung. 2020a. Learning knowledge bases with parameters for task-oriented dialogue systems. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings, pages 2372-2394.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Language models as few-shot learner for task-oriented dialogue systems", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhaojiang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2008.06239" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrea Madotto, Zihan Liu, Zhaojiang Lin, and Pas- cale Fung. 2020b. Language models as few-shot learner for task-oriented dialogue systems. arXiv preprint arXiv:2008.06239.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Camembert: a tasty french language model", |
| "authors": [ |
| { |
| "first": "Louis", |
| "middle": [], |
| "last": "Martin", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Muller", |
| "suffix": "" |
| }, |
| { |
| "first": "Pedro Javier Ortiz", |
| "middle": [], |
| "last": "Su\u00e1rez", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoann", |
| "middle": [], |
| "last": "Dupont", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurent", |
| "middle": [], |
| "last": "Romary", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00c9ric", |
| "middle": [ |
| "Villemonte" |
| ], |
| "last": "de la Clergerie", |
| "suffix": "" |
| }, |
| { |
| "first": "Djam\u00e9", |
| "middle": [], |
| "last": "Seddah", |
| "suffix": "" |
| }, |
| { |
| "first": "Beno\u00eet", |
| "middle": [], |
| "last": "Sagot", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "7203--7219", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Louis Martin, Benjamin Muller, Pedro Javier Ortiz Su\u00e1rez, Yoann Dupont, Laurent Romary, \u00c9ric Ville- monte de la Clergerie, Djam\u00e9 Seddah, and Beno\u00eet Sagot. 2020. Camembert: a tasty french language model. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 7203-7219.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Phobert: Pre-trained language models for vietnamese", |
| "authors": [ |
| { |
| "first": "Dat", |
| "middle": [ |
| "Quoc" |
| ], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Anh", |
| "middle": [ |
| "Tuan" |
| ], |
| "last": "Nguyen", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings", |
| "volume": "", |
| "issue": "", |
| "pages": "1037--1042", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dat Quoc Nguyen and Anh Tuan Nguyen. 2020. Phobert: Pre-trained language models for viet- namese. In Proceedings of the 2020 Conference on", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Natural Language Processing: Findings", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1037--1042", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Empirical Methods in Natural Language Processing: Findings, pages 1037-1042.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Klue: Korean language understanding evaluation", |
| "authors": [ |
| { |
| "first": "Sungjoon", |
| "middle": [], |
| "last": "Park", |
| "suffix": "" |
| }, |
| { |
| "first": "Jihyung", |
| "middle": [], |
| "last": "Moon", |
| "suffix": "" |
| }, |
| { |
| "first": "Sungdong", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Won", |
| "middle": [ |
| "Ik" |
| ], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiyoon", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2105.09680" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sungjoon Park, Jihyung Moon, Sungdong Kim, Won Ik Cho, Jiyoon Han, Jangwon Park, Chisung Song, Jun- seong Kim, Yongsook Song, Taehwan Oh, et al. 2021. Klue: Korean language understanding eval- uation. arXiv preprint arXiv:2105.09680.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "True few-shot learning with language models", |
| "authors": [ |
| { |
| "first": "Ethan", |
| "middle": [], |
| "last": "Perez", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2105.11447" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ethan Perez, Douwe Kiela, and Kyunghyun Cho. 2021. True few-shot learning with language models. arXiv preprint arXiv:2105.11447.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 2227- 2237.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Language models as knowledge bases?", |
| "authors": [ |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Petroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rockt\u00e4schel", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Anton", |
| "middle": [], |
| "last": "Bakhtin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuxiang", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "2463--2473", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fabio Petroni, Tim Rockt\u00e4schel, Sebastian Riedel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, and Alexander Miller. 2019. Language models as knowl- edge bases? In Proceedings of the 2019 Confer- ence on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 2463-2473.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Mad-x: An adapter-based framework for multi-task cross-lingual transfer", |
| "authors": [ |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Pfeiffer", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2005.00052" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonas Pfeiffer, Ivan Vuli\u0107, Iryna Gurevych, and Sebas- tian Ruder. 2020. Mad-x: An adapter-based frame- work for multi-task cross-lingual transfer. arXiv preprint arXiv:2005.00052.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Adversarial propagation and zero-shot cross-lingual transfer of word vector specialization", |
| "authors": [ |
| { |
| "first": "Edoardo", |
| "middle": [ |
| "Maria" |
| ], |
| "last": "Ponti", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "282--293", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edoardo Maria Ponti, Ivan Vuli\u0107, Goran Glava\u0161, Nikola Mrk\u0161i\u0107, and Anna Korhonen. 2018. Adversarial propagation and zero-shot cross-lingual transfer of word vector specialization. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 282-293.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "A stack-propagation framework with token-level intent detection for spoken language understanding", |
| "authors": [ |
| { |
| "first": "Libo", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Wanxiang", |
| "middle": [], |
| "last": "Che", |
| "suffix": "" |
| }, |
| { |
| "first": "Yangming", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Haoyang", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "2078--2087", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Libo Qin, Wanxiang Che, Yangming Li, Haoyang Wen, and Ting Liu. 2019. A stack-propagation frame- work with token-level intent detection for spoken language understanding. In Proceedings of the 2019 Conference on Empirical Methods in Natu- ral Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 2078-2087.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Language models are unsupervised multitask learners", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "OpenAI blog", |
| "volume": "1", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "Exploring the limits of transfer learning with a unified text-to-text transformer", |
| "authors": [ |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Raffel", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Katherine", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharan", |
| "middle": [], |
| "last": "Narang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Matena", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanqi", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter J", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "21", |
| "issue": "", |
| "pages": "1--67", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the lim- its of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21:1-67.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "Gottbert: a pure german language model", |
| "authors": [ |
| { |
| "first": "Raphael", |
| "middle": [], |
| "last": "Scheible", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabian", |
| "middle": [], |
| "last": "Thomczyk", |
| "suffix": "" |
| }, |
| { |
| "first": "Patric", |
| "middle": [], |
| "last": "Tippmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Jaravine", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Boeker", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2012.02110" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raphael Scheible, Fabian Thomczyk, Patric Tippmann, Victor Jaravine, and Martin Boeker. 2020. Got- tbert: a pure german language model. arXiv preprint arXiv:2012.02110.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "It's not just size that matters: Small language models are also few-shot learners", |
| "authors": [ |
| { |
| "first": "Timo", |
| "middle": [], |
| "last": "Schick", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "2339--2352", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timo Schick and Hinrich Sch\u00fctze. 2021. It's not just size that matters: Small language models are also few-shot learners. In Proceedings of the 2021 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, pages 2339-2352.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "Cross-lingual transfer learning for multilingual task oriented dialog", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Sonal", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Rushin", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "3795--3805", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Schuster, Sonal Gupta, Rushin Shah, and Mike Lewis. 2019. Cross-lingual transfer learning for multilingual task oriented dialog. In Proceed- ings of the 2019 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 3795-3805.", |
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "Eliciting knowledge from language models using automatically generated prompts", |
| "authors": [ |
| { |
| "first": "Taylor", |
| "middle": [], |
| "last": "Shin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yasaman", |
| "middle": [], |
| "last": "Razeghi", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert L", |
| "middle": [], |
| "last": "Logan", |
| "suffix": "IV" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Wallace", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4222--4235", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taylor Shin, Yasaman Razeghi, Robert L Logan IV, Eric Wallace, and Sameer Singh. 2020. Eliciting knowledge from language models using automati- cally generated prompts. In Proceedings of the 2020 Conference on Empirical Methods in Natural Lan- guage Processing (EMNLP), pages 4222-4235.", |
| "links": null |
| }, |
| "BIBREF59": { |
| "ref_id": "b59", |
| "title": "Indonlu: Benchmark and resources for evaluating indonesian natural language understanding", |
| "authors": [ |
| { |
| "first": "Bryan", |
| "middle": [], |
| "last": "Wilie", |
| "suffix": "" |
| }, |
| { |
| "first": "Karissa", |
| "middle": [], |
| "last": "Vincentio", |
| "suffix": "" |
| }, |
| { |
| "first": "Genta Indra", |
| "middle": [], |
| "last": "Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Cahyawijaya", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaohong", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhi Yuan", |
| "middle": [], |
| "last": "Lim", |
| "suffix": "" |
| }, |
| { |
| "first": "Sidik", |
| "middle": [], |
| "last": "Soleman", |
| "suffix": "" |
| }, |
| { |
| "first": "Rahmad", |
| "middle": [], |
| "last": "Mahendra", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| }, |
| { |
| "first": "Syafri", |
| "middle": [], |
| "last": "Bahar", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 1st Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 10th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "843--857", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bryan Wilie, Karissa Vincentio, Genta Indra Winata, Samuel Cahyawijaya, Xiaohong Li, Zhi Yuan Lim, Sidik Soleman, Rahmad Mahendra, Pascale Fung, Syafri Bahar, et al. 2020. Indonlu: Benchmark and resources for evaluating indonesian natural language understanding. In Proceedings of the 1st Confer- ence of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 10th Interna- tional Joint Conference on Natural Language Pro- cessing, pages 843-857.", |
| "links": null |
| }, |
| "BIBREF60": { |
| "ref_id": "b60", |
| "title": "A broad-coverage challenge corpus for sentence understanding through inference", |
| "authors": [ |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Nangia", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1112--1122", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sen- tence understanding through inference. In Proceed- ings of the 2018 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long Papers), pages 1112-1122.", |
| "links": null |
| }, |
| "BIBREF61": { |
| "ref_id": "b61", |
| "title": "Multilingual transfer learning for code-switched language and speech neural modeling", |
| "authors": [ |
| { |
| "first": "Genta Indra", |
| "middle": [], |
| "last": "Winata", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2104.06268" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Genta Indra Winata. 2021. Multilingual transfer learn- ing for code-switched language and speech neural modeling. arXiv preprint arXiv:2104.06268.", |
| "links": null |
| }, |
| "BIBREF62": { |
| "ref_id": "b62", |
| "title": "Are multilingual models effective in codeswitching?", |
| "authors": [ |
| { |
| "first": "Genta Indra", |
| "middle": [], |
| "last": "Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Cahyawijaya", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhaojiang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the Fifth Workshop on Computational Approaches to Linguistic Code-Switching", |
| "volume": "", |
| "issue": "", |
| "pages": "142--153", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Genta Indra Winata, Samuel Cahyawijaya, Zihan Liu, Zhaojiang Lin, Andrea Madotto, and Pascale Fung. 2021. Are multilingual models effective in code- switching? In Proceedings of the Fifth Workshop on Computational Approaches to Linguistic Code- Switching, pages 142-153.", |
| "links": null |
| }, |
| "BIBREF63": { |
| "ref_id": "b63", |
| "title": "Code-switched language models using neural based synthetic data from parallel sentences", |
| "authors": [ |
| { |
| "first": "Genta Indra", |
| "middle": [], |
| "last": "Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Chien-Sheng", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "271--280", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Genta Indra Winata, Andrea Madotto, Chien-Sheng Wu, and Pascale Fung. 2019. Code-switched lan- guage models using neural based synthetic data from parallel sentences. In Proceedings of the 23rd Con- ference on Computational Natural Language Learn- ing (CoNLL), pages 271-280.", |
| "links": null |
| }, |
| "BIBREF64": { |
| "ref_id": "b64", |
| "title": "Adapt-and-adjust: Overcoming the long-tail problem of multilingual speech recognition", |
| "authors": [ |
| { |
| "first": "Genta Indra", |
| "middle": [], |
| "last": "Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Guangsen", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Caiming", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Hoi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2012.01687" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Genta Indra Winata, Guangsen Wang, Caiming Xiong, and Steven Hoi. 2020. Adapt-and-adjust: Over- coming the long-tail problem of multilingual speech recognition. arXiv preprint arXiv:2012.01687.", |
| "links": null |
| }, |
| "BIBREF65": { |
| "ref_id": "b65", |
| "title": "Probing task-oriented dialogue representation from language models", |
| "authors": [ |
| { |
| "first": "Chien-Sheng", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Caiming", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "5036--5051", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chien-Sheng Wu and Caiming Xiong. 2020. Probing task-oriented dialogue representation from language models. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5036-5051.", |
| "links": null |
| }, |
| "BIBREF66": { |
| "ref_id": "b66", |
| "title": "End-to-end slot alignment and recognition for crosslingual nlu", |
| "authors": [ |
| { |
| "first": "Weijia", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Batool", |
| "middle": [], |
| "last": "Haider", |
| "suffix": "" |
| }, |
| { |
| "first": "Saab", |
| "middle": [], |
| "last": "Mansour", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "5052--5063", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Weijia Xu, Batool Haider, and Saab Mansour. 2020. End-to-end slot alignment and recognition for cross- lingual nlu. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Process- ing (EMNLP), pages 5052-5063.", |
| "links": null |
| }, |
| "BIBREF67": { |
| "ref_id": "b67", |
| "title": "Aditya Barua, and Colin Raffel. 2021. mt5: A massively multilingual pre-trained text-to-text transformer", |
| "authors": [ |
| { |
| "first": "Linting", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [], |
| "last": "Constant", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihir", |
| "middle": [], |
| "last": "Kale", |
| "suffix": "" |
| }, |
| { |
| "first": "Rami", |
| "middle": [], |
| "last": "Al-Rfou", |
| "suffix": "" |
| }, |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Siddhant", |
| "suffix": "" |
| }, |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Barua", |
| "suffix": "" |
| }, |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Raffel", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "483--498", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Linting Xue, Noah Constant, Adam Roberts, Mi- hir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, and Colin Raffel. 2021. mt5: A massively multilingual pre-trained text-to-text transformer. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 483-498.", |
| "links": null |
| }, |
| "BIBREF68": { |
| "ref_id": "b68", |
| "title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Russ R", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "32", |
| "issue": "", |
| "pages": "5753--5763", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. Advances in Neural Infor- mation Processing Systems, 32:5753-5763.", |
| "links": null |
| }, |
| "BIBREF69": { |
| "ref_id": "b69", |
| "title": "Calibrate before use: Improving few-shot performance of language models", |
| "authors": [ |
| { |
| "first": "Tony Z", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Wallace", |
| "suffix": "" |
| }, |
| { |
| "first": "Shi", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2102.09690" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tony Z Zhao, Eric Wallace, Shi Feng, Dan Klein, and Sameer Singh. 2021. Calibrate before use: Im- proving few-shot performance of language models. arXiv preprint arXiv:2102.09690.", |
| "links": null |
| }, |
| "BIBREF70": { |
| "ref_id": "b70", |
| "title": "Knowledgegrounded dialogue generation with pre-trained language models", |
| "authors": [ |
| { |
| "first": "Xueliang", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Can", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chongyang", |
| "middle": [], |
| "last": "Tao", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongyan", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3377--3390", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.272" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xueliang Zhao, Wei Wu, Can Xu, Chongyang Tao, Dongyan Zhao, and Rui Yan. 2020. Knowledge- grounded dialogue generation with pre-trained lan- guage models. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 3377-3390, Online. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF71": { |
| "ref_id": "b71", |
| "title": "Xia Song, and Furu Wei. 2021. Consistency regularization for cross-lingual fine-tuning", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaohan", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenhui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zewen", |
| "middle": [], |
| "last": "Chi", |
| "suffix": "" |
| }, |
| { |
| "first": "Saksham", |
| "middle": [], |
| "last": "Singhal", |
| "suffix": "" |
| }, |
| { |
| "first": "Wanxiang", |
| "middle": [], |
| "last": "Che", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xia", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2106.08226" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Zheng, Li Dong, Shaohan Huang, Wenhui Wang, Zewen Chi, Saksham Singhal, Wanxiang Che, Ting Liu, Xia Song, and Furu Wei. 2021. Consistency regularization for cross-lingual fine-tuning. arXiv preprint arXiv:2106.08226.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "text": "The results on German (de) MTOP dataset with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "text": "The results on English (en) MTOP dataset with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "text": "The results on Spanish (es) MTOP dataset with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF3": { |
| "type_str": "figure", |
| "text": "The results on French (fr) MTOP dataset with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF4": { |
| "type_str": "figure", |
| "text": "The results on English (en) multilingual NLU dataset with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF5": { |
| "type_str": "figure", |
| "text": "The results on Spanish (es) multilingual NLU dataset with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF6": { |
| "type_str": "figure", |
| "text": "The acc results on English (en) SNIPS with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF7": { |
| "type_str": "figure", |
| "text": "The f1 results on English (en) SNIPS with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF8": { |
| "type_str": "figure", |
| "text": "The acc results on the cross-lingual setting, English-German (de) MTOP dataset with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF9": { |
| "type_str": "figure", |
| "text": "The f1 results on the cross-lingual setting, English-German (de) MTOP dataset with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF10": { |
| "type_str": "figure", |
| "text": "The acc results on the cross-lingual setting, English-Spanish (es) MTOP dataset with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF11": { |
| "type_str": "figure", |
| "text": "The f1 results on the cross-lingual setting, English-Spanish (es) MTOP dataset with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF12": { |
| "type_str": "figure", |
| "text": "The acc results on the cross-lingual setting, English-French (fr) MTOP dataset with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF13": { |
| "type_str": "figure", |
| "text": "The f1 results on the cross-lingual setting, English-French (fr) MTOP dataset with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF14": { |
| "type_str": "figure", |
| "text": "The acc results on the cross-lingual setting, English-Spanish (es) multilingual NLU dataset with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF15": { |
| "type_str": "figure", |
| "text": "The f1 results on the cross-lingual setting, English-Spanish (es) multilingual NLU dataset with GPT models.", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF0": { |
| "text": "", |
| "content": "<table/>", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "text": "\u00b1 6.34 35.46 \u00b1 0.92 36.18 \u00b1 2.12 41.16 \u00b1 5.65 51.59 \u00b1 12.83 37.56 \u00b1 7.14 GPT-2 MEDIUM 0.3B 65.71 \u00b1 2.80 52.94 \u00b1 5.12 63.35 \u00b1 3.01 54.33 \u00b1 4.75 50.6 \u00b1 2.44 72.21 \u00b1 14.88 50.25 \u00b1 4.99 GPT-2 LARGE 0.8B 71.43 \u00b1 10.27 50.94 \u00b1 6.63 59.70 \u00b1 4.50 52.38 \u00b1 2.65 44.75 \u00b1 1.11 62.36 \u00b1 13.82 58.04 \u00b1 5.28 GPT-2 XL 1.6B 78.43 \u00b1 3.16 78.43 \u00b1 3.16 73.93 \u00b1 1.21 56.61 \u00b1 2.02 45.21 \u00b1 2.54 79.04 \u00b1 \u00b1 3.21 89.66 \u00b1 0.50 84.18 \u00b1 0.32 85.04 \u00b1 1.18 94.32 \u00b1 1.14 88.54 \u00b1 6.18 T5 LARGE 0.8B 23.57 \u00b1 8.93 41.84 \u00b1 7.63 36.02 \u00b1 5.26 49.49 \u00b1 6.32 40.41 \u00b1 5.97 37.57 \u00b1 15.23 21.20 \u00b1 6.51 T5 3B 3B 46.52 \u00b1 6.69 50.81 \u00b1 6.45 46.17 \u00b1 4.06 46.45 \u00b1 4.39 44.38 \u00b1 0.22 31.46 \u00b1 18.18 31.60 \u00b1 14.90 GPT NEO 2.7B (ordered) 86.71 \u00b1 1.62 55.69 \u00b1 3.45 55.12 \u00b1 4.01 50.77 \u00b1 4.41 50.70 \u00b1 2.47 63.33 \u00b1 7.14 61.51 \u00b1 1.63 T5 LARGE 0.8B (ordered) 25.90 \u00b1 18.51 63.06 \u00b1 4.56 51.92 \u00b1 3.90 62.71 \u00b1 6.30 55.91 \u00b1 3.82 38.97 \u00b1 14.80 63.10 \u00b1", |
| "content": "<table><tr><td>Models</td><td>SNIPS</td><td/><td>MTOP</td><td/><td/><td colspan=\"2\">MultiNLU</td></tr><tr><td/><td>en</td><td>de</td><td>en</td><td>es</td><td>fr</td><td>en</td><td>es</td></tr><tr><td>Random</td><td>14.29</td><td>15.07</td><td>15.25</td><td>15.55</td><td>14.36</td><td>8.33</td><td>8.33</td></tr><tr><td>Full-training SOTA</td><td>99.00 \u2021</td><td>88.80 \u2020</td><td>94.00 \u2020</td><td>90.10 \u2020</td><td>89.60 \u2020</td><td>99.11 *</td><td>98.90 *</td></tr><tr><td/><td/><td colspan=\"3\">Zero-shot Cross-Task Prediction</td><td/><td/><td/></tr><tr><td>BART LARGE 0.4B</td><td>74.43</td><td>24.80</td><td>43.41</td><td>36.06</td><td>24.77</td><td>65.60</td><td>34.77</td></tr><tr><td>XLM-R LARGE 0.6B</td><td>68.00</td><td>54.30</td><td>53.37</td><td>51.67</td><td>51.99</td><td>77.79</td><td>66.35</td></tr><tr><td/><td/><td/><td colspan=\"2\">Few-shot Learning (K-shot)</td><td/><td/><td/></tr><tr><td>GPT-2 0.1B</td><td>39.33 \u00b1 8.58</td><td colspan=\"5\">40.03 5.05</td><td>64.74 \u00b1 7.64</td></tr><tr><td>GPT NEO 1.3B</td><td>84.19 \u00b1 2.78</td><td colspan=\"3\">67.17 \u00b1 2.50 82.40 \u00b1 1.90 73.51 \u00b1 0.95</td><td>66.3 \u00b1 1.29</td><td>89.70 \u00b1 1.28</td><td>85.77 \u00b1 2.53</td></tr><tr><td>GPT NEO 2.7B</td><td>91.24 \u00b1 0.68</td><td colspan=\"5\">71.57 \u00b1 5.94 81.51 \u00b1 0.39 76.94 \u00b1 0.83 70.31 \u00b1 1.99 83.76 \u00b1 3.14</td><td>87.82 \u00b1 1.55</td></tr><tr><td>GPT NEO-J 6B</td><td>93.38 \u00b1 0.76</td><td colspan=\"6\">80.97 4.46</td></tr><tr><td>T5 3B 3B (ordered)</td><td>93.00 \u00b1 3.00</td><td colspan=\"5\">74.11 \u00b1 2.69 65.03 \u00b1 1.87 66.97 \u00b1 1.35 68.89 \u00b1 2.51 80.12 \u00b1 3.95</td><td>86.60 \u00b1 2.40</td></tr><tr><td/><td/><td/><td>Fine-tuning (40-shot)</td><td/><td/><td/><td/></tr><tr><td>mBERT 0.2B</td><td>88.57 \u00b1 3.14</td><td colspan=\"5\">25.21 \u00b1 2.31 41.44 \u00b1 5.59 33.82 \u00b1 10.08 16.54 \u00b1 5.54 84.88 \u00b1 1.59</td><td>87.87 \u00b1 3.29</td></tr><tr><td>XLM-R BASE 0.3B</td><td colspan=\"4\">87.95 \u00b1 1.39 27.47 \u00b1 11.90 37.03 \u00b1 5.11 27.16 \u00b1 5.51</td><td>13.8 \u00b1 6.50</td><td>77.06 \u00b1 3.16</td><td>74.85 \u00b1 1.53</td></tr></table>", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "text": "", |
| "content": "<table/>", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "text": "Model architecture.", |
| "content": "<table><tr><td>Models</td></tr></table>", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "text": "6B 79.41 \u00b1 1.18 81.57 \u00b1 0.83 77.85 \u00b1 1.63 82.66 \u00b1 4.19 T5 LARGE 0.8B 37.14 \u00b1 5.44 38.14 \u00b1 3.20 33.53 \u00b1 4.85 14.95 \u00b1 16.34 T5 3B 3B 35.35 \u00b1 7.07 34.64 \u00b1 6.21 37.26 \u00b1 8.68 14.11 \u00b1 14.01 GPT NEO 2.7B (ordered) 0.8B 42.23 \u00b1 3.24 48.62 \u00b1 2.60 46.30 \u00b1 3.02 47.83 \u00b1 5.73 T5 3B (ordered) 3B 52.23 \u00b1 4.29 52.74 \u00b1 3.20 49.72 \u00b1 5.37 50.42 \u00b1 6.01", |
| "content": "<table><tr><td/><td>.16</td><td>60.30</td><td>58.34</td><td>-</td></tr><tr><td>Multi CoVe (Schuster et al., 2019)</td><td>-</td><td>-</td><td>-</td><td>53.89</td></tr><tr><td>Translate-Train (Liu et al., 2020b)</td><td>-</td><td>-</td><td>-</td><td>85.39</td></tr><tr><td>MTL (Liu et al., 2020b)</td><td>-</td><td>-</td><td>-</td><td>87.88</td></tr><tr><td/><td colspan=\"2\">Few-shot Learning (K-shot)</td><td/><td/></tr><tr><td>GPT-2 0.1B</td><td colspan=\"4\">23.89 \u00b1 1.52 27.10 \u00b1 3.19 26.14 \u00b1 0.54 38.60 \u00b1 3.54</td></tr><tr><td>GPT-2 MEDIUM 0.3B</td><td colspan=\"4\">39.61 \u00b1 5.42 41.81 \u00b1 4.66 42.40 \u00b1 3.84 40.40 \u00b1 10.48</td></tr><tr><td>GPT-2 LARGE 0.8B</td><td colspan=\"4\">30.94 \u00b1 4.45 34.69 \u00b1 6.50 33.04 \u00b1 4.56 23.99 \u00b1 14.02</td></tr><tr><td>GPT-2 XL 1.6B</td><td colspan=\"4\">42.88 \u00b1 4.94 48.43 \u00b1 4.42 50.67 \u00b1 4.50 51.31 \u00b1 9.87</td></tr><tr><td>GPT NEO 1.3B</td><td colspan=\"4\">56.14 \u00b1 2.75 63.14 \u00b1 2.52 60.25 \u00b1 3.32 64.82 \u00b1 5.94</td></tr><tr><td>GPT NEO 2.7B</td><td colspan=\"4\">58.27 \u00b1 1.28 64.79 \u00b1 1.69 62.30 \u00b1 1.60 65.91 \u00b1 6.42</td></tr><tr><td>GPT NEO-J</td><td/><td/><td/><td/></tr></table>", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF6": { |
| "text": "Few-shot results in the cross-lingual setting on MTOP and MultiNLU datasets.", |
| "content": "<table><tr><td>5 Results and Analysis</td></tr><tr><td>5.1 Model Performance</td></tr></table>", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |