| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:14:28.605429Z" |
| }, |
| "title": "A Report on the VarDial Evaluation Campaign 2020", |
| "authors": [ |
| { |
| "first": "Mihaela", |
| "middle": [], |
| "last": "G\u0203man", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Bucharest", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Bocconi University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [ |
| "Tudor" |
| ], |
| "last": "Ionescu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Bucharest", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Heidi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Helsinki", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Krister", |
| "middle": [], |
| "last": "Lind\u00e9n", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Helsinki", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Ljube\u0161i\u0107", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Helsinki", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Niko", |
| "middle": [], |
| "last": "Partanen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Helsinki", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "Purschke", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Luxembourg", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Scherrer", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Helsinki", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Rochester Institute of Technology", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper presents the results of the VarDial Evaluation Campaign 2020 organized as part of the seventh workshop on Natural Language Processing (NLP) for Similar Languages, Varieties and Dialects (VarDial), co-located with COLING 2020. The campaign included three shared tasks each focusing on a different challenge of language and dialect identification: Romanian Dialect Identification (RDI), Social Media Variety Geolocation (SMG), and Uralic Language Identification (ULI). The campaign attracted 30 teams who enrolled to participate in one or multiple shared tasks and 14 of them submitted runs across the three shared tasks. Finally, 11 papers describing participating systems are published in the VarDial proceedings and referred to in this report.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper presents the results of the VarDial Evaluation Campaign 2020 organized as part of the seventh workshop on Natural Language Processing (NLP) for Similar Languages, Varieties and Dialects (VarDial), co-located with COLING 2020. The campaign included three shared tasks each focusing on a different challenge of language and dialect identification: Romanian Dialect Identification (RDI), Social Media Variety Geolocation (SMG), and Uralic Language Identification (ULI). The campaign attracted 30 teams who enrolled to participate in one or multiple shared tasks and 14 of them submitted runs across the three shared tasks. Finally, 11 papers describing participating systems are published in the VarDial proceedings and referred to in this report.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The VarDial Evaluation Campaign 2020 1 is the most recent iteration of a series of evaluation campaigns featuring multiple shared tasks organized together with the Workshop on Natural Language Processing (NLP) for Similar Languages, Varieties and Dialects (VarDial) . It follows three editions organized in 2017 (Zampieri et al., 2017) featuring four shared tasks and in 2018 (Zampieri et al., 2018) and 2019 (Zampieri et al., 2019) featuring five shared tasks.", |
| "cite_spans": [ |
| { |
| "start": 233, |
| "end": 265, |
| "text": "Varieties and Dialects (VarDial)", |
| "ref_id": null |
| }, |
| { |
| "start": 312, |
| "end": 335, |
| "text": "(Zampieri et al., 2017)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 376, |
| "end": 399, |
| "text": "(Zampieri et al., 2018)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 409, |
| "end": 432, |
| "text": "(Zampieri et al., 2019)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Co-located with international NLP conferences such as COLING, EACL, and NAACL, VarDial is a forum for researchers interested in diatopic language variation from a computational perspective. Since its first edition in 2014, VarDial hosted shared tasks on various topics such as morphosyntactic tagging, cross-lingual dependency parsing, and language and dialect identification. Most shared tasks organized at VarDial have addressed dialect and language identification on newspaper texts, social media posts, speech transcriptions, and many other genres and domains Goutte et al., 2016) . A large number of languages and dialects from different families have been included in the VarDial shared tasks: national language varieties of Chinese, English, French, Spanish, and Portuguese, pairs or groups of similar languages such as Bosnian, Croatian, and Serbian and Malay and Indonesian, and dialects of languages such as Arabic and German. Some of the datasets made available in these tasks, such as the ArchiMob for Swiss German dialects and the multilingual DSLCC, have been used outside these competitions evidencing the interest of the NLP community in the topic (Tan et al., 2014; Kumar et al., 2018) . 2 In this paper, we present the results and the main findings of the VarDial Evaluation Campaign 2020. Three tasks addressing different aspects of language and dialect identification have been organized this year. The Romanian Dialect Identification (RDI) shared task is described in Section 4, the Social Media Variety Geolocation (SMG) task is presented in Section 5, and finally the Uralic Language Identification (ULI) shared task is described in Section 6. We include references to the 11 system description papers written by the participants of the campaign in Table 1. 
2 Shared Tasks at VarDial 2020 Romanian Dialect Identification (RDI): In the Romanian Dialect Identification (RDI) shared task, we provided participants with the MOROCO data set (Butnaru and Ionescu, 2019) for training, which contains Moldavian (MD) and Romanian (RO) samples of text collected from the news domain. The task was a binary classification by dialect, in which a classification model is required to discriminate between the Moldavian (MD) and the Romanian (RO) dialects. The task was closed, therefore, participants are not allowed to use external data to train their models. The test set contained newly collected text samples from a different domain, not previously included in MOROCO, resulting in a cross-domain dialect identification task.", |
| "cite_spans": [ |
| { |
| "start": 564, |
| "end": 584, |
| "text": "Goutte et al., 2016)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1164, |
| "end": 1182, |
| "text": "(Tan et al., 2014;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 1183, |
| "end": 1202, |
| "text": "Kumar et al., 2018)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1205, |
| "end": 1206, |
| "text": "2", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1772, |
| "end": 1780, |
| "text": "Table 1.", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In contrast to most past and present VarDial tasks, the SMG task is framed as a geolocation task: given a text, the participants have to predict its geographic location in terms of latitude/longitude coordinates. This setup addresses the common issue that defining a set of discrete labels is not trivial for many language areas where there is a continuum between varieties rather than clear-cut borders. The SMG task is split into three subtasks covering different language areas: the BCMS subtask is focused on geolocated tweets published in the area of Croatia, Bosnia and Herzegovina, Montenegro and Serbia in the HBS macro-language ; the DE-AT subtask focuses on conversations from the microblogging platform Jodel initiated in Germany and Austria, which are written in standard German but commonly contain regional and dialectal forms; the CH subtask is based on Jodel conversations initiated in Switzerland, which were found to be held majoritarily in Swiss German dialects (Hovy and Purschke, 2018) . All three subtasks used the same data format and evaluation methodology. Both constrained and unconstrained submissions were allowed, but only one participating team made use of the latter.", |
| "cite_spans": [ |
| { |
| "start": 981, |
| "end": 1006, |
| "text": "(Hovy and Purschke, 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Social Media Variety Geolocation (SMG):", |
| "sec_num": null |
| }, |
| { |
| "text": "This shared task focused on discriminating between endangered languages of the Uralic group. In addition to 29 Uralic minority languages, the shared task also featured 149 non-relevant languages. For training, we provided texts from the Wanca 2016 corpora (Jauhiainen et al., 2019a) for the relevant languages while the texts for the non-relevant languages came from the Leipzig corpora collection (Goldhahn et al., 2012) . The test set for the relevant languages included sentences from the forthcoming Wanca 2017 corpora (Jauhiainen et al., 2020b) that were not present in the Wanca 2016 corpora. The sentences for the non-relevant languages were from the Leipzig corpora collection. The ULI shared task was divided into three separate tracks using the same training and test data. The difference between the tracks was based on how the submissions were scored: track 1 focused on macro-averaged F-score for the 29 relevant languages, track 2 on micro-averaged F-score for the relevant languages, and track 3 on macro-averaged F-score for all 178 languages. All the tracks were closed, so no other data or models were to be used for training in addition to the pre-defined training sets.", |
| "cite_spans": [ |
| { |
| "start": 256, |
| "end": 282, |
| "text": "(Jauhiainen et al., 2019a)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 398, |
| "end": 421, |
| "text": "(Goldhahn et al., 2012)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 523, |
| "end": 549, |
| "text": "(Jauhiainen et al., 2020b)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Uralic Language Identification (ULI):", |
| "sec_num": null |
| }, |
| { |
| "text": "A total of 30 teams enrolled to participate in this year's VarDial evaluation campaign and 14 of them submitted results to one or more shared tasks. In Table 1 , we list the teams that participated in the shared tasks, including references to the 11 system description papers written by the participants. We include detailed information about these submissions in each respective task section of this report.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 152, |
| "end": 159, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Participating Teams", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The RDI task attracted 8 teams followed by the SMG task with 7 teams who submitted runs to one or more of its three language tracks: BCMS, DE-AT, and CH. This is a similar number of teams that have participated in the most popular shared tasks from the VarDial evaluation campaign 2019. Only the NRC team submitted results to the ULI shared task, which is rather unusual, as tasks in past VarDial evaluation campaign have all received a very good number of submissions. It should be noted that the 2020 campaign run from April 20 to July 30 during the early stages of the COVID-19 pandemic. Lock downs and restrictive measures in many countries during this period have impacted universities and research centers worldwide causing significant disruption. We believe that this situation is very likely to have discouraged more teams to participate in this year's evaluation campaign. The chosen training and test corpora allowed us to evaluate participants on a challenging cross-genre binary dialect identification task: Romanian (RO) versus Moldavian (MD). However, participants were provided with development data comprising both news articles and tweets. The number of samples in the training, the development and the test sets are listed in Table 2 . All text samples were automatically pre-processed to replace each named entity with the token $NE$.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1244, |
| "end": 1251, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Participating Teams", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Akanksha. The Akanksha team fine-tuned a reformer model (Kitaev et al., 2019) on the provided data, considering character-level and phrase-level tokens. Then, a binary classifier is trained on top of the fine-tuned reformer model. The team submitted only one run.", |
| "cite_spans": [ |
| { |
| "start": 56, |
| "end": 77, |
| "text": "(Kitaev et al., 2019)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Anumi\u0163i. The Anumi\u0163i team (Popa and S , tef\u0203nescu, 2020) submitted three runs using fine-tuned Romanian BERT models (Dumitrescu et al., 2020) . They started from BERT models that are pre-trained on Romanian corpora. For the first two runs, the team submitted individual models, the first one being a cased BERT model and the second one being an uncased BERT model, respectively. For the third run, the team proposed an SVM ensemble of five different transformer-based models, some being multilingual and others being specifically trained on Romanian corpora.", |
| "cite_spans": [ |
| { |
| "start": 116, |
| "end": 141, |
| "text": "(Dumitrescu et al., 2020)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Phlyers. All the submissions made by the Phlyers (Ceolin and Zhang, 2020) are based on Na\u00efve Bayes models applied on character n-grams. Before applying the models, the team preprocessed the text samples by removing numbers, punctuation and common Twitter tags such as \"LIVE\", \"FOTO\" and \"VIDEO\", as well as the $NE$ tag (which was used to replace named entities). For the first run, the Phlyers tuned the model on the news development set, obtaining optimal results with \u03b1 = 10 \u22124 and n-grams in the range 5-8 that occur less than 1000 times. For the second and third runs, the Phlyers tuned the model on the tweets development set. The best model uses \u03b1 = 10 \u22123 and n-grams in the range 6-8 that occur less than 250 times, while the second best model uses \u03b1 = 10 \u22123 and n-grams in the range 5-7 that occur less than 200 times.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The Linguistadors. The Linguistadors proposed a character-level CNN architecture, which was trained using ground-truth and pseudo-labels. For the first run, the model is fine-tuned using pseudolabels for the validation set. For the second run, the model is fine-tuned using pseudo-labels for both validation and test sets. For the third run, the CNN is fine-tuned using pseudo-labels for a subset of the validation set that includes samples with 95% confidence of being correct.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "T\u00fcbingen. The runs submitted by the T\u00fcbingen team (\u00c7\u00f6ltekin, 2020) consist of multiple linear SVM classifiers based on sparse character and word n-gram features, including a domain adaptation method proposed in their earlier shared task participation (Wu et al., 2019) . For the first run, a base SVM model (trained only on the target development set) is first applied on the test set. Then, the model is retrained by adding the test predictions for which the classifier is confident (distance from the decision boundary is higher than 0.5) to the training set. As the final predictions, the authors take the majority vote of five classifiers trained with (slightly) different hyperparameters. For the second run, the T\u00fcbingen team used an ensemble of 20 classifiers trained on disjoint parts of the training data, while also splitting the news articles into sentences. The training data for the second submission is formed of the training and the development sets, assigning 25\u00d7 higher weights to tweets than to sentences taken from news articles. The third run of the T\u00fcbingen team is very similar to the second, the only difference being the filtering of the source documents based on the confidence of another classifier trained on the target development set.", |
| "cite_spans": [ |
| { |
| "start": 251, |
| "end": 268, |
| "text": "(Wu et al., 2019)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "UAIC. The UAIC team (Rebeja and Cristea, 2020) proposed a model based on TF-IDF encoders trained on each dialect, independently. The TF-IDF encodings are concatenated into a single tensor and provided as input to a deep learning architecture that learns to classify each data sample. The architecture is trained using categorical cross-entropy. The UAIC team submitted two runs with slightly different hyperparameters.", |
| "cite_spans": [ |
| { |
| "start": 20, |
| "end": 46, |
| "text": "(Rebeja and Cristea, 2020)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "UPB. Similar to Anumi\u0163i, the UPB team (Zaharia et al., 2020) submitted three runs using the Romanian BERT model (Dumitrescu et al., 2020) . For the first submission, the model is trained for three epochs on text chunks of 512 tokens taken with an overlap of 128 tokens. For the second run, the Romanian BERT model is trained using an adversarial technique that alters certain examples in the data set. For the third run, the model is trained for four epochs on text chunks of 480 tokens.", |
| "cite_spans": [ |
| { |
| "start": 38, |
| "end": 60, |
| "text": "(Zaharia et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 112, |
| "end": 137, |
| "text": "(Dumitrescu et al., 2020)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "SUKI. The SUKI team (Jauhiainen et al., 2020a ) submitted a single run to the RDI shared task. The authors employed a custom Na\u00efve Bayes model based on relative frequencies of character 4-grams and 5-grams as features. They removed $NE$ tags and non-alphabetic characters from all data samples. Then, they changed the remaining characters to lowercase. The SUKI team trained the submitted model on both training and development samples. Table 3 : Macro F 1 -scores attained by the participating teams on the RDI shared task. A summary of methods and features used by participants are also included.", |
| "cite_spans": [ |
| { |
| "start": 20, |
| "end": 45, |
| "text": "(Jauhiainen et al., 2020a", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 437, |
| "end": 444, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The runs submitted by each participant in the RDI shared task are presented in Table 3 . The systems are ranked according to the macro F 1 -scores. Interestingly, we observe that the top scoring system is a shallow approach based on an SVM ensemble applied on word and character n-grams. The best model, which is submitted by the T\u00fcbingen team, is closely followed by an SVM ensemble applied on fine-tuned multilingual and monolingual BERT embeddings. The results show that T\u00fcbingen and Anumi\u0163i are the only two teams surpassing the 70% threshold. Their very good results compared with the rest of the participants are likely due to the idea of splitting the news articles in sentences. This hypothesis is also supported by the following observation. Although both Anumi\u0163i and UPB fine-tuned the same Romanian BERT model, their results are significantly different, probably because the Anumi\u0163i team fine-tuned the model on sentences, while UPB fine-tuned it on text chunks. Considering the domain gap between news articles and tweets, which is also caused by the high difference in the average number of tokens per sample -see (G\u0203man and Ionescu, 2020b) -we believe that the idea of splitting the news articles into sentences to reduce the domain gap is quite useful.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 79, |
| "end": 86, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Rank", |
| "sec_num": null |
| }, |
| { |
| "text": "In the Romanian Dialect Identification challenge, we proposed a shared task on cross-domain binary classification by dialect. A total of 8 teams participated in the competition, each submitting between 1 and 3 runs. This resulted in a total of 19 submissions, which represents an increase of almost 100% compared with last year's Moldavian vs. Romanian Cross-Dialect Topic Identification (MRC) shared task (Zampieri et al., 2019 ). An interesting difference compared with the results reported for the MRC shared task is that, in the RDI shared task, the best performance is obtained by a shallow approach based on word and character n-grams. This is consistent with the results observed in previous VarDial evaluation campaigns (Zampieri et al., 2017; Zampieri et al., 2018) , where some of the winners employed shallow approaches based on character n-grams (Butnaru and Ionescu, 2018; Ionescu and Butnaru, 2017). In summary, we conclude that the battle between deep and shallow approaches is still open, at least when it comes to dialect identification.", |
| "cite_spans": [ |
| { |
| "start": 406, |
| "end": 428, |
| "text": "(Zampieri et al., 2019", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 728, |
| "end": 751, |
| "text": "(Zampieri et al., 2017;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 752, |
| "end": 774, |
| "text": "Zampieri et al., 2018)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "5 Social Media Variety Geolocation SMG", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The SMG task is based on three datasets from two Social Media platforms, Jodel and Twitter.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\u2022 The BCMS subtask is focused on geolocated tweets published in the area of Croatia, Bosnia and Herzegovina, Montenegro and Serbia in the so-called BCMS macro-language (ISO acronym HBS, code 639-3). While the independent status of the specific languages is rather disputed, there is significant variation between them.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\u2022 The DE-AT subtask focuses on Jodel conversations initiated in Germany and Austria, which are written in standard German but commonly contain regional and dialectal forms. Jodel is a mobile chat application that lets people anonymously talk to other users within a 10km-radius around them.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\u2022 The CH subtask focuses on Jodel conversations from Switzerland, which were found to be held majoritarily in Swiss German dialects. This dataset is considerably smaller, but we expect it to contain more dialect-specific cues than the DE-AT one.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The BCMS Twitter dataset is described in . The two Jodel datasets are subsets of the corpus collected by Hovy and Purschke (2018) . Some additional cleaning and filtering steps have been applied to these corpora, and they have been split into training, development and test sets (see Table 4 for key figures). All three subtasks use the same data format: each instance consists of three fields, the unprocessed text of the message (BCMS) or conversation (DE-AT and CH), the latitude coordinate and the longitude coordinate. Figure 1 shows the geographic distribution of training instances. ", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 129, |
| "text": "Hovy and Purschke (2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 284, |
| "end": 291, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 524, |
| "end": 532, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We received submissions from seven teams, with five teams participating in all three subtasks. The HeLju team submitted both unconstrained and constrained systems, whereas all other participants focused on constrained systems (i.e., not using any external training data). The participating systems can be classified into two approaches to geolocation: a direct one which frames the problem as a double regression, and an indirect one which converts the coordinates into a finite set of dialect areas and uses a classification model to predict one of the areas.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "CUBoulder-UBC. This approach is based on earlier work described in Hulden et al. (2015) . It divides each geographic area into a fixed grid and uses a Naive Bayes classifier with bag-of-words features for prediction, together with kernel density estimation to avoid data sparsity. The submissions include a single system, a mean-based ensemble of 10, and a median-based ensemble of 10. HeLju. The HeLju systems (Scherrer and Ljube\u0161i\u0107, 2020) rely on the BERT architecture, where the classification output is replaced by a double regression output. For the constrained submissions (C), the BERT models are trained from scratch using the SMG training data, whereas pre-trained models are used for the unconstrained submissions (UC). The unconstrained submissions named UC ext include additional training data from the development set.", |
| "cite_spans": [ |
| { |
| "start": 67, |
| "end": 87, |
| "text": "Hulden et al. (2015)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Piyush Mishra. This submission is based on a bidirectional LSTM that is fed with FastText embeddings (Mishra, 2020) . Latitudes and longitudes are predicted by double regression with quantile loss.", |
| "cite_spans": [ |
| { |
| "start": 101, |
| "end": 115, |
| "text": "(Mishra, 2020)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "SUKI. This approach divides each geographic area into a fixed grid with 81 areas and uses a n-gram language model to predict the most likely area (Jauhiainen et al., 2020a ).", |
| "cite_spans": [ |
| { |
| "start": 146, |
| "end": 171, |
| "text": "(Jauhiainen et al., 2020a", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "The Linguistadors. These submissions are based on classic regression methods (linear regression, lasso regression, and ridge regression) and rely on TF-IDF weighted input features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "UnibucKernel. The UnibucKernel team (G\u0203man and Ionescu, 2020a) submitted two single systems, a character-level CNN (Zhang et al., 2015) with double regression output, and a Nu-SVR model trained on top of n-gram string kernels (Ionescu et al., 2016). The third system is an ensemble approach based on XGBoost, trained on the predictions provided by the two previously mentioned systems and an LSTMbased one. The LSTM is trained on top of fine-tuned German BERT embeddings.", |
| "cite_spans": [ |
| { |
| "start": 115, |
| "end": 135, |
| "text": "(Zhang et al., 2015)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "ZHAW-InIT. The ZHAW-InIT team (Benites et al., 2020) uses unsupervised k-means clustering to infer a set of dialect classes which are then used in a classification architecture. Their systems are based either on SVMs with TF-IDF weighted word and character n-gram features, or on the HELI language modeling architecture (ZHAW-InIT (HELI)). The SVM submission to the CH subtask (ZHAW-InIT (META)) is in fact a meta-classifier combining several SVMs with different features, whereas single SVMs are used for BCMS and DE-AT (ZHAW-InIT (SVM)).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "The test set predictions were evaluated on the basis of median and mean distance to the gold coordinate. Submissions are ranked by decreasing median distance, which is the official metric. For comparison, we also mention the distance values obtained from a simple centroid baseline, which predicts the center point (measured on the training data) for each test instance. Results and rankings for the three tasks are presented in Tables 5, 6 , and 7 respectively. Ranks are attributed only to the best-ranked submission of each team.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 429, |
| "end": 440, |
| "text": "Tables 5, 6", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Median distance Mean distance Table 6 : SMG shared task -DE-AT results. Unconstrained submissions above the horizontal line, constrained ones below.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 30, |
| "end": 37, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Rank Team (Run)", |
| "sec_num": null |
| }, |
| { |
| "text": "For the DE-AT and CH subtasks, we also provide a dialect area accuracy measure. These are based on partitions of the areas into different dialectal areas based on previous dialectological research ( 2013; Scherrer and Stoeckle, 2016). 5 Dialect area accuracy represents the percentage of test instances whose predicted coordinates lie inside the same area as the gold coordinates. Rather unsurprisingly, the unconstrained approaches outperform the constrained ones, but only by a small margin in the CH subtask. There is no clear winning approach among the constrained submissions. BERT (HeLju) works well in large-data settings, but underperforms on the CH subtask where classical approaches are more competitive. The ZHAW-InIT and CUBoulder-UBC systems show that a classification strategy with a fixed set of classes can outperform a regression strategy that learns to predict longitudes and latitudes directly. This finding may be due to the fact that social media posts are not randomly scattered across space, but tend to gather around a relatively small number of larger cities and agglomerations.", |
| "cite_spans": [ |
| { |
| "start": 197, |
| "end": 198, |
| "text": "(", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rank Team (Run)", |
| "sec_num": null |
| }, |
| { |
| "text": "More generally, the DE-AT subtask has turned out to be the hardest one: only half of the submitted systems managed to beat the baseline, and unlike in the other subtasks, no system managed to halve the baseline distance. This suggests that the regional features in the DE-AT Jodel corpus are too sparse to be learned reliably.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rank Team (Run)", |
| "sec_num": null |
| }, |
| { |
| "text": "In terms of evaluation measures, the median and mean distances are highly correlated. Dialect area accuracy also yields a similar picture overall, but some differences are noteworthy. For DE-AT, all systems clearly outperform the baseline on this measure, and the ZHAW-InIT (SVM) submission turns out to be much more competitive than the distance measures suggest. This submission confirms its advantage on the CH task, where it is indistinguishable from the unconstrained submissions in terms of dialect area accuracy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rank Team (Run)", |
| "sec_num": null |
| }, |
| { |
| "text": "For the first time at VarDial, we have proposed a dialect geolocation task, in which the prediction outputs are coordinate pairs rather than variety labels. The SMG task attracted a total of seven participants across three subtasks, a number that is comparable with other VarDial tasks in recent years. We received a wide range of technical solutions: solutions based on deep learning as well as traditional machine learning, constrained as well as unconstrained solutions, and regression-based as well as classification-based approaches. The best scores were obtained by unconstrained solutions based on pre-trained BERT models, on all three subtasks. Thanks to its reliance on easily available geolocated messages from social media services, another edition of the SMG task could be envisaged, possibly focusing on different language areas.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "The first edition of the ULI shared task was a language identification task focusing on differentiating between minority Uralic languages and distinguishing them from a large number of other languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Uralic Language Identification ULI", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We define minority Uralic languages as those languages that are not official state languages in the countries where they are spoken. This definition excludes Estonian, Finnish and Hungarian. The remaining Uralic languages are all endangered. They can also be characterized as extremely diverse, at least when it comes to their use and current situation. Some of the languages in the shared task are extinct, while others have very young and varying orthographies that are still becoming established. Nevertheless, the task also includes languages that are widely used in the modern society and have a large online presence. Most of the Uralic languages spoken in Russia are written using the Cyrillic alphabet, often with additional individual characters that differ from the character set used for Russian. Since the Uralic languages form a large and old language family, the varieties in the task are generally far apart from one another. At the same time, the task also contains closely related Uralic languages from individual branches, which share a large percentage of their vocabulary and features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Uralic Language Identification ULI", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The shared task included a total of 178 languages, of which 29 were Uralic minority languages. The 29 endangered Uralic languages were considered relevant and the 148 languages non-relevant. The ULI task consisted of three tracks using the same training and testing data. The tracks differed from each other in how they were scored.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Uralic Language Identification ULI", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The motivation behind including the non-relevant languages in the shared task was to simulate the situation we faced when we were automatically searching for minority Uralic languages on the Internet during the Finno-Ugric Languages and the Internet project (Jauhiainen et al., 2015) . The different ways of scoring the tracks was also designed to highlight the inherent difficulties of such a search. The third track did not especially focus on the relevant languages, the second track focused on the relevant languages as a group, and the first track forced the participants to consider even the most rare of the relevant languages.", |
| "cite_spans": [ |
| { |
| "start": 258, |
| "end": 283, |
| "text": "(Jauhiainen et al., 2015)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Uralic Language Identification ULI", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The first track, ULI-RLE (Relevant languages as equals), considered all the relevant languages equal in value and the aim was to maximize their average F-score. This is important when one is interested in finding rare languages on, for example, the Internet. The F-score was calculated as a macro-averaged F1 score over the relevant languages in the training set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Uralic Language Identification ULI", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The second track, ULI-RSS (Relevant sentences as equals), considered each sentence in the test set that was written in or was predicted to be in a relevant language as equals. When compared with the first track, this track gave less importance to the very rare languages as their precision was not as important when the resulting F-score was calculated. The resulting F-score was calculated as a micro-F1 over the sentences in the test set for both the sentences in the relevant languages and the ones that were predicted to be in relevant languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Uralic Language Identification ULI", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In the first two tracks, there was no difference between the non-relevant languages. All the nonrelevant languages could have been labeled as English in the submissions and it would not have changed the resulting F1-scores. The third track, ULI-178 (All 178 languages as equals), however, did not focus on the 29 relevant languages, but instead the target was to maximize the average F-score over all the 178 languages present in the training set. The ULI shared task, and especially this track, was the language identification shared task with the largest number of languages used so far. The F-score was calculated as a macro-F1 score over all the languages in the training set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Uralic Language Identification ULI", |
| "sec_num": "6" |
| }, |
| { |
| "text": "For training, we provided texts from the Wanca 2016 corpora (Jauhiainen et al., 2019a) for the relevant languages and from the Leipzig corpora collection (Goldhahn et al., 2012) for the non-relevant languages.", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 86, |
| "text": "(Jauhiainen et al., 2019a)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 154, |
| "end": 177, |
| "text": "(Goldhahn et al., 2012)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "The number of lines for the non-relevant languages in the training data varied from 10,000 lines of Cebuano and Corsican to 3 million lines of Indonesian. As relevant language sentences in the test set from the forthcoming Wanca 2017 corpora (Jauhiainen et al., 2020b) , we chose those sentences that were not present in the Wanca 2016 corpora which have been published in the Language Bank of Finland. The sentences for the non-relevant languages were from the Leipzig corpora collection. We did not create a separate set for development so the participants had to decide themselves how to use the given training material for that as well. The dataset used in the ULI shared task, as well as its creation, is described in detail by Jauhiainen et al. (2020b) .", |
| "cite_spans": [ |
| { |
| "start": 242, |
| "end": 268, |
| "text": "(Jauhiainen et al., 2020b)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 733, |
| "end": 758, |
| "text": "Jauhiainen et al. (2020b)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Unfortunately, the ULI shared task had only one team submitting results to the tracks. The NRC team submitted three runs for each of the shared task tracks. All the runs used BERT-related deep neural networks taking sequences of characters as input similar to what the NRC team used when they won the CLI shared task (Jauhiainen et al., 2019b) in the previous VarDial Evaluation Campaign (Bernier-Colborne et al., 2019) . The encoders of the networks were pre-trained on masked language modeling (MLM) and sentence pair classification (SPC) tasks (Devlin et al., 2019) . The third run on each track was using only the information on the training set as opposed to the second run, in which the MLM was also done on the unlabeled test set in order to adapt the model. The first run on each track was a plurality voting ensemble of the six models used in the second and third runs of all the tracks.", |
| "cite_spans": [ |
| { |
| "start": 317, |
| "end": 343, |
| "text": "(Jauhiainen et al., 2019b)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 388, |
| "end": 419, |
| "text": "(Bernier-Colborne et al., 2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 547, |
| "end": 568, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Participants and Approaches", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "For the baseline, we used an implementation of the HeLI method equal to the one we used when evaluating language identification methods for 285 languages . The baseline and the NRC teams results are listed in Tables 8, 9, and 10. All the results submitted by the NRC team are well below the baselines. After the shared task results were announced, the NRC team investigated reasons for the low performance of their classifiers and found that the low scores were mostly due to a flaw in the function they used to sample the data for training and evaluation (Bernier-Colborne and Goutte, 2020).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "Needless to say, we were not happy that the ULI task attracted the submissions of only one team. The shared tasks at VarDial have historically attracted a good number of submissions but, as previously mentioned, the VarDial Evaluation Campaign 2020 run during the early stages of the COVID-19 pandemic, a period in which significant disruption has been observed in universities and research centers worldwide. This is likely to have precluded more teams from participating. Furthermore, we acknowledge that we did not make the task easy to participate with the larger than normal training sets. The results of the participating team also suggest that the task might have been more difficult than we anticipated. Due to the low number of participants in the shared task and the challenges caused by the COVID-19 pandemic, we have decided to continue accepting submissions until the next edition of the ULI shared task. Thus, we are not yet publishing the gold-labeled test set. Instead, we will set up a web-page 6 with information on how to request the training and the test sets. The web-page will also feature a table with all the results submitted so far.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary", |
| "sec_num": "6.4" |
| }, |
| { |
| "text": "In this paper we present the results and findings of the shared tasks organized as part of the VarDial Evaluation Campaign 2020. Three shared tasks were organized this year: Romanian Dialect Identification (RDI), Social Media Variety Geolocation (SMG), and Uralic Language Identification (ULI). Each of these tasks tackled an important challenge in language and dialect identification on different languages and dialects. Furthermore, in these tasks we provided participants with new datasets that will be made freely available to the research community after the competitions. A total of 14 teams submitted runs across the three shared tasks. We included short descriptions for each team's systems in this report and references to all 11 system description papers in Table 1 . A complete description of these systems is available in the system description papers published in the VarDial workshop proceedings.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 768, |
| "end": 775, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "https://github.com/butnaruandrei/MOROCO 4 https://github.com/raduionescu/MOROCO-Tweets", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The DE-AT areas are based on those used in(Hovy and Purschke, 2018), augmented with a single area covering the territory of Austria. The CH areas correspond to the 10-cluster solution presented in(Scherrer and Stoeckle, 2016).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://urn.fi/urn:nbn:fi:lb-2020102201", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank the shared task participants for their participation, support, and the feedback provided. We further thank the VarDial program committee for reviewing all submissions. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Dialect identification under domain shift: Experiments with discriminating Romanian and Moldavian", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "\u00c7 Agr\u0131 \u00c7\u00f6ltekin", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "\u00c7 agr\u0131 \u00c7\u00f6ltekin. 2020. Dialect identification under domain shift: Experiments with discriminating Romanian and Moldavian. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirec- tional transformers for language understanding. In Proceedings of NAACL.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The birth of Romanian BERT", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "\u015e Tefan", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrei-Marius", |
| "middle": [], |
| "last": "Dumitrescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Sampo", |
| "middle": [], |
| "last": "Avram", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pyysalo", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Findings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "\u015e tefan Daniel Dumitrescu, Andrei-Marius Avram, and Sampo Pyysalo. 2020. The birth of Romanian BERT. In Findings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Building Large Monolingual Dictionaries at the Leipzig Corpora Collection: From 100 to 200 Languages", |
| "authors": [ |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Goldhahn", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Eckart", |
| "suffix": "" |
| }, |
| { |
| "first": "Uwe", |
| "middle": [], |
| "last": "Quasthoff", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dirk Goldhahn, Thomas Eckart, and Uwe Quasthoff. 2012. Building Large Monolingual Dictionaries at the Leipzig Corpora Collection: From 100 to 200 Languages. In Proceedings of LREC.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Discriminating Similar Languages: Evaluations and Explorations", |
| "authors": [ |
| { |
| "first": "Cyril", |
| "middle": [], |
| "last": "Goutte", |
| "suffix": "" |
| }, |
| { |
| "first": "Serge", |
| "middle": [], |
| "last": "L\u00e9ger", |
| "suffix": "" |
| }, |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cyril Goutte, Serge L\u00e9ger, Shervin Malmasi, and Marcos Zampieri. 2016. Discriminating Similar Languages: Evaluations and Explorations. In Proceedings of LREC.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Combining deep learning and string kernels for the localization of Swiss German tweets", |
| "authors": [ |
| { |
| "first": "Mihaela", |
| "middle": [], |
| "last": "G\u0203man", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu Tudor", |
| "middle": [], |
| "last": "Ionescu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mihaela G\u0203man and Radu Tudor Ionescu. 2020a. Combining deep learning and string kernels for the localization of Swiss German tweets. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "The Unreasonable Effectiveness of Machine Learning in Moldavian versus Romanian Dialect Identification", |
| "authors": [ |
| { |
| "first": "Mihaela", |
| "middle": [], |
| "last": "G\u0203man", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu Tudor", |
| "middle": [], |
| "last": "Ionescu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2007.15700" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mihaela G\u0203man and Radu Tudor Ionescu. 2020b. The Unreasonable Effectiveness of Machine Learning in Mol- davian versus Romanian Dialect Identification. arXiv preprint arXiv:2007.15700.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Capturing regional variation with distributed place representations and geographic retrofitting", |
| "authors": [ |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "Purschke", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dirk Hovy and Christoph Purschke. 2018. Capturing regional variation with distributed place representations and geographic retrofitting. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Kernel density estimation for text-based geolocation", |
| "authors": [ |
| { |
| "first": "Mans", |
| "middle": [], |
| "last": "Hulden", |
| "suffix": "" |
| }, |
| { |
| "first": "Miikka", |
| "middle": [], |
| "last": "Silfverberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Jerid", |
| "middle": [], |
| "last": "Francom", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mans Hulden, Miikka Silfverberg, and Jerid Francom. 2015. Kernel density estimation for text-based geolocation. In Proceedings of AAAI.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Learning to identify Arabic and German dialects using multiple kernels", |
| "authors": [ |
| { |
| "first": "Tudor", |
| "middle": [], |
| "last": "Radu", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrei", |
| "middle": [], |
| "last": "Ionescu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Butnaru", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Radu Tudor Ionescu and Andrei Butnaru. 2017. Learning to identify Arabic and German dialects using multiple kernels. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "String kernels for native language identification: Insights from behind the curtains", |
| "authors": [ |
| { |
| "first": "Marius", |
| "middle": [], |
| "last": "Radu Tudor Ionescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Aoife", |
| "middle": [], |
| "last": "Popescu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Cahill", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Computational Linguistics", |
| "volume": "42", |
| "issue": "3", |
| "pages": "491--525", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Radu Tudor Ionescu, Marius Popescu, and Aoife Cahill. 2016. String kernels for native language identification: Insights from behind the curtains. Computational Linguistics, 42(3):491-525.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "The Finno-Ugric Languages and The Internet Project", |
| "authors": [ |
| { |
| "first": "Heidi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "" |
| }, |
| { |
| "first": "Krister", |
| "middle": [], |
| "last": "Lind\u00e9n", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of IWCLUL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heidi Jauhiainen, Tommi Jauhiainen, and Krister Lind\u00e9n. 2015. The Finno-Ugric Languages and The Internet Project. In Proceedings of IWCLUL.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Evaluation of Language Identification Methods Using 285 Languages", |
| "authors": [ |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "" |
| }, |
| { |
| "first": "Krister", |
| "middle": [], |
| "last": "Lind\u00e9n", |
| "suffix": "" |
| }, |
| { |
| "first": "Heidi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of NoDaLiDa", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tommi Jauhiainen, Krister Lind\u00e9n, and Heidi Jauhiainen. 2017. Evaluation of Language Identification Methods Using 285 Languages. In Proceedings of NoDaLiDa.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Wanca in Korp: Text corpora for underresourced Uralic languages", |
| "authors": [ |
| { |
| "first": "Heidi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "" |
| }, |
| { |
| "first": "Krister", |
| "middle": [], |
| "last": "Linden", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of RDHUM", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heidi Jauhiainen, Tommi Jauhiainen, and Krister Linden. 2019a. Wanca in Korp: Text corpora for underresourced Uralic languages. In Proceedings of RDHUM.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Language and dialect identification of cuneiform texts", |
| "authors": [ |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "" |
| }, |
| { |
| "first": "Heidi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "" |
| }, |
| { |
| "first": "Tero", |
| "middle": [], |
| "last": "Alstola", |
| "suffix": "" |
| }, |
| { |
| "first": "Krister", |
| "middle": [], |
| "last": "Lind\u00e9n", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tommi Jauhiainen, Heidi Jauhiainen, Tero Alstola, and Krister Lind\u00e9n. 2019b. Language and dialect identification of cuneiform texts. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Automatic Language Identification in Texts: A Survey", |
| "authors": [ |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Lui", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| }, |
| { |
| "first": "Krister", |
| "middle": [], |
| "last": "Lind\u00e9n", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Journal of Artificial Intelligence Research", |
| "volume": "65", |
| "issue": "", |
| "pages": "675--782", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tommi Jauhiainen, Marco Lui, Marcos Zampieri, Timothy Baldwin, and Krister Lind\u00e9n. 2019c. Automatic Language Identification in Texts: A Survey. Journal of Artificial Intelligence Research, 65:675-782.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Experiments in language variety geolocation and dialect identification", |
| "authors": [ |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "" |
| }, |
| { |
| "first": "Heidi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "" |
| }, |
| { |
| "first": "Krister", |
| "middle": [], |
| "last": "Lind\u00e9n", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tommi Jauhiainen, Heidi Jauhiainen, and Krister Lind\u00e9n. 2020a. Experiments in language variety geolocation and dialect identification. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Uralic Language Identification (ULI) 2020 shared task dataset and the Wanca", |
| "authors": [ |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "" |
| }, |
| { |
| "first": "Heidi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "" |
| }, |
| { |
| "first": "Niko", |
| "middle": [], |
| "last": "Partanen", |
| "suffix": "" |
| }, |
| { |
| "first": "Krister", |
| "middle": [], |
| "last": "Lind\u00e9n", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tommi Jauhiainen, Heidi Jauhiainen, Niko Partanen, and Krister Lind\u00e9n. 2020b. Uralic Language Identification (ULI) 2020 shared task dataset and the Wanca 2017 corpora. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Reformer: The Efficient Transformer", |
| "authors": [ |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Kitaev", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Anselm", |
| "middle": [], |
| "last": "Levskaya", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikita Kitaev, Lukasz Kaiser, and Anselm Levskaya. 2019. Reformer: The Efficient Transformer. In Proceedings of ICLR.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Automatic Identification of Closely-related Indian Languages: Resources and Experiments", |
| "authors": [ |
| { |
| "first": "Ritesh", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Bornini", |
| "middle": [], |
| "last": "Lahiri", |
| "suffix": "" |
| }, |
| { |
| "first": "Deepak", |
| "middle": [], |
| "last": "Alok", |
| "suffix": "" |
| }, |
| { |
| "first": "Atul", |
| "middle": [ |
| "Kr." |
| ], |
| "last": "Ojha", |
| "suffix": "" |
| }, |
| { |
| "first": "Mayank", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdul", |
| "middle": [], |
| "last": "Basit", |
| "suffix": "" |
| }, |
| { |
| "first": "Yogesh", |
| "middle": [], |
| "last": "Dawar", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ritesh Kumar, Bornini Lahiri, Deepak Alok, Atul Kr. Ojha, Mayank Jain, Abdul Basit, and Yogesh Dawar. 2018. Automatic Identification of Closely-related Indian Languages: Resources and Experiments. In Proceedings of LREC.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Strukturen im Sprachraum: Analysen zur arealtypologischen Komplexit\u00e4t der Dialekte in Deutschland", |
| "authors": [ |
| { |
| "first": "Alfred", |
| "middle": [], |
| "last": "Lameli", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alfred Lameli. 2013. Strukturen im Sprachraum: Analysen zur arealtypologischen Komplexit\u00e4t der Dialekte in Deutschland. Walter de Gruyter.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "TweetGeo -a tool for collecting, processing and analysing geo-encoded linguistic data", |
| "authors": [ |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Ljube\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanja", |
| "middle": [], |
| "last": "Samard\u017ei\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Curdin", |
| "middle": [], |
| "last": "Derungs", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikola Ljube\u0161i\u0107, Tanja Samard\u017ei\u0107, and Curdin Derungs. 2016. TweetGeo -a tool for collecting, processing and analysing geo-encoded linguistic data. In Proceedings of COLING.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Discriminating between Similar Languages and Arabic Dialect Identification: A Report on the Third DSL Shared Task", |
| "authors": [ |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Ljube\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmed", |
| "middle": [], |
| "last": "Ali", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shervin Malmasi, Marcos Zampieri, Nikola Ljube\u0161i\u0107, Preslav Nakov, Ahmed Ali, and J\u00f6rg Tiedemann. 2016. Discriminating between Similar Languages and Arabic Dialect Identification: A Report on the Third DSL Shared Task. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Geolocation of tweets with a BiLSTM regression model", |
| "authors": [ |
| { |
| "first": "Piyush", |
| "middle": [], |
| "last": "Mishra", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piyush Mishra. 2020. Geolocation of tweets with a BiLSTM regression model. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Applying multilingual and monolingual Transformer-based models for dialect identification", |
| "authors": [ |
| { |
| "first": "Cristian", |
| "middle": [], |
| "last": "Popa", |
| "suffix": "" |
| }, |
| { |
| "first": "Vlad", |
| "middle": [], |
| "last": "\u0218tef\u0203nescu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cristian Popa and Vlad S , tef\u0203nescu. 2020. Applying multilingual and monolingual Transformer-based models for dialect identification. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "A dual-encoding system for dialect classification", |
| "authors": [ |
| { |
| "first": "Petru", |
| "middle": [], |
| "last": "Rebeja", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Cristea", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Petru Rebeja and Dan Cristea. 2020. A dual-encoding system for dialect classification. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "ArchiMob-A corpus of spoken Swiss German", |
| "authors": [ |
| { |
| "first": "Tanja", |
| "middle": [], |
| "last": "Samard\u017ei\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Scherrer", |
| "suffix": "" |
| }, |
| { |
| "first": "Elvira", |
| "middle": [], |
| "last": "Glaser", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tanja Samard\u017ei\u0107, Yves Scherrer, and Elvira Glaser. 2016. ArchiMob-A corpus of spoken Swiss German. In Proceedings of LREC.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "HeLju@VarDial 2020: Social media variety geolocation with BERT models", |
| "authors": [ |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Scherrer", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Ljube\u0161i\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yves Scherrer and Nikola Ljube\u0161i\u0107. 2020. HeLju@VarDial 2020: Social media variety geolocation with BERT models. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "A quantitative approach to Swiss German -Dialectometric analyses and comparisons of linguistic levels", |
| "authors": [ |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Scherrer", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Stoeckle", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Dialectologia et Geolinguistica", |
| "volume": "1", |
| "issue": "24", |
| "pages": "92--125", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yves Scherrer and Philipp Stoeckle. 2016. A quantitative approach to Swiss German -Dialectometric analyses and comparisons of linguistic levels. Dialectologia et Geolinguistica, 1(24):92-125.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Merging Comparable Data Sources for the Discrimination of Similar Languages: The DSL Corpus Collection", |
| "authors": [ |
| { |
| "first": "Liling", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Ljube\u0161ic", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of BUCC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liling Tan, Marcos Zampieri, Nikola Ljube\u0161ic, and J\u00f6rg Tiedemann. 2014. Merging Comparable Data Sources for the Discrimination of Similar Languages: The DSL Corpus Collection. In Proceedings of BUCC.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Language discrimination and transfer learning for similar languages: Experiments with feature combinations and adaptation", |
| "authors": [ |
| { |
| "first": "Nianheng", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "DeMattos", |
| "suffix": "" |
| }, |
| { |
| "first": "Kwok Him", |
| "middle": [], |
| "last": "So", |
| "suffix": "" |
| }, |
| { |
| "first": "Pin-zhen", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00c7a\u011fr\u0131", |
| "middle": [], |
| "last": "\u00c7\u00f6ltekin", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nianheng Wu, Eric DeMattos, Kwok Him So, Pin-zhen Chen, and \u00c7 agr\u0131 \u00c7\u00f6ltekin. 2019. Language discrimi- nation and transfer learning for similar languages: Experiments with feature combinations and adaptation. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Exploring the power of Romanian BERT for dialect identification", |
| "authors": [ |
| { |
| "first": "George-Eduard", |
| "middle": [], |
| "last": "Zaharia", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrei-Marius", |
| "middle": [], |
| "last": "Avram", |
| "suffix": "" |
| }, |
| { |
| "first": "Dumitru-Clementin", |
| "middle": [], |
| "last": "Cercel", |
| "suffix": "" |
| }, |
| { |
| "first": "Traian", |
| "middle": [], |
| "last": "Rebedea", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George-Eduard Zaharia, Andrei-Marius Avram, Dumitru-Clementin Cercel, and Traian Rebedea. 2020. Exploring the power of Romanian BERT for dialect identification. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Findings of the VarDial Evaluation Campaign", |
| "authors": [ |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Ljube\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmed", |
| "middle": [], |
| "last": "Ali", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| }, |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Scherrer", |
| "suffix": "" |
| }, |
| { |
| "first": "No\u00ebmi", |
| "middle": [], |
| "last": "Aepli", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcos Zampieri, Shervin Malmasi, Nikola Ljube\u0161i\u0107, Preslav Nakov, Ahmed Ali, J\u00f6rg Tiedemann, Yves Scherrer, and No\u00ebmi Aepli. 2017. Findings of the VarDial Evaluation Campaign 2017. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Language Identification and Morphosyntactic Tagging: The Second VarDial Evaluation Campaign", |
| "authors": [ |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmed", |
| "middle": [], |
| "last": "Ali", |
| "suffix": "" |
| }, |
| { |
| "first": "Suwon", |
| "middle": [], |
| "last": "Shon", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| }, |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Scherrer", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanja", |
| "middle": [], |
| "last": "Samard\u017ei\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Ljube\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Van Der Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Grondelaers", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcos Zampieri, Shervin Malmasi, Preslav Nakov, Ahmed Ali, Suwon Shon, James Glass, Yves Scherrer, Tanja Samard\u017ei\u0107, Nikola Ljube\u0161i\u0107, J\u00f6rg Tiedemann, Chris van der Lee, Stefan Grondelaers, Nelleke Oostdijk, Dirk Speelman, Antal van den Bosch, Ritesh Kumar, Bornini Lahiri, and Mayank Jain. 2018. Language Identifica- tion and Morphosyntactic Tagging: The Second VarDial Evaluation Campaign. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "A Report on the Third VarDial Evaluation Campaign", |
| "authors": [ |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Scherrer", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanja", |
| "middle": [], |
| "last": "Samard\u017ei\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Francis", |
| "middle": [], |
| "last": "Tyers", |
| "suffix": "" |
| }, |
| { |
| "first": "Miikka", |
| "middle": [], |
| "last": "Silfverberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Natalia", |
| "middle": [], |
| "last": "Klyueva", |
| "suffix": "" |
| }, |
| { |
| "first": "Tung-Le", |
| "middle": [], |
| "last": "Pan", |
| "suffix": "" |
| }, |
| { |
| "first": "Chu-Ren", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [ |
| "Tudor" |
| ], |
| "last": "Ionescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrei", |
| "middle": [], |
| "last": "Butnaru", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jauhiainen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of VarDial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcos Zampieri, Shervin Malmasi, Yves Scherrer, Tanja Samard\u017ei\u0107, Francis Tyers, Miikka Silfverberg, Natalia Klyueva, Tung-Le Pan, Chu-Ren Huang, Radu Tudor Ionescu, Andrei Butnaru, and Tommi Jauhiainen. 2019. A Report on the Third VarDial Evaluation Campaign. In Proceedings of VarDial.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Natural Language Processing for Similar Languages, Varieties, and Dialects: A Survey", |
| "authors": [ |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Scherrer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Natural Language Engineering", |
| "volume": "26", |
| "issue": "", |
| "pages": "595--612", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcos Zampieri, Preslav Nakov, and Yves Scherrer. 2020. Natural Language Processing for Similar Languages, Varieties, and Dialects: A Survey. Natural Language Engineering, 26:595-612.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Character-level Convolutional Networks for Text Classification", |
| "authors": [ |
| { |
| "first": "Xiang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Junbo", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Lecun", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "649--657", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiang Zhang, Junbo Zhao, and Yann LeCun. 2015. Character-level Convolutional Networks for Text Classifica- tion. In Proceedings of NIPS, pages 649-657.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Geographic distribution of training instances in the three SMG datasets: DE-AT (top), CH (bottom left), BCMS (bottom right).", |
| "num": null |
| }, |
| "TABREF1": { |
| "content": "<table><tr><td colspan=\"5\">: The teams that participated in the VarDial Evaluation Campaign 2020 along with their system</td></tr><tr><td colspan=\"5\">description papers. *The system description paper by team CUBoulder-UBC does not appear in the</td></tr><tr><td colspan=\"5\">VarDial workshop proceedings. CUBoulder-UBC reused a system described in Hulden et al. (2015).</td></tr><tr><td colspan=\"2\">4 Romanian Dialect Identification RDI</td><td/><td/><td/></tr><tr><td>4.1 Dataset</td><td/><td/><td/><td/></tr><tr><td colspan=\"5\">The training data is composed of news articles from the Moldavian and Romanian Dialectal Corpus (MO-</td></tr><tr><td colspan=\"5\">ROCO) 3 (Butnaru and Ionescu, 2019). MOROCO was collected from the top five news websites from</td></tr><tr><td colspan=\"5\">Romania and the Republic of Moldova, using each country's web domain (.ro or .md) to automatically</td></tr><tr><td colspan=\"2\">separate the news articles by dialect.</td><td/><td/><td/></tr><tr><td colspan=\"5\">The test data consists of short text samples from MOROCO-Tweets 4 (G\u0203man and Ionescu, 2020b).</td></tr><tr><td colspan=\"5\">The tweets are collected from Romania and the Republic of Moldova, the labels being assigned based on</td></tr><tr><td colspan=\"2\">the geographical location of tweets.</td><td/><td/><td/></tr><tr><td>Dialect</td><td>Training Set</td><td colspan=\"2\">Development Set Size</td><td>Test Set</td></tr><tr><td/><td>Size</td><td>News Articles</td><td>Tweets</td><td>Size</td></tr><tr><td>Romanian</td><td>18,161</td><td>3,205</td><td>102</td><td>2,523</td></tr><tr><td>Moldavian</td><td>15,403</td><td>2,718</td><td>113</td><td>2,499</td></tr><tr><td>Total</td><td>33,564</td><td>5,923</td><td>215</td><td>5,022</td></tr></table>", |
| "text": "", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF2": { |
| "content": "<table/>", |
| "text": "Number of text samples in the training, the development and the test sets considered for the RDI shared task.", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF5": { |
| "content": "<table/>", |
| "text": "SMG datasets.", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF7": { |
| "content": "<table><tr><td colspan=\"2\">Rank Team (Run)</td><td colspan=\"3\">Median distance Mean distance Dialect area accuracy</td></tr><tr><td>1</td><td>HeLju (UC ext)</td><td>143.30</td><td>166.64</td><td>36.1%</td></tr><tr><td/><td>HeLju (UC)</td><td>143.85</td><td>168.45</td><td>34.8%</td></tr><tr><td/><td>HeLju (C)</td><td>159.59</td><td>183.97</td><td>29.5%</td></tr><tr><td>2</td><td>Piyush Mishra</td><td>183.99</td><td>204.93</td><td>21.8%</td></tr><tr><td>3</td><td>CUBoulder-UBC (mean ens.)</td><td>198.27</td><td>218.51</td><td>25.1%</td></tr><tr><td/><td>Centroid baseline</td><td>201.34</td><td>221.55</td><td>17.7%</td></tr><tr><td>4</td><td>ZHAW-InIT (SVM)</td><td>205.81</td><td>230.78</td><td>27.7%</td></tr><tr><td/><td>CUBoulder-UBC (median ens.)</td><td>214.72</td><td>235.62</td><td>25.4%</td></tr><tr><td/><td>ZHAW-InIT (HELI)</td><td>217.80</td><td>241.33</td><td>19.5%</td></tr><tr><td/><td>CUBoulder-UBC (single)</td><td>219.08</td><td>239.47</td><td>25.7%</td></tr><tr><td>5</td><td>SUKI</td><td>243.12</td><td>266.85</td><td>24.4%</td></tr></table>", |
| "text": "SMG shared task -BCMS results. Unconstrained submissions above the horizontal line, constrained ones below.", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF9": { |
| "content": "<table/>", |
| "text": "CH results. Unconstrained submissions above the horizontal line, constrained ones below.", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF11": { |
| "content": "<table><tr><td colspan=\"2\">Rank Team</td><td colspan=\"2\">Run Method</td><td>Relevant micro F 1</td></tr><tr><td/><td>baseline</td><td/><td>HeLI</td><td>0.9632</td></tr><tr><td>1</td><td>NRC</td><td>1</td><td>ensemble of 6 deep neural networks</td><td>0.2596</td></tr><tr><td/><td>NRC</td><td>2</td><td>deep neural network with adaptation to the test set</td><td>0.1547</td></tr><tr><td/><td>NRC</td><td>3</td><td>deep neural network</td><td>0.1359</td></tr></table>", |
| "text": "ULI shared task -RLE results.", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF12": { |
| "content": "<table><tr><td colspan=\"2\">Rank Team</td><td colspan=\"2\">Run Method</td><td>Macro F 1</td></tr><tr><td/><td>baseline</td><td/><td>HeLI</td><td>0.9252</td></tr><tr><td>1</td><td>NRC</td><td>2</td><td>deep neural network with adaptation to the test set</td><td>0.6751</td></tr><tr><td/><td>NRC</td><td>3</td><td>deep neural network</td><td>0.6628</td></tr><tr><td/><td>NRC</td><td>1</td><td>ensemble of 6 deep neural networks</td><td>0.6356</td></tr></table>", |
| "text": "ULI shared task -RSS results.", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF13": { |
| "content": "<table/>", |
| "text": "ULI shared task -178 results.", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |