Add Batch ce6f78c6-9ab8-4d60-ae82-2bd8342dcfe1
This view is limited to 50 files because it contains too many changes. See raw diff
- 2kenizetyingsubwordsequencesforchinesescriptconversion/5e2eb333-d3dc-4384-b313-9fcdcdebb6e1_content_list.json +3 -0
- 2kenizetyingsubwordsequencesforchinesescriptconversion/5e2eb333-d3dc-4384-b313-9fcdcdebb6e1_model.json +3 -0
- 2kenizetyingsubwordsequencesforchinesescriptconversion/5e2eb333-d3dc-4384-b313-9fcdcdebb6e1_origin.pdf +3 -0
- 2kenizetyingsubwordsequencesforchinesescriptconversion/full.md +460 -0
- 2kenizetyingsubwordsequencesforchinesescriptconversion/images.zip +3 -0
- 2kenizetyingsubwordsequencesforchinesescriptconversion/layout.json +3 -0
- abatchnormalizedinferencenetworkkeepstheklvanishingaway/76710d6c-bd02-47c7-86e5-0f31e1b093ae_content_list.json +3 -0
- abatchnormalizedinferencenetworkkeepstheklvanishingaway/76710d6c-bd02-47c7-86e5-0f31e1b093ae_model.json +3 -0
- abatchnormalizedinferencenetworkkeepstheklvanishingaway/76710d6c-bd02-47c7-86e5-0f31e1b093ae_origin.pdf +3 -0
- abatchnormalizedinferencenetworkkeepstheklvanishingaway/full.md +439 -0
- abatchnormalizedinferencenetworkkeepstheklvanishingaway/images.zip +3 -0
- abatchnormalizedinferencenetworkkeepstheklvanishingaway/layout.json +3 -0
- acallformorerigorinunsupervisedcrosslinguallearning/684851d0-c13c-45a1-a7cd-979a3fa7c5e1_content_list.json +3 -0
- acallformorerigorinunsupervisedcrosslinguallearning/684851d0-c13c-45a1-a7cd-979a3fa7c5e1_model.json +3 -0
- acallformorerigorinunsupervisedcrosslinguallearning/684851d0-c13c-45a1-a7cd-979a3fa7c5e1_origin.pdf +3 -0
- acallformorerigorinunsupervisedcrosslinguallearning/full.md +306 -0
- acallformorerigorinunsupervisedcrosslinguallearning/images.zip +3 -0
- acallformorerigorinunsupervisedcrosslinguallearning/layout.json +3 -0
- acompleteshiftreducechinesediscourseparserwithrobustdynamicoracle/863d725e-89f9-475d-a34e-476d1deb53e1_content_list.json +3 -0
- acompleteshiftreducechinesediscourseparserwithrobustdynamicoracle/863d725e-89f9-475d-a34e-476d1deb53e1_model.json +3 -0
- acompleteshiftreducechinesediscourseparserwithrobustdynamicoracle/863d725e-89f9-475d-a34e-476d1deb53e1_origin.pdf +3 -0
- acompleteshiftreducechinesediscourseparserwithrobustdynamicoracle/full.md +181 -0
- acompleteshiftreducechinesediscourseparserwithrobustdynamicoracle/images.zip +3 -0
- acompleteshiftreducechinesediscourseparserwithrobustdynamicoracle/layout.json +3 -0
- acomprehensiveanalysisofpreprocessingforwordrepresentationlearninginaffectivetasks/bab6b959-5d76-4e05-a32d-ea7723b78eaa_content_list.json +3 -0
- acomprehensiveanalysisofpreprocessingforwordrepresentationlearninginaffectivetasks/bab6b959-5d76-4e05-a32d-ea7723b78eaa_model.json +3 -0
- acomprehensiveanalysisofpreprocessingforwordrepresentationlearninginaffectivetasks/bab6b959-5d76-4e05-a32d-ea7723b78eaa_origin.pdf +3 -0
- acomprehensiveanalysisofpreprocessingforwordrepresentationlearninginaffectivetasks/full.md +280 -0
- acomprehensiveanalysisofpreprocessingforwordrepresentationlearninginaffectivetasks/images.zip +3 -0
- acomprehensiveanalysisofpreprocessingforwordrepresentationlearninginaffectivetasks/layout.json +3 -0
- acorpusforlargescalephonetictypology/214b2e1f-db8e-438f-b526-0b31ec7cc50b_content_list.json +3 -0
- acorpusforlargescalephonetictypology/214b2e1f-db8e-438f-b526-0b31ec7cc50b_model.json +3 -0
- acorpusforlargescalephonetictypology/214b2e1f-db8e-438f-b526-0b31ec7cc50b_origin.pdf +3 -0
- acorpusforlargescalephonetictypology/full.md +373 -0
- acorpusforlargescalephonetictypology/images.zip +3 -0
- acorpusforlargescalephonetictypology/layout.json +3 -0
- activeimitationlearningwithnoisyguidance/1cd987d1-2f57-45e1-806b-b3fb3e73ee83_content_list.json +3 -0
- activeimitationlearningwithnoisyguidance/1cd987d1-2f57-45e1-806b-b3fb3e73ee83_model.json +3 -0
- activeimitationlearningwithnoisyguidance/1cd987d1-2f57-45e1-806b-b3fb3e73ee83_origin.pdf +3 -0
- activeimitationlearningwithnoisyguidance/full.md +404 -0
- activeimitationlearningwithnoisyguidance/images.zip +3 -0
- activeimitationlearningwithnoisyguidance/layout.json +3 -0
- activelearningforcoreferenceresolutionusingdiscreteannotation/97c9749f-2ce8-4b80-9725-60e8e57f6363_content_list.json +3 -0
- activelearningforcoreferenceresolutionusingdiscreteannotation/97c9749f-2ce8-4b80-9725-60e8e57f6363_model.json +3 -0
- activelearningforcoreferenceresolutionusingdiscreteannotation/97c9749f-2ce8-4b80-9725-60e8e57f6363_origin.pdf +3 -0
- activelearningforcoreferenceresolutionusingdiscreteannotation/full.md +362 -0
- activelearningforcoreferenceresolutionusingdiscreteannotation/images.zip +3 -0
- activelearningforcoreferenceresolutionusingdiscreteannotation/layout.json +3 -0
- adaptivecompressionofwordembeddings/616ae366-0520-4bac-92fe-f6a591154b5a_content_list.json +3 -0
- adaptivecompressionofwordembeddings/616ae366-0520-4bac-92fe-f6a591154b5a_model.json +3 -0

2kenizetyingsubwordsequencesforchinesescriptconversion/5e2eb333-d3dc-4384-b313-9fcdcdebb6e1_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ea0f615c5b3daa3308f7bc3c3d8505fd4d4e301355bf5dddba14224a34c2fa9
+size 112517

2kenizetyingsubwordsequencesforchinesescriptconversion/5e2eb333-d3dc-4384-b313-9fcdcdebb6e1_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2a135e0268a0623e2991143d5b7a4569e18ab9a4171f2b060d13c5107706589
+size 139359

2kenizetyingsubwordsequencesforchinesescriptconversion/5e2eb333-d3dc-4384-b313-9fcdcdebb6e1_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afcefa6d41805c4e1827f8d3b1a9b274c5dbfdbc2cd29f98811fb225c537dd53
+size 633984

2kenizetyingsubwordsequencesforchinesescriptconversion/full.md
ADDED
@@ -0,0 +1,460 @@

# 2kenize: Tying Subword Sequences for Chinese Script Conversion

Pranav A$^{1}$ and Isabelle Augenstein$^{2}$

$^{1}$ Independent Researcher, Hong Kong

$^{2}$ Department of Computer Science, University of Copenhagen, Denmark

cs.pranav.a{at}gmail.com, augenstein{at}di.ku.dk

# Abstract

Simplified Chinese to Traditional Chinese character conversion is a common preprocessing step in Chinese NLP. Despite this, current approaches have insufficient performance because they do not take into account that a simplified Chinese character can correspond to multiple traditional characters. Here, we propose a model that can disambiguate between mappings and convert between the two scripts. The model is based on subword segmentation, two language models, as well as a method for mapping between subword sequences. We further construct benchmark datasets for topic classification and script conversion. Our proposed method outperforms previous Chinese character conversion approaches by 6 points in accuracy. These results are further confirmed in a downstream application, where 2kenize is used to convert the pretraining dataset for topic classification. An error analysis reveals that our method's particular strengths are in dealing with code mixing and named entities. The code and datasets are available at https://github.com/pranav-ust/2kenize

# 1 Introduction

Chinese character (or script) conversion is a common preprocessing step for Chinese NLP practitioners (Zhang, 2014; Shi et al., 2011). Traditional Chinese (TC) and Simplified Chinese (SC) are the two standardized character sets (or scripts) for written Chinese. TC is predominantly used in Taiwan, Hong Kong, and Macau, whereas SC is mainly adopted in mainland China; SC characters are simplified versions of TC characters in terms of strokes and parts. Therefore, Chinese NLP practitioners apply script converters<sup>1</sup> to translate the dataset into their desired language.

<table><tr><td>SC Sentence</td><td>维护发展中国家共同利益</td><td>Comments</td></tr><tr><td>Segmentation</td><td>维护发展中国家共同利益</td><td>护发: haircare</td></tr><tr><td>Conversion</td><td>維護髮展中國家共同利益</td><td>✗ incorrect conversion</td></tr><tr><td>Segmentation</td><td>维护发展中国家共同利益</td><td>发展: develop</td></tr><tr><td>Conversion</td><td>維護發展中國家共同利益</td><td>✓ correct conversion</td></tr></table>

Table 1: Example sentence with two different segmentations and the resulting conversions. The sentence translates to Safeguarding the common interests of developing countries. This is a recurring example in this paper; see also §F.5.

This is especially useful for TC NLP practitioners, because TC is less widely used and under-resourced compared to SC.

Converting from TC to SC is generally straightforward because there are one-to-one correspondences between most of the characters, so conversion can be performed using mapping tables (Denisowski, 2019; Chu et al., 2012). However, conversion from SC to TC is an arduous task, as some SC characters can be mapped to more than one TC character depending on the context of the sentence. A detailed analysis by Halpern and Kerman (1999) shows that SC to TC conversion is a challenging and crucial problem, as $12\%$ of SC characters have one-to-many mappings to TC characters. Our experiments show that current script converters achieve sentence accuracy results of only $55-85\%$ (§3).

Another issue is that varying tokenizations lead to different results, as Chinese is an unsegmented language; see Table 1 for an example. Off-the-shelf script converters translate 维护发展中国家共同利益 into 維護髮展中國家共同利益,² whereas the correct conversion is 維護發展中國家共同利益. Here, the SC character 发 has two TC mappings, 髮 (hair) and 發 (issue), depending on the context and tokenization, which shows that this task is non-trivial.

Despite this being an important task, there is a lack of benchmarks,$^{3}$ which implies that this problem is understudied in NLP. In this study, we propose 2kenize, a subword segmentation model which jointly considers the Simplified Chinese input and forecasts its Traditional Chinese constructions. We achieve this by constructing a Viterbi tokenizer based on a joint Simplified Chinese and Traditional Chinese language model. Performing mapping disambiguation on top of this tokenization improves sentence accuracy by 6 points compared to off-the-shelf converters and supervised models. Our qualitative error analysis reveals that our method's particular strengths are in dealing with code mixing and named entities. Additionally, we address the lack of benchmark datasets by constructing datasets for script conversion and TC topic classification.

# 2 2kenize: Joint Segmentation and Conversion

We employ subword tokenization, as it addresses the issue of rare and unknown words (Mikolov et al., 2012) and has been shown to be advantageous for language modelling of morphologically rich languages (Czapla et al., 2018; Mielke and Eisner, 2019). It achieves accuracy improvements for neural machine translation (NMT) and has become prevailing practice there (Denkowski and Neubig, 2017). The most widely used method is Byte Pair Encoding (BPE; Sennrich et al., 2016), a compression algorithm that merges frequent sequences of characters, which results in rare strings being segmented into subwords. Unigram (Kudo, 2018) and BPE-Drop (Provilkov et al., 2019) use subword ambiguity as noise, stochastically corrupting the BPE segmentation to make it less deterministic. For NMT tasks, subword segmentation is generally seen as a monolingual task and applied independently to the source and target corpora. We hypothesize that translation tasks, and specifically conversion tasks as investigated here, would perform better if segmentation were performed jointly. Hence, in this section, we describe our proposed method 2kenize, which segments jointly by taking the source sentence and its approximate target sentences into account. This motivates the main idea of this paper: 2kenize jointly considers the source sentence and its corresponding target conversions by doing lookaheads with mappings.

# 2.1 Outline of the proposed approach

Given the possible SC character sequence $\mathbf{s} = s_1 s_2 \ldots s_n$ and TC character sequence $\mathbf{t} = t_1 t_2 \ldots t_n$, we want to find the most likely $\mathbf{t}$, which is given by the Bayes decision rule as follows:

$$
\mathbf{t} = \underset{\mathbf{t}' \in T^{*}}{\arg\max}\; p(\mathbf{s}, \mathbf{t}') \tag{1}
$$

where $T^{*}$ denotes the set of all strings over symbols $t_i$ in $T$ (Kleene star). We divide this problem into two parts: finding the mapping sequence (2) and finding the TC sequence from the mappings (7).

We define a mapping as $m_i = (\mathfrak{s}_i, \mathfrak{t}_i) = (s_{j:k}, \mathfrak{t}_{j:k})$. Here, $\mathfrak{t}_{j:k} = \{t_{j:k}^{1}, \ldots, t_{j:k}^{n}\}$ is the set of TC characters that correspond to the SC characters in the mapping. A mapping sequence is then a concatenation of mappings, $\mathbf{m} = m_1 m_2 \dots m_l$. Let $\mathcal{M}$ be the superset of all possible mapping sequences and $\mathcal{M}(\mathbf{s})$ the set of all mapping sequences resulting from $\mathbf{s}$. The best possible mapping sequence is then given by

$$
\mathbf{m} = \underset{\mathbf{m}' \in \mathcal{M}(\mathbf{s})}{\arg\max}\; p(\mathbf{m}') \tag{2}
$$

Moreover, $p(\mathbf{m})$ can be expanded as follows:

$$
\begin{aligned}
p(\mathbf{m}) &= p(m_1 m_2 \dots m_l) && (3) \\
&= p\begin{pmatrix} \mathfrak{s}_1 & \mathfrak{s}_2 & \dots & \mathfrak{s}_l \\ \mathfrak{t}_1 & \mathfrak{t}_2 & \dots & \mathfrak{t}_l \end{pmatrix} && (4) \\
&\approx p(\mathfrak{s}_1 \mathfrak{s}_2 \dots \mathfrak{s}_l) + p(\mathfrak{t}_1 \mathfrak{t}_2 \dots \mathfrak{t}_l) && (5) \\
&= p_{LM}(\mathfrak{s}_{1:l}) + \sum_{t \in \prod_i \mathfrak{t}_i} p_{LM}(t_{1:l}) && (6)
\end{aligned}
$$

After expanding the mapping sequence (4), we approximate its probability as the sum of the likelihoods of the two sequences formed by the co-segmentation (5). The set of possible TC sequences is given by the Cartesian product of the $\mathfrak{t}_i$. These likelihoods can then be estimated using language model (LM) probabilities, as shown in (6).

$$
\mathbf{t} = \underset{\mathbf{t}' \in \mathbf{m}_{\mathbf{t}}}{\arg\max}\; p(\mathbf{t}') \tag{7}
$$

Figure 1: Language model architecture with subword and subsequence sampling. (Alt text: §F.1)

Once the mapping sequence $\mathbf{m}$ has been found, all possible TC sequences are drawn from the set $\mathbf{m}_{\mathbf{t}}$, the Cartesian product of all $\mathfrak{t}_i$ in $\mathbf{m}$. From (7), we compute the approximate final sequence using beam search.
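
To make the decision rule concrete, below is a minimal, self-contained sketch of Eqs. (2), (6), and (7) on a toy input. The mapping table `MAPPINGS` and the scoring functions `sc_lm` and `tc_lm` are illustrative stubs rather than the trained SC and TC LSTMs, and Eq. (7) is solved exhaustively here instead of with beam search:

```python
from itertools import product

# Hypothetical toy mapping table: SC subword -> candidate TC subwords.
MAPPINGS = {"维护": ["維護"], "护发": ["護髮"], "发展": ["發展"],
            "维": ["維"], "展": ["展"]}

def sc_lm(tokens):
    # Stub for log p_LM of an SC subword sequence; a real system would
    # query the trained SC LSTM here.
    return -2.0 * len(tokens)

def tc_lm(tokens):
    # Stub for log p_LM of a TC subword sequence.
    return -2.0 * len(tokens)

def segmentations(s):
    """All ways to cover s with subwords from the mapping table."""
    if not s:
        yield []
        return
    for k in range(1, len(s) + 1):
        if s[:k] in MAPPINGS:
            for rest in segmentations(s[k:]):
                yield [s[:k]] + rest

def convert(s):
    # Eq. (2)/(6): score each mapping sequence by the SC LM score plus the
    # summed TC LM scores over the Cartesian product of TC candidates.
    def score(seg):
        cands = [MAPPINGS[p] for p in seg]
        return sc_lm(seg) + sum(tc_lm(t) for t in product(*cands))
    seg = max(segmentations(s), key=score)
    # Eq. (7): pick the single most likely TC sequence for that segmentation.
    best = max(product(*[MAPPINGS[p] for p in seg]), key=tc_lm)
    return "".join(best)

print(convert("维护发展"))  # -> 維護發展 under these toy scores
```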

# 2.2 Model Architecture

Viterbi, a dynamic programming (DP) algorithm, considers phrases (or subsequences) and performs segmentation in a bottom-up fashion (Nagata, 1994; Sproat et al., 1996). RNN-based language models, in contrast, are theoretically '$\infty$'-gram (Khandelwal et al., 2018), which constitutes a challenge. Consider the sentence 维护发展中国家共同利益. A potential challenge is to adequately estimate the probability of 共同利益: since this sequence occurs infrequently at the beginning of sentences in the corpus, an RNN would under-estimate the probability of this subsequence. Moreover, an RNN would likely lose some useful context and perform worse without it (Kim et al., 2019). So, for Viterbi to perform well with an RNN, we train the language model on subsequences. We approach this by training our model such that it samples subsequences randomly in each epoch.

Figure 2: From the given SC sentence, we create possible TC sequences using mappings. We input these to Viterbi, which recursively calls the LSTM. Using Eq. (6) as the scoring function, Viterbi outputs the mapping sequence. We perform beam search to find the best TC sequence from the mapping sequence. (Alt text: §F.2)

As shown in Fig. 1, we randomly split each sentence and use different subsequences in separate epochs.
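
A minimal sketch of this per-epoch subsequence sampling; the number and positions of the splits are our own assumption, since the exact sampling scheme is a design choice:

```python
import random

def sample_subsequences(tokens, max_pieces=3):
    """Randomly split one training sentence into contiguous subsequences
    for the current epoch, so the LM also learns probabilities of
    sentence-internal spans (cf. Fig. 1)."""
    max_pieces = min(max_pieces, len(tokens))
    n_pieces = random.randint(1, max_pieces)
    cuts = sorted(random.sample(range(1, len(tokens)), n_pieces - 1))
    bounds = [0] + cuts + [len(tokens)]
    return [tokens[a:b] for a, b in zip(bounds, bounds[1:])]

# Each epoch draws a fresh split of the same sentence:
sent = ["维护", "发展", "中", "国家", "共同", "利益"]
for epoch in range(3):
    print(epoch, sample_subsequences(sent))
```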

Using the subword regularization method of Kudo (2018), we sample from the $n$-best segmentations in each epoch. This is done so that the model can learn different segmentations of a subsequence, with a motivation similar to the above. Recent work has shown that varying subword segmentations leads to better downstream model performance (Provilkov et al., 2019; Kudo, 2018; Hiraoka et al., 2019); we therefore use it as a data augmentation strategy. Once we obtain the $n$-best segmentations with scores, we normalize the scores and use them as sampling probabilities (see Fig. 1). As opposed to other subword tokenizers, where the vocabulary size is fixed, we do not limit the vocabulary in our model. Hence, there are numerous possible segment combinations, which creates a need to cache the most frequent tokens. Inspired by work on cache-based LMs (Kawakami et al., 2017) and ghost batches (Hoffer et al., 2017), we keep only the top-$k$ tokens in the main network memory and track the gradients of less recently used token embeddings (an LRU, Least Recently Used, policy). This can be thought of as virtual embeddings, since delayed gradient accumulation allows us to accommodate a larger number of tokens.
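
A sketch of the segmentation-sampling step, with a hypothetical n-best list and illustrative scores (the LRU embedding cache is omitted for brevity):

```python
import random

def sample_segmentation(nbest):
    """Pick one of the n-best segmentations for this epoch, using the
    normalized segmentation scores as sampling probabilities."""
    segs, scores = zip(*nbest)
    total = sum(scores)
    probs = [s / total for s in scores]   # normalize to a distribution
    return random.choices(segs, weights=probs, k=1)[0]

# Hypothetical n-best list of (segmentation, score) pairs:
nbest = [(["维护", "发展", "中", "国家", "共同", "利益"], 0.6),
         (["维", "护发", "展", "中国", "家", "共同", "利益"], 0.3),
         (["维护", "发", "展", "中国", "家", "共同", "利益"], 0.1)]
print(sample_segmentation(nbest))
```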

<table><tr><td></td><td>HK Literature</td><td>HK News</td><td>TW Literature</td><td>TW News</td></tr><tr><td rowspan="3">Sources</td><td>Liu (1962)</td><td>Singpao (2017-2018)</td><td>Jiubadao (2011)</td><td>AS subset, Emerson (2005)</td></tr><tr><td>Lau Yee (1972)</td><td>Mingpao (2017-2018)</td><td>Ko (2010)</td><td>Liberty Times (2017-2018)</td></tr><tr><td>Foon (1988)</td><td>CityU subset, Emerson (2005)</td><td>Yao (1964)</td><td>United Daily News (2017-2018)</td></tr><tr><td>Average Length</td><td>194.8</td><td>214.6</td><td>188.2</td><td>223.6</td></tr><tr><td>IAA</td><td>0.982</td><td>0.979</td><td>0.981</td><td>0.971</td></tr><tr><td>Mapping Examples</td><td>干-[幹,乾,干]<br>须-[須,鬚]</td><td>苏-[蘇,囌,甦]<br>暗-[暗,闇]</td><td>复-[復,複,覆]<br>叹-[嘆,歎]</td><td>胡-[胡,衚,鬍]<br>迹-[蹟,跡]</td></tr></table>

Table 2: An overview of the dataset used for intrinsic evaluation. We report sources, average character lengths, sentence-level inter-annotator agreement (IAA, reported as $\kappa$), and some examples of ambiguous SC-TC mappings.

This virtual-size embedding architecture is related to the continuous cache implementation and to stochastic tokenization architectures (Grave et al., 2016; Hiraoka et al., 2019).

# 2.3 Segmentation and Disambiguation

This optimal sequencing problem can be formulated as an overlapping-subsequence problem, which can be solved using LM-based Viterbi decoding (Nagata, 1994; Sproat et al., 1996). Fig. 2 illustrates this process of joint subword modelling. Here, we take Eq. (6) as the objective function for finding the mapping sequence; in our implementation, however, we use subword perplexities (Cotterell et al., 2018; Mielke et al., 2019; Mielke, 2019). For the TC LSTM, we add the probabilities of the beams of the possible sequences.

As discussed in §2.1 and Eq. (7), beam search is needed to select the best subword sequence for TC. Once the sentences are tokenized, the mapping table is used to convert each SC token to the corresponding TC token. We extract the final TC sentence by resolving ambiguities through beam search with the TC LSTM (Fig. 2).
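
Structurally, the pipeline of Fig. 2 can be sketched as below. The mapping table and the two piece scorers are stand-ins for the trained subword LSTMs (which condition on the full left context and use subword perplexities), so this shows the control flow rather than the paper's exact implementation:

```python
import math

# Toy mapping table: SC subword -> candidate TC subwords.
MAPPINGS = {"维护": ["維護"], "护发": ["護髮"], "发展": ["發展"],
            "中": ["中"], "中国": ["中國"], "国家": ["國家"],
            "家": ["家"], "共同": ["共同"], "利益": ["利益"],
            "维": ["維"], "展": ["展"]}

def sc_score(piece):
    # Stub: log-probability of one SC subword (longer pieces score higher).
    return math.log(0.5) * max(1, 3 - len(piece))

def tc_score(prefix, piece):
    # Stub: log-probability of a TC subword; a real TC LSTM would use prefix.
    return math.log(0.5) * max(1, 3 - len(piece))

def viterbi_mappings(s):
    """Viterbi over the subword lattice with a per-piece Eq. (6) score."""
    best = {0: (0.0, [])}                    # end position -> (score, pieces)
    for i in range(1, len(s) + 1):
        for j in range(i):
            piece = s[j:i]
            if j in best and piece in MAPPINGS:
                cand = best[j][0] + sc_score(piece) + \
                       sum(tc_score("", t) for t in MAPPINGS[piece])
                if i not in best or cand > best[i][0]:
                    best[i] = (cand, best[j][1] + [piece])
    return best[len(s)][1]

def beam_convert(s, width=4):
    """Beam search over TC candidates for the chosen mapping sequence."""
    beam = [("", 0.0)]
    for piece in viterbi_mappings(s):
        beam = sorted(((prefix + t, score + tc_score(prefix, t))
                       for prefix, score in beam
                       for t in MAPPINGS[piece]),
                      key=lambda x: -x[1])[:width]
    return beam[0][0]

print(beam_convert("维护发展中国家共同利益"))
```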

# 3 Intrinsic Evaluation

# 3.1 Dataset for Intrinsic Evaluation

We construct a gold-standard corpus for both Chinese scripts consisting of four domains: HK literature and newswire, and Taiwanese literature and newswire (Table 2), with each domain containing 3,000 sentences. SC-TC mapping tables are constructed from existing resources (Denisowski, 2019; Chu et al., 2012). We heuristically convert the selected TC sentences to SC using OpenCC and ask annotators to manually correct any incorrect conversions.$^{4}$

# 3.2 Language Model Training

We choose the SIGHAN-2005 Bakeoff dataset to train the segmentation-based language model (Emerson, 2005). For SC, we select the PKU and MSR partitions, and for TC, we use the Academia Sinica and CityU partitions. We apply maximal matching (a heuristic dictionary-based word segmenter) to pre-process these datasets by segmenting words into subwords (Wong and Chan, 1996); here, 'dictionary' refers to the word list in the mapping table. We then train a 2-layer LSTM language model with tied weights and embedding and hidden sizes of 512 (Sundermeyer et al., 2012) on this segmented dataset, with subsequence sampling and stochastic tokenization as discussed in §2.2.
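
Maximal matching itself is a simple greedy longest-first procedure; a sketch over a toy word list (in our setting, the dictionary is the word list of the mapping table):

```python
def maximal_matching(sentence, dictionary, max_len=4):
    """Greedy longest-match segmentation: at each position, take the
    longest dictionary entry that matches, falling back to one character."""
    tokens, i = [], 0
    while i < len(sentence):
        for k in range(min(max_len, len(sentence) - i), 0, -1):
            if k == 1 or sentence[i:i + k] in dictionary:
                tokens.append(sentence[i:i + k])
                i += k
                break
    return tokens

words = {"维护", "发展", "国家", "共同", "利益"}
print(maximal_matching("维护发展中国家共同利益", words))
# -> ['维护', '发展', '中', '国家', '共同', '利益']
```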

# 3.3 Baselines and Ablations

We implement the following baselines for our experiments:

Off-the-shelf Converters: Hanziconv$^{6}$ and Mafan$^{7}$ are dictionary-based character converters; evaluating them is useful for establishing a lower bound on accuracy. OpenCC$^{8}$ uses a hybrid of characters and words (specifically, a trie-based tokenizer) for script conversion (Pranav A et al., 2019).

Language Model Disambiguation: A strong baseline for this problem is to build a language model to disambiguate between the characters, quite similar to STCP (Xu et al., 2017). We use a 2-layer LSTM language model trained on a Traditional Chinese corpus.

Neural Sequence Models: We heuristically convert Traditional Chinese Wikipedia to Simplified Chinese using OpenCC and use it for training a seq2seq model (Sutskever et al., 2014). We construct a 20-layer convolutional sequence model (Gehring et al., 2017) (both in the encoder and the decoder) using fairseq (Ott et al., 2019).

<table><tr><td rowspan="2">Conversion System</td><td colspan="2">HK Lit</td><td colspan="2">HK News</td><td colspan="2">TW Lit</td><td colspan="2">TW News</td><td colspan="2">Overall</td></tr><tr><td>DED</td><td>SA</td><td>DED</td><td>SA</td><td>DED</td><td>SA</td><td>DED</td><td>SA</td><td>DED</td><td>SA</td></tr><tr><td>Dictionary based conversion, Hanziconv</td><td>34.1</td><td>54.7</td><td>37.7</td><td>59.1</td><td>31.3</td><td>60.0</td><td>39.3</td><td>58.9</td><td>34.2</td><td>55.6</td></tr><tr><td>Dictionary based conversion, Mafan</td><td>14.7</td><td>71.2</td><td>17.7</td><td>72.5</td><td>14.5</td><td>73.8</td><td>13.3</td><td>72.7</td><td>14.4</td><td>72.6</td></tr><tr><td>Trie dictionary based conversion, OpenCC</td><td>5.5</td><td>87.3</td><td>5.1</td><td>83.4</td><td>4.1</td><td>84.7</td><td>3.8</td><td>88.5</td><td>4.3</td><td>85.3</td></tr><tr><td>Language Model Disambiguation, STCP</td><td>6.3</td><td>85.6</td><td>5.4</td><td>79.9</td><td>4.7</td><td>84.1</td><td>5.2</td><td>83.9</td><td>5.3</td><td>84.0</td></tr><tr><td>Convolutional Sequence Models</td><td>6.7</td><td>85.8</td><td>5.3</td><td>79.3</td><td>4.8</td><td>84.5</td><td>5.2</td><td>83.9</td><td>5.4</td><td>84.4</td></tr><tr><td>2kenize with word tokenization</td><td>11.2</td><td>84.3</td><td>12.1</td><td>81.3</td><td>11.3</td><td>82.1</td><td>10.0</td><td>81.1</td><td>11.5</td><td>82.7</td></tr><tr><td>2kenize with maximal matching</td><td>5.2</td><td>88.7</td><td>3.3</td><td>93.1</td><td>4.0</td><td>88.6</td><td>4.8</td><td>87.7</td><td>4.5</td><td>88.9</td></tr><tr><td>2kenize with Unigram subwords</td><td>3.4</td><td>91.9</td><td>3.8</td><td>90.9</td><td>4.3</td><td>88.1</td><td>3.9</td><td>87.8</td><td>3.7</td><td>89.3</td></tr><tr><td>2kenize with joint LSTM modelling</td><td>2.8</td><td>94.9</td><td>3.1</td><td>93.7</td><td>3.8</td><td>91.3</td><td>2.9</td><td>91.9</td><td>3.0</td><td>92.4</td></tr></table>

Table 3: Results of the intrinsic evaluation experiments, reported as means across 10 different seeds. We use disambiguation error density (DED, lower is better) and sentence accuracy (SA, higher is better) as evaluation metrics. **Bold:** best, **underlined:** second best.

We perform ablation tests by substituting the following segmentation models:

Word tokenization: We use Jieba, a commonly used hidden-Markov-model-based word tokenizer for Chinese NLP.

Dictionary substrings: We apply maximal string matching, a dictionary-based greedy tokenizer (Pranav A et al., 2019; Wong and Chan, 1996).

Unigram from SentencePiece: Subword segmentation is performed by sampling from unigram language model perplexity values (Kudo, 2018).

Joint subwords: As discussed in §2.3, we use joint SC-TC subwords.

# 3.4 Results for Intrinsic Evaluation

We evaluate our models using disambiguation error density (DED) and sentence accuracy (SA). DED is the total edit distance per 1,000 ambiguous Simplified characters, i.e. $\frac{\sum\text{edit distances}}{\sum\text{ambiguous Simplified characters}} \times 1000$. SA is the percentage of sentences converted entirely correctly. Contrary to previous papers, we do not report character-based accuracy values, as most characters have straightforward mappings; this is why we opt for a less forgiving metric like SA, where every character in a sentence has to be correctly converted.
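
Both metrics are easy to state in code; a sketch, where the per-sentence counts of ambiguous SC characters are assumed to be derived from the mapping table:

```python
def edit_distance(a, b):
    """Character-level Levenshtein distance with a rolling 1-D table."""
    d = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        prev, d[0] = d[0], i
        for j, cb in enumerate(b, 1):
            prev, d[j] = d[j], min(d[j] + 1, d[j - 1] + 1,
                                   prev + (ca != cb))
    return d[-1]

def ded_and_sa(hyps, refs, ambiguous_counts):
    """DED: summed edit distance per 1,000 ambiguous SC characters.
    SA: percentage of sentences converted exactly."""
    dist = sum(edit_distance(h, r) for h, r in zip(hyps, refs))
    ded = dist / sum(ambiguous_counts) * 1000
    sa = 100 * sum(h == r for h, r in zip(hyps, refs)) / len(refs)
    return ded, sa

print(ded_and_sa(["維護髮展"], ["維護發展"], [1]))  # -> (1000.0, 0.0)
```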

Results are shown in Table 3, broken down by domain and overall. Our model attains an average DED of 3.0 and an SA of $92.4\%$ overall, whereas the best existing converter, OpenCC, only achieves a DED of 4.3 and an SA of $85.3\%$. We find that seq2seq and LM-based disambiguation perform almost on par with OpenCC, due to the large number of false-positive errors made by these models. Jieba achieves an average DED of 11.2, as it does not handle OOV words well. Maximal matching of segmented words and Unigram subwords achieve overall DEDs of 4.5 and 3.7, respectively, showing that joint segmentation yields better results. Accuracy values are slightly worse on news text, due to the relatively high number of new entities in those datasets. Heuristically converting TC to SC introduces conversion errors into the training dataset; additionally, seq2seq approaches tend to reword the target sentence, which shows that they are unsuitable for this task.

# 3.5 Qualitative Error Analysis

We manually inspect incorrect conversions from the intrinsic evaluation and find four recurring linguistic patterns which confuse the converters. We instructed the annotators to label each item in the intrinsic evaluation dataset (12,000 sentences overall) according to whether it contains any of these patterns. Table 4 provides an overview of these patterns' statistics and the converters' performance on them.

Code mixing: Vernacular Cantonese characters (zh-yue) are a subset of TC characters but do not follow the norms of standard written Chinese (Snow, 2004). We find that some of the sentences in our dataset are code-mixed with zh-yue (e.g. speech transcriptions) or English (e.g. named entities). Consider the snippet "...古惑架 BENZ 190E 撞埋支...", which is code-mixed with both zh-yue and English.

<table><tr><td>Case</td><td>Method</td><td>SA</td><td>Example</td></tr><tr><td rowspan="2">Code mixing with</td><td></td><td></td><td>肯尼迪咾多囉做,掂唔掂呀?</td></tr><tr><td></td><td></td><td>With so much to do in Kennedy, can you handle it?</td></tr><tr><td>Cantonese</td><td>OpenCC</td><td>20.5</td><td>肯尼迪咾多囉做,掂唔掂呀?</td></tr><tr><td rowspan="2">(34 cases, 0.3%)</td><td>STCP</td><td>8.8</td><td>肯尼迪咾多囉做,掂唔掂呀?</td></tr><tr><td>2kenize</td><td>91.1</td><td>甘迺迪咾多囉做,掂唔掂呀?</td></tr><tr><td>Code mixing with English</td><td>OpenCC</td><td>95.6</td><td>自从我捲住大古惑架 BENZ 190E 撞埋支电灯柱嚱度之後, After I drove Slick's Benz 190E into the telephone pole,</td></tr><tr><td rowspan="2">(1532 cases, 12.8%)</td><td>STCP</td><td>86.5</td><td>自從我捲住大古惑架 BENZ 190E 撞埋支電燈柱嚱度之後, 自從我捲住大古惑架 BENZ 190E 撞埋支電燈柱嚱度之后,</td></tr><tr><td>2kenize</td><td>98.7</td><td>自從我捲住大古惑架 BENZ 190E 撞埋支電燈柱嚱度之後,</td></tr><tr><td>Disguised</td><td></td><td></td><td>维护发展中国家共同利益</td></tr><tr><td>Named</td><td></td><td></td><td>Safeguard the common interests of developing countries</td></tr><tr><td>Entities</td><td>OpenCC</td><td>85.7</td><td>維護髮展中國家共同利益</td></tr><tr><td rowspan="2">(378 cases, 3.15%)</td><td>STCP</td><td>82.1</td><td>維護髮展中國家共同利益</td></tr><tr><td>2kenize</td><td>93.2</td><td>維護發展中國家共同利益</td></tr><tr><td>Repeated</td><td></td><td></td><td>乔治亞来到了乔治亞洲旅游</td></tr><tr><td>Named</td><td></td><td></td><td>Georgia came to Georgia for travelling.</td></tr><tr><td>Entities</td><td>OpenCC</td><td>84.4</td><td>佐治亞來到了佐治亞洲旅遊</td></tr><tr><td rowspan="2">(428 cases, 3.57%)</td><td>STCP</td><td>17.9</td><td>佐治亞來到了喬治亞洲旅遊</td></tr><tr><td>2kenize</td><td>87.8</td><td>喬治亞來到了喬治亞洲旅遊</td></tr></table>

Table 4: Case-wise breakdown of common errors. For each case, the first line is the SC input, the second its English translation, and the remaining lines are TC outputs from the converters.

The characters "BENZ 190E", 架, and 埋支 are not part of standard written Chinese. We find that OOV words are 2kenized into single-character tokens, which results in: "古 | 惑 | 架 | B | E | N | Z | 1 | 9 | 0 | E | 撞 | 埋 | 支". Thus, 2kenize distributes the entropy over multiple tokens rather than a single token (where generally UNK would be used). This gives the language model more room for multiple guesses, a substantial advantage over word models or simply UNK-ing, and a reason why subword tokenizers outperform closed-vocabulary models (Merity, 2019).

Disguised Named Entities: Take the recurring sentence "维护发展中国家共同利益". Observe that the sentence contains the frequent word 中国 (China), yet the actual meaning and English translation do not involve "China" at all. This is an interesting linguistic trait of Chinese, where a word's characters can appear in a sentence without that word being intended. It can easily trip up a tokenizer, as the probability of 中国 being an independent token is high, and having 中国 as a separate token leads to an incorrect conversion (Table 1). We find in 2kenize's trellis<sup>10</sup> that "维护 | 发展 | 中" has a higher probability than the other possible segmentations; the substructure lookups and beam search in our setup considerably reduce the probability of an incorrect tokenization. The sentence is 2kenized into "维护 | 发展 | 中 | 国家 | 共同 | 利益", which results in the correct conversion 維護發展中國家共同利益.

Repetitions: We find that in $3.57\%$ of sentences, named entities are repeated. Interestingly, STCP, which uses a language model for disambiguation, often converts only one of the repeated tokens correctly, as the table shows: STCP prefers 佐治亞 over 喬治亞 in the first occurrence, but then prefers 喬治亞<sup>11</sup> in the second occurrence, as it has more context by then. 2kenize converts both entities correctly, very likely due to its substructure lookups.

Failure Cases: Dictionary-based converters (OpenCC, HanziConv, and Mafan) only use the first conversion candidate<sup>12</sup> when multiple candidates are available. STCP often converts named entities wrongly, especially those with long-range dependencies and repetitions. Although 2kenize converts some unseen named entities perfectly, some of its errors are due to infrequent characters. A few cases relate to variant characters,<sup>13</sup> which are often used interchangeably.

<table><tr><td colspan="2">Formal Text Classification Dataset Overview</td></tr><tr><td>Source</td><td>Singtao</td></tr><tr><td>Pretraining Corpus Size</td><td>17,500</td></tr><tr><td>Training Size</td><td>3,000</td></tr><tr><td>Validation Size</td><td>450</td></tr><tr><td>Testing Size</td><td>450</td></tr><tr><td>Categories</td><td>Financial, Educational, Local, International, Sports</td></tr><tr><td>Language</td><td>zh-hant-hk</td></tr></table>

<table><tr><td colspan="2">Informal Text Classification Dataset Overview</td></tr><tr><td>Source</td><td>LIHKG</td></tr><tr><td>Pretraining Corpus Size</td><td>21,000</td></tr><tr><td>Training Size</td><td>4,000</td></tr><tr><td>Validation Size</td><td>450</td></tr><tr><td>Testing Size</td><td>450</td></tr><tr><td>Categories</td><td>Sports, Opinions, IT, Financial, Leisure, Memes</td></tr><tr><td>Languages</td><td>zh-hant-hk, zh-yue, en-HK</td></tr></table>

Table 5: Characteristics of the classification datasets (Traditional Chinese) used in the extrinsic evaluation experiments.

# 4 Extrinsic Evaluation

An accurate script converter should produce a less erroneous dataset, which should in turn improve the accuracy of downstream tasks. In this section, we examine this assumption by demonstrating the effect of script conversion on topic classification. We also study the impact of tokenization and pooling on topic classification accuracy. We apply each converter to the language modelling corpus (Wikitext), then train a classifier for informal and formal topic classification on the translated data. This allows us to compare the converters on a specific downstream task.

# 4.1 Dataset for Extrinsic Evaluation

This section describes the data used for the extrinsic evaluation experiments: a pretraining dataset consisting of Chinese Wikipedia, and topic classification datasets.

# 4.1.1 Pretraining Dataset

We use Chinese Wikipedia articles for pretraining the language model. Script conversion is an issue in Chinese Wikipedia, which currently uses a server-side mechanism to automatically convert scripts (dictionary-based) depending on the location of the user. However, Wikipedia provides an option to view articles without conversion, which we use for the corpus.$^{14}$ We use the zh-CN, zh-HK, and zh-yue wikis to retrieve articles originally written in SC, TC, and vernacular Cantonese plus TC, respectively, with the help of wikiextractor.$^{15}$ We pretrain the formal text classification models on articles from zh-HK and converted zh-CN, and the informal text classification models on articles from zh-HK, zh-yue, and converted zh-CN.

# 4.1.2 Classification Datasets

We choose two classification tasks: formal news and informal topic classification (Table 5). For formal news, we scrape recent articles (2017-2019) from Singtao;$^{16}$ for informal topics, we scrape posts (2017-2018) from LIHKG.$^{17}$

# 4.2 Performance of Various Classifiers

As classification baselines, we use a character-based SVM (Support Vector Machine, Joachims (1998)), a CNN (Convolutional Network, Zhang et al. (2015)), and Chinese BERT (Devlin et al., 2019). We also employ a state-of-the-art text classifier, MultiFiT (Eisenschlos et al., 2019), a lightweight RNN-based language model classifier which has been shown to achieve performance competitive with BERT (Devlin et al., 2019) and ULMFiT (Howard and Ruder, 2018). The base architecture of MultiFiT is a 4-layer QRNN (Bradbury et al., 2016) with a classifier head. We choose rectified Adam (Liu et al., 2019) with Lookahead (Zhang et al., 2019) as the optimizer. We employ a cosine cyclic learning rate scheduler (Smith, 2015), where the limits of the learning rate cycles are found by increasing the learning rate logarithmically and computing the evaluation loss for each learning rate (Smith, 2018). To set the batch size, we compute the gradient noise scale for each batch size candidate and pick the one with the highest gradient noise scale (McCandlish et al., 2018). We apply label smoothing (Szegedy et al., 2015) and use mixed-precision training on an RTX 2080. We implement our experiments using PyTorch (Paszke et al., 2019) and FastAI (Howard and Gugger, 2020).

MultiFiT uses concat pooling after the last QRNN layer: the last time step is concatenated with average- and max-pooled representations over previous time steps.

Figure 3: Proposed architecture for topic classification, where we tweak MultiFiT to concatenate concat-pools from all layers. (Alt text: §F.3)

<table><tr><td></td><td>Formal</td><td>Informal</td></tr><tr><td>Char-SVM</td><td>73.2</td><td>63.7</td></tr><tr><td>Char-CNN</td><td>78.5</td><td>64.9</td></tr><tr><td>Chinese BERT (base)</td><td>84.5</td><td>66.3</td></tr><tr><td>MultiFiT with no pooling</td><td>87.5</td><td>68.5</td></tr><tr><td>MultiFiT with concat pooling</td><td>88.6</td><td>69.9</td></tr><tr><td>MultiFiT with layer pooling</td><td>89.0</td><td>70.3</td></tr></table>

Table 6: Performance of various architectures on topic classification, in terms of accuracy. The results are reported as means across 10 different seeds and data splits. **Bold:** best, **underlined:** second best.

Studies show that in LM-based classifiers, different layers capture different types of knowledge: the last layer is domain-specific, while initial layers are more general (Yosinski et al., 2014; Peters et al., 2019). We speculate that concat pooling only over the last layer limits the information available to the classifier head, and we hypothesize that the classifier would perform better if both domain-specific and general knowledge were available to the head. For this reason, we augment the original MultiFiT architecture with layer pooling, i.e. concat pooling over all layers, whose output is passed to the dense layer in the classifier, as shown in Fig. 3.

We fine-tune the BERT language model and pretrain the MultiFiT language model on the Chinese Wikipedia subsets (§4.1.1). All classifiers are then trained on the given training set (character-based models) and evaluated on the test set in terms of accuracy, as the number of items per class is roughly equal. This experiment (and the subsequent experiments in this section) is repeated across ten different seeds (Reimers and Gurevych, 2018) and data splits (Gorman and Bedrick, 2019); the results are shown in Table 6. Layer pooling shows an absolute improvement of $0.4\%$ over concat pooling on formal and informal topic classification, thus confirming our hypothesis.
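
A minimal PyTorch sketch of the difference between concat pooling and the layer pooling used here; the batch, sequence, and hidden sizes are illustrative:

```python
import torch

def concat_pool(h):
    """Concat pooling over one layer's states h of shape (batch, time, dim):
    [last step; max over time; mean over time], as in ULMFiT/MultiFiT."""
    return torch.cat([h[:, -1], h.max(dim=1).values, h.mean(dim=1)], dim=1)

def layer_pool(layer_states):
    """Layer pooling: concat-pool every layer and concatenate the results,
    so the classifier head sees both general (early-layer) and
    domain-specific (late-layer) features."""
    return torch.cat([concat_pool(h) for h in layer_states], dim=1)

# Illustrative per-layer outputs of a 4-layer QRNN:
states = [torch.randn(8, 50, 256) for _ in range(4)]
print(layer_pool(states).shape)  # torch.Size([8, 3072]) = 4 layers x 3 pools x 256
```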

<table><tr><td>Pretraining data of MultiFiT</td><td>Formal</td><td>Informal</td></tr><tr><td>No conversions</td><td>89.0</td><td>70.3</td></tr><tr><td>Including conversions with OpenCC</td><td>91.7</td><td>75.6</td></tr><tr><td>Including conversions with STCP</td><td>92.3</td><td>73.4</td></tr><tr><td>Including conversions with 2kenize</td><td>93.2</td><td>77.9</td></tr></table>

Table 7: Ablation test of MultiFiT with different script converters. The results are reported as mean accuracies across 10 different seeds and data splits. **Bold:** best, **underlined:** second best.

<table><tr><td>Corpus Tokenization</td><td>Formal</td><td>Informal</td></tr><tr><td>Char</td><td>93.2</td><td>77.9</td></tr><tr><td>Jieba</td><td>92.4</td><td>78.3</td></tr><tr><td>BPE</td><td>92.7</td><td>81.0</td></tr><tr><td>BPE-Drop</td><td>93.7</td><td>82.7</td></tr><tr><td>Unigram</td><td>94.8</td><td>82.2</td></tr><tr><td>1kenize</td><td>94.8</td><td>83.2</td></tr></table>

Table 8: Ablation test of MultiFiT with different tokenizers. The results are reported as mean accuracies across 10 different seeds and data splits. **Bold:** best, **underlined:** second best.

# 4.3 Effect of Conversion on Classification

For each converter (OpenCC, STCP, 2kenize), we translate the zh-CN wiki dataset and augment it with the TC wiki dataset. We then pretrain on each resulting dataset, fine-tune on the domain data, and train MultiFiT with layer pooling. Test set accuracies are shown in Table 7. The dataset translated by 2kenize outperforms the others, giving absolute improvements of $0.9\%$ on formal and $4.5\%$ on informal topic classification over the second-best converter. These results emphasize that better script conversion improves the quality of the pretraining dataset, which in turn boosts the performance of downstream tasks like topic classification.

# 4.4 Effect of Tokenization on Classification

Studies show that tokenization affects classification accuracy, and open-vocabulary methods generally perform best (Eisenschlos et al., 2019; Hiraoka et al., 2019). For this experiment, we perform further ablations on our previous best classifier setup (MultiFiT with layer pooling on 2kenize-converted data) to understand the effect of various subword tokenizers. Pretraining generally takes a long time (1-2 GPU days), so we pretrain the classifier once per tokenized corpus and do not perform subword sampling for this experiment.

Figure 4: Log-log plots for different tokenizers: frequency vs. rank for the first 10,000 tokens. Negative slopes computed via least squares are given in the legend (lower magnitude means less skewed). (Alt text: §F.4)

For closed-vocabulary methods, we use character and word segmentations (the latter with Jieba). For open-vocabulary methods, we employ the BPE, BPE-Drop, and Unigram subword tokenizers.

Subword tokenizers mostly rely on frequency and do not take the likelihood of the tokenized sentence (in the spirit of an $n$-gram language model) into consideration. Hence, we also evaluate LM-based Viterbi segmentation (henceforth referred to as 1kenize), where the LM is the TC LSTM described in §2.2. We report results in Table 8. For formal classification, 1kenize and Unigram perform best. 1kenize outperforms the other subword tokenizers on the noisier informal dataset, giving an absolute improvement of $0.5\%$ over the second-best method, BPE-Drop.

We plot log token frequency against log rank in Figure 4. This distribution is based on the LIHKG dataset, which is noisier than the other domains. We observe that the character and word distributions are steeper than those of language-model-based subword tokenizers, indicating that subword tokenizers produce a less skewed token distribution. Subword tokenizers like BPE and Unigram are deterministic and rely on frequency for segmentation. Since 1kenize is contextual, being LM-based, it produces the least skewed distribution (lowest Zipf's law coefficient (Zipf, 1949)), which also reduces variance; this is a reason why this simple segmentation method outperforms the others for informal text classification.
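
The slope reported in Fig. 4 is an ordinary least-squares fit over the log-log points; a sketch with toy frequency lists (illustrative numbers only):

```python
import math

def zipf_slope(freqs, top_k=10000):
    """Least-squares slope of log(frequency) vs. log(rank) for the top_k
    tokens; a magnitude closer to 0 means a less skewed distribution."""
    freqs = sorted(freqs, reverse=True)[:top_k]
    xs = [math.log(r) for r in range(1, len(freqs) + 1)]
    ys = [math.log(f) for f in freqs]
    n = len(xs)
    mx, my = sum(xs) / n, sum(ys) / n
    cov = sum((x - mx) * (y - my) for x, y in zip(xs, ys))
    var = sum((x - mx) ** 2 for x in xs)
    return cov / var

word_freqs    = [1000, 180, 60, 25, 12, 6, 3, 2, 1, 1]       # steeper (word-like)
subword_freqs = [300, 200, 150, 110, 90, 70, 55, 45, 35, 30]  # flatter
print(zipf_slope(word_freqs), zipf_slope(subword_freqs))
```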

# 5 Takeaways and Open Questions

The contributions of our work are:

- 2kenize, a subword segmentation model which jointly segments a source sentence and its corresponding approximate target conversions.
- An unsupervised script converter based on 2kenize, which shows a significant improvement over existing script converters and supervised models.
- 1kenize, a variant of 2kenize which performs tokenization on Traditional Chinese sentences only and improves accuracy on topic classification tasks.
- Character conversion evaluation datasets spanning Hong Kong and Taiwanese literature and news genres.
- Traditional Chinese topic classification datasets in formal (scraped from Singtao) and informal (scraped from LIHKG) styles, spanning genres such as news, social media discussions, and memes.

The key findings of our work are:

- Our script converter shows strong performance when dealing with code mixing and named entities, whereas supervised models are prone to errors related to anaphora and unseen entities.
- A simple LM-based Viterbi segmentation model outperforms other subword tokenizers on topic classification tasks and reduces the skewness of the token distribution on a noisy dataset.

We leave some open questions to explore:

- How can we exploit subword variations to reduce skewness in NLU tasks?
- Would subword-segmentation transfer be helpful for other NMT-NLU task pairs, as it was from 2kenize (script conversion) to 1kenize (classification)?

We anticipate that this study will be useful to TC NLP practitioners, as we address several research gaps, namely script conversion and the lack of benchmark datasets.

# Acknowledgements

The first author would like to thank Dayta AI Limited, S.F. Hui, I-Tsun Cheng, Ishaan Batra, Conrad Ho, Roy Fork, Abhishek Gupta, Ajay Singh, Eugene Ho, Patrick Tu, Alex Chu, and Leland So for making valuable additions to this work. The second author would like to acknowledge funding from the Swedish Research Council for the project under grant agreement 2019-04129, which partly funded this work.

# References

Emily M. Bender and Batya Friedman. 2018. Data statements for natural language processing: Toward mitigating system bias and enabling better science. Transactions of the Association for Computational Linguistics, 6:587-604.

James Bradbury, Stephen Merity, Caiming Xiong, and Richard Socher. 2016. Quasi-recurrent neural networks.

Chenhui Chu, Toshiaki Nakazawa, and Sadao Kurohashi. 2012. Chinese characters mapping table of Japanese, traditional Chinese and simplified Chinese. In Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC-2012), pages 2149-2152, Istanbul, Turkey. European Language Resources Association (ELRA).

Wikipedia Contributors. 2019. Chinese script conversion and word processing in Wikipedia. Page Version ID: 56925003.

Ryan Cotterell, Sabrina J. Mielke, Jason Eisner, and Brian Roark. 2018. Are all languages equally hard to language-model? In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 536-541, New Orleans, Louisiana. Association for Computational Linguistics.

Piotr Czapla, Jeremy Howard, and Marcin Kardas. 2018. Universal language model fine-tuning with subword tokenization for Polish. ArXiv, abs/1810.10222.

Paul Denisowski. 2019. CC-CEDICT. https://cc-cedict.org/.

Michael Denkowski and Graham Neubig. 2017. Stronger baselines for trustable results in neural machine translation. In Proceedings of the First Workshop on Neural Machine Translation, pages 18-27, Vancouver. Association for Computational Linguistics.

Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.

Julian Eisenschlos, Sebastian Ruder, Piotr Czapla, Marcin Kadras, Sylvain Gugger, and Jeremy Howard. 2019. MultiFiT: Efficient multi-lingual language model fine-tuning. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5706-5711, Hong Kong, China. Association for Computational Linguistics.

Thomas Emerson. 2005. The second international Chinese word segmentation bakeoff. In Proceedings of the Fourth SIGHAN Workshop on Chinese Language Processing.

A Foon. 1988. Diary of the Little Man. Book, pp. 5-6.

Jonas Gehring, Michael Auli, David Grangier, Denis Yarats, and Yann N. Dauphin. 2017. Convolutional sequence to sequence learning. In Proceedings of the 34th International Conference on Machine Learning - Volume 70, ICML '17, pages 1243-1252. JMLR.org.

Kyle Gorman and Steven Bedrick. 2019. We need to talk about standard splits. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 2786-2791, Florence, Italy. Association for Computational Linguistics.

Edouard Grave, Armand Joulin, and Nicolas Usunier. 2016. Improving neural language models with a continuous cache. ArXiv, abs/1612.04426.

Jack Halpern and Jouni Kerman. 1999. Pitfalls and complexities of Chinese to Chinese conversion. In 14th International Unicode Conference, Boston.

Tatsuya Hiraoka, Hiroyuki Shindo, and Yuji Matsumoto. 2019. Stochastic tokenization with a language model for neural text classification. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1620-1629, Florence, Italy. Association for Computational Linguistics.

Elad Hoffer, Itay Hubara, and Daniel Soudry. 2017. Train longer, generalize better: closing the generalization gap in large batch training of neural networks. In NIPS.

Jeremy Howard and Sylvain Gugger. 2020. fastai: A layered API for deep learning. Information, 11(2):108.

Jeremy Howard and Sebastian Ruder. 2018. Universal language model fine-tuning for text classification. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 328-339, Melbourne, Australia. Association for Computational Linguistics.

Jiubadao. 2011. You Are the Apple of My Eye. Chun Tian Chu Ban.

Thorsten Joachims. 1998. Text categorization with support vector machines: Learning with many relevant features. In ECML.

Kazuya Kawakami, Chris Dyer, and Phil Blunsom. 2017. Learning to create and reuse words in open-vocabulary neural language modeling. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1492-1502, Vancouver, Canada. Association for Computational Linguistics.

Urvashi Khandelwal, He He, Peng Qi, and Dan Jurafsky. 2018. Sharp nearby, fuzzy far away: How neural language models use context. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 284-294, Melbourne, Australia. Association for Computational Linguistics.

Yoon Kim, Alexander Rush, Lei Yu, Adhiguna Kuncoro, Chris Dyer, and Gabor Melis. 2019. Unsupervised recurrent neural network grammars. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1105-1117, Minneapolis, Minnesota. Association for Computational Linguistics.

Giddens Ko. 2010. Cafe, Waiting, Love. Spring Press.

Taku Kudo. 2018. Subword regularization: Improving neural network translation models with multiple subword candidates. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 66-75, Melbourne, Australia. Association for Computational Linguistics.

Cheung Lau Yee. 1972. Intersection. Benefits Publishing Co., Ltd.

Liyuan Liu, Haoming Jiang, Pengcheng He, Weizhu Chen, Xiaodong Liu, Jianfeng Gao, and Jiawei Han. 2019. On the variance of the adaptive learning rate and beyond. ArXiv, abs/1908.03265.

Yichang Liu. 1962. Drunkard. Benefits Publishing Co., Ltd.

Sam McCandlish, Jared Kaplan, Dario Amodei, and OpenAI Dota Team. 2018. An empirical model of large-batch training. ArXiv, abs/1812.06162.

Stephen Merity. 2019. Single headed attention RNN: Stop thinking with your head. ArXiv, abs/1911.11423.

Sabrina J. Mielke. 2019. Can you compare perplexity across different segmentations?

Sabrina J. Mielke, Ryan Cotterell, Kyle Gorman, Brian Roark, and Jason Eisner. 2019. What kind of language is hard to language-model? In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4975-4989, Florence, Italy. Association for Computational Linguistics.

Sabrina J. Mielke and Jason Eisner. 2019. Spell once, summon anywhere: A two-level open-vocabulary language model. In AAAI.

Tomáš Mikolov, Ilya Sutskever, Anoop Deoras, Hai-son Le, Stefan Kombrink, and Jan Cernocky. 2012. Subword language modeling with neural networks. Preprint (http://www.fit.vutbr.cz/imikolov/rnml/char.pdf), 8.

Masaaki Nagata. 1994. A stochastic Japanese morphological analyzer using a forward-DP backward-A* n-best search algorithm. In COLING 1994 Volume 1: The 15th International Conference on Computational Linguistics.

Nianwen Xue, Xiuhong Zhang, Zixin Jiang, Martha Palmer, Fei Xia, Fu-Dong Chiou, and Meiyu Chang. 2016. Chinese Treebank 9.0. LDC2016T13. Web Download. Philadelphia: Linguistic Data Consortium.

Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, extensible toolkit for sequence modeling. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations), pages 48-53, Minneapolis, Minnesota. Association for Computational Linguistics.

Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Köpf, Edward Yang, Zach DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. PyTorch: An imperative style, high-performance deep learning library. In NeurIPS 2019.

Matthew E. Peters, Sebastian Ruder, and Noah A. Smith. 2019. To tune or not to tune? Adapting pretrained representations to diverse tasks. In Proceedings of the 4th Workshop on Representation Learning for NLP (RepL4NLP-2019), pages 7-14, Florence, Italy. Association for Computational Linguistics.

Pranav A, S.F. Hui, I-Tsun Cheng, Ishaan Batra, and Chiu Yik Hei. 2019. Learn languages first and then convert: Towards effective simplified to traditional Chinese conversion. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Student Research Workshop, non-archival), Minneapolis, Minnesota. Association for Computational Linguistics.

Ivan Provilkov, Dmitrii Emelianenko, and Elena Voita. 2019. BPE-dropout: Simple and effective subword regularization. ArXiv, abs/1910.13267.

Nils Reimers and Iryna Gurevych. 2018. Why comparing single performance scores does not allow to draw conclusions about machine learning approaches.

Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715-1725, Berlin, Germany. Association for Computational Linguistics.

Xiaodong Shi, Yidong Chen, and Xiuping Huang. 2011. Key problems in conversion from simplified to traditional Chinese characters. In International Conference on Asian Language Processing.
|
| 310 |
+
Leslie N. Smith. 2015. Cyclical learning rates for training neural networks. 2017 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 464-472.
|
| 311 |
+
Leslie N. Smith. 2018. A disciplined approach to neural network hyper-parameters: Part 1 - learning rate, batch size, momentum, and weight decay. *ArXiv*, abs/1803.09820.
|
| 312 |
+
Don Snow. 2004. Cantonese as written language: The growth of a written Chinese vernacular, volume 1. Hong Kong University Press.
|
| 313 |
+
Richard W. Sproat, Chilin Shih, William Gale, and Nancy Chang. 1996. A stochastic finite-state word-segmentation algorithm for Chinese. Computational Linguistics, 22(3):377-404.
|
| 314 |
+
Martin Sundermeyer, Ralf Schluter, and Hermann Ney. 2012. Lstm neural networks for language modeling. In INTERSPEECH.
|
| 315 |
+
Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural networks. In Z. Ghahramani, M. Welling, C. Cortes, N. D. Lawrence, and K. Q. Weinberger, editors, Advances in Neural Information Processing Systems 27, pages 3104-3112. Curran Associates, Inc.
|
| 316 |
+
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. 2015. Rethinking the inception architecture for computer vision. 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2818-2826.
|
| 317 |
+
Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumont, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, R'emi Louf, Morgan Funtowicz, and Jamie Brew. 2019. Huggingface's transformers: State-of-the-art natural language processing. ArXiv, abs/1910.03771.
|
| 318 |
+
Pak-kwong Wong and Chorkin Chan. 1996. Chinese word segmentation based on maximum matching and word binding force. In COLING 1996 Volume 1: The 16th International Conference on Computational Linguistics.
|
| 319 |
+
Jiarui Xu, Xuezhe Ma, Chen-Tse Tsai, and Eduard Hovy. 2017. Stcp: Simplified-traditional chinese conversion and proofreading. Proceedings of the IJCNLP 2017, System Demonstrations, pages 61-64.
|
| 320 |
+
Chiung Yao. 1964. Fire and rain. Book. ISBN 0-330-36076-0.
|
| 321 |
+
Jason Yosinski, Jeff Clune, Yoshua Bengio, and Hod Lipson. 2014. How transferable are features in deep neural networks? In NIPS.
|
| 322 |
+
|
| 323 |
+
Michael Ruogu Zhang, James Lucas, Geoffrey E. Hinton, and Jimmy Ba. 2019. Lookahead optimizer: k steps forward, 1 step back. ArXiv, abs/1907.08610.
|
| 324 |
+
Xiang Zhang, Junbo Jake Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text classification. In NIPS.
|
| 325 |
+
Xiaoheng Zhang. 2014. A comparative study on simplified-traditional chinese translation. In Chinese Computational Linguistics and Natural Language Processing Based on Naturally Annotated Big Data, pages 212-222. Springer.
|
| 326 |
+
George Kingsley Zipf. 1949. Human behavior and the principle of least effort.
|
| 327 |
+
|
| 328 |
+
# A Summary in Traditional Chinese: 簡體中文到繁體中文的文本轉換器

研究中文NLP時,將文本進行繁簡轉換是常見的數據預處理步驟。在簡繁轉換過程中,經常出現多個繁體字轉換成同一簡體字,反之亦然。藉此透過測試現行的繁簡轉換算法,發現只有 $55 - 85\%$ 準確度。進一步的調查發現,現代的神經網絡,譬如神經語言模型的字符歧義消除(neural language model disambiguation)和神經序列模型(neural sequence models),均只達到 $84 - 85\%$ 的句子準確性,都是由第一類錯誤(Type I error)所致。我們推斷上述問題,是由於模型未能有效釐清子詞(subword)的邊界所導致。

在此,我們提出了2kenize,一個子詞分割模型(subword segmentation model),同時利用先行式繁體中文以及簡體中文進行建構。我們將聯合簡體中文及繁體中文共同訓練Viterbi分詞器。即使利用較具挑戰性的數據集測試,本模型亦達到 $91 - 95\%$ 消歧準確度。透過定性誤差分析(qualitative error analysis),展示了本模型更擅長處理code-mixing以及命名個體(named entities)。除此以外,我們亦在主題分類領域中進行了外部評估,本模型在主題分類的字符及詞語模型(character and word-based models)的領域中表現出眾,更在子詞正則化(subword regularization)中,獲得比BPE更好的名次。然後針對繁體中文句子對2kenize進行調整,誕生了1kenize。1kenize在正式數據集上與其他子詞分詞器(subword tokenizers)相比名列前茅,在非正式數據集上更表現超群。由此,我們推斷子詞分詞器會嚴重地受token的分佈及偏度所影響。

是次研究的貢獻:

1. 2kenize:簡體中文到繁體中文的文本轉換器
2. 字符轉換評估數據集:跨越香港和台灣文獻及新聞等多個類型的數據集
3. 主題分類數據集:繁體中文的正式和非正式文本數據,涵蓋新聞,社交媒體討論,改圖,改歌,memes等二次創作文本。
# B Data Statement for Intrinsic Evaluation

# B.1 Corpus

In this subsection, we discuss the annotation procedure and the characteristics of the corpus used for the intrinsic evaluation. We follow the Bender and Friedman (2018) data statement design for the description.
# B.1.1 Curation Rationale

The script conversion task is understudied in NLP and we could not find good-quality parallel corpora to evaluate our approaches. The idea is to curate a diverse collection of TC works and convert them to SC, exploiting the largely one-to-one correspondence of this direction. However, we found that some of the conversions were wrong because

1. dictionaries sometimes resulted in incorrect conversions,
2. there are stylistic differences between HK and TW characters and phrasing,
3. Cantonese is code-mixed with Traditional Chinese,
4. the text is code-mixed with non-Chinese characters,
5. some characters in TC-SC conversion have one-to-many mappings as well.

Hence, we need quality control with human annotators to validate our conversions.
# B.1.2 Annotation Process

Demographic: We opted for 4 trained annotators, 2 for annotating HK-style TC and 2 for annotating TW-style TC, thus double-annotating the corpus. They ranged in age from 18 to 20 years, included 2 men and 2 women, gave their ethnicity as Hong Kongers (2) and Taiwanese (2), and their native spoken languages were Cantonese (2) and Taiwanese Mandarin (2).

Workload: Annotators validated approximately 100 sentences per hour, for a total workload of 60 hours. They were given a month to annotate and were paid 5000 Hong Kong Dollars on completion.

Procedure: The annotators were shown TC and converted SC sentences (we used OpenCC to convert) and were asked to validate and correct any conversion mistakes. In case of disagreement, we used majority voting between the automatic conversion and the annotators' corrections.

We provide raw agreement (RA) and Krippendorff's $\alpha$ in Table 9 for pooled data and various sub-groups of the dataset. We also report inter-annotator agreements at the character, word, and sentence levels in Table 10. These agreement values are difficult to interpret, but generally $\alpha \geq 0.8$ is considered to be substantial.

<table><tr><td colspan="2"></td><td>RA</td><td>α</td></tr><tr><td rowspan="3">HK</td><td>Pooled</td><td>0.98</td><td>0.98</td></tr><tr><td>Lit</td><td>0.982</td><td>0.98</td></tr><tr><td>News</td><td>0.979</td><td>0.97</td></tr><tr><td rowspan="3">TW</td><td>Pooled</td><td>0.98</td><td>0.98</td></tr><tr><td>Lit</td><td>0.981</td><td>0.98</td></tr><tr><td>News</td><td>0.971</td><td>0.97</td></tr></table>

Table 9: Inter-annotator agreements (RA = raw agreement, α = Krippendorff's alpha)

<table><tr><td></td><td>RA</td><td>α</td></tr><tr><td>Character Level</td><td>0.98</td><td>0.97</td></tr><tr><td>Word Level</td><td>0.95</td><td>0.94</td></tr><tr><td>Sentence Level</td><td>0.93</td><td>0.92</td></tr></table>

Table 10: Inter-annotator agreements at character, word, and sentence levels
# B.1.3 Speech Situation

The publication dates and sources are listed in Table 2. HK and TW literature consists of popular books of which many movie and drama adaptations have been made.[18] Specifically, for HK literature, the text contains code-mixed characters with Vernacular Cantonese, which is quite unusual in formal publishing practice; these books are often cited as an example of popularizing Cantonese in the 60s (Snow, 2004). We also found code-mixing with English and numerous transliterated named entities, which we use for qualitative error analysis in Table 4.
# B.1.4 Text Characteristics

Although Hong Kong and Taiwan both use Traditional Chinese, they are stylistically different, as the dominant spoken language in HK is Cantonese and in TW is Taiwanese Mandarin. Thus, it is essential to test the performance of our algorithms on these two styles. We collected two genres for each style: informal literature and formal news. We found more variation within informal HK-TW literature than within the formal news. We intentionally chose long sentences (average length of 200 characters), especially those containing more ambiguous characters, to make the dataset more challenging for testing.
# C Data Statement for Extrinsic Evaluation

This section describes the characteristics of the topic classification dataset in Traditional Chinese. For a short overview, please see Table 5.
# C.1 Curation Rationale

We chose two different styles for curating this dataset: formal and informal. The formal text consists of a news dataset scraped from Singtao, one of the popular newswires in Hong Kong. The classes in this dataset are the Financial, Educational, Local, International, and Sports subsections. There are 17500 unlabelled and 3900 labelled items in this section. The authors would like to credit I-Tsun Cheng for helpful suggestions in curating this dataset.

The informal text consists of a social media posts dataset scraped from LIHKG, a Twitter equivalent in Hong Kong. The classes in this dataset are Sports, Opinions, Memes, IT, Financial and Leisure. There are 21000 unlabelled and 4900 labelled items in this section. The authors would like to credit Leland So for helpful suggestions in curating this dataset.
# C.2 Language Variety

The texts in the formal subsection are typically written in Hong Kong style Traditional Chinese (zh-hant-hk). The posts scraped from LIHKG are predominantly in Traditional Chinese (zh-hant-hk), and they are often code-mixed with Vernacular Cantonese (zh-yue) and English (en-HK).
# C.3 Speaker Demographic

Speakers were not directly approached for inclusion in this dataset and thus could not be asked for demographic information. Our best guess is that LIHKG forum users are typically university students (19-23 years), the majority of whom speak Cantonese as a native language.
# C.4 Text Characteristics

The news articles are scraped from 2017-2019 and the LIHKG posts from 2017-2018. Some of the posts on LIHKG are in transliterated Cantonese form and some are not written in Standard Written Chinese. The news posts are generally quite long and often contain more than 5 sentences (average length of nearly 300 characters). On the other hand, the LIHKG posts are shorter and forum titles are generally one sentence each (average length of nearly 50 characters). Please note that due to the current situation in Hong Kong, we do not include political posts and news from mid-2019.
# D Description of Intrinsic Evaluation Experiments

# D.1 Heuristic Grid Search of Learning Rate and Batch Size Hyperparameters

We employ the cosine cyclic learning scheduler (Smith, 2015), where the limits of learning rate cycles are found by increasing the learning rate logarithmically and computing the evaluation loss for each learning rate (Smith, 2018). To compute the batch size, we apply gradient noise scale to each batch size candidate and pick the one which gives the highest gradient noise scale (McCandlish et al., 2018).
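
As an illustration, here is a minimal sketch of the learning-rate range test described above (the gradient-noise-scale step for the batch size is omitted). The `model`, `train_batches`, and `eval_loss` names are hypothetical stand-ins, not the paper's code:

```python
import torch

def lr_range_test(model, train_batches, eval_loss,
                  lr_min=1e-6, lr_max=1.0, steps=100):
    """Increase the learning rate logarithmically and record the evaluation
    loss at each step; the cycle limits are then read off the loss curve."""
    opt = torch.optim.SGD(model.parameters(), lr=lr_min)
    mult = (lr_max / lr_min) ** (1.0 / steps)  # log-spaced growth factor
    history, lr = [], lr_min
    for _, batch in zip(range(steps), train_batches):
        for group in opt.param_groups:
            group["lr"] = lr
        opt.zero_grad()
        loss = model(batch)  # assumed to return the training loss
        loss.backward()
        opt.step()
        history.append((lr, eval_loss(model)))  # evaluation loss per LR
        lr *= mult
    return history  # choose cycle limits from the steepest-descent region
```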

# D.2 Training of SC and TC Language Model

The datasets are described in §3.2. The model architecture is a 2-layer LSTM language model with tied weights. The embedding size is 512 and the hidden size is 512. We perform concat pooling in the last layer, where we concatenate the last output of the word, the mean pool and the max pool of all representations. We adopt comparable subword perplexity as suggested by Cotterell et al. (2018); Mielke et al. (2019); Mielke (2019), where we use a common denominator, the number of segments per word, in order to compare. On average, we achieve a perplexity of 168.6 on the Chinese Treebank test set (Nianwen et al., 2016). Also refer to the Chinese LM benchmark: https://chinesenlp.xyz/#/docs/language_modeling. The training took 2 days on an RTX 2080 with FP16 training, with a batch size of 256 and 250 epochs.
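
For clarity, a sketch of the concat pooling used here (and again in §E.1 and Figure 3): the last output, the mean pool, and the max pool are concatenated. It assumes `outputs` is the `[batch, time, hidden]` tensor produced by the stacked LSTM:

```python
import torch

def concat_pool(outputs: torch.Tensor) -> torch.Tensor:
    """outputs: [batch, time, hidden] LSTM states.
    Returns [batch, 3 * hidden]: last state ++ mean pool ++ max pool."""
    last = outputs[:, -1, :]        # last output of the sequence
    mean = outputs.mean(dim=1)      # mean pool over time
    mx, _ = outputs.max(dim=1)      # max pool over time
    return torch.cat([last, mean, mx], dim=1)

# e.g. a [32, 120, 512] batch of LSTM states becomes a [32, 1536] feature
```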

# D.3 Training of Convolutional seq2seq

The training dataset is Traditional Chinese Wikipedia heuristically converted with OpenCC. We use 20 layers in the encoder and the decoder with an embedding size of 512, implemented in Fairseq (Ott et al., 2019). Dropout is 0.1 and we use adaptive softmax to speed up the training. The training took 1 day on an RTX 2080 with FP16 training, with a batch size of 128 and 250 epochs.

# E Description of Extrinsic Evaluation Experiments

# E.1 Character CNN training

The datasets are described in §4.1.2. The model architecture is a 7-layer CNN with tied weights and residual blocks. The embedding size is 512 and the hidden size is 512. We perform concat pooling in the last layer, where we concatenate the last output of the word, the mean pool and the max pool of all representations. The training took 16 hours on an RTX 2080 with FP16 training, with a batch size of 256 and 350 epochs.

# E.2 Chinese BERT training

The datasets are described in §4.1.2. We use Chinese BERT base (12-layer, 768-hidden, 12-heads, 110M parameters) via the Transformers library (Wolf et al., 2019). We use a sequence length of 384 and a batch size of 12. Finetuning the language model took 2 hours (learning rate of 3e-5) and finetuning the classifier took 1 hour on each dataset, including a grid search over the learning rates 3e-4, 1e-4, 5e-5 and 3e-5, of which 3e-5 gives the best results (on an RTX 2080 with FP16 training).
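
A minimal sketch of this classifier finetuning setup, under the assumption of a Transformers 2.x-era API (where the model returns a `(loss, logits)` tuple); the example sentence, label, and `num_labels=5` (the five news classes) are illustrative, and the full data pipeline is omitted:

```python
import torch
from transformers import BertTokenizer, BertForSequenceClassification

tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
model = BertForSequenceClassification.from_pretrained(
    "bert-base-chinese", num_labels=5)  # e.g. the 5 Singtao news classes

enc = tokenizer.encode_plus("港股今日高開", max_length=384,
                            pad_to_max_length=True, return_tensors="pt")
labels = torch.tensor([0])                      # toy label
optimizer = torch.optim.Adam(model.parameters(), lr=3e-5)  # best grid value

model.train()
outputs = model(input_ids=enc["input_ids"],
                attention_mask=enc["attention_mask"], labels=labels)
loss = outputs[0]  # (loss, logits) tuple in Transformers 2.x
loss.backward()
optimizer.step()
```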

# E.3 MultiFiT training

We found MultiFiT to be highly reproducible compared to the other models, as it gives the least variance across seeds and data splits. Hyperparameters are chosen by a heuristic grid search on learning rate and batch size. The datasets are described in §4.1.2. Pretraining the language model takes 1 GPU day for each MultiFiT experiment. Finetuning the language model takes 3 hours, with a patience of 2 epochs, and finetuning the classifiers takes 3 hours with the same patience. All MultiFiT experiments are implemented using FastAI (Howard and Gugger, 2020).

# F Alternative texts for figures and Chinese explanations

# F.1 Alternative text for Figure 1

The recurring Chinese sentence is split and we take one subsequence of it; the other subsequence is used in the next iteration. We perform unigram Viterbi segmentation on this and get the probabilities. The probabilities are normalized and we sample a segmentation using them. This segmentation goes into the model, which passes through cached embeddings, followed by stacked LSTM layers, followed by concat pooling (which consists of the last output, mean pooling and max pooling), which then goes through a linear layer. We cache the top-k embeddings in main memory; for the least frequent embeddings we track the gradients and do not keep them in the main network (we use gradient accumulation).

# F.2 Alternative text for Figure 2

From the given SC sentence, we create possible TC sequences using mappings. We input these to Viterbi, which recursively calls LSTM. Using Eq. (6) as the scoring function, Viterbi outputs the mapping sequence. We perform beam search to find the best TC sequence from the mapping sequence where we used the same TC LSTM again.
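
To make the figure description concrete, here is a deliberately simplified Viterbi sketch over per-character TC candidates. The `mappings` dictionary and the bigram `score` function are toy stand-ins for the paper's subword mappings and TC LSTM scorer (the real system also applies beam search afterwards):

```python
# Toy SC -> TC candidate mappings; the real system uses subword mappings.
mappings = {"发": ["發", "髮"], "干": ["乾", "幹", "干"]}

def score(prev_tc: str, cur_tc: str) -> float:
    """Stand-in log-probability; the paper scores candidates with a TC LSTM."""
    return -0.1 if prev_tc + cur_tc == "理髮" else -1.0

def viterbi_convert(sc_sentence: str) -> str:
    best = {"<s>": (0.0, "")}  # state -> (log-prob, TC string so far)
    for ch in sc_sentence:
        candidates = mappings.get(ch, [ch])  # unambiguous chars map to themselves
        nxt = {}
        for cand in candidates:
            # keep the best-scoring predecessor for each candidate state
            lp, seq = max(((p + score(prev, cand), s + cand)
                           for prev, (p, s) in best.items()),
                          key=lambda t: t[0])
            nxt[cand] = (lp, seq)
        best = nxt
    return max(best.values(), key=lambda t: t[0])[1]

print(viterbi_convert("理发"))  # -> 理髮 under the toy scorer
```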

# F.3 Alternative text for Figure 3

The architecture contains 4 stacked QRNN layers. Each layer has QRNN cells. After every layer we perform a concat pool (taking the last output, max pool and mean pool). We aggregate these pools in the final layer which goes into a linear layer. We highly recommend this for making the training more stable.

# F.4 Alternative text for Figure 4

We plot the log-log token distribution, with rank on the x-axis and frequency on the y-axis. Character-based tokenization gives a slope of 1.703, BPE-Drop gives 1.31, BPE gives 1.27, word tokenization (Jieba) gives 1.41, unigram sampling gives 1.28, and 1kenize gives the least skewed distribution with a slope of 1.1. Note that these are negative slopes; the lower the slope, the more efficiently the vocabulary is tokenized.
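
The slopes above come from a straight-line fit in log-log space; a small sketch of that estimation follows, where `freqs` is a hypothetical rank-sorted token-frequency list, not the paper's data:

```python
import numpy as np

freqs = np.array([50000, 21000, 13000, 9000, 6500, 5000])  # toy counts, rank-sorted
ranks = np.arange(1, len(freqs) + 1)

# Fit log(freq) = a * log(rank) + b; the reported Zipf slope is |a| = -a.
a, b = np.polyfit(np.log(ranks), np.log(freqs), deg=1)
print(f"Zipf slope: {-a:.2f}")  # lower value => less skewed token distribution
```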

# F.5 Recurring Chinese sentence

Here, we explain the recurring sentence in this paper. In Table 1 we had the SC sentence 维护发展中国家共同利益, which means "safeguarding the common interests of developing countries". It is pronounced wéihù fāzhǎn zhōng guójiā gòngtóng lìyì in Mandarin. Its correct TC conversion is 維護發展中國家共同利益, which is pronounced wai4 wu6 faat3 zin2 zung1 gwok3 gaa1 gung6 tung4 lei6 jik1 in Cantonese (the numerals mark the tones).

2kenizetyingsubwordsequencesforchinesescriptconversion/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:51a402fd476dd82a248fefedf00f1f8dd414322ce76e7afe7f3ab0f5dc32bce7
size 640037
2kenizetyingsubwordsequencesforchinesescriptconversion/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5452aade638dbf962283e057f2ca993a663de821ca560f887d04cdd69b42754e
size 497638
abatchnormalizedinferencenetworkkeepstheklvanishingaway/76710d6c-bd02-47c7-86e5-0f31e1b093ae_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3b1a2be5c3e3d59658c6545a9d2f4103beaf5553028723a9001b1134f25322d7
size 102311
abatchnormalizedinferencenetworkkeepstheklvanishingaway/76710d6c-bd02-47c7-86e5-0f31e1b093ae_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cc7cdd4ca42cbd3f5951758bb4c5087e318212f09218efe50bfee63a009a33dd
size 125706
abatchnormalizedinferencenetworkkeepstheklvanishingaway/76710d6c-bd02-47c7-86e5-0f31e1b093ae_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:de87d8a6708e9f6bd172fe9612f7dcce3c02bb1bff6783b54874fc075e564bcd
size 1110954
abatchnormalizedinferencenetworkkeepstheklvanishingaway/full.md
ADDED
@@ -0,0 +1,439 @@
# A Batch Normalized Inference Network Keeps the KL Vanishing Away

Qile Zhu$^{1}$, Wei Bi$^{2}$, Xiaojiang Liu$^{2}$, Xiyao Ma$^{1}$, Xiaolin Li$^{3}$ and Dapeng Wu$^{1}$

<sup>1</sup>University of Florida, <sup>2</sup>Tencent AI Lab, <sup>3</sup>AI Institute, Tongdun Technology

{valder,maxiy,dpwu}@ufl.edu
{victoriabi,kieranliu}@tencent.com
xiaolin.li@tongdun.net

# Abstract

Variational Autoencoder (VAE) is widely used as a generative model to approximate a model's posterior on latent variables by combining amortized variational inference and deep neural networks. However, when paired with strong autoregressive decoders, VAE often converges to a degenerated local optimum known as "posterior collapse". Previous approaches consider the Kullback-Leibler divergence (KL) individually for each datapoint. We propose to let the KL follow a distribution across the whole dataset, and show that keeping the expectation of the KL's distribution positive is sufficient to prevent posterior collapse. We then propose Batch Normalized-VAE (BN-VAE), a simple but effective approach to set a lower bound on this expectation by regularizing the distribution of the approximate posterior's parameters. Without introducing any new model component or modifying the objective, our approach can avoid posterior collapse effectively and efficiently. We further show that the proposed BN-VAE can be extended to conditional VAE (CVAE). Empirically, our approach surpasses strong autoregressive baselines on language modeling, text classification and dialogue generation, and rivals more complex approaches while keeping almost the same training time as VAE.

# 1 Introduction

Variational Autoencoder (VAE) (Kingma and Welling, 2014; Rezende et al., 2014) is one of the most popular generative frameworks to model complex distributions. Different from the Autoencoder (AE), VAE provides a distribution-based latent representation for the data: it encodes the input $\mathbf{x}$ into a probability distribution $\mathbf{z}$ and reconstructs the original input using samples from $\mathbf{z}$. At inference time, VAE first samples the latent variable from the prior distribution and then feeds it into the decoder to generate an instance. VAE has been successfully applied in many NLP tasks, including topic modeling (Srivastava and Sutton, 2017; Miao et al., 2016; Zhu et al., 2018), language modeling (Bowman et al., 2016), text generation (Zhao et al., 2017b) and text classification (Xu et al., 2017).

An autoregressive decoder (e.g., a recurrent neural network) is a common choice to model text data. However, when paired with strong autoregressive decoders such as LSTMs (Hochreiter and Schmidhuber, 1997) and trained under the conventional training strategy, VAE suffers from a well-known problem named posterior collapse or the KL vanishing problem: the decoder in VAE learns to reconstruct the data independently of the latent variable $\mathbf{z}$, and the KL vanishes to 0.

Many convincing solutions have been proposed to prevent posterior collapse. Among them, fixing the KL as a positive constant is an important direction (Davidson et al., 2018; Guu et al., 2018; van den Oord et al., 2017; Xu and Durrett, 2018; Tomczak and Welling, 2018; Kingma et al., 2016; Razavi et al., 2019). Some change the Gaussian prior to other distributions, e.g., a uniform prior (van den Oord et al., 2017; Zhao et al., 2018) or a von Mises-Fisher (vMF) distribution (Davidson et al., 2018; Guu et al., 2018; Xu and Durrett, 2018). However, these approaches force the same constant KL and lose the flexibility to allow various KLs for different data points (Razavi et al., 2019). Without changing the Gaussian prior, free-bits (Kingma et al., 2016) adds a threshold (free bits) to the KL term in the ELBO objective and stops the optimization of the KL part when its value is smaller than the threshold. Chen et al. (2017) point out that the objective of free-bits is non-smooth and suffers from optimization challenges. $\delta$-VAE (Razavi et al., 2019) sets the parameters in a specific range to achieve a positive KL value for every latent dimension, which may limit the model performance.

Other work analyzes this problem from the perspective of optimization (Bowman et al., 2016; Zhao et al., 2017a; Chen et al., 2017; Alemi et al., 2018). Recently, He et al. (2019) observe that the inference network is lagging far behind the decoder during training. They propose to add additional training loops for the inference network only. Li et al. (2019) further propose to initialize the inference network with an encoder pretrained on an AE objective, and then train the VAE with free-bits. However, these two methods are much slower than the original VAE.

The limitation of the constant KL and the high cost of additional training motivate us to seek an approach that allows flexible modeling for different data points while remaining as fast as the original VAE. In this paper, instead of considering the KL individually for each data point, we let it follow a distribution across the whole dataset. We demonstrate that keeping a positive expectation of the KL's distribution is sufficient to prevent posterior collapse in practice. By regularizing the distribution of the approximate posterior's parameters, a positive lower bound of this expectation can be ensured. We then propose Batch Normalized-VAE (BN-VAE), a simple yet effective approach to achieve this goal, and discuss the connections between BN-VAE and previous enhanced VAE variants. We further extend BN-VAE to the conditional VAE (CVAE). Finally, experimental results demonstrate the effectiveness of our approach on real applications, including language modeling, text classification and dialogue generation. Empirically, our approach surpasses strong autoregressive baselines and is competitive with more sophisticated approaches while being far more efficient. Code and data are available at https://github.com/valdersoul/bn-vae.

# 2 Background and Related Work

In this section, we first introduce the basic background of VAE, then we discuss the lagging problem (He et al., 2019). At last, we present more related work.

# 2.1 VAE Background

VAE (Kingma and Welling, 2014; Rezende et al., 2014) aims to learn a generative model $p(\mathbf{x}, \mathbf{z})$ to maximize the marginal likelihood $\log p(\mathbf{x})$ on a dataset. The marginal likelihood cannot be calculated directly due to an intractable integral over the latent variable $\mathbf{z}$. To solve this, VAE introduces a variational distribution $q_{\phi}(\mathbf{z}|\mathbf{x})$, parameterized by a complex neural network, to approximate the true posterior. It then turns out to optimize the ELBO of $\log p(\mathbf{x})$:

$$
\mathcal{L} = \mathrm{E}_{q_{\phi}(\mathbf{z}|\mathbf{x})}[\log p_{\theta}(\mathbf{x}|\mathbf{z})] - KL(q_{\phi}(\mathbf{z}|\mathbf{x})\,||\,p(\mathbf{z})) \tag{1}
$$

where $\phi$ represents the inference network and $\theta$ denotes the decoder. The first term above is the reconstruction loss, while the second is the KL between the approximate posterior and the prior. The Gaussian distribution $\mathcal{N}(0, I)$ is a usual choice for the prior, and the KL between the approximate posterior $q_{\phi}(\mathbf{z}|\mathbf{x})$ and the prior $p(\mathbf{z})$ can be computed as:

$$
KL = \frac{1}{2}\sum_{i=1}^{n}\left(\mu_{i}^{2} + \sigma_{i}^{2} - \log \sigma_{i}^{2} - 1\right), \tag{2}
$$
where $\mu_{i}$ and $\sigma_{i}$ are the mean and standard deviation of the approximate posterior for the $i$-th latent dimension, respectively. When the decoder is autoregressive, it can recover the data independently of the latent $\mathbf{z}$ (Bowman et al., 2016). The optimization then encourages the approximate posterior to approach the prior, which results in a zero KL value.
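
For reference, Eq. 2 in code; a minimal sketch assuming the inference network outputs `mu` and `logvar` (the log-variance) of a diagonal Gaussian:

```python
import torch

def gaussian_kl(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    """KL(N(mu, diag(sigma^2)) || N(0, I)), summed over latent dims (Eq. 2)."""
    return 0.5 * (mu.pow(2) + logvar.exp() - logvar - 1).sum(dim=-1)

# A fully collapsed posterior (mu = 0, sigma = 1) gives KL = 0:
print(gaussian_kl(torch.zeros(1, 32), torch.zeros(1, 32)))  # tensor([0.])
```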

# 2.2 The Lagging Problem

Recently, He et al. (2019) analyze posterior collapse with the Gaussian prior from the view of training dynamics. The collapse is a local optimum of VAE where $q_{\phi}(\mathbf{z}|\mathbf{x}) = p_{\theta}(\mathbf{z}|\mathbf{x}) = p(\mathbf{z})$ for all inputs. They further define two partial collapse states: model collapse, when $p_{\theta}(\mathbf{z}|\mathbf{x}) = p(\mathbf{z})$, and inference collapse, when $q_{\phi}(\mathbf{z}|\mathbf{x}) = p(\mathbf{z})$. They observe that inference collapse always happens far before model collapse due to the existence of autoregressive decoders. Different from the model posterior, the inference network lacks guidance and easily collapses to the prior at the initial stage of training, and thus posterior collapse happens. Based on this understanding, they propose to aggressively optimize the inference network. However, this approach costs too much time compared with the original VAE. In our work, we also employ the Gaussian prior and thus face the same lagging problem. Yet, our proposed approach does not involve additional training efforts: it can effectively avoid the lagging problem (Section 3.3) while keeping almost the same training efficiency as the original VAE (Section 5.1).
# 2.3 Related Work

To prevent posterior collapse, we have already mentioned much work on changing the prior in the introduction. Besides these approaches, some work modifies the original training objective directly. For example, Bowman et al. (2016) introduce an annealing strategy, where they gradually increase the weight of the KL from 0 to 1 during a warm-up period. $\beta$-VAE (Higgins et al., 2017) treats the KL weight as a hyperparameter to constrain the minimum value of the KL. Alemi et al. (2017), on the other hand, set a fixed KL weight to control the mutual information between $\mathbf{z}$ and $\mathbf{x}$. Tolstikhin et al. (2018) leverage the Wasserstein distance to replace the KL. Zhao et al. (2017a) replace the KL with maximum mean discrepancy. Fang et al. (2019) introduce sample-based representations which lead to implicit latent features with an auxiliary network.

Some change the training strategy. Kim et al. (2018) address the amortization gap (Cremer et al., 2018) in VAE and propose the Semi-Amortized VAE, which composes the inference network with additional mean-field updates. Fu et al. (2019) propose a cyclical annealing schedule, which repeats the process of increasing $\beta$ multiple times.

There are various other approaches to solve posterior collapse. For example, some researchers choose to weaken the decoder by replacing the LSTM decoder with convolutional neural networks without autoregressive modeling (Semeniuta et al., 2017; Yang et al., 2017). Chen et al. (2017) input a lossy representation of the data to the autoregressive decoder and enforce $\mathbf{z}$ to capture the information about the original input. Inheriting this idea, some following work adds direct connections between $\mathbf{z}$ and $\mathbf{x}$ (Zhao et al., 2017b; Dieng et al., 2019). Ma et al. (2019) introduce an additional regularization to learn diverse latent representations. $\delta$-VAE (Razavi et al., 2019) and free-bits (Kingma et al., 2016) set a minimum KL value for each latent dimension to prevent posterior collapse.

Srivastava and Sutton (2017, 2018) find that using ADAM (Kingma and Ba, 2014) with a high learning rate to train VAE may cause the gradients to diverge early. Their explanation for the diverging behavior lies in the exponential curvature of the gradient from the part of the inference network which produces the variance of the approximate posterior. They then apply batch normalization to the variance part to solve this problem. We use simple SGD without momentum to train our model. Moreover, we apply batch normalization to the mean part of the inference network to keep the expectation of the KL's distribution positive, which is different from their work. We also note that Sønderby et al. (2016) utilize batch normalization in all fully connected layers with nonlinear activation functions to improve model performance. Different from that, our approach directly applies batch normalization to the parameters of the approximate posterior, i.e., the output of the inference network.

# 3 Batch-Normalized VAE

In this section, we first derive the expectation of the KL's distribution and show that it is enough to avoid posterior collapse by keeping this expectation positive. We then propose our regularization method on the parameters of the approximate posterior to ensure a positive lower bound of this expectation. We further discuss the difference between our approach and previous work.
# 3.1 Expectation of the KL's Distribution

Given an $\mathbf{x} \in \mathcal{X}$, the inference network parametrizes an $n$-dimensional Gaussian distribution with mean $\mu = f_{\mu}(\mathbf{x})$ and diagonal covariance $\Sigma = \mathrm{diag}(f_{\Sigma}(\mathbf{x}))$, where $f_{\mu}$ and $f_{\Sigma}$ are two neural networks. In practice, the ELBO is computed through a Monte Carlo estimate from $b$ samples, and the KL in Eq. 2 is then computed over $b$ samples from $\mathcal{X}$:

$$
\begin{aligned}
KL &= \frac{1}{2b}\sum_{j=1}^{b}\sum_{i=1}^{n}\left(\mu_{ij}^{2} + \sigma_{ij}^{2} - \log \sigma_{ij}^{2} - 1\right)\\
&= \frac{1}{2}\sum_{i=1}^{n}\left(\frac{\sum_{j=1}^{b}\mu_{ij}^{2}}{b} + \frac{\sum_{j=1}^{b}\sigma_{ij}^{2}}{b} - \frac{\sum_{j=1}^{b}\log \sigma_{ij}^{2}}{b} - 1\right).
\end{aligned} \tag{3}
$$
When $b$ gets larger, the above empirical value approaches the mean of the KL across the whole dataset.

To make use of this observation, we assume that $\mu_{i}$ and $\log \sigma_i^2$ for each latent dimension $i$ each follow a certain distribution with a fixed mean and variance across the dataset. The distribution may vary between different latent dimensions. In this way, the KL turns into a distribution over the $\mu_{i}$'s and $\log \sigma_{i}^{2}$'s. From Eq. 3, we can see that $\sum_{j=1}^{b} \mu_{ij}^{2}/b$ is the sample mean of $\mu_{i}^{2}$, which converges to $\mathrm{E}[\mu_{i}^{2}] = \mathrm{Var}[\mu_{i}] + \mathrm{E}^{2}[\mu_{i}]$. Similarly, $\sum_{j=1}^{b} \sigma_{ij}^{2}/b$ converges to $\mathrm{E}[\sigma_{i}^{2}]$, and $\sum_{j=1}^{b} \log \sigma_{ij}^{2}/b$ to $\mathrm{E}[\log \sigma_{i}^{2}]$. Thus, we can derive the expectation of the KL's distribution as:

$$
\begin{aligned}
\mathrm{E}[KL] &= \frac{1}{2}\sum_{i=1}^{n}\left(\mathrm{Var}[\mu_{i}] + \mathrm{E}^{2}[\mu_{i}] + \mathrm{E}[\sigma_{i}^{2}] - \mathrm{E}[\log \sigma_{i}^{2}] - 1\right)\\
&\geq \frac{1}{2}\sum_{i=1}^{n}\left(\mathrm{Var}[\mu_{i}] + \mathrm{E}^{2}[\mu_{i}]\right),
\end{aligned} \tag{4}
$$
where $\mathrm{E}[\sigma_i^2 - \log \sigma_i^2] \geq 1$, since the minimum of $e^x - x$ is 1 (substitute $x = \log \sigma_i^2$). If we can guarantee a positive lower bound of $\mathrm{E}[KL]$, we can then effectively prevent posterior collapse.

Based on Eq. 4, the lower bound depends only on the number of latent dimensions $n$ and the mean and variance of the $\mu_{i}$'s. This motivates our idea: with proper regularization on the distributions of the $\mu_{i}$'s, we can ensure a positive lower bound of $\mathrm{E}[KL]$.
# 3.2 Normalizing Parameters of the Posterior

The remaining key problem is to construct proper distributions of the $\mu_{i}$'s that result in a positive lower bound of $\mathrm{E}[KL]$ in Eq. 4. Here, we propose a simple and efficient approach to accomplish this by applying a fixed batch normalization to the output of the inference network ($\mu_{i}$). Batch Normalization (BN) (Ioffe and Szegedy, 2015) is a widely used regularization technique in deep learning. It normalizes the output of neurons and makes the optimization landscape significantly smoother (Santurkar et al., 2018). Different from other tasks that apply BN in the hidden layers to seek fast and stable training, here we leverage BN as a tool to transform $\mu_{i}$ into a distribution with a fixed mean and variance. Mathematically, the regularized $\mu_{i}$ is written as:

$$
\hat{\mu}_{i} = \gamma \frac{\mu_{i} - \mu_{\mathcal{B}i}}{\sigma_{\mathcal{B}i}} + \beta, \tag{5}
$$
where $\mu_{i}$ and $\hat{\mu}_i$ are the means of the approximate posterior before and after BN. $\mu_{\mathcal{B}i}$ and $\sigma_{\mathcal{B}i}$ denote the mean and standard deviation of $\mu_{i}$; they are estimated (with bias) within a batch of samples for each dimension independently. $\gamma$ and $\beta$ are the scale and shift parameters. Instead of using a learnable $\gamma$ in Eq. 5, we use a fixed BN which freezes the scale $\gamma$. In this way, the distribution of $\mu_{i}$ has mean $\beta$ and variance $\gamma^2$. $\beta$ is a learnable parameter that makes the distribution more flexible.

Now, we derive the lower bound of $\mathrm{E}[KL]$ under the fixed BN. With the fixed mean $\beta$ and variance $\gamma^2$ for $\mu_i$ in hand, we get a new lower bound as below:
$$
\mathrm{E}[KL] \geq \frac{1}{2}\sum_{i}^{n}\left(\mathrm{Var}[\mu_{i}] + \mathrm{E}^{2}[\mu_{i}]\right) = \frac{n\,(\gamma^{2} + \beta^{2})}{2}. \tag{6}
$$

To this end, we can easily control the lower bound of $\mathrm{E}[KL]$ by setting $\gamma$. Algorithm 1 shows the training process.

Algorithm 1 BN-VAE training.
1: Initialize $\phi$ and $\theta$.
2: for $i = 1,2,\dots$ until convergence do
3:   Sample a mini-batch $\mathbf{x}$
4:   $\mu, \log \sigma^2 = f_{\phi}(\mathbf{x})$
5:   $\mu^{\prime} = BN_{\gamma,\beta}(\mu)$
6:   Sample $\mathbf{z} \sim \mathcal{N}(\mu', \sigma^2)$ and reconstruct $\mathbf{x}$ from $f_{\theta}(\mathbf{z})$.
7:   Compute gradients $\mathbf{g}_{\phi,\theta} \gets \nabla_{\phi,\theta}\mathcal{L}(\mathbf{x};\phi,\theta)$
8:   Update $\phi, \theta$ using $\mathbf{g}_{\phi,\theta}$.
9: end for
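
A minimal PyTorch sketch of the BN step in Algorithm 1: batch normalization with a frozen scale $\gamma$ and a learnable shift $\beta$ applied to the posterior means, assuming mini-batch training (BatchNorm1d needs batch statistics) and $\gamma = 0.6$ as in the later experiments. The module is illustrative, not the paper's exact implementation:

```python
import torch
import torch.nn as nn

class BNVAEPosterior(nn.Module):
    """Normalizes mu so that, across a batch, each dimension has mean beta and
    variance gamma^2, giving E[KL] >= n * (gamma^2 + beta^2) / 2 (Eq. 6)."""
    def __init__(self, n_latent: int, gamma: float = 0.6):
        super().__init__()
        self.bn = nn.BatchNorm1d(n_latent, affine=False)  # plain normalization
        self.gamma = gamma                                # fixed scale
        self.beta = nn.Parameter(torch.zeros(1))          # learnable shift

    def forward(self, mu, logvar):
        mu_hat = self.gamma * self.bn(mu) + self.beta     # Eq. 5 with frozen gamma
        z = mu_hat + torch.randn_like(mu_hat) * (0.5 * logvar).exp()  # reparameterize
        kl = 0.5 * (mu_hat.pow(2) + logvar.exp() - logvar - 1).sum(-1)
        return z, kl.mean()
```

In a training step, the total loss would be the decoder's reconstruction NLL plus the returned `kl`, optimized with the plain ELBO objective; no extra term or training loop is needed.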

# 3.3 Connections with Previous Approaches

Constructing a positive KL: Both free-bits (Kingma et al., 2016) and $\delta$-VAE (Razavi et al., 2019) set a threshold on the KL value. Free-bits changes the KL term in the ELBO to a hinge-loss term: $\sum_{i}^{n}\max(\lambda, KL(q_{\phi}(z_{i}|x)\,||\,p(z_{i})))$. Another version of free-bits applies the threshold to the entire sum directly instead of each individual value. Training with the free-bits objective, the model stops driving down the KL value once it is already below $\lambda$. However, Chen et al. (2017) point out that the objective of free-bits is non-smooth and suffers from optimization challenges. Our approach does not face this optimization problem since we use the original ELBO objective.

$\delta$-VAE sets a target rate of $\delta$ for each latent dimension by constraining the mean and variance of the approximate posterior:

$$
\begin{aligned}
\sigma_{q} &= \sigma_{q}^{l} + (\sigma_{q}^{u} - \sigma_{q}^{l})\,\frac{1}{1 + e^{-q_{\phi}(x)}}, \quad &(7)\\
\mu &= 2\delta + 1 + \ln(\sigma_{q}^{2}) - \sigma_{q}^{2} + \max(0, \mu_{\phi}(\mathbf{x})), \quad &(8)
\end{aligned}
$$

where $[\sigma_{q}^{l}, \sigma_{q}^{u}]$ is the feasible interval for $\sigma_q$, obtained by solving $\ln(\sigma_q^2) - \sigma_q^2 + 2\delta + 1 \geq 0$. Although $\delta$-VAE can ensure a minimum value for the KL, it limits the model performance because the parameters are constrained to this interval. Our approach only constrains the distribution of $\mu$, which is more flexible than $\delta$-VAE. Experiments further show that our approach surpasses both free-bits and $\delta$-VAE.
Reducing inference lag: As we focus on the setting of the conventional Gaussian prior, the lagging problem mentioned in Section 2.2 is crucial. On this point, it is instructive to analyze an alternate form of the ELBO:

$$
\mathcal{L} = \log p_{\theta}(\mathbf{x}) - KL(q_{\phi}(\mathbf{z}|\mathbf{x})\,||\,p_{\theta}(\mathbf{z}|\mathbf{x})). \tag{9}
$$

With this view, the only goal of the approximate posterior $q_{\phi}(\mathbf{z}|\mathbf{x})$ is to match the model posterior $p_{\theta}(\mathbf{z}|\mathbf{x})$. We examine the ability of our approach to reduce inference lag using the same synthetic experiment as He et al. (2019); details can be found in Section 1 of the Appendix. The synthetic experiment indicates that our approach with the regularization helps rebalance the optimization between inference and generation, and finally overcomes posterior collapse. We also prefer a large $\gamma$, because a small $\gamma$ pushes the approximate posterior towards the prior. More details on the synthetic experiment can be found in the Appendix.

# 4 Extension to CVAE

Given an observation $\mathbf{x}$ and its output $\mathbf{y}$, CVAE (Sohn et al., 2015; Zhao et al., 2017b) models the conditional distribution $p(\mathbf{y}|\mathbf{x})$. The variational lower bound of the conditional log-likelihood is:

$$
\begin{aligned}
\mathcal{L} &= \mathrm{E}_{q_{\phi}(\mathbf{z}|\mathbf{x},\mathbf{y})}[\log p_{\kappa}(\mathbf{y}|\mathbf{x},\mathbf{z})] - KL(q_{\phi}(\mathbf{z}|\mathbf{x},\mathbf{y})\,||\,p_{\theta}(\mathbf{z}|\mathbf{x}))\\
&\leq \log p(\mathbf{y}|\mathbf{x}).
\end{aligned} \tag{10}
$$
Different from VAE, the prior $p_{\theta}(\mathbf{z}|\mathbf{x})$ in CVAE is not fixed; it is also parametrized by a neural network. It is possible to apply another BN on the mean of the prior with a different $\gamma$ so that the expectation of the KL becomes a constant. However, this lower bound is uncontrollable, because the density of $\mu_{1} + \mu_{2}$ is the convolution of their densities, which is intractable.

To overcome this issue, we propose to constrain the prior with a fixed distribution. We achieve this by adding another KL between the prior and a known Gaussian distribution $r(\mathbf{z})$, i.e., $KL(p_{\theta}(\mathbf{z}|\mathbf{x})\,||\,r(\mathbf{z}))$. Instead of optimizing the ELBO in Eq. 10, we optimize a lower bound of the ELBO for CVAE:

$$
\mathcal{L}^{\prime} = \mathcal{L} - KL(p_{\theta}(\mathbf{z}|\mathbf{x})\,||\,r(\mathbf{z})) \leq \mathcal{L}. \tag{11}
$$
The KL term in the new bound is the sum of $KL(q_{\phi}(\mathbf{z}|\mathbf{x},\mathbf{y})\,||\,p_{\theta}(\mathbf{z}|\mathbf{x}))$ and $KL(p_{\theta}(\mathbf{z}|\mathbf{x})\,||\,r(\mathbf{z}))$, which can be computed as:

$$
KL = \frac{1}{2}\sum_{i=1}^{n}\left(\frac{\sigma_{qi}^{2} + (\mu_{qi} - \mu_{pi})^{2}}{\sigma_{pi}^{2}} + \sigma_{pi}^{2} + \mu_{pi}^{2} - \log \sigma_{qi}^{2} - 1\right), \tag{12}
$$
where $\sigma_q$, $\mu_q$ and $\sigma_p$, $\mu_p$ are the parameters of $q_{\phi}$ and $p_{\theta}$ respectively, and $n$ denotes the hidden size. The KL term vanishes to 0 if and only if $q_{\phi}$ and $p_{\theta}$ both collapse to $r(\mathbf{z})$, the normal distribution. As explained in Section 3.2, the KL won't be 0 when we apply BN in $q_{\phi}$. We then prove that when $q_{\phi}$ collapses to $p_{\theta}$, the KL term is not at its minimum (details in Section 2 of the Appendix), so that $KL(q_{\phi}(\mathbf{z}|\mathbf{x},\mathbf{y})\,||\,p_{\theta}(\mathbf{z}|\mathbf{x}))$ won't be 0. In this way, we can avoid posterior collapse in CVAE. Algorithm 2 shows the training details.

# Algorithm 2 BN-CVAE training.
1: Initialize $\phi, \theta$ and $\kappa$.
2: for $i = 1,2,\dots$ until convergence do
3:   Sample a mini-batch $\mathbf{x},\mathbf{y}$
4:   $\mu_q, \log \sigma_q^2 = f_\phi(\mathbf{x}, \mathbf{y})$ and $\mu_p, \log \sigma_p^2 = f_\theta(\mathbf{x})$.
5:   $\mu_q' = BN_{\gamma, \beta}(\mu_q)$.
6:   Sample $\mathbf{z} \sim \mathcal{N}(\mu_q', \sigma_q^2)$ and reconstruct $\mathbf{y}$ from $f_{\kappa}(\mathbf{z}, \mathbf{x})$.
7:   Compute gradients $\mathbf{g}_{\phi,\theta,\kappa} \gets \nabla_{\phi,\theta,\kappa}\mathcal{L}^{\prime}$
8:   Update $\phi, \theta, \kappa$ using $\mathbf{g}_{\phi,\theta,\kappa}$.
9: end for
<table><tr><td rowspan="2">Model</td><td colspan="4">Yahoo</td><td colspan="4">Yelp</td></tr><tr><td>NLL</td><td>KL</td><td>MI</td><td>AU</td><td>NLL</td><td>KL</td><td>MI</td><td>AU</td></tr><tr><td colspan="9">Without a pretrained AE encoder</td></tr><tr><td>CNN-VAE</td><td>≤332.1</td><td>10.0</td><td>-</td><td>-</td><td>≤359.1</td><td>7.6</td><td>-</td><td>-</td></tr><tr><td>LSTM-LM</td><td>328</td><td>-</td><td>-</td><td>-</td><td>351.1</td><td>-</td><td>-</td><td>-</td></tr><tr><td>VAE</td><td>328.6</td><td>0.0</td><td>0.0</td><td>0.0</td><td>357.9</td><td>0.0</td><td>0.0</td><td>0.0</td></tr><tr><td>β-VAE (0.4)</td><td>328.7</td><td>6.3</td><td>2.8</td><td>8.0</td><td>358.2</td><td>4.2</td><td>2.0</td><td>4.2</td></tr><tr><td>cyclic*</td><td>330.6</td><td>2.1</td><td>2.0</td><td>2.3</td><td>359.5</td><td>2.0</td><td>1.9</td><td>4.1</td></tr><tr><td>Skip-VAE*</td><td>328.5</td><td>2.3</td><td>1.3</td><td>8.1</td><td>357.6</td><td>1.9</td><td>1.0</td><td>7.4</td></tr><tr><td>SA-VAE</td><td>327.2</td><td>5.2</td><td>2.7</td><td>9.8</td><td>355.9</td><td>2.8</td><td>1.7</td><td>8.4</td></tr><tr><td>Agg-VAE</td><td>326.7</td><td>5.7</td><td>2.9</td><td>15.0</td><td>355.9</td><td>3.8</td><td>2.4</td><td>11.3</td></tr><tr><td>FB (4)</td><td>331.0</td><td>4.1</td><td>3.8</td><td>3.0</td><td>359.2</td><td>4.0</td><td>1.9</td><td>32.0</td></tr><tr><td>FB (5)</td><td>330.6</td><td>5.7</td><td>2.0</td><td>3.0</td><td>359.8</td><td>4.9</td><td>1.3</td><td>32.0</td></tr><tr><td>δ-VAE (0.1)*</td><td>330.7</td><td>3.2</td><td>0.0</td><td>0.0</td><td>359.8</td><td>3.2</td><td>0.0</td><td>0.0</td></tr><tr><td>vMF-VAE (13)*</td><td>327.4</td><td>2.0</td><td>-</td><td>32.0</td><td>357.5</td><td>2.0</td><td>-</td><td>32.0</td></tr><tr><td>BN-VAE (0.6)*</td><td>326.7</td><td>6.2</td><td>5.6</td><td>32.0</td><td>356.5</td><td>6.5</td><td>5.4</td><td>32.0</td></tr><tr><td>BN-VAE (0.7)*</td><td>327.4</td><td>8.8</td><td>7.4</td><td>32.0</td><td>355.9</td><td>9.1</td><td>7.4</td><td>32.0</td></tr><tr><td colspan="9">With a pretrained AE encoder</td></tr><tr><td>cyclic*</td><td>333.1</td><td>25.8</td><td>9.1</td><td>32.0</td><td>361.5</td><td>20.5</td><td>9.3</td><td>32.0</td></tr><tr><td>FB (4)*</td><td>326.2</td><td>8.1</td><td>6.8</td><td>32.0</td><td>356.0</td><td>7.6</td><td>6.6</td><td>32.0</td></tr><tr><td>δ-VAE (0.15)*</td><td>331.0</td><td>5.6</td><td>1.1</td><td>11.2</td><td>359.4</td><td>5.2</td><td>0.5</td><td>5.9</td></tr><tr><td>vMF-VAE (13)*</td><td>328.4</td><td>2.0</td><td>-</td><td>32.0</td><td>357.0</td><td>2.0</td><td>-</td><td>32.0</td></tr><tr><td>BN-VAE (0.6)*</td><td>326.7</td><td>6.4</td><td>5.8</td><td>32.0</td><td>355.5</td><td>6.6</td><td>5.9</td><td>32.0</td></tr><tr><td>BN-VAE (0.7)*</td><td>326.5</td><td>9.1</td><td>7.6</td><td>32.0</td><td>355.7</td><td>9.1</td><td>7.5</td><td>32.0</td></tr></table>

Table 1: Results on Yahoo and Yelp datasets. We report mean values across 5 different random runs. * indicates the results are from our experiments, while others are from He et al. (2019); Li et al. (2019). We only show the best performance of every model for each dataset. More results on various parameters can be found in the Appendix.

# 5 Experiments

# 5.1 VAE for Language Modeling

Setup: We test our approach on two benchmark datasets: the Yelp and Yahoo corpora (Yang et al., 2017). We use a Gaussian prior $\mathcal{N}(0, I)$, and the approximate posterior is a diagonal Gaussian. Following previous work (Burda et al., 2016; He et al., 2019), we report the negative log likelihood (NLL) estimated from 500 importance-weighted samples, which provides a tighter bound than the ELBO and carries the same information as the perplexity (PPL). Besides the NLL, we also report the KL, the mutual information (MI) $I_q$ (Alemi et al., 2017) and the number of active units (AU) (Burda et al., 2016) in the latent space. $I_q$ can be calculated as:

$$
I_{q} = \mathrm{E}_{p_{d}(\mathbf{x})}[KL(q_{\phi}(\mathbf{z}|\mathbf{x})\,||\,p(\mathbf{z}))] - KL(q_{\phi}(\mathbf{z})\,||\,p(\mathbf{z})), \tag{13}
$$
where $p_d(\mathbf{x})$ is the empirical distribution. The aggregated posterior $q_{\phi}(\mathbf{z}) = \mathrm{E}_{p_d(\mathbf{x})}[q_{\phi}(\mathbf{z}|\mathbf{x})]$ and $KL(q_{\phi}(\mathbf{z})||p(\mathbf{z}))$ can be approximated with Monte Carlo estimations. The AU is measured as $A_z = Cov(\mathrm{E}_{\mathbf{z}\sim q(\mathbf{z}|\mathbf{x})}[\mathbf{z}])$ . We set the threshold of 0.01, which means if $A_{zi} > 0.01$ , the unit $i$ is active.
|
| 203 |
+
|
| 204 |
+
Configurations: We use 512-dimensional word embeddings for both datasets. For the encoder and the decoder, we use a single-layer LSTM with hidden size 1024. We use $\mathbf{z}$ to generate the initial state of the decoder, following Kim et al. (2018); He et al. (2019); Li et al. (2019). To optimize the objective, we use mini-batch SGD with 32 samples per batch. We use one NVIDIA Tesla V100 for the experiments. Where applicable, we use a linear annealing strategy that increases the KL weight from 0 to 1 over the first 10 epochs.
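The annealing schedule itself is straightforward; a minimal sketch with our own naming, not the paper's code:

```python
def kl_weight(step, warmup_steps):
    """Linear KL annealing: the weight grows from 0 to 1 during warm-up
    (here, the number of updates in the first 10 epochs) and stays at 1."""
    return min(1.0, step / warmup_steps)

# The annealed objective is then: loss = rec_nll + kl_weight(step, warmup_steps) * kl
```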
Compared methods: We compare our model with several strong baselines and with methods that held the previous state of the art on text modeling benchmarks.
- Baselines, including neural autoregressive models (the LSTM language model).
- Methods with weakening the decoder: CNN-VAE (Yang et al., 2017).
- Methods with a modified model structure: Skip-VAE (Dieng et al., 2019).
- Methods with a modified training objective:
- VAE with annealing (Bowman et al., 2016).
- $\beta$ -VAE (Higgins et al., 2017).
- Cyclic annealing (Fu et al., 2019); we use the default cyclic schedule.
- Methods with a lower bound on the KL value:
- Free-bits (FB) (Kingma et al., 2016).
- $\delta$ -VAE (Razavi et al., 2019).
- vMF-VAE (Xu and Durrett, 2018).
- Methods with a modified training strategy:
- Semi-amortized VAE (SA-VAE) (Kim et al., 2018).
- VAE with an aggressive training (Agg-VAE) (He et al., 2019).
- FB with a pretrained inference network (AE+FB) (Li et al., 2019).
Main results: Table 1 shows the results. We further split the results into two different settings, one for models with a pretrained inference network and one without it. Our approach achieves the best NLL in the setting without a pretrained inference network on both datasets and is competitive in the setting with a pretrained encoder. Moreover, we can observe that:
- $\delta$-VAE does not perform well in either setting, which shows that constraining the parameters to a small interval is harmful to the model. In vMF-VAE, all data points share the same KL value. Our approach is more flexible and achieves better performance.
- Although Agg-VAE and SA-VAE both perform well, they require additional updates of the inference network and thus cost more training effort, as validated in the next part.
- Cyclic annealing with a pretrained inference network achieves the highest KL, but its poor NLL suggests it may not be a good generative model.
- Paired with a pretrained inference network, all methods except cyclic annealing gain a performance boost. This phenomenon indicates that the lagging problem (He et al., 2019) matters in VAE training. When leveraging the pretrained inference network, our approach shows the smallest performance gap between the two settings. In other words, our approach can alleviate the lagging problem efficiently.
Training time: Table 2 shows the training time (until convergence) and the relative ratio for the basic VAE, our approach, and the other best three models in Table 1. SA-VAE is about 12 times slower than our approach due to the local update for each data point. Agg-VAE is 2-4 times slower than ours because it requires additional training for the inference network. AE+FB needs to train an autoencoder before the VAE. In contrast, our approach is fast: we only add one layer of batch normalization, so the training cost is almost the same as that of the basic VAE. More results about the training behavior can be found in Section 3 of the Appendix.

<table><tr><td rowspan="2">Model</td><td colspan="2">Yahoo</td><td colspan="2">Yelp</td></tr><tr><td>Hours</td><td>Ratio</td><td>Hours</td><td>Ratio</td></tr><tr><td>VAE</td><td>3.83</td><td>1.00</td><td>4.50</td><td>1.00</td></tr><tr><td>SA-VAE</td><td>52.99</td><td>12.80</td><td>59.37</td><td>12.64</td></tr><tr><td>Agg-VAE</td><td>11.76</td><td>2.84</td><td>21.44</td><td>4.56</td></tr><tr><td>AE+FB</td><td>7.70</td><td>2.01</td><td>9.22</td><td>2.05</td></tr><tr><td>BN-VAE</td><td>3.98</td><td>1.04</td><td>4.60</td><td>1.02</td></tr></table>

Table 2: Comparison of training time to convergence. We report both the absolute hours and the relative time ratio.
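To make the negligible overhead concrete, the extra step amounts to one batch-normalization pass over the posterior means with a fixed scale $\gamma$; the sketch below is our illustration under that reading (variable names are ours, not the released code):

```python
import torch

def bn_posterior_mean(mu, gamma=0.6, eps=1e-5):
    """Batch-normalize posterior means so that each latent dimension has
    (batch) mean 0 and standard deviation `gamma`. Fixing the BN scale to
    gamma is what keeps the expected KL bounded away from zero."""
    mean = mu.mean(dim=0, keepdim=True)
    std = mu.std(dim=0, unbiased=False, keepdim=True)
    return gamma * (mu - mean) / (std + eps)

mu = 3.0 * torch.randn(32, 16) + 1.0               # raw encoder means, (batch, d)
mu_bn = bn_posterior_mean(mu)
print(mu_bn.mean(0).abs().max().item())            # ~0
print(mu_bn.std(0, unbiased=False).mean().item())  # ~0.6
```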
<table><tr><td>#label</td><td>100</td><td>500</td><td>1k</td><td>2k</td><td>10k</td></tr><tr><td>AE</td><td>81.1</td><td>86.2</td><td>90.3</td><td>89.4</td><td>94.1</td></tr><tr><td>VAE</td><td>66.1</td><td>82.6</td><td>88.4</td><td>89.6</td><td>94.5</td></tr><tr><td>δ-VAE</td><td>61.8</td><td>61.9</td><td>62.6</td><td>62.9</td><td>93.8</td></tr><tr><td>Agg-VAE</td><td>80.9</td><td>85.9</td><td>88.8</td><td>90.6</td><td>93.7</td></tr><tr><td>cyclic</td><td>62.4</td><td>75.5</td><td>80.3</td><td>88.7</td><td>94.2</td></tr><tr><td>FB (9)</td><td>79.8</td><td>84.4</td><td>88.8</td><td>91.12</td><td>94.7</td></tr><tr><td>AE+FB (6)</td><td>87.6</td><td>90.2</td><td>92.0</td><td>93.4</td><td>94.9</td></tr><tr><td>BN-VAE (0.7)</td><td>88.8</td><td>91.6</td><td>92.5</td><td>94.1</td><td>95.4</td></tr></table>
Table 3: Accuracy on Yelp.
<table><tr><td>Model</td><td>CVAE</td><td>CVAE (BOW)</td><td>BN-VAE</td></tr><tr><td>PPL</td><td>36.40</td><td>24.49</td><td>30.67</td></tr><tr><td>KL</td><td>0.15</td><td>9.30</td><td>5.18</td></tr><tr><td>BLEU-4</td><td>10.23</td><td>8.56</td><td>8.64</td></tr><tr><td>A-bow Prec</td><td>95.87</td><td>96.89</td><td>96.64</td></tr><tr><td>A-bow Recall</td><td>90.93</td><td>93.95</td><td>94.43</td></tr><tr><td>E-bow Prec</td><td>86.26</td><td>83.55</td><td>84.69</td></tr><tr><td>E-bow Recall</td><td>77.91</td><td>81.13</td><td>81.75</td></tr></table>
Table 4: Comparison on dialogue generation.
Performance on a downstream task - Text classification: The goal of a VAE is to learn a good representation of the data for downstream tasks. Here, we evaluate the quality of the latent representations by training a one-layer linear classifier on the mean of the posterior distribution. We use a downsampled version of the Yelp sentiment dataset (Shen et al., 2017). Li et al. (2019) further sampled various amounts of labeled data to train the classifier. To compare with them fairly, we use the same samples as Li et al. (2019). Results are shown in Table 3. Our approach achieves the best accuracy in all the settings. With 10k training samples, all methods obtain good results. However, with only 100 training samples, the methods vary a lot in accuracy. The text classification task shows that our approach can learn a good latent representation even without a pretrained inference network.
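A sketch of this probing setup, assuming the trained encoder's posterior means have already been extracted as feature vectors; random features stand in below so the snippet runs on its own:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
feats = rng.normal(size=(100, 32))        # stand-in for posterior means mu(x)
labels = (feats[:, 0] > 0).astype(int)    # stand-in for sentiment labels
clf = LogisticRegression(max_iter=1000).fit(feats, labels)
print(clf.score(feats, labels))           # one-layer linear probe accuracy
```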
# 5.2 CVAE for Dialogue Generation
Setup: For dialogue generation, we test our approach in the setting of CVAE. Following previous work (Zhao et al., 2017b), we use the Switchboard (SW) Corpus (Godfrey and Holliman, 1997), which contains 2400 two-sided telephone conversations.
<table><tr><td rowspan="2">Model</td><td colspan="3">Fluency</td><td colspan="3">Relevance</td><td colspan="3">Informativeness</td></tr><tr><td>Avg</td><td>#Accept</td><td>#High</td><td>Avg</td><td>#Accept</td><td>#High</td><td>Avg</td><td>#Accept</td><td>#High</td></tr><tr><td>CVAE</td><td>2.11 (0.58)</td><td>87%</td><td>23%</td><td>1.90 (0.49)</td><td>82%</td><td>8%</td><td>1.39 (0.59)</td><td>34%</td><td>5%</td></tr><tr><td>CVAE (BOW)</td><td>2.08 (0.73)</td><td>84%</td><td>23%</td><td>1.86 (0.58)</td><td>75%</td><td>11%</td><td>1.54 (0.65)</td><td>46%</td><td>8%</td></tr><tr><td>BN-CVAE</td><td>2.16 (0.71)</td><td>88%</td><td>27%</td><td>1.92 (0.67)</td><td>80%</td><td>12%</td><td>1.54 (0.67)</td><td>43%</td><td>10%</td></tr></table>
Table 5: Human evaluation results. Numbers in parentheses are the corresponding variances over 200 test samples.
<table><tr><td colspan="3">Topic: ETHICS IN GOVERNMENT</td></tr><tr><td colspan="3">Context: have trouble drawing lines as to what's illegal and what's not</td></tr><tr><td colspan="3">Target (statement): well i mean the other problem is that they're always up for</td></tr><tr><td>CVAE</td><td>CVAE (BOW)</td><td>BN-CVAE</td></tr><tr><td>1. yeah</td><td>1. yeah</td><td>1. it's not a country</td></tr><tr><td>2. yeah</td><td>2. oh yeah they're not</td><td>2. it is the same thing that's what i think is about the state is a state</td></tr><tr><td>3. yeah</td><td>3. no it's not too bad</td><td>3. yeah it's</td></tr></table>
Table 6: Sampled generated responses. Only the last sentence in the context is shown here.
We use a bidirectional GRU with hidden size 300 to encode each utterance, and a one-layer GRU with hidden size 600 to encode the previous $k - 1$ utterances as the context. The response decoder is a one-layer GRU with hidden size 400. The latent representation $\mathbf{z}$ has a size of 200. We use the evaluation metrics from Zhao et al. (2017b): (1) smoothed sentence-level BLEU (Chen and Cherry, 2014); (2) cosine distance of bag-of-word embeddings, a simple method to obtain sentence embeddings. We use the pretrained GloVe embeddings (Pennington et al., 2014) and denote the average method as A-bow and the extreme method as E-bow. Higher values indicate more plausible responses. We compare our approach with CVAE and with CVAE augmented with a bag-of-words (BOW) loss (Zhao et al., 2017b), which requires the decoder in the generation network to predict the bag of words in the response $\mathbf{y}$ based on $\mathbf{z}$.
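A minimal sketch of the two bag-of-words embedding variants; toy vectors stand in for GloVe, and all names are illustrative rather than from the evaluation scripts:

```python
import numpy as np

def bow_embedding(tokens, vectors, mode="avg"):
    """Sentence embedding by pooling word vectors: 'avg' is A-bow
    (average pooling); otherwise E-bow (extreme pooling, keeping the
    most extreme value per dimension)."""
    vecs = np.stack([vectors[t] for t in tokens if t in vectors])
    if mode == "avg":
        return vecs.mean(axis=0)
    idx = np.abs(vecs).argmax(axis=0)          # most extreme entry per dim
    return vecs[idx, np.arange(vecs.shape[1])]

def cosine(a, b):
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

toy = {w: np.random.default_rng(i).normal(size=5)
       for i, w in enumerate(["okay", "bye", "nice", "talking"])}
ref = bow_embedding(["okay", "bye"], toy)
hyp = bow_embedding(["nice", "talking"], toy, mode="extreme")
print(cosine(ref, hyp))
```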
Automatic evaluation: Table 4 shows the results of the three approaches. From the KL values, we find that CVAE suffers from posterior collapse, while CVAE (BOW) and our approach avoid it effectively. For BLEU-4, we observe the same phenomenon as previous work (Fu et al., 2019; Zhao et al., 2017b): CVAE is slightly better than the others. This is because CVAE, with its collapsed posterior, tends to repeatedly generate the most likely, safe responses. In precision, the three models do not differ much. However, CVAE (BOW) and our BN-CVAE outperform CVAE in recall by a large margin. This indicates that BN-CVAE can also produce diverse responses with good quality, like CVAE (BOW).
Human evaluation: We conduct a human evaluation by asking five annotators from a commercial annotation company to grade 200 sampled conversations in terms of fluency, relevance and informativeness on a scale of 1-3 (see Section 4 of the Appendix for more details on the criteria). We also report the proportion of acceptable/high scores ($\geq 2$ and $= 3$) on each metric. Table 5 shows the annotation results. Overall, our approach beats the other two compared methods in relevance and fluency while producing more informative responses. Also, our approach has the largest proportion of responses with high scores. This indicates that our model can produce more meaningful and relevant responses than the other two.
Case study: Table 6 shows sampled responses generated by the three methods (more can be found in the Appendix). By maintaining a reasonable KL, the responses generated by our approach are more relevant to the query and more diverse than those of the other two. We test the three methods in the simplest setting of dialogue generation; note that the focus of this work is to improve CVAE itself by avoiding its KL vanishing problem, not to chase state-of-the-art dialogue generation performance. To further improve the quality of generated responses, our approach can be enhanced by incorporating knowledge such as dialogue acts (Zhao et al., 2017b), external facts (Ghazvininejad et al., 2018) and personal profiles (Zhang et al., 2018).
# 6 Conclusions and Future Work
In this paper, we tackle the posterior collapse problem that arises when VAEs are paired with autoregressive decoders. Instead of considering each KL value individually, we let it follow a distribution $D_{KL}$ and show that keeping the expectation of $D_{KL}$ positive is sufficient to prevent posterior collapse. We propose batch normalized VAE (BN-VAE), a simple but effective approach that sets a lower bound on the expectation of $D_{KL}$ by regularizing the approximate posterior's parameters. Our approach also avoids the recently identified lagging problem efficiently, without additional training effort. We show that our approach can be easily extended to CVAE. We test our approach on three real applications: language modeling, text classification and dialogue generation. Experiments show that our approach outperforms strong baselines and is competitive with more complex methods while remaining substantially faster.
We use the Gaussian prior as the running example to introduce our method in this work. The key requirement for our approach is a closed-form expression for the expectation of the KL. However, such an expression is hard to obtain for some stronger or more sophisticated priors, e.g., the Dirichlet prior. For these distributions, we can approximate them with Gaussian distributions (as in Srivastava and Sutton (2017)) and then batch normalize the corresponding parameters. Further study in this direction may be interesting.
# References
Alexander Alemi, Ben Poole, Ian Fischer, Joshua Dillon, Rif A. Saurous, and Kevin Murphy. 2018. Fixing a broken ELBO. In ICML.

Alexander A. Alemi, Ian Fischer, Joshua V. Dillon, and Kevin Murphy. 2017. Deep variational information bottleneck. In ICLR.

Samuel R. Bowman, Luke Vilnis, Oriol Vinyals, Andrew Dai, Rafal Jozefowicz, and Samy Bengio. 2016. Generating sentences from a continuous space. In CoNLL.

Yuri Burda, Roger B. Grosse, and Ruslan R. Salakhutdinov. 2016. Importance weighted autoencoders. In ICLR.

Boxing Chen and Colin Cherry. 2014. A systematic comparison of smoothing techniques for sentence-level BLEU. In Proceedings of the Ninth Workshop on Statistical Machine Translation.

Xi Chen, Diederik P. Kingma, Tim Salimans, Yan Duan, Prafulla Dhariwal, John Schulman, Ilya Sutskever, and Pieter Abbeel. 2017. Variational lossy autoencoder. In ICLR.

Chris Cremer, Xuechen Li, and David Duvenaud. 2018. Inference suboptimality in variational autoencoders. In ICML.

Tim R. Davidson, Luca Falorsi, Nicola De Cao, Thomas Kipf, and Jakub M. Tomczak. 2018. Hyperspherical variational auto-encoders. In UAI.
Adji B. Dieng, Yoon Kim, Alexander M. Rush, and David M. Blei. 2019. Avoiding latent variable collapse with generative skip models. In AISTATS.

Le Fang, Chunyuan Li, Jianfeng Gao, Wen Dong, and Changyou Chen. 2019. Implicit deep latent variable models for text generation. In EMNLP-IJCNLP.

Hao Fu, Chunyuan Li, Xiaodong Liu, Jianfeng Gao, Asli Celikyilmaz, and Lawrence Carin. 2019. Cyclical annealing schedule: A simple approach to mitigating KL vanishing. In NAACL.

Marjan Ghazvininejad, Chris Brockett, Ming-Wei Chang, Bill Dolan, Jianfeng Gao, Wen-tau Yih, and Michel Galley. 2018. A knowledge-grounded neural conversation model. In AAAI.

J. Godfrey and E. Holliman. 1997. Switchboard-1 Release 2: Linguistic Data Consortium. In SWITCHBOARD: A User's Manual.

Kelvin Guu, Tatsunori B. Hashimoto, Yonatan Oren, and Percy Liang. 2018. Generating sentences by editing prototypes. Transactions of the Association for Computational Linguistics.

Junxian He, Daniel Spokoyny, Graham Neubig, and Taylor Berg-Kirkpatrick. 2019. Lagging inference networks and posterior collapse in variational autoencoders. In ICLR.
Irina Higgins, Loic Matthey, Arka Pal, Christopher Burgess, Xavier Glorot, Matthew Botvinick, Shakir Mohamed, and Alexander Lerchner. 2017. β-VAE: Learning basic visual concepts with a constrained variational framework. In ICLR.

Sepp Hochreiter and Jürgen Schmidhuber. 1997. Long short-term memory. Neural Computation.

Sergey Ioffe and Christian Szegedy. 2015. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In ICML.

Yoon Kim, Sam Wiseman, Andrew C. Miller, David A. Sontag, and Alexander M. Rush. 2018. Semi-amortized variational autoencoders. In ICML.

Diederik P. Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. In ICLR.

Diederik P. Kingma and Max Welling. 2014. Auto-encoding variational Bayes. In ICLR.

Durk P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, and Max Welling. 2016. Improved variational inference with inverse autoregressive flow. In NeurIPS.

Bohan Li, Junxian He, Graham Neubig, Taylor Berg-Kirkpatrick, and Yiming Yang. 2019. A surprisingly effective fix for deep latent variable modeling of text. In EMNLP-IJCNLP.
Xuezhe Ma, Chunting Zhou, and Eduard Hovy. 2019. MAE: Mutual posterior-divergence regularization for variational autoencoders. In ICLR.

Yishu Miao, Lei Yu, and Phil Blunsom. 2016. Neural variational inference for text processing. In ICML.

Aaron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. 2017. Neural discrete representation learning. In NeurIPS.

Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. GloVe: Global vectors for word representation. In EMNLP.

Ali Razavi, Aaron van den Oord, Ben Poole, and Oriol Vinyals. 2019. Preventing posterior collapse with delta-VAEs. In ICLR.

Danilo Jimenez Rezende, Shakir Mohamed, and Daan Wierstra. 2014. Stochastic backpropagation and approximate inference in deep generative models. In ICML.

Shibani Santurkar, Dimitris Tsipras, Andrew Ilyas, and Aleksander Madry. 2018. How does batch normalization help optimization? In NeurIPS.

Stanislau Semeniuta, Aliaksei Severyn, and Erhardt Barth. 2017. A hybrid convolutional variational autoencoder for text generation. In EMNLP.

Tianxiao Shen, Tao Lei, Regina Barzilay, and Tommi Jaakkola. 2017. Style transfer from non-parallel text by cross-alignment. In NeurIPS.
Kihyuk Sohn, Honglak Lee, and Xinchen Yan. 2015. Learning structured output representation using deep conditional generative models. In NeurIPS.

Casper Kaae Sønderby, Tapani Raiko, Lars Maaløe, Søren Kaae Sønderby, and Ole Winther. 2016. Ladder variational autoencoders. In NeurIPS.

Akash Srivastava and Charles Sutton. 2017. Autoencoding variational inference for topic models. In ICLR.

Akash Srivastava and Charles Sutton. 2018. Variational inference in pachinko allocation machines. arXiv preprint arXiv:1804.07944.

Ilya Tolstikhin, Olivier Bousquet, Sylvain Gelly, and Bernhard Schoelkopf. 2018. Wasserstein auto-encoders. In ICLR.

Jakub M. Tomczak and Max Welling. 2018. VAE with a VampPrior. In AISTATS.

Jiacheng Xu and Greg Durrett. 2018. Spherical latent spaces for stable variational autoencoders. In EMNLP.

Weidi Xu, Haoze Sun, Chao Deng, and Ying Tan. 2017. Variational autoencoder for semi-supervised text classification. In AAAI.
Zichao Yang, Zhiting Hu, Ruslan Salakhutdinov, and Taylor Berg-Kirkpatrick. 2017. Improved variational autoencoders for text modeling using dilated convolutions. In ICML.

Saizheng Zhang, Emily Dinan, Jack Urbanek, Arthur Szlam, Douwe Kiela, and Jason Weston. 2018. Personalizing dialogue agents: I have a dog, do you have pets too? In ACL.

Shengjia Zhao, Jiaming Song, and Stefano Ermon. 2017a. InfoVAE: Information maximizing variational autoencoders. arXiv preprint arXiv:1706.02262.

Tiancheng Zhao, Kyusong Lee, and Maxine Eskenazi. 2018. Unsupervised discrete sentence representation learning for interpretable neural dialog generation. In ACL.

Tiancheng Zhao, Ran Zhao, and Maxine Eskenazi. 2017b. Learning discourse-level diversity for neural dialog models using conditional variational autoencoders. In ACL.

Qile Zhu, Zheng Feng, and Xiaolin Li. 2018. GraphBTM: Graph enhanced autoencoded variational inference for biterm topic model. In EMNLP.
# A Appendix
# A.1 Experiments on Synthetic Data
We follow Agg-VAE and construct synthetic data to validate whether our approach can avoid the lagging problem. The VAE used in this synthetic task has an LSTM encoder and an LSTM decoder. We use a scalar latent variable because we need to compute $\mu_{x,\theta}$, which is approximated by discretizing $p_{\theta}(z|x)$. To visualize the training progress, we sample 500 data points from the validation set and show them in the mean space.
We plot the mean value of the approximate posterior and the model posterior during training for the basic VAE and BN-VAE. As shown in the first column of Fig. 1, all points have a zero model posterior mean (the x-axis) at the beginning of training, which indicates that $\mathbf{z}$ and $\mathbf{x}$ are initially independent. For the basic VAE, points start to spread along the x-axis during training while sharing almost the same y value, since the model posterior $p_{\theta}(\mathbf{z}|\mathbf{x})$ is well learned with the help of the autoregressive decoder. However, the inference posterior $q_{\phi}(\mathbf{z}|\mathbf{x})$ lags behind $p_{\theta}(\mathbf{z}|\mathbf{x})$ and collapses to the prior in the end. Our regularization, approximated by BN, on the other hand, pushes the inference posterior $q_{\phi}(\mathbf{z}|\mathbf{x})$ away from the prior $p(\mathbf{z})$ at the initial training stage, and forces $q_{\phi}(\mathbf{z}|\mathbf{x})$ to catch up with $p_{\theta}(\mathbf{z}|\mathbf{x})$ to minimize $KL(q_{\phi}(\mathbf{z}|\mathbf{x})||p_{\theta}(\mathbf{z}|\mathbf{x}))$ in Eq. 9. As shown in the second row of Fig. 1, points spread in both directions and towards the diagonal.
We also report the results for different $\gamma$'s with different batch sizes (32 in Fig. 1). Fig. 2 shows the training dynamics. Both settings of $\gamma$ avoid posterior collapse efficiently. A larger $\gamma$ produces more diverse $\mu$'s, which spread along the diagonal. A small $\gamma$, in contrast, results in a small variance for the distribution of $\mu$, so the $\mu$'s in the bottom row are closer to the origin (the mean of the distribution). When $\gamma$ is 0, posterior collapse happens. Different batch sizes do not differ much, so 32 is a decent choice. An intuitive improvement of our method is to automatically learn a different $\gamma$ for each latent dimension, which we leave for future work.
# A.2 Proof in CVAE
The KL can be computed as:
$$
KL = \frac{1}{2} \sum_{i=1}^{n} \left( \frac{\sigma_{qi}^{2} + (\mu_{qi} - \mu_{pi})^{2}}{\sigma_{pi}^{2}} + \sigma_{pi}^{2} + \mu_{pi}^{2} - \log \sigma_{qi}^{2} - 1 \right). \tag{14}
$$
We need to prove that the KL does not achieve its minimum when $\mu_{pi}$ equals $\mu_{qi}$ and $\sigma_{pi}$ equals $\sigma_{qi}$. We take hidden size 1 as an example. As a function of the two variables $\mu_{pi}$ and $\sigma_{pi}$:
$$
f_{\mu_{pi}, \sigma_{pi}} = \frac{\sigma_{qi}^{2} + (\mu_{qi} - \mu_{pi})^{2}}{\sigma_{pi}^{2}} + \sigma_{pi}^{2} + \mu_{pi}^{2} - \log \sigma_{qi}^{2} - 1, \tag{15}
$$
and since $f_{\mu_{pi},\sigma_{pi}}$ is differentiable, its maxima and minima must be stationary points. The partial derivatives are:
$$
\frac{\partial f}{\partial \mu_{pi}} = \frac{2\left(\mu_{pi} - \mu_{qi}\right)}{\sigma_{pi}^{2}} + 2\mu_{pi}, \tag{16}
$$
$$
\frac{\partial f}{\partial \sigma_{pi}} = \frac{-2\left(\sigma_{qi}^{2} + \left(\mu_{qi} - \mu_{pi}\right)^{2}\right)}{\sigma_{pi}^{3}} + 2\sigma_{pi}. \tag{17}
$$
When $\mu_{pi} = \mu_{qi}$ and $\sigma_{pi} = \sigma_{qi}$, neither partial derivative is 0 in general. So this point is not a stationary point of $f$, and hence it is not a minimum.
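For concreteness, substituting the candidate point into Eqs. 16 and 17 gives

$$
\left.\frac{\partial f}{\partial \mu_{pi}}\right|_{\mu_{pi}=\mu_{qi}} = 2\mu_{qi}, \qquad \left.\frac{\partial f}{\partial \sigma_{pi}}\right|_{\mu_{pi}=\mu_{qi},\, \sigma_{pi}=\sigma_{qi}} = -\frac{2}{\sigma_{qi}} + 2\sigma_{qi},
$$

which vanish simultaneously only in the special case $\mu_{qi} = 0$ and $\sigma_{qi} = 1$, i.e., when the posterior already equals the standard Gaussian prior.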
# A.3 Language Modeling
We investigate the training procedure of different models. We plot the MI $I_{q}$, the $D_{KL}$ term in the ELBO, and the distance between the aggregated posterior and the prior, $KL(q_{\phi}(z)||p(z))$. As in Eq. 4 of the main paper, the $D_{KL}$ term in the ELBO is the sum of the other two. Fig. 3 shows these three values throughout training. Although $D_{KL}$ is an upper bound on the mutual information, we notice that the gap is usually large. In the initial training stage, $D_{KL}$ increases in the basic VAE with annealing while its MI remains small; as the annealing weight grows, the method finally suffers from posterior collapse. In contrast, our approach obtains a high MI with a small $D_{KL}$ value, like aggressive VAE. The full results on language modeling are in Table 8.
# A.4 CVAE for Dialogue Generation
Human evaluation: We evaluate the generated responses in terms of three aspects: relevance, fluency and informativeness. Table 7 lists the evaluation criteria. We sample 200 conversations from the test set; for each conversation, we sample three generated responses from each model, 600 responses in total.
Case study: We report 4 examples generated by the three models in Table 9. Both CVAE (BOW) and our approach can generate diverse responses; however, the responses from ours are more related to the context than those of the other two.

|
| 384 |
+
|
| 385 |
+

|
| 386 |
+
|
| 387 |
+

|
| 388 |
+
|
| 389 |
+

|
| 390 |
+
|
| 391 |
+

|
| 392 |
+
Figure 1: Visualization of 500 sampled data points from the synthetic dataset during training. The x-axis is $\mu_{x,\theta}$, the approximate model posterior mean; the y-axis is $\mu_{x,\phi}$, the inference posterior mean. $b$ is the batch size, and $\gamma$ is 1 in BN.
Figure 2: Visualization of our BN-VAE with different $\gamma$ on the synthetic data.
Figure 3: Training behavior on Yelp. Left/Middle/Right: VAE/Agg-VAE/BN-VAE (all models are with annealing).
Table 7: Human evaluation criteria.
<table><tr><td colspan="2">Fluency</td><td>Relevance</td><td>Informativeness</td></tr><tr><td>1 Point</td><td>1. Hard to understand
|
| 427 |
+
2. Too many syntax mistakes</td><td>Not related to the query at all</td><td>1. Generic responses.
|
| 428 |
+
2. Repeated query.</td></tr><tr><td>2 Points</td><td>1. Several syntax mistakes but still understandable
|
| 429 |
+
2. short responses, e.g., Generic responses</td><td>1. Response and query are in the same domain/topic but are not directly related
|
| 430 |
+
2. Generic responses</td><td>between 1 and 3.</td></tr><tr><td>3 Points</td><td>Only few syntax mistakes with a moderate length</td><td>closely related to the query</td><td>1. Creative responses.
|
| 431 |
+
2. Contain new information about the query.</td></tr></table>
<table><tr><td rowspan="2">Model</td><td colspan="4">Yahoo</td><td colspan="4">Yelp</td></tr><tr><td>NLL</td><td>KL</td><td>MI</td><td>AU</td><td>NLL</td><td>KL</td><td>MI</td><td>AU</td></tr><tr><td>CNN-VAE</td><td>≤332.1</td><td>10.0</td><td>-</td><td>-</td><td>≤359.1</td><td>7.6</td><td>-</td><td>-</td></tr><tr><td>LSTM-LM</td><td>328</td><td>-</td><td>-</td><td>-</td><td>351.1</td><td>-</td><td>-</td><td>-</td></tr><tr><td>VAE</td><td>328.6</td><td>0.0</td><td>0.0</td><td>0.0</td><td>357.9</td><td>0.0</td><td>0.0</td><td>0.0</td></tr><tr><td>β-VAE (0.2)</td><td>332.2</td><td>19.1</td><td>3.3</td><td>20.4</td><td>360.7</td><td>11.7</td><td>3.0</td><td>10.0</td></tr><tr><td>β-VAE (0.4)</td><td>328.7</td><td>6.3</td><td>2.8</td><td>8.0</td><td>358.2</td><td>4.2</td><td>2.0</td><td>4.2</td></tr><tr><td>β-VAE (0.6)</td><td>328.5</td><td>0.3</td><td>0.0</td><td>1.0</td><td>357.9</td><td>0.2</td><td>0.1</td><td>3.8</td></tr><tr><td>β-VAE (0.8)</td><td>328.8</td><td>0.0</td><td>0.0</td><td>0.0</td><td>358.1</td><td>0.0</td><td>0.0</td><td>0.0</td></tr><tr><td>cyclic*</td><td>330.6</td><td>2.1</td><td>2.0</td><td>2.3</td><td>359.5</td><td>2.0</td><td>1.9</td><td>4.1</td></tr><tr><td>Skip-VAE*</td><td>328.5</td><td>2.3</td><td>1.3</td><td>8.1</td><td>357.6</td><td>1.9</td><td>1.0</td><td>7.4</td></tr><tr><td>SA-VAE</td><td>327.2</td><td>5.2</td><td>2.7</td><td>9.8</td><td>355.9</td><td>2.8</td><td>1.7</td><td>8.4</td></tr><tr><td>Agg-VAE</td><td>326.7</td><td>5.7</td><td>2.9</td><td>15.0</td><td>355.9</td><td>3.8</td><td>2.4</td><td>11.3</td></tr><tr><td>FB (4)</td><td>331.0</td><td>4.1</td><td>3.8</td><td>3.0</td><td>359.2</td><td>4.0</td><td>1.9</td><td>32.0</td></tr><tr><td>FB (5)</td><td>330.6</td><td>5.7</td><td>2.0</td><td>3.0</td><td>359.8</td><td>4.9</td><td>1.3</td><td>32.0</td></tr><tr><td>δ-VAE (0.1)*</td><td>330.7</td><td>3.2</td><td>0.0</td><td>0.0</td><td>359.8</td><td>3.2</td><td>0.0</td><td>0.0</td></tr><tr><td>δ-VAE (0.15)*</td><td>331.6</td><td>4.8</td><td>0.0</td><td>0.0</td><td>360.4</td><td>4.8</td><td>0.0</td><td>0.0</td></tr><tr><td>δ-VAE (0.2)*</td><td>332.2</td><td>6.4</td><td>0.0</td><td>0.0</td><td>361.5</td><td>6.4</td><td>0.0</td><td>0.0</td></tr><tr><td>δ-VAE (0.25)*</td><td>333.5</td><td>8.0</td><td>0.0</td><td>0.0</td><td>362.5</td><td>8.0</td><td>0.0</td><td>0.0</td></tr><tr><td>vMF-VAE (13)*</td><td>327.4</td><td>2.0</td><td>-</td><td>32.0</td><td>357.5</td><td>2.0</td><td>-</td><td>32.0</td></tr><tr><td>vMF-VAE (16)*</td><td>328.5</td><td>3.0</td><td>-</td><td>32.0</td><td>367.8</td><td>3.0</td><td>-</td><td>32.0</td></tr><tr><td>vMF-VAE (20)*</td><td>329.4</td><td>4.0</td><td>-</td><td>32.0</td><td>358.0</td><td>4.0</td><td>-</td><td>32.0</td></tr><tr><td>vMF-VAE (23)*</td><td>328.7</td><td>5.0</td><td>-</td><td>32.0</td><td>357.3</td><td>5.0</td><td>-</td><td>32.0</td></tr><tr><td>vMF-VAE (25)*</td><td>330.1</td><td>6.0</td><td>-</td><td>32.0</td><td>357.8</td><td>6.0</td><td>-</td><td>32.0</td></tr><tr><td>vMF-VAE (30)*</td><td>329.5</td><td>7.0</td><td>-</td><td>32.0</td><td>357.8</td><td>7.0</td><td>-</td><td>32.0</td></tr><tr><td>BN-VAE (0.3)*</td><td>328.1</td><td>1.6</td><td>1.4</td><td>32.0</td><td>356.7</td><td>1.7</td><td>1.4</td><td>32.0</td></tr><tr><td>BN-VAE (0.4)*</td><td>327.7</td><td>2.7</td><td>2.2</td><td>32.0</td><td>356.2</td><td>3.1</td><td>2.5</td><td>32.0</td></tr><tr><td>BN-VAE (0.5)*</td><td>327.4</td><td>4.2</td><td>3.3</td><td>32.0</td><td>356.4</td><td>4.4</td><td>3.8</td><td>32.0</td></tr><tr><td>BN-VAE 
(0.6)*</td><td>326.7</td><td>6.2</td><td>5.6</td><td>32.0</td><td>356.5</td><td>6.5</td><td>5.4</td><td>32.0</td></tr><tr><td>BN-VAE (0.7)*</td><td>327.4</td><td>8.8</td><td>7.4</td><td>32.0</td><td>355.9</td><td>9.1</td><td>7.4</td><td>32.0</td></tr><tr><td>Pretrained encoder</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>+cyclic*</td><td>333.1</td><td>25.8</td><td>9.1</td><td>32.0</td><td>361.5</td><td>20.5</td><td>9.3</td><td>32.0</td></tr><tr><td>+FB (2)*</td><td>327.2</td><td>4.3</td><td>3.8</td><td>32.0</td><td>356.6</td><td>4.6</td><td>4.2</td><td>32.0</td></tr><tr><td>+FB (3)*</td><td>327.1</td><td>4.5</td><td>3.9</td><td>32.0</td><td>356.3</td><td>5.8</td><td>5.2</td><td>32.0</td></tr><tr><td>+FB (4)*</td><td>326.2</td><td>8.1</td><td>6.8</td><td>32.0</td><td>356.0</td><td>7.6</td><td>6.6</td><td>32.0</td></tr><tr><td>+FB (5)*</td><td>326.6</td><td>8.9</td><td>7.3</td><td>32.0</td><td>356.5</td><td>9.0</td><td>7.4</td><td>32.0</td></tr><tr><td>+FB (6)*</td><td>326.6</td><td>10.8</td><td>8.1</td><td>32.0</td><td>356.5</td><td>12.0</td><td>8.6</td><td>32.0</td></tr><tr><td>+FB (7)*</td><td>326.6</td><td>12.1</td><td>8.5</td><td>32.0</td><td>356.8</td><td>13.4</td><td>8.9</td><td>32.0</td></tr><tr><td>+FB (8)*</td><td>326.7</td><td>13.6</td><td>8.9</td><td>32.0</td><td>357.5</td><td>15.8</td><td>9.2</td><td>32.0</td></tr><tr><td>+δ-VAE (0.15)*</td><td>331.0</td><td>5.6</td><td>1.1</td><td>11.2</td><td>359.4</td><td>5.2</td><td>0.5</td><td>5.9</td></tr><tr><td>vMF-VAE (13)*</td><td>328.4</td><td>2.0</td><td>-</td><td>32.0</td><td>357.0</td><td>2.0</td><td>-</td><td>32.0</td></tr><tr><td>+BN-VAE (0.6)*</td><td>326.7</td><td>6.4</td><td>5.8</td><td>32.0</td><td>355.5</td><td>6.6</td><td>5.9</td><td>32.0</td></tr><tr><td>+BN-VAE (0.7)*</td><td>326.5</td><td>9.1</td><td>7.6</td><td>32.0</td><td>355.7</td><td>9.1</td><td>7.5</td><td>32.0</td></tr></table>
Table 8: Full results on the Yahoo and Yelp datasets. We report mean values across 5 different random runs. * indicates that the results are from our experiments, while the others are from previous reports.
Table 9: Sampled generated responses. Only the last sentence in the context is shown here.
<table><tr><td colspan="3">Topic: ETHICS IN GOVERNMENT</td></tr><tr><td colspan="3">Context: have trouble drawing lines as to what's illegal and what's not</td></tr><tr><td colspan="3">Target (statement): well i mean the other problem is that they'are always up for</td></tr><tr><td>CVAE1. yeah2. yeah3. yeah</td><td>CVAE (BOW)1. yeah2. oh yeah, they're not3. no it's not too bad</td><td>BN-CVAE1. it's not a country2. it is the same thing that'swhat i think is about the stateis a state3. yeah it's</td></tr><tr><td colspan="3">Topic: VACATION SPOTS</td></tr><tr><td colspan="3">Context: well i 'll talk to you later</td></tr><tr><td colspan="3">Target (conventional-closing): okay now do you push the buttons now</td></tr><tr><td>CVAE1. okay bye - bye2. bye3. okay bye - bye</td><td>CVAE (BOW)1. okay so we'll go ahead and start2. so i guess it depends on how muchyou are you3. it's</td><td>BN-CVAE1. okay bye - bye2. nice talking to you too3. all right take care bye - bye</td></tr><tr><td colspan="3">Topic: RECYCLING</td></tr><tr><td colspan="3">Context: are they doing a lot of recycling out in georgia</td></tr><tr><td colspan="3">Target (statement-non-opinion): well at my workplace they are we have places foraluminum cans and we have everybody's been unk a separate trash can for</td></tr><tr><td>CVAE1. yeah2. yeah3. yeah</td><td>CVAE (BOW)1. well that's good for a while i'll tell youthat you're not doing anything at ti andthen you're in a small town2. oh i know i've got a lot of trees andtrees and stuff and3. yeah it's like you know people thatwant to be unk and they're not goingto bother you to make a mess</td><td>BN-CVAE1. well we do recyclenewspapers2. yes i do too3. well we're at a point wherewe're going to be a landfillspace</td></tr><tr><td colspan="3">Topic: UNIVERSAL HEALTH INS</td></tr><tr><td colspan="3">Context: some of the good obviously that nobody has to worry about health care</td></tr><tr><td colspan="3">Target (statement-non-opinion): and i guess i'll have to help with grandchildrenone of these days i hope</td></tr><tr><td>CVAE1. um - hum2. uh - huh3. uh - huh</td><td>CVAE (BOW)1. okay well see we don't have anychoice of any of those2.um - hum3. yeah</td><td>BN-CVAE1. well i hope that we shouldhave a balanced budget2. uh - huh3. well that's a good idea</td></tr></table>
abatchnormalizedinferencenetworkkeepstheklvanishingaway/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:11ee629b19c6ca2946b2edc533297c77bcab65429ddcc565aa4347ee53e95b60
size 1106309
abatchnormalizedinferencenetworkkeepstheklvanishingaway/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d5e3ce749a2b08e329941a89e3a558e656e942afcb73df0d2e93a1dd17d3a527
size 598827
acallformorerigorinunsupervisedcrosslinguallearning/684851d0-c13c-45a1-a7cd-979a3fa7c5e1_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dba97afa310d6ab81aada304ecff0e3c9e484dac97cf6cf62895e387a16ad0f5
size 90862
acallformorerigorinunsupervisedcrosslinguallearning/684851d0-c13c-45a1-a7cd-979a3fa7c5e1_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:51bc8014fd41b057f1ac441bef74db70d7be655781668562a277358000f3f178
size 117377
acallformorerigorinunsupervisedcrosslinguallearning/684851d0-c13c-45a1-a7cd-979a3fa7c5e1_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5cf46d5cebfea2d5cd0cd8f919805b2971481675bd809873057dc707ada9f695
size 325517
acallformorerigorinunsupervisedcrosslinguallearning/full.md
ADDED
@@ -0,0 +1,306 @@
# A Call for More Rigor in Unsupervised Cross-lingual Learning
Mikel Artetxe†*, Sebastian Ruder‡*, Dani Yogatama‡, Gorka Labaka†, Eneko Agirre†

†HiTZ Center, University of the Basque Country (UPV/EHU)

‡DeepMind

{mikel.artetxe,gorka.labaka,e.agirre}@ehu.eus

{ruder,dyogatama}@google.com
# Abstract
We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.
# 1 Introduction
The study of the connection among human languages has contributed to major discoveries including the evolution of languages, the reconstruction of proto-languages, and an understanding of language universals (Eco and Fentress, 1995). In natural language processing, the main promise of multilingual learning is to bridge the digital language divide, to enable access to information and technology for the world's 6,900 languages (Ruder et al., 2019). For the purpose of this paper, we define "multilingual learning" as learning a common model for two or more languages from raw text, without any downstream task labels. Common use cases include translation as well as pretraining multilingual representations. We will use the term interchangeably with "cross-lingual learning".
Recent work in this direction has increasingly focused on purely unsupervised cross-lingual learning (UCL)—i.e., cross-lingual learning without any parallel signal across the languages. We provide an overview in §2. Such work has been motivated by the apparent dearth of parallel data for most of the world's languages. In particular, previous work has noted that "data encoding cross-lingual equivalence is often expensive to obtain" (Zhang et al., 2017a) whereas "monolingual data is much easier to find" (Lample et al., 2018a). Overall, it has been argued that unsupervised cross-lingual learning "opens up opportunities for the processing of extremely low-resource languages and domains that lack parallel data completely" (Zhang et al., 2017a).
We challenge this narrative and argue that the scenario of no parallel data and sufficient monolingual data is unrealistic and not reflected in the real world (§3.1). Nevertheless, UCL is an important research direction and we advocate for its study based on an inherent scientific interest (to better understand and make progress on general language understanding), usefulness as a lab setting, and simplicity (§3.2).
Unsupervised cross-lingual learning permits no supervisory signal by definition. However, previous work implicitly includes monolingual and cross-lingual signals that constitute a departure from the pure setting. We review existing training signals as well as other signals that may be of interest for future study (§4). We then discuss methodological issues in UCL (e.g., validation, hyperparameter tuning) and propose best evaluation practices (§5). Finally, we provide a unified outlook of established research areas (cross-lingual word embeddings, deep multilingual models and unsupervised machine translation) in UCL (§6), and conclude with a summary of our recommendations (§7).
# 2 Background
In this section, we briefly review existing work on UCL, covering cross-lingual word embeddings (§2.1), deep multilingual pre-training (§2.2), and unsupervised machine translation (§2.3).
# 2.1 Cross-lingual word embeddings
Cross-lingual word embedding methods traditionally relied on parallel corpora (Gouws et al., 2015; Luong et al., 2015). Nonetheless, the amount of supervision required was greatly reduced via cross-lingual word embedding mappings, which work by separately learning monolingual word embeddings in each language and mapping them into a shared space through a linear transformation. Early work required a bilingual dictionary to learn such a transformation (Mikolov et al., 2013a; Faruqui and Dyer, 2014). This requirement was later reduced with self-learning (Artetxe et al., 2017), and ultimately removed via unsupervised initialization heuristics (Artetxe et al., 2018a; Hoshen and Wolf, 2018) and adversarial learning (Zhang et al., 2017a; Conneau et al., 2018a). Finally, several recent methods have formulated cross-lingual embedding alignment as an optimal transport problem (Zhang et al., 2017b; Grave et al., 2019; Alvarez-Melis and Jaakkola, 2018).
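For concreteness, the supervised backbone that these mapping methods build on can be written in a few lines: the orthogonal Procrustes problem, solved here for a toy dictionary. The names and data are illustrative, not any cited system's code.

```python
import numpy as np

def procrustes(X, Y):
    """Orthogonal W minimizing ||XW - Y||_F, where rows of X and Y are
    the embeddings of translation pairs (source, target)."""
    U, _, Vt = np.linalg.svd(X.T @ Y)
    return U @ Vt

rng = np.random.default_rng(0)
W_true, _ = np.linalg.qr(rng.normal(size=(300, 300)))  # hidden "true" rotation
X = rng.normal(size=(25, 300))                         # e.g., 25 seed word pairs
Y = X @ W_true                                         # target-side embeddings
W = procrustes(X, Y)
print(np.allclose(X @ W, Y, atol=1e-6))                # True
```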
# 2.2 Deep multilingual pretraining
Following the success in learning shallow word embeddings (Mikolov et al., 2013b; Pennington et al., 2014), there has been an increasing interest in learning contextual word representations (Dai and Le, 2015; Peters et al., 2018; Howard and Ruder, 2018). Recent research has been dominated by BERT (Devlin et al., 2019), which uses a bidirectional transformer encoder trained on masked language modeling and next sentence prediction, which led to impressive gains on various downstream tasks.
While the above approaches are limited to a single language, a multilingual extension of BERT (mBERT) has been shown to also be effective at learning cross-lingual representations in an unsupervised way. The main idea is to combine monolingual corpora in different languages, upsampling those with less data, and training a regular BERT model on the combined data. Conneau and Lample (2019) follow a similar approach but perform a more thorough evaluation and report substantially stronger results, which were further scaled up by Conneau et al. (2019). Several recent studies (Wu and Dredze, 2019; Pires et al., 2019; Artetxe et al., 2020b; Wu et al., 2019) analyze mBERT to get a better understanding of its capabilities.
# 2.3 Unsupervised machine translation
Early attempts to build machine translation systems using monolingual data alone go back to statistical decipherment (Ravi and Knight, 2011; Dou and Knight, 2012, 2013). However, this approach was only shown to work in limited settings, and the first convincing results on standard benchmarks were achieved by Artetxe et al. (2018c) and Lample et al. (2018a) on unsupervised Neural Machine Translation (NMT). Both approaches rely on cross-lingual word embeddings to initialize a shared encoder, and train it in conjunction with the decoder using a combination of denoising autoencoding, backtranslation, and optionally adversarial learning.
Subsequent work adapted these principles to unsupervised phrase-based Statistical Machine Translation (SMT), obtaining large improvements over the original NMT-based systems (Lample et al., 2018b; Artetxe et al., 2018b). This alternative approach uses cross-lingual $n$ -gram embeddings to build an initial phrase table, which is combined with an $n$ -gram language model and a distortion model, and further refined through iterative backtranslation. There have been several follow-up attempts to combine NMT and SMT based approaches (Marie and Fujita, 2018; Ren et al., 2019; Artetxe et al., 2019b). More recently, Conneau and Lample (2019), Song et al. (2019) and Liu et al. (2020) obtain strong results using deep multilingual pretraining rather than cross-lingual word embeddings to initialize unsupervised NMT systems.
# 3 Motivating fully unsupervised learning
In this section, we challenge the narrative of motivating UCL based on a lack of parallel resources. We argue that the strict unsupervised scenario cannot be motivated from an immediate practical perspective, and elucidate what we believe should be the true goals of this research direction.
# 3.1 How practical is the strict unsupervised scenario?
Monolingual resources subsume parallel resources. For instance, each side of a parallel corpus effectively serves as a monolingual corpus. From this argument, it follows that monolingual data is cheaper to obtain than parallel data, so unsupervised cross-lingual learning should in principle be more generally applicable than supervised learning.
However, we argue that the common claim that the requirement for parallel data "may not be met for many language pairs in the real world" (Xu et al., 2018) is largely inaccurate. For instance, the JW300 parallel corpus covers 343 languages with around 100,000 parallel sentences per language pair on average (Agić and Vulić, 2019), and the multilingual Bible corpus collected by Mayer and Cysouw (2014) covers 837 language varieties (each with a unique ISO 639-3 code). Moreover, the PanLex project aims to collect multilingual lexica for all human languages in the world, and already covers 6,854 language varieties with at least 20 lexemes, 2,364 with at least 200 lexemes, and 369 with at least 2,000 lexemes (Kamholz et al., 2014). While 20 or 200 lexemes might seem insufficient, weakly supervised cross-lingual word embedding methods already proved effective with as little as 25 word pairs (Artetxe et al., 2017). More recent methods have focused on completely removing this weak supervision (Conneau et al., 2018a; Artetxe et al., 2018a), which can hardly be justified from a practical perspective given the existence of such resources and additional training signals stemming from a (partially) shared script (§4.2). Finally, given the availability of sufficient monolingual data, noisy parallel data can often be obtained by mining bitext (Schwenk et al., 2019a,b).
In addition, large monolingual data is difficult to obtain for low-resource languages. For instance, recent work on cross-lingual word embeddings has mostly used Wikipedia as its source for monolingual corpora (Gouws et al., 2015; Vulić and Korhonen, 2016; Conneau et al., 2018a). However, as of November 2019, Wikipedia exists in only 307 languages, of which nearly half have less than 10,000 articles. While one could hope to overcome this by taking the entire web as a corpus, as facilitated by Common Crawl and similar initiatives, this is not always feasible for low-resource languages. First, the presence of less resourced languages on the web is very limited, with only a few hundred languages recognized as being used in websites. This situation is further complicated by the limited coverage of existing tools such as language detectors (Buck et al., 2014; Grave et al., 2018), which only cover a few hundred languages. Alternatively, speech could also serve as a source of monolingual data (e.g., by recording public radio stations). However, this is an unexplored direction within UCL, and collecting, processing and effectively capitalizing on speech data is far from trivial, particularly for low-resource languages.
All in all, we conclude that the alleged scenario involving no parallel data and sufficient monolingual data is not met in the real world in the terms explored by recent UCL research. Needless to say, effectively exploiting unlabeled data is important in any low-resource setting. However, refusing to use an informative training signal—which parallel data is—when it does indeed exist, cannot be justified from a practical perspective if one's goal is to build the strongest possible model. For this reason, we believe that semi-supervised learning is a more suitable paradigm for truly low-resource languages, and UCL should not be motivated from an immediate practical perspective.
# 3.2 A scientific motivation
Despite not being an entirely realistic setup, we believe that UCL is an important research direction for the reasons we discuss below.
Inherent scientific interest. The extent to which two languages can be aligned based on independent samples—without any cross-lingual signal—is an open and scientifically relevant problem per se. In fact, it is not entirely obvious that UCL should be possible at all, as humans would certainly struggle to align two unknown languages without any grounding. Exploring the limits of UCL could help to understand the limits of the principles that the corresponding methods are based on, such as the distributional hypothesis. Moreover, this research line could bring new insights into the properties and inner workings of both language acquisition and the underlying computational models that ultimately make UCL possible. Finally, such methods may be useful in areas where supervision is impossible to obtain, such as when dealing with unknown or even non-human languages.
Useful as a lab setting. The strict unsupervised scenario, although not practical, allows us to isolate and better study the use of monolingual corpora for cross-lingual learning. We believe lessons learned in this setting can be useful in the more practical semi-supervised scenario. In a similar vein, monolingual language models, although hardly useful on their own, have contributed to large improvements in other tasks. From a research methodology perspective, unsupervised systems also set a competitive baseline, which any semi-supervised method should improve upon.
Simplicity as a value. As we discussed previously, refusing to use an informative training signal when it does exist can hardly be beneficial, so we should not expect UCL to perform better than semi-supervised learning. However, simplicity is a value in its own right. Unsupervised approaches could be preferable to their semi-supervised counterparts if the performance gap between them is small enough. For instance, unsupervised cross-lingual embedding methods have been reported to be competitive with their semi-supervised counterparts in certain settings (Glavaš et al., 2019), while being easier to use in the sense that they do not require a bilingual dictionary.
# 4 What does unsupervised mean?
In its most general sense, unsupervised cross-lingual learning can be seen as referring to any method relying exclusively on monolingual text data in two or more languages. However, there are different training signals—stemming from common assumptions and varying amounts of linguistic knowledge—that one can potentially exploit under such a regime. This has led to an inconsistent use of this term in the literature. In this section, we categorize different training signals available both from a monolingual and a cross-lingual perspective and discuss additional scenarios enabled by multiple languages.
# 4.1 Monolingual training signals
From a computational perspective, text is modeled as a sequence of discrete symbols. In UCL, the training data consists of a set of such sequences in each of the languages. In principle, without any knowledge about the languages, one would have no prior information on the nature of such sequences or the possible relations between them. In practice, however, sets of sequences are assumed to be independent, and existing work differs in whether it assumes document-level sequences (Conneau and Lample, 2019) or sentence-level sequences (Artetxe et al., 2018c; Lample et al., 2018a).
Nature of atomic symbols. A more important consideration is the nature of the atomic symbols in such sequences. To the best of our knowledge, previous work assumes some form of word segmentation or tokenization (e.g., splitting by whitespaces or punctuation marks). Early work on cross-lingual word embeddings considered such tokens as atomic units. However, more recent work (Hoshen and Wolf, 2018; Glavaš et al., 2019) has primarily used fastText embeddings (Bojanowski et al., 2017) which incorporate subword information into the embedding learning, although the vocabulary is still defined at the token level. In addition, there have also been approaches that incorporate character-level information into the alignment learning itself (Heyman et al., 2017; Riley and Gildea, 2018). In contrast, most work on contextual word embeddings and unsupervised machine translation operates with a subword vocabulary (Devlin et al., 2019; Conneau and Lample, 2019).
While the above distinction might seem irrelevant from a practical perspective, we think that it is important from a more fundamental point of view (e.g. in relation to the distributional hypothesis as discussed in §3.2). Moreover, some of the underlying assumptions might not generalize to different writing systems (e.g. logographic instead of alphabetic). For instance, subword tokenization has been shown to perform poorly on reduplicated words (Vania and Lopez, 2017). In relation to that, one could also consider the text in each language as a stream of discrete character-like symbols without any notion of tokenization. Such a tabula rasa approach is potentially applicable to any arbitrary language, even when its writing system is not known, but has so far only been explored for a limited number of languages in a monolingual setting (Hahn and Baroni, 2019).
Linguistic information. Finally, one can exploit additional linguistic knowledge through linguistic analysis such as lemmatization, part-of-speech tagging, or syntactic parsing. For instance, before the advent of unsupervised NMT, statistical decipherment was already shown to benefit from incorporating syntactic dependency relations (Dou and Knight, 2013). For other tasks such as unsupervised POS tagging (Snyder et al., 2008), monolingual tag dictionaries have been used. While such approaches could still be considered unsupervised from a cross-lingual perspective, we argue that the interest of this research direction is greatly limited by two factors: (i) from a theoretical perspective, it assumes some fundamental knowledge that is not directly inferred from the raw monolingual corpora; and (ii) from a more practical perspective, it is not reasonable to assume that such resources are available in the less-resourced settings where this research direction has more potential for impact.

# 4.2 Cross-lingual training signals
Pure UCL should not use any cross-lingual signal by definition. When we view text as a sequence of discrete atomic symbols (either characters or tokens), a strict interpretation of this principle would consider the set of atomic symbols in different languages to be disjoint, without prior knowledge of the relationship between them.
Needless to say, any form of learning requires making assumptions, as one needs some criterion to prefer one mapping over another. In the case of UCL, such assumptions stem from the structural similarity across languages (e.g. semantically equivalent words in different languages are assumed to occur in similar contexts). In practice, these assumptions weaken as the distributions of the datasets diverge, and some UCL models have been reported to break under a domain shift (Søgaard et al., 2018; Guzmán et al., 2019; Marchisio et al., 2020). Similarly, approaches that leverage linguistic features such as syntactic dependencies may assume that these are similar across languages.

In addition, one can also assume that the sets of symbols that are used to represent different languages have some commonalities. This departs from the strict definition of UCL above, establishing some prior connections between the sets of symbols in different languages. Such an assumption is reasonable from a practical perspective, as there are a few scripts (e.g. Latin, Arabic or Cyrillic) that cover a large fraction of languages. Moreover, even when two languages use different writing systems or scripts, there are often certain elements that are still shared (e.g. Arabic numerals, named entities written in a foreign script, URLs, certain punctuation marks, etc.). In relation to that, several models have relied on identically spelled words (Artetxe et al., 2017; Smith et al., 2017; Søgaard et al., 2018) or string-level similarity across languages (Riley and Gildea, 2018; Artetxe et al., 2019b) as training signals. Other methods use a joint subword vocabulary for all languages, indirectly exploiting the commonalities in their writing systems (Lample et al., 2018b; Conneau and Lample, 2019).
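
To illustrate how cheap this signal is to exploit, the following sketch (our own toy example, with illustrative vocabularies) builds a seed dictionary from nothing but the intersection of two monolingual vocabularies, in the spirit of Artetxe et al. (2017) and Smith et al. (2017).

```python
# Identically spelled words as a "free" cross-lingual training signal.
vocab_en = {"radio", "hotel", "dog", "computer", "2020"}
vocab_de = {"radio", "hotel", "hund", "computer", "2020"}

seed_dictionary = sorted((w, w) for w in vocab_en & vocab_de)
print(seed_dictionary)
# [('2020', '2020'), ('computer', 'computer'), ('hotel', 'hotel'), ('radio', 'radio')]
```

Note that this only works to the extent that the two languages share a script, which is precisely why such connections should be treated as a cross-lingual signal rather than a purely unsupervised one.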
However, past work greatly differs on the nature and relevance that is attributed to such a training signal. The reliance on identically spelled words has been considered a weak form of supervision in the cross-lingual word embedding literature (Søgaard et al., 2018; Ruder et al., 2018), and significant effort has been put into developing strictly unsupervised methods that do not rely on such a signal (Conneau et al., 2018a). In contrast, the unsupervised machine translation literature has not paid much attention to this factor, and has often relied on identical words (Artetxe et al., 2018c), string-level similarity (Artetxe et al., 2019b), or a joint subword vocabulary (Lample et al., 2018b; Conneau and Lample, 2019) under the unsupervised umbrella. The same is true for unsupervised deep multilingual pretraining, where a shared subword vocabulary has been a common component (Pires et al., 2019; Conneau and Lample, 2019), although recent work shows that it is not important to share vocabulary across languages (Artetxe et al., 2020b; Wu et al., 2019).

Our position is that making assumptions about linguistic universals is acceptable and ultimately necessary for UCL. However, we believe that any connection stemming from a (partly) shared writing system belongs to a different category, and should be considered a separate cross-lingual signal. Our rationale is that a given writing system pertains to a specific way of encoding a language, but cannot be considered part of the language itself.[6]

# 4.3 Multilinguality
While most work in unsupervised cross-lingual learning considers two languages at a time, there have recently been some attempts to extend these methods to multiple languages (Duong et al., 2017; Chen and Cardie, 2018; Heyman et al., 2019), and most work on unsupervised cross-lingual pretraining is multilingual (Pires et al., 2019; Conneau and Lample, 2019). When considering parallel data across a subset of the language pairs, multilinguality gives rise to additional scenarios. For instance, the scenario where two languages have no parallel data between each other but are well connected through a third (pivot) language has been explored by several authors in the context of machine translation (Cheng et al., 2016; Chen et al., 2017). However, given that the languages in question are still indirectly connected through parallel data, this scenario does not fall within the unsupervised category, and is instead commonly known as zero-resource machine translation.

<table><tr><td>Monolingual signal</td><td>Cross-lingual signal</td></tr><tr><td>Sequence of symbols</td><td>Shared writing system</td></tr><tr><td>Sets of sentences/documents</td><td>Identical words</td></tr><tr><td>Tokens/subwords</td><td>String similarity</td></tr><tr><td>Linguistic analysis</td><td></td></tr></table>

Table 1: Different types of monolingual and cross-lingual signals that have been used for unsupervised cross-lingual learning, ordered roughly from least to most linguistic knowledge (top to bottom).
An alternative scenario explored in the contemporaneous work of Liu et al. (2020) is where a set of languages are connected through parallel data, and there is a separate language with monolingual data only. We argue that, when it comes to the isolated language, such a scenario should still be considered as UCL, as it does not rely on any parallel data for that particular language nor does it assume any previous knowledge of it. This scenario is easy to justify from a practical perspective given the abundance of parallel data for high-resource languages, and can also be interesting from a more theoretical point of view. This way, rather than considering two unknown languages, this alternative scenario would assume some knowledge of how one particular language is connected to other languages, and attempt to align it to a separate unknown language.
# 4.4 Discussion
As discussed throughout the section, there are different training signals that we can exploit depending on the available resources of the languages involved and the assumptions made regarding their writing system, which are summarized in Table 1. Many of these signals are not specific to work on UCL but have been observed in the past in allegedly language-independent NLP approaches, as discussed by Bender (2011). Others, such as a reliance on subwords or shared symbols, are more recent phenomena.
While we do not aim to open a terminological debate on what UCL encompasses, we advocate that future work be more aware and explicit about the monolingual and cross-lingual signals it employs, the assumptions it makes (e.g. regarding the writing system), and the extent to which these generalize to other languages.

In particular, we argue that it is critical to consider the assumptions made by different methods when comparing their results. Otherwise, the blind chase for state-of-the-art performance may benefit models making stronger assumptions and exploiting all available training signals, which could ultimately conflict with the eminently scientific motivation of this research area (see §3.2).

# 5 Methodological issues
In this section, we describe methodological issues that are commonly encountered when training and evaluating unsupervised cross-lingual models and propose measures to ameliorate them.
# 5.1 Validation and hyperparameter tuning
In conventional supervised or semi-supervised settings, we use a separate validation set for development and hyperparameter tuning. However, this becomes tricky in unsupervised cross-lingual learning, where we ideally should not use any parallel data other than for testing purposes.
Previous work has not paid much attention to this aspect, and different methods are evaluated with different validation schemes. For instance, Artetxe et al. (2018b,c) use a separate language pair with a parallel validation set to make all development and hyperparameter decisions. They test their final system on other language pairs without any parallel data. This approach has the advantage of being strictly unsupervised with respect to the test language pairs, but the optimal hyperparameter choice might not necessarily transfer well across languages. In contrast, Conneau et al. (2018a) and Lample et al. (2018a) propose an unsupervised validation criterion that is defined over monolingual data and shown to correlate well with test performance. This enables systematic tuning on the language pair of interest, but still requires parallel data to guide the development of the unsupervised validation criterion itself. A parallel validation set has also been used for systematic tuning in the context of unsupervised machine translation (Marie and Fujita, 2018; Marie et al., 2019; Stojanovski et al., 2019). While this is motivated as a way to abstract away the issue of unsupervised tuning—which the authors consider to be an open problem—we argue that any systematic use of parallel data should not be considered UCL. Finally, previous work often does not report the validation scheme used. In particular, unsupervised cross-lingual word embedding methods have almost exclusively been evaluated on bilingual lexicons that do not have a validation set, and presumably use the test set to guide development to some extent.
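
For concreteness, the following sketch shows one possible unsupervised validation criterion in the spirit of Conneau et al. (2018a), who average the similarities of the nearest-neighbour translations of the most frequent source words (their actual criterion uses CSLS rather than plain cosine). All names are illustrative.

```python
import numpy as np

def unsupervised_criterion(X, Y, k=10000):
    """Mean similarity of the nearest-neighbour translations of the k most
    frequent source words. X and Y are the mapped source and target embedding
    matrices, rows sorted by frequency and L2-normalised."""
    sims = X[:k] @ Y.T                     # cosine similarities
    return float(sims.max(axis=1).mean())  # higher = better-aligned mapping
```

A mapping (or training checkpoint) is then selected by maximizing this quantity over monolingual data alone.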
Our position is that completely blind development without any parallel data is unrealistic; some cross-lingual signal to guide development is always needed. However, this factor should be carefully controlled and reported with the necessary rigor as part of the experimental design. We advocate for using one language pair for development and evaluating on others when possible. If parallel data in the target language pair is used, the test set should be kept blind to avoid overfitting, and a separate validation set should be used. In any case, we argue that the use of parallel data in the target language pair should be minimized if not completely avoided, and it should under no circumstances be used for extensive tuning. Instead, we recommend using unsupervised validation criteria for systematic tuning in the target language.

# 5.2 Evaluation practices
We argue that there are also several issues with common evaluation practices in UCL.
Evaluation on favorable conditions. Most work on UCL has focused on relatively close languages with large amounts of high-quality parallel corpora from similar domains. Only recently have approaches considered more diverse languages as well as language pairs that do not involve English (Glavaš et al., 2019; Vulić et al., 2019), and some existing methods have been shown to completely break in less favorable conditions (Guzmán et al., 2019; Marchisio et al., 2020). In addition, most approaches have focused on learning from similar domains, often involving Wikipedia and news corpora, which are unlikely to be available for low-resource languages. We believe that future work should pay more attention to the effect of the typology and linguistic distance of the languages involved, as well as the size, noise, and domain similarity of the training data used.
Over-reliance on translation tasks. Most work on UCL focuses on translation tasks, either at the word level (where the problem is known as bilingual lexicon induction) or at the sentence level (where the problem is known as unsupervised machine translation). While translation can be seen as the ultimate application of cross-lingual learning and has a strong practical interest on its own, it only evaluates a particular facet of a model's cross-lingual generalization ability. In relation to that, Glavaš et al. (2019) showed that bilingual lexicon induction performance does not always correlate well with downstream tasks. In particular, they observe that some mapping methods that are specifically designed for bilingual lexicon induction perform poorly on other tasks, showing the risk of relying excessively on translation benchmarks for evaluating cross-lingual models.
Moreover, existing translation benchmarks have been shown to have several issues on their own. In particular, bilingual lexicon induction datasets have been reported to misrepresent morphological variations, overly focus on named entities and frequent words, and have pervasive gaps in the gold-standard targets (Czarnowska et al., 2019; Kementchedjhieva et al., 2019). More generally, most of these datasets are limited to relatively close languages and comparable corpora.
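
The snippet below illustrates the gap problem with a deliberately tiny example (the data is fabricated for illustration): a reasonable translation that is missing from the gold standard is scored as an error under precision@1.

```python
# Precision@1 for bilingual lexicon induction with an incomplete gold standard.
gold = {"house": {"haus"}, "small": {"klein"}}      # near-synonyms missing
predictions = {"house": "haus", "small": "winzig"}  # "winzig" is defensible

p_at_1 = sum(predictions[w] in gold[w] for w in gold) / len(gold)
print(p_at_1)  # 0.5 -- the gap in the gold targets halves the score
```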
Lack of an established cross-lingual benchmark. At the same time, there is no de facto standard benchmark to evaluate cross-lingual models beyond translation. Existing approaches have been evaluated in a wide variety of tasks including dependency parsing (Schuster et al., 2019), named entity recognition (Rahimi et al., 2019), sentiment analysis (Barnes et al., 2018), natural language inference (Conneau et al., 2018b), and document classification (Schwenk and Li, 2018). XNLI (Conneau et al., 2018b) and MLDoc (Schwenk and Li, 2018) are common choices, but they have their own problems: MultiNLI, the dataset from which XNLI was derived, has been shown to contain superficial cues that can be exploited (Gururangan et al., 2018), while MLDoc can be solved by keyword matching (Artetxe et al., 2020b). There are non-English counterparts for more challenging tasks such as question answering (Cui et al., 2019; Hsu et al., 2019), but these only exist for a handful of languages. More recent datasets such as XQuAD (Artetxe et al., 2020b), MLQA (Lewis et al., 2019) and TyDi QA (Clark et al., 2020) cover a wider set of languages, but a comprehensive benchmark that evaluates multilingual representations on a diverse set of tasks—in the style of GLUE (Wang et al., 2018)—and languages has been missing until very recently. The contemporaneous XTREME (Hu et al., 2020) and XGLUE (Liang et al., 2020) benchmarks try to close this gap, but they are still restricted to languages where existing labelled data is available. Finally, an additional issue is that a large part of these benchmarks were created through translation, which was recently shown to introduce artifacts (Artetxe et al., 2020a).

<table><tr><td>Methodological issues</td><td>Examples</td></tr><tr><td>Validation and hyperparameter tuning</td><td>Systematic tuning with parallel data or on test data</td></tr><tr><td>Evaluation on favorable conditions</td><td>Typologically similar languages; always including English; training on the same domain</td></tr><tr><td>Over-reliance on translation tasks</td><td>Overfitting to bilingual lexicon induction; known issues with existing datasets</td></tr><tr><td>Lack of an established benchmark</td><td>Evaluation on many different tasks; problems with common tasks (MLDoc and XNLI)</td></tr></table>

Table 2: Methodological issues pertaining to validation and hyperparameter tuning and evaluation practices in current work on unsupervised cross-lingual learning.
We present a summary of the methodological issues discussed in Table 2.
# 6 Bridging the gap between unsupervised cross-lingual learning flavors
The three categories of UCL (§2) have so far been treated as separate research topics by the community. In particular, cross-lingual word embeddings have a long history (Ruder et al., 2019), while deep multilingual pretraining has emerged as a separate line of research with its own best practices and evaluation standards. At the same time, unsupervised machine translation has been considered a separate problem in its own right, where cross-lingual word embeddings and deep multilingual pretraining have just served as initialization techniques.

While each of these families has its own defining features, we believe that they share a strong connection that should be considered from a more holistic perspective. In particular, both cross-lingual word embeddings and deep multilingual pretraining share the goal of learning (sub)word representations, and essentially differ on whether such representations are static or context-dependent. Similarly, in addition to being a downstream application of the former, unsupervised machine translation can also be useful to develop other multilingual applications or learn better cross-lingual representations. This has previously been shown for supervised machine translation (McCann et al., 2017; Siddhant et al., 2019) and recently for bilingual lexicon induction (Artetxe et al., 2019a). In light of these connections, we call for a more holistic view of UCL, both from an experimental and theoretical perspective.
Evaluation. Most work on cross-lingual word embeddings focuses on bilingual lexicon induction. In contrast, deep multilingual pretraining has not been tested on this task, and is instead typically evaluated on zero-shot cross-lingual transfer. We think it is important to evaluate both families—cross-lingual word embeddings and deep multilingual representations—in the same conditions to better understand their strengths and weaknesses. In that regard, Artetxe et al. (2020b) recently showed that deep pretrained models are much stronger in some downstream tasks, while cross-lingual word embeddings are more efficient and sufficient for simpler tasks. However, this could partly be attributed to a particular integration strategy, and we advocate for using a common evaluation framework in future work to allow a direct comparison between the different families.
Theory. From a more theoretical perspective, it is still not well understood in what ways cross-lingual word embeddings and deep multilingual pretraining differ. While one could expect the latter to learn higher-level multilingual abstractions, recent work suggests that deep multilingual models might mostly be learning a lexical-level alignment (Artetxe et al., 2020b). For that reason, we believe that further research is needed to understand the relation between both families of models.

# 7 Recommendations
To summarize, we make the following practical recommendations for future cross-lingual research:
- Be rigorous when motivating UCL. Do not present it as a practical scenario unless supported by a real use case.
- Be explicit about the monolingual and cross-lingual signals used by your approach and the assumptions it makes, and take them into consideration when comparing different models.

- Report the validation scheme used. Minimize the use of parallel data by preferring an unsupervised validation criterion and/or using only one language pair for development. Always keep the test set blind.

- Pay attention to the conditions in which you evaluate your model. Consider the impact of typology, linguistic distance, and the domain similarity, size and noise of the training data. Be aware of known issues with common benchmarks, and favor evaluation on a diverse set of tasks.
- Keep a holistic view of UCL, including cross-lingual word embeddings, deep multilingual pretraining and unsupervised machine translation. To the extent possible, favor a common evaluation framework for these different families.

# 8 Conclusions
In this position paper, we review the status quo of unsupervised cross-lingual learning—a relatively recent field. UCL is typically motivated by the lack of cross-lingual signal for many of the world's languages, but available resources indicate that a scenario with no parallel data and sufficient monolingual data is not realistic. Instead, we advocate for the importance of UCL for scientific reasons.
We also discuss different monolingual and cross-lingual training signals that have been used in the past, and advocate for carefully reporting them to enable a meaningful comparison across different approaches. In addition, we describe methodological issues related to the unsupervised setting and propose measures to ameliorate them. Finally, we discuss connections between cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation, calling for an evaluation on an equal footing.

We hope that this position paper will serve to strengthen research in UCL, providing a more rigorous look at the motivation, definition, and methodology. In light of the unprecedented growth of our field in recent times, we believe that it is essential to establish a rigorous foundation connecting past and present research, and an evaluation protocol that carefully controls for the use of parallel data and assesses models in diverse, challenging settings.
# Acknowledgments
This research was partially funded by a Facebook Fellowship, the Basque Government excellence research group (IT1343-19), the Spanish MINECO (UnsupMT TIN2017-91692-EXP MCIU/AEI/FEDER, UE) and Project BigKnowledge (Ayudas Fundación BBVA a equipos de investigación científica 2018).

# References
Željko Agić and Ivan Vulić. 2019. JW300: A wide-coverage parallel corpus for low-resource languages. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3204-3210, Florence, Italy. Association for Computational Linguistics.

David Alvarez-Melis and Tommi Jaakkola. 2018. Gromov-Wasserstein alignment of word embedding spaces. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1881-1890, Brussels, Belgium. Association for Computational Linguistics.

Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2017. Learning bilingual word embeddings with (almost) no bilingual data. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 451-462, Vancouver, Canada. Association for Computational Linguistics.
Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2018a. A robust self-learning method for fully unsupervised cross-lingual mappings of word embeddings. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 789-798, Melbourne, Australia. Association for Computational Linguistics.
Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2018b. Unsupervised statistical machine translation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3632-3642, Brussels, Belgium. Association for Computational Linguistics.
Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2019a. Bilingual lexicon induction through unsupervised machine translation. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5002-5007, Florence, Italy. Association for Computational Linguistics.
Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2019b. An effective approach to unsupervised machine translation. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 194-203, Florence, Italy. Association for Computational Linguistics.
Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2020a. Translation artifacts in cross-lingual transfer learning. arXiv preprint arXiv:2004.04721.
Mikel Artetxe, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. 2018c. Unsupervised neural machine translation. In Proceedings of the 6th International Conference on Learning Representations (ICLR 2018).
Mikel Artetxe, Sebastian Ruder, and Dani Yogatama. 2020b. On the Cross-lingual Transferability of Monolingual Representations. In Proceedings of ACL 2020.
Jeremy Barnes, Roman Klinger, and Sabine Schulte im Walde. 2018. Bilingual sentiment embeddings: Joint projection of sentiment across languages. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2483-2493, Melbourne, Australia. Association for Computational Linguistics.
Emily M. Bender. 2011. On Achieving and Evaluating Language-Independence in NLP. Linguistic Issues in Language Technology, 6(3):1-26.
Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Association for Computational Linguistics, 5:135-146.
Christian Buck, Kenneth Heafield, and Bas van Ooyen. 2014. N-gram counts and language models from the common crawl. In Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14), pages 3579-3584, Reykjavik, Iceland. European Language Resources Association (ELRA).
Xilun Chen and Claire Cardie. 2018. Unsupervised multilingual word embeddings. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 261-270, Brussels, Belgium. Association for Computational Linguistics.
Yun Chen, Yang Liu, Yong Cheng, and Victor O.K. Li. 2017. A teacher-student framework for zero-resource neural machine translation. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1925-1935, Vancouver, Canada. Association for Computational Linguistics.
Yong Cheng, Yang Liu, Qian Yang, Maosong Sun, and Wei Xu. 2016. Neural machine translation with pivot languages. arXiv preprint arXiv:1611.04928.
Jonathan H. Clark, Eunsol Choi, Michael Collins, Dan Garrette, Tom Kwiatkowski, Vitaly Nikolaev, and Jennimaria Palomaki. 2020. TyDi QA: A benchmark for information-seeking question answering in typologically diverse languages. Transactions of the Association for Computational Linguistics.
Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Unsupervised cross-lingual representation learning at scale. arXiv preprint arXiv:1911.02116.

Alexis Conneau and Guillaume Lample. 2019. Cross-lingual language model pretraining. In Advances in Neural Information Processing Systems 32, pages 7057-7067.

Alexis Conneau, Guillaume Lample, Marc'Aurelio Ranzato, Ludovic Denoyer, and Hervé Jégou. 2018a. Word translation without parallel data. In Proceedings of the 6th International Conference on Learning Representations (ICLR 2018).
Alexis Conneau, Rudy Rinott, Guillaume Lample, Adina Williams, Samuel Bowman, Holger Schwenk, and Veselin Stoyanov. 2018b. XNLI: Evaluating cross-lingual sentence representations. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2475-2485, Brussels, Belgium. Association for Computational Linguistics.
Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Shijin Wang, and Guoping Hu. 2019. Cross-lingual machine reading comprehension. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 1586-1595, Hong Kong, China. Association for Computational Linguistics.
Paula Czarnowska, Sebastian Ruder, Edouard Grave, Ryan Cotterell, and Ann Copestake. 2019. Don't forget the long tail! A comprehensive analysis of morphological generalization in bilingual lexicon induction. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 973-982, Hong Kong, China. Association for Computational Linguistics.
Andrew M. Dai and Quoc V. Le. 2015. Semi-supervised sequence learning. In Advances in Neural Information Processing Systems 28, pages 3079-3087.
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.
Qing Dou and Kevin Knight. 2012. Large scale decipherment for out-of-domain machine translation. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pages 266-275, Jeju Island, Korea. Association for Computational Linguistics.
Qing Dou and Kevin Knight. 2013. Dependency-based decipherment for resource-limited machine translation. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1668-1676, Seattle, Washington, USA. Association for Computational Linguistics.
Long Duong, Hiroshi Kanayama, Tengfei Ma, Steven Bird, and Trevor Cohn. 2017. Multilingual training of crosslingual word embeddings. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 1, Long Papers, pages 894-904, Valencia, Spain. Association for Computational Linguistics.
Umberto Eco and James Fentress. 1995. The search for the perfect language. Blackwell Oxford.
Manaal Faruqui and Chris Dyer. 2014. Improving vector space word representations using multilingual correlation. In Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics, pages 462-471, Gothenburg, Sweden. Association for Computational Linguistics.
Goran Glavaš, Robert Litschko, Sebastian Ruder, and Ivan Vulić. 2019. How to (properly) evaluate cross-lingual word embeddings: On strong baselines, comparative analyses, and some misconceptions. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 710–721, Florence, Italy. Association for Computational Linguistics.
Stephan Gouws, Yoshua Bengio, and Greg Corrado. 2015. BilBOWA: Fast bilingual distributed representations without word alignments. In Proceedings of the 32nd International Conference on Machine Learning, volume 37 of Proceedings of Machine Learning Research, pages 748-756, Lille, France. PMLR.

Edouard Grave, Piotr Bojanowski, Prakhar Gupta, Armand Joulin, and Tomas Mikolov. 2018. Learning word vectors for 157 languages. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Resources Association (ELRA).
Edouard Grave, Armand Joulin, and Quentin Berthet. 2019. Unsupervised alignment of embeddings with Wasserstein Procrustes. In Proceedings of Machine Learning Research, volume 89, pages 1880-1890. PMLR.

Suchin Gururangan, Swabha Swayamdipta, Omer Levy, Roy Schwartz, Samuel Bowman, and Noah A. Smith. 2018. Annotation artifacts in natural language inference data. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 107-112, New Orleans, Louisiana. Association for Computational Linguistics.
Francisco Guzmán, Peng-Jen Chen, Myle Ott, Juan Pino, Guillaume Lample, Philipp Koehn, Vishrav Chaudhary, and Marc'Aurelio Ranzato. 2019. The FLORES evaluation datasets for low-resource machine translation: Nepali-English and Sinhala-English. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 6097-6110, Hong Kong, China. Association for Computational Linguistics.
Michael Hahn and Marco Baroni. 2019. Tabula nearly rasa: Probing the linguistic knowledge of character-level neural language models trained on unsegmented text. Transactions of the Association for Computational Linguistics, 7:467-484.
Geert Heyman, Bregt Verreet, Ivan Vulić, and Marie-Francine Moens. 2019. Learning unsupervised multilingual word embeddings with incremental multilingual hubs. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1890-1902, Minneapolis, Minnesota. Association for Computational Linguistics.

Geert Heyman, Ivan Vulić, and Marie-Francine Moens. 2017. Bilingual lexicon induction by learning to combine word-level and character-level representations. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 1, Long Papers, pages 1085-1095, Valencia, Spain. Association for Computational Linguistics.

Yedid Hoshen and Lior Wolf. 2018. Non-adversarial unsupervised word translation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 469-478, Brussels, Belgium. Association for Computational Linguistics.
Jeremy Howard and Sebastian Ruder. 2018. Universal language model fine-tuning for text classification. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 328-339, Melbourne, Australia. Association for Computational Linguistics.
Tsung-Yuan Hsu, Chi-Liang Liu, and Hung-yi Lee. 2019. Zero-shot reading comprehension by cross-lingual transfer learning with multi-lingual language representation model. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5933-5940, Hong Kong, China. Association for Computational Linguistics.
Junjie Hu, Sebastian Ruder, Aditya Siddhant, Graham Neubig, Orhan Firat, and Melvin Johnson. 2020. XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization. arXiv preprint arXiv:2003.11080.

David Kamholz, Jonathan Pool, and Susan Colowick. 2014. PanLex: Building a resource for pan-lingual lexical translation. In Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14), pages 3145-3150, Reykjavik, Iceland. European Language Resources Association (ELRA).
Yova Kementchedjhieva, Mareike Hartmann, and Anders Søgaard. 2019. Lost in evaluation: Misleading benchmarks for bilingual dictionary induction. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3327-3332, Hong Kong, China. Association for Computational Linguistics.
Guillaume Lample, Alexis Conneau, Ludovic Denoyer, and Marc'Aurelio Ranzato. 2018a. Unsupervised machine translation using monolingual corpora only. In Proceedings of the 6th International Conference on Learning Representations (ICLR 2018).
Guillaume Lample, Myle Ott, Alexis Conneau, Ludovic Denoyer, and Marc'Aurelio Ranzato. 2018b. Phrase-based & neural unsupervised machine translation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 5039-5049, Brussels, Belgium. Association for Computational Linguistics.
Patrick Lewis, Barlas Oğuz, Rudy Rinott, Sebastian Riedel, and Holger Schwenk. 2019. MLQA: Evaluating Cross-lingual Extractive Question Answering. arXiv preprint arXiv:1910.07475.
Yaobo Liang, Nan Duan, Yeyun Gong, Ning Wu, Fenfei Guo, Weizhen Qi, Ming Gong, Linjun Shou, Daxin Jiang, Guihong Cao, Xiaodong Fan, Bruce Zhang, Rahul Agrawal, Edward Cui, Sining Wei, Taroon Bharti, Ying Qiao, Jiun-Hung Chen, Winnie Wu, Shuguang Liu, Fan Yang, Rangan Majumder, and Ming Zhou. 2020. XGLUE: A new benchmark dataset for cross-lingual pre-training, understanding and generation. arXiv preprint arXiv:2004.01401.

Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, and Luke Zettlemoyer. 2020. Multilingual denoising pre-training for neural machine translation. arXiv preprint arXiv:2001.08210.
Thang Luong, Hieu Pham, and Christopher D. Manning. 2015. Bilingual word representations with monolingual quality in mind. In Proceedings of the 1st Workshop on Vector Space Modeling for Natural Language Processing, pages 151-159, Denver, Colorado. Association for Computational Linguistics.
Kelly Marchisio, Kevin Duh, and Philipp Koehn. 2020. When does unsupervised machine translation work? arXiv preprint arXiv:2004.05516.
Benjamin Marie and Atsushi Fujita. 2018. Unsupervised neural machine translation initialized by unsupervised statistical machine translation. arXiv preprint arXiv:1810.12703.
Benjamin Marie, Haipeng Sun, Rui Wang, Kehai Chen, Atsushi Fujita, Masao Utiyama, and Eiichiro Sumita. 2019. NICT's unsupervised neural and statistical machine translation systems for the WMT19 news translation task. In Proceedings of the Fourth Conference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 294-301, Florence, Italy. Association for Computational Linguistics.
Thomas Mayer and Michael Cysouw. 2014. Creating a massively parallel Bible corpus. In Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14), pages 3158-3163, Reykjavik, Iceland. European Language Resources Association (ELRA).
Bryan McCann, James Bradbury, Caiming Xiong, and Richard Socher. 2017. Learned in translation: Contextualized word vectors. In Advances in Neural Information Processing Systems 30, pages 6294-6305.
Tomas Mikolov, Quoc V Le, and Ilya Sutskever. 2013a. Exploiting similarities among languages for machine translation. arXiv preprint arXiv:1309.4168.
Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S. Corrado, and Jeff Dean. 2013b. Distributed representations of words and phrases and their compositionality. In Advances in Neural Information Processing Systems 26, pages 3111-3119.
Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global vectors for word representation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, Doha, Qatar. Association for Computational Linguistics.
Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word representations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 2227-2237, New Orleans, Louisiana. Association for Computational Linguistics.
Telmo Pires, Eva Schlinger, and Dan Garrette. 2019. How multilingual is multilingual BERT? In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4996-5001, Florence, Italy. Association for Computational Linguistics.
Afshin Rahimi, Yuan Li, and Trevor Cohn. 2019. Massively multilingual transfer for NER. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 151-164, Florence, Italy. Association for Computational Linguistics.
Sujith Ravi and Kevin Knight. 2011. Deciphering foreign language. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 12-21, Portland, Oregon, USA. Association for Computational Linguistics.
Shuo Ren, Zhirui Zhang, Shujie Liu, Ming Zhou, and Shuai Ma. 2019. Unsupervised neural machine translation with SMT as posterior regularization. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 241-248.
Parker Riley and Daniel Gildea. 2018. Orthographic features for bilingual lexicon induction. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 390-394, Melbourne, Australia. Association for Computational Linguistics.
Sebastian Ruder, Ryan Cotterell, Yova Kementchedjhieva, and Anders Søgaard. 2018. A discriminative latent-variable model for bilingual lexicon induction. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 458-468, Brussels, Belgium. Association for Computational Linguistics.

Sebastian Ruder, Ivan Vulić, and Anders Søgaard. 2019. A Survey of Cross-lingual Word Embedding Models. Journal of Artificial Intelligence Research, 65:569-631.

Tal Schuster, Ori Ram, Regina Barzilay, and Amir Globerson. 2019. Cross-lingual alignment of contextual word embeddings, with applications to zero-shot dependency parsing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1599–1613, Minneapolis, Minnesota. Association for Computational Linguistics.
Holger Schwenk, Vishrav Chaudhary, Shuo Sun, Hongyu Gong, and Francisco Guzmán. 2019a. WikiMatrix: Mining 135M Parallel Sentences. arXiv preprint arXiv:1907.05791.
Holger Schwenk and Xian Li. 2018. A corpus for multilingual document classification in eight languages. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Resources Association (ELRA).
Holger Schwenk, Guillaume Wenzek, Sergey Edunov, Edouard Grave, and Armand Joulin. 2019b. CCMatrix: Mining Billions of High-Quality Parallel Sentences on the WEB. arXiv preprint arXiv:1911.04944.

Aditya Siddhant, Melvin Johnson, Henry Tsai, Naveen Arivazhagan, Jason Riesa, Ankur Bapna, Orhan Firat, and Karthik Raman. 2019. Evaluating the Cross-Lingual Effectiveness of Massively Multilingual Neural Machine Translation. arXiv preprint arXiv:1909.00437.
Samuel L. Smith, David H. P. Turban, Steven Hamblin, and Nils Y. Hammerla. 2017. Offline bilingual word vectors, orthogonal transformations and the inverted softmax. In Proceedings of the 5th International Conference on Learning Representations (ICLR 2017).

Benjamin Snyder, Tahira Naseem, Jacob Eisenstein, and Regina Barzilay. 2008. Unsupervised multilingual learning for POS tagging. In Proceedings of the 2008 Conference on Empirical Methods in Natural Language Processing, pages 1041-1050, Honolulu, Hawaii. Association for Computational Linguistics.
Anders Søgaard, Sebastian Ruder, and Ivan Vulić. 2018. On the limitations of unsupervised bilingual dictionary induction. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 778-788, Melbourne, Australia. Association for Computational Linguistics.

Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, and Tie-Yan Liu. 2019. MASS: Masked sequence to sequence pre-training for language generation. In Proceedings of the 36th International Conference on Machine Learning, volume 97, pages 5926-5936, Long Beach, California, USA. PMLR.

Dario Stojanovski, Viktor Hangya, Matthias Huck, and Alexander Fraser. 2019. The LMU Munich unsupervised machine translation system for WMT19. In Proceedings of the Fourth Conference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 393-399, Florence, Italy. Association for Computational Linguistics.

Clara Vania and Adam Lopez. 2017. From characters to words to in between: Do we capture morphology? In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2016-2027, Vancouver, Canada. Association for Computational Linguistics.
Ivan Vulić, Goran Glavaš, Roi Reichart, and Anna Korhonen. 2019. Do we really need fully unsupervised cross-lingual embeddings? In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4407-4418, Hong Kong, China. Association for Computational Linguistics.
Ivan Vulić and Anna Korhonen. 2016. On the role of seed lexicons in learning bilingual word embeddings. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 247-257, Berlin, Germany. Association for Computational Linguistics.

Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. 2018. GLUE: A multi-task benchmark and analysis platform for natural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 353-355, Brussels, Belgium. Association for Computational Linguistics.
Shijie Wu, Alexis Conneau, Haoran Li, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Emerging cross-lingual structure in pretrained language models. arXiv preprint arXiv:1911.01464.
Shijie Wu and Mark Dredze. 2019. Beto, bentz, becas: The surprising cross-lingual effectiveness of BERT. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 833–844, Hong Kong, China. Association for Computational Linguistics.
Ruochen Xu, Yiming Yang, Naoki Otani, and Yuexin Wu. 2018. Unsupervised cross-lingual transfer of word embedding spaces. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2465–2474, Brussels, Belgium. Association for Computational Linguistics.
Meng Zhang, Yang Liu, Huanbo Luan, and Maosong Sun. 2017a. Adversarial training for unsupervised bilingual lexicon induction. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1959-1970, Vancouver, Canada. Association for Computational Linguistics.
Meng Zhang, Yang Liu, Huanbo Luan, and Maosong Sun. 2017b. Earth mover's distance minimization for unsupervised bilingual lexicon induction. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 1934-1945, Copenhagen, Denmark. Association for Computational Linguistics.
acallformorerigorinunsupervisedcrosslinguallearning/images.zip
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:1ed7c68e36e5c454dbbc4b73fb3eafe473605a7e08c8a3089ed21850d7f02880
size 69399

acallformorerigorinunsupervisedcrosslinguallearning/layout.json
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:39424e4e38815e028c10a30b9f4476d2413e3284850cb1fe0ad4369a933ab367
size 349568

acompleteshiftreducechinesediscourseparserwithrobustdynamicoracle/863d725e-89f9-475d-a34e-476d1deb53e1_content_list.json
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:a36d30f1a7f8968b41d4b5d4d1fd962ae185e9da2a42b516b7eef754e55706ed
size 42104

acompleteshiftreducechinesediscourseparserwithrobustdynamicoracle/863d725e-89f9-475d-a34e-476d1deb53e1_model.json
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:1bbd632df01a6949e305cd81ccf73d25ba185826bb82a24d25ae79e3a974d13a
size 49744

acompleteshiftreducechinesediscourseparserwithrobustdynamicoracle/863d725e-89f9-475d-a34e-476d1deb53e1_origin.pdf
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:e42b5595dd9d6c8f6361161a76b6d3d676786282f9bb96369c949ff5092318d8
size 427627

acompleteshiftreducechinesediscourseparserwithrobustdynamicoracle/full.md
ADDED
# A Complete Shift-Reduce Chinese Discourse Parser with Robust Dynamic Oracle
Shyh-Shiun Hung, $^{1}$ Hen-Hsen Huang, $^{2,3}$ and Hsin-Hsi Chen $^{1,3}$
$^{1}$ Department of Computer Science and Information Engineering, National Taiwan University, Taiwan
$^{2}$ Department of Computer Science, National Chengchi University, Taiwan
$^{3}$ MOST Joint Research Center for AI Technology and All Vista Healthcare, Taiwan

shhung@nlg.csie.ntu.edu.tw, hhhuang@nccu.edu.tw, hhchen@ntu.edu.tw

# Abstract
This work proposes a standalone, complete Chinese discourse parser for practical applications. We approach Chinese discourse parsing from a variety of aspects and improve the shift-reduce parser not only by integrating the pre-trained text encoder, but also by employing novel training strategies. We revise the dynamic-oracle procedure for training the shift-reduce parser, and apply unsupervised data augmentation to enhance rhetorical relation recognition. Experimental results show that our Chinese discourse parser achieves the state-of-the-art performance.
# 1 Introduction
Discourse parsing is one of the fundamental tasks in natural language processing (NLP). Typical types of discourse parsing include hierarchical discourse parsing and shallow discourse parsing. The former is aimed at finding the relationships among a series of neighboring elementary discourse units (EDUs) and further building up a hierarchical tree structure (Mann and Thompson, 1988). Instead of establishing a tree structure, the latter finds the across-paragraph relations between all text units in a paragraph or a document. Based on Rhetorical Structure Theory Discourse Treebank (RST-DT) (Carlson et al., 2001a), hierarchical discourse parsing in English has been well-studied.
This paper focuses on hierarchical discourse parsing in Chinese. Previous work on hierarchical Chinese discourse parsing is mostly based on the RST-style Chinese Discourse Treebank (Li et al., 2014). To distinguish it from the other Chinese Discourse Treebank (Zhou and Xue, 2012), which is annotated in the PDTB style for shallow discourse parsing, we use the term CDTB-14 to refer to the RST-style one and the term CDTB-12 to refer to the PDTB-style one. Kong and Zhou (2017) propose a pipeline framework and generate the discourse parsing tree in a bottom-up way. Lin et al. (2018) propose an end-to-end system based on a recursive neural network (RvNN) to construct the parsing tree with a CKY-like algorithm. Sun and Kong (2018) use a transition-based method with the stack-augmented parser-interpreter neural network (SPINN) (Bowman et al., 2016) as the backbone model, helping their model make better predictions with the previous information.

In this work, we attempt to construct a complete Chinese discourse parser, which supports all four sub-tasks in hierarchical discourse parsing: EDU segmentation, tree structure construction, nuclearity labeling, and rhetorical relation recognition. Given a paragraph, our parser extracts all EDUs, builds the tree structure, identifies the nucleuses, and recognizes the rhetorical relations of all internal nodes. We propose a revised dynamic-oracle procedure (Yu et al., 2018) for training the shift-reduce parser. Because of the limited training instances in CDTB-14, we also address the data sparsity issue by introducing unsupervised data augmentation (Xie et al., 2019). Experimental results show that our methodology is effective, and our model outperforms all the previous models. The contributions of this work are three-fold, as follows.

1. We explore the task of Chinese discourse parsing with a variety of strategies, and our parser achieves the state-of-the-art performance. Our robust dynamic-oracle procedure can be applied to other shift-reduce parsers.
2. Our complete Chinese discourse parser handles a raw paragraph/document directly and performs all the subtasks in hierarchical discourse parsing. No pre-processing procedures such as Chinese word segmentation, POS-tagging, and syntactic parsing are required.
3. We release the pre-trained, standalone, ready-to-use parser as a resource for the research community.$^{1}$
# 2 Methodology
Figure 1 gives an overview of our parser. Five stages are performed to transform a raw document into a parse tree: EDU segmentation, tree structure construction, rhetorical relation and nuclearity classification, binary tree conversion, and beam search.
# 2.1 Elementary Discourse Unit Segmentation
Typically, EDU segmentation is a sequence labeling task (Wang et al., 2018; Peters et al., 2018). We propose a model for labeling each Chinese character in a raw document. The Begin-Inside scheme is employed: the character that begins a new EDU is labeled $B$, and all other characters are labeled $I$. Our model is based on the pre-trained text encoder BERT (Devlin et al., 2018). More specifically, we adopt the version BERT-base, Chinese, since this is the only pre-trained BERT dedicated to Chinese so far. As the BERT for Chinese is character-based, we feed each Chinese character into a BERT layer to obtain its contextual embedding. Then, we fine-tune the representation with an additional dense layer and measure the probability of each label for each character with a softmax layer. The model is further trained with a conditional random field (CRF) layer (Lafferty et al., 2001) to find the globally optimal label sequence.
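The following is a minimal sketch of this tagging architecture, assuming PyTorch, the HuggingFace `transformers` library (4.x), and the `pytorch-crf` package; the class name `EDUSegmenter` is ours, not from the paper.

```python
import torch
import torch.nn as nn
from transformers import BertModel
from torchcrf import CRF  # pip install pytorch-crf

class EDUSegmenter(nn.Module):
    """Labels each Chinese character with B (EDU-initial) or I."""
    def __init__(self, num_tags: int = 2):
        super().__init__()
        # Chinese BERT is character-based, so one token is one character.
        self.bert = BertModel.from_pretrained("bert-base-chinese")
        self.dense = nn.Linear(self.bert.config.hidden_size, num_tags)
        self.crf = CRF(num_tags, batch_first=True)

    def forward(self, input_ids, attention_mask, tags=None):
        hidden = self.bert(input_ids, attention_mask=attention_mask).last_hidden_state
        emissions = self.dense(hidden)      # per-character label scores
        mask = attention_mask.bool()
        if tags is not None:                # training: negative log-likelihood
            return -self.crf(emissions, tags, mask=mask)
        return self.crf.decode(emissions, mask=mask)  # globally best label path
```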
# 2.2 Tree Construction
We propose a shift-reduce parser for building the structure of the discourse parse tree. A shift-reduce parser maintains a stack and a queue for representing a state during parsing, and an action classifier is trained to predict the action (i.e., shift or reduce) for making a transition from the given state to the next state. In the initial state, the stack is empty, and the queue contains all the EDUs in a raw document. In the final state, the queue is empty, and the stack contains only one element, i.e., the discourse parse tree of the whole paragraph.
Figure 1: Overview of our Chinese discourse parser.
To decide whether to shift or to reduce, we propose an action classifier that considers the information of the top two elements of the stack, $s_1$ and $s_2$ (i.e., the two most recent discourse units), and the first element of the queue, $q$ (i.e., the next EDU). The textual form of each of these three discourse units is fed into the BERT encoder, yielding the representations $Enc(s_1)$, $Enc(s_2)$, and $Enc(q)$. Next, we concatenate the max pooling of $Enc(s_1)$, $Enc(s_2)$, and $Enc(q)$ and feed the resulting vector into a dense layer to predict the next action.
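A minimal sketch of the action classifier follows, assuming PyTorch; `encoder` is any BERT-style model, and the interface is ours.

```python
import torch
import torch.nn as nn

class ActionClassifier(nn.Module):
    def __init__(self, encoder, hidden_size: int, num_actions: int = 2):
        super().__init__()
        self.encoder = encoder  # e.g., a pre-trained Chinese BERT
        self.dense = nn.Linear(3 * hidden_size, num_actions)

    def encode(self, input_ids, attention_mask):
        h = self.encoder(input_ids, attention_mask=attention_mask).last_hidden_state
        return h.max(dim=1).values  # max pooling over token positions

    def forward(self, s1, s2, q):
        # s1, s2, q: (input_ids, attention_mask) for the two stack tops
        # and the queue front; the output scores shift vs. reduce.
        pooled = [self.encode(*unit) for unit in (s1, s2, q)]
        return self.dense(torch.cat(pooled, dim=-1))
```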
Since shift-reduce is a greedy algorithm, it can hardly recover from an error state. The shift-reduce parser is typically trained in teacher mode, where only correct states are given, so the resulting parser may perform poorly when it reaches unfamiliar states. For this reason, we propose a revised dynamic-oracle procedure (Yu et al., 2018) for training our discourse parser. One drawback of the original dynamic oracle is that some golden training examples may be neglected. Because CDTB-14 requires relatively few action steps to build a tree, the probability of falling into a wrong state is much smaller than in RST-DT. In our revision, we guarantee that all correct states are trained on. As shown in Algorithm 1, each training document is traversed twice: in the first pass we always follow the golden actions, while in the second pass we perform the action predicted by the model with probability $\alpha$. We refer to these as teacher mode and student mode, respectively. Note that we follow the suggestion of Yu et al. (2018) and set $\alpha$ to 0.7.
Algorithm 1 Training Procedure for Our Shift-Reduce Discourse Parser.

1: $S, Q \gets$ empty stack, elementary discourse units
2: while $Q$ is not empty $\vee S$ has more than 1 unit do
3: predicted, golden $\leftarrow$ ACTIONCLASSIFIER(S.top1(), S.top2(), Q.front()), GOLDENACTION()
4: COMPUTELOSSANDUPDATE(predicted, golden)
5: PERFORMACTION(golden)
6: $S, Q \gets$ empty stack, elementary discourse units
7: while $Q$ is not empty $\vee S$ has more than 1 unit do
8: predicted, golden $\leftarrow$ ACTIONCLASSIFIER(S.top1(), S.top2(), Q.front()), GOLDENACTION()
9: COMPUTELOSSANDUPDATE(predicted, golden)
10: if rand() > $\alpha$ then PERFORMACTION(golden) else PERFORMACTION(predicted)
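A sketch of the two-pass procedure in Python is given below; `classifier`, `oracle`, `perform`, and `update` are assumed interfaces standing in for the action classifier, the dynamic oracle, the transition function, and the optimizer step.

```python
import random

ALPHA = 0.7  # probability of following the model's prediction in the second pass

def train_on_document(edus, classifier, oracle, perform, update):
    for student_mode in (False, True):
        stack, queue = [], list(edus)
        while queue or len(stack) > 1:
            state = (stack, queue)
            predicted = classifier(state)
            golden = oracle(state)  # dynamic oracle: defined even in error states
            update(predicted, golden)
            # Pass 1 (teacher mode): always follow the golden action, so every
            # correct state is trained. Pass 2 (student mode): follow the model
            # with probability ALPHA, so error states are also explored.
            follow_model = student_mode and random.random() <= ALPHA
            stack, queue = perform(state, predicted if follow_model else golden)
```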
# 2.3 Rhetorical Relation Recognition
If two discourse units are to be merged during the tree construction stage, a new internal node is generated, and the relationship between the two discourse units must be determined. Predicting the relation between two textual arguments is a typical classification task in NLP. We propose a BERT-based classifier, which predicts the relation of two arguments separated by the symbol [SEP], with additional dense layers as the output.
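A sketch with the HuggingFace `transformers` library is shown below; encoding the two arguments as a sentence pair inserts the [SEP] symbol automatically, and the label count here is illustrative.

```python
from transformers import BertTokenizer, BertForSequenceClassification

NUM_RELATIONS = 4  # e.g., coordination, causality, transition, explanation

tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
model = BertForSequenceClassification.from_pretrained(
    "bert-base-chinese", num_labels=NUM_RELATIONS)

def relation_logits(arg1: str, arg2: str):
    # Encodes "[CLS] arg1 [SEP] arg2 [SEP]" and classifies the pair.
    inputs = tokenizer(arg1, arg2, return_tensors="pt", truncation=True)
    return model(**inputs).logits
```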
In CDTB-14, the "coordination" relation accounts for $59.6\%$ of the training data, while the minor relations suffer from data sparseness. To address this issue, we introduce unsupervised data augmentation (UDA) (Xie et al., 2019) to enhance the performance. We adopt the discourse pairs in CDTB-12 as the material for UDA. Note that other unlabeled text pairs could also be used; we chose those from CDTB-12 simply because their format is convenient to use.
The original loss is shown in Eq. 1. Given a span of text $x$, our main model $P(\cdot)$ predicts the rhetorical relation $y_{c}$. Eq. 2 shows the additional consistency loss that enforces the smoothness of our main model, where $\hat{x}$ stands for the augmented unlabeled sentence pair, and $L$ and $U$ stand for the labeled and unlabeled data, respectively. As shown in Eq. 3, we train both objectives at the same time, with a weight $\lambda$ to adjust the effect of UDA.
$$
H = -\frac{1}{N} \sum_{x \in L} \sum_{c=1}^{M} y_{c} \log(P(y_{c} \mid x)) \tag{1}
$$

$$
D_{KL} = \frac{1}{N} \sum_{x \in U} P(y \mid x) \log\left(\frac{P(y \mid x)}{P(y \mid \hat{x})}\right) \tag{2}
$$

$$
\mathcal{L}(\theta) = H + \lambda D_{KL} \tag{3}
$$
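A PyTorch sketch of the combined objective follows; `logits_l` are predictions on labeled pairs, while `logits_u` and `logits_aug` are predictions on an unlabeled pair and its paraphrase, respectively.

```python
import torch.nn.functional as F

def uda_loss(logits_l, labels, logits_u, logits_aug, lam=1.0):
    ce = F.cross_entropy(logits_l, labels)           # Eq. 1 on labeled data
    p = F.softmax(logits_u, dim=-1).detach()         # fixed target distribution
    log_q = F.log_softmax(logits_aug, dim=-1)        # prediction on the paraphrase
    kl = F.kl_div(log_q, p, reduction="batchmean")   # Eq. 2 consistency term
    return ce + lam * kl                             # Eq. 3
```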
The UDA procedure first generates the augmented unlabeled sentence pairs. Various approaches to paraphrasing can be employed. In this work, we utilize the back-translation strategy (Sennrich et al., 2016): we translate the Chinese sentence pair to English and then translate it back to Chinese, which is equivalent to adding noise to the original inputs. As the original and the back-translated sentence pairs express the same meaning, our model is expected to predict the same label for both. By minimizing the consistency loss, our model behaves consistently no matter whether an original instance or one of its paraphrases is given. In this way, the model becomes more general and robust. Moreover, once our model predicts the same label for both sentence pairs, it has effectively learned the label of the unlabeled pair as well.
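A sketch of the augmentation step, where `translate` is a placeholder for any Chinese-English machine translation system (not an API from the paper):

```python
def back_translate(pair, translate):
    # Round-trip zh -> en -> zh perturbs the surface form but keeps the
    # meaning, so the model should predict the same relation for both pairs.
    arg1, arg2 = pair
    noisy1 = translate(translate(arg1, src="zh", tgt="en"), src="en", tgt="zh")
    noisy2 = translate(translate(arg2, src="zh", tgt="en"), src="en", tgt="zh")
    return noisy1, noisy2
```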
# 2.4 Nuclearity Labeling
Nuclearity labeling is aimed at determining the nucleus of a sentence pair. Since the nuclearity of two sentences correlates with their relationship, we jointly train the rhetorical relation and the nuclearity classifiers, where the loss for back-propagation is the sum of the losses of both classifiers. Similar to the imbalance issue in rhetorical relation recognition, the 'Equal' class accounts for $51\%$ of the training data, so we also employ UDA for performance enhancement.
# 2.5 Binary Tree Conversion
For simplicity, our shift-reduce parser constructs a binary tree. However, the parse trees annotated in CDTB-14 are not always binary: in the training and the test sets, $8.9\%$ and $10\%$ of the internal nodes have more than two children, respectively. Most previous works do not handle binary tree conversion, and some further convert the golden trees into binary trees to calculate their scores, resulting in less accurate evaluation. In the training stage, we convert the multiway trees to their corresponding left-heavy binary trees (Morey et al., 2018). In the testing stage, we convert the binary tree constructed by our parser back to the corresponding multiway tree. For example, a three-way node, $A \rightarrow XYZ$, will be converted to $A \rightarrow A'Z$ and $A' \rightarrow XY$. The conversion is deterministic and bidirectional, so it is free from ambiguity.
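A sketch of the conversion in both directions, representing a node as a (label, children) tuple; the auxiliary-node marker is ours.

```python
AUX = "*"  # marks auxiliary nodes introduced by binarization

def binarize(node):
    """Left-heavy binarization: A -> X Y Z becomes A -> A' Z, A' -> X Y."""
    label, children = node
    children = [binarize(c) if isinstance(c, tuple) else c for c in children]
    while len(children) > 2:
        children = [(AUX + label, children[:2])] + children[2:]
    return (label, children)

def debinarize(node):
    """Inverse conversion: splice auxiliary nodes back into their parents."""
    label, flat = node[0], []
    for c in node[1]:
        if isinstance(c, tuple):
            c = debinarize(c)
            if c[0].startswith(AUX):
                flat.extend(c[1])
                continue
        flat.append(c)
    return (label, flat)

# binarize(("A", ["X", "Y", "Z"]))  ->  ("A", [("*A", ["X", "Y"]), "Z"])
# debinarize recovers the original three-way node.
```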
# 2.6 Beam Search
To decode a transition sequence during the testing stage, the standard method is to choose the action with the maximum probability at the current time step as the input for the next time step. However, this greedy approach might fail to find the sequence with the maximum overall probability merely because one of the action probabilities in that sequence is small. Beam search (Wiseman and Rush, 2016) is a heuristic search algorithm that explores a graph by maintaining the top $k$ results at every time step, which keeps a number of potential candidates from being discarded. Note that the greedy approach is equivalent to beam search with a beam width $k = 1$.
When performing shift-reduce parsing, two kinds of states permit only one action: (1) fewer than two elements in the stack, and (2) no element in the queue. Under these two conditions, the probability of the selected action is 1, making our model overly biased toward sequences with many non-optional steps. For this reason, we apply an alternative way to compute the sequence probability during beam search. Our modified beam search still maintains the top $k$ sequences, but the score of a sequence is calculated as the average probability of the selected actions at steps that have more than one choice.
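A sketch of the modified score; each step records the probability of the chosen action and how many actions were legal at that step.

```python
def sequence_score(steps):
    # steps: list of (chosen_action_prob, num_legal_actions) per transition.
    # Non-optional steps (one legal action, probability 1) are excluded so
    # they cannot inflate the score of a sequence.
    probs = [p for p, n_legal in steps if n_legal > 1]
    return sum(probs) / len(probs) if probs else 1.0
```

During decoding, the top $k$ partial sequences under this score are kept at every time step, exactly as in standard beam search.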
# 3 Experiments
# 3.1 Experimental Settings
Following the setting of Kong and Zhou (2017), we divide CDTB-14 into a training set of 450 articles (2,125 paragraphs) and a test set of 50 articles (217 paragraphs). We keep $10\%$ of the training data for validation. PARSEVAL (Carlson et al., 2001b) is used for evaluation.
# 3.2 Experimental Results
Table 1 shows the performance of our parser in micro-averaged F-score, compared with the previous works Zhou (Kong and Zhou, 2017) and Lin (Lin et al., 2018).
<table><tr><td>Model</td><td>EDU</td><td>+T</td><td>+R</td><td>+N</td><td>All</td></tr><tr><td>Zhou</td><td rowspan="4">Given</td><td>52.3</td><td>33.8</td><td>23.9</td><td>23.2</td></tr><tr><td>Lin</td><td>64.6</td><td>42.7</td><td>38.5</td><td>35.0</td></tr><tr><td>BERT-CKY</td><td>76.5</td><td>50.8</td><td>48.5</td><td>43.1</td></tr><tr><td>Ours</td><td>82.8</td><td>57.6</td><td>56.0</td><td>50.5</td></tr><tr><td>Zhou</td><td>93.8</td><td>46.4</td><td>28.8</td><td>23.1</td><td>22.0</td></tr><tr><td>Lin</td><td>87.2</td><td>49.5</td><td>32.6</td><td>28.8</td><td>26.8</td></tr><tr><td>BERT-CKY</td><td>92.4</td><td>68.9</td><td>43.3</td><td>42.0</td><td>37.0</td></tr><tr><td>Normal</td><td>97.4</td><td>78.8</td><td>54.6</td><td>52.0</td><td>47.1</td></tr><tr><td>Dynamic</td><td>97.4</td><td>78.9</td><td>54.5</td><td>51.8</td><td>47.1</td></tr><tr><td>Ours</td><td>97.4</td><td>80.0</td><td>55.9</td><td>53.6</td><td>48.9</td></tr></table>
Table 1: Performances of EDU segmentation (EDU), tree construction (T), rhetorical relation recognition (R), nuclearity labeling (N), and all subtasks, reported in micro-averaged F-score.
We also implement BERT-CKY, a CKY parser that uses BERT for representation, as an additional baseline model. The evaluation is based on multiway trees.
Performance is measured both with and without golden EDUs. The results show that BERT is highly competitive and able to capture the potential relations between discourse units, since Lin and BERT-CKY basically use the same approach while the latter uses BERT as the text encoder. Our parser outperforms all the baseline models and achieves a significant improvement when golden EDUs are not given. Note that BERT-CKY is based on Lin et al. (2018), whose EDU segmentation module differs from ours, hence the different EDU scores.
We examine three different training techniques for shift-reduce parsing. As mentioned in Section 2.2, Normal stands for the action classifier trained with gold-standard actions only, Dynamic stands for the dynamic oracle introduced by Yu et al. (2018), and Ours stands for our revised dynamic-oracle procedure, where the model is trained with both gold-standard actions and dynamic-oracle actions.
Compared to Normal, the original dynamic oracle yields no improvement, while our revised dynamic oracle outperforms the other two strategies. Our strategy never ignores the golden action in a correct state, yet still has the chance to explore error states.
In order to compare with SUN (Sun and Kong, 2018), we convert the gold-standard trees into binary trees and measure the performance on binary trees in macro-averaged F-score.
<table><tr><td>Model</td><td>EDU</td><td>+T</td><td>+R</td><td>+N</td><td>All</td></tr><tr><td>Sun</td><td>93.0</td><td>78.2</td><td></td><td>53.2</td><td></td></tr><tr><td>Ours</td><td>97.4</td><td>83.3</td><td>58.1</td><td>55.7</td><td>52.0</td></tr></table>
Table 2: Performances measured on binary trees, reported in macro-averaged F-score.

The results are shown in Table 2. Sun and Kong (2018) do not address all subtasks in Chinese discourse parsing, and our model outperforms SUN in every subtask.
# 3.3 Discussions

To examine the effectiveness of UDA, Table 3 shows the performance of rhetorical relation recognition with and without UDA. The results show that applying UDA successfully enhances the recall scores of the three minor classes, with a small trade-off in the recall score of the dominant class, Coordination. In addition, the F-scores of all four relations increase. In other words, applying UDA addresses the data imbalance issue and improves the overall performance. Applying UDA to nuclearity classification yields a similar improvement.

<table><tr><td>Relation</td><td></td><td>P</td><td>R</td><td>F</td></tr><tr><td rowspan="2">Coordination</td><td>-UDA</td><td>84.3</td><td>77.8</td><td>80.9</td></tr><tr><td>+UDA</td><td>90.7</td><td>76.9</td><td>83.2</td></tr><tr><td rowspan="2">Causality</td><td>-UDA</td><td>38.7</td><td>43.2</td><td>40.8</td></tr><tr><td>+UDA</td><td>38.7</td><td>55.4</td><td>45.6</td></tr><tr><td rowspan="2">Transition</td><td>-UDA</td><td>80.0</td><td>80.0</td><td>80.0</td></tr><tr><td>+UDA</td><td>80.0</td><td>88.9</td><td>84.2</td></tr><tr><td rowspan="2">Explanation</td><td>-UDA</td><td>46.0</td><td>57.6</td><td>51.1</td></tr><tr><td>+UDA</td><td>45.2</td><td>70.9</td><td>55.2</td></tr></table>

Table 3: Performances of the four rhetorical relations $(\%)$ with and without UDA. Occurrences of these relations are $59.6\%$, $17.1\%$, $1.6\%$, and $21.7\%$, respectively.
Theoretically, beam search with a larger beam width helps find a better solution. As shown in Table 4, however, our parser performs worse when a larger beam width is used, which means that the sequence with the higher overall score does not guarantee a better decoding result. We only report beam widths up to five, because in some cases the scores of worse sequences already exceed that of the correct sequence; larger beam widths therefore seem unnecessary.
<table><tr><td>Beam Size</td><td>EDU</td><td>+T</td><td>+R</td><td>+N</td><td>All</td></tr><tr><td>k=1</td><td rowspan="3">Given</td><td>82.8</td><td>57.6</td><td>56.0</td><td>50.5</td></tr><tr><td>k=2</td><td>81.8</td><td>56.8</td><td>55.1</td><td>49.7</td></tr><tr><td>k=5</td><td>81.7</td><td>56.7</td><td>54.9</td><td>49.6</td></tr></table>
Table 4: Performances of beam search with different beam widths.
The reason may be that beam search is not really suitable for the shift-reduce paradigm. For example, a sequence might fall into a seriously bad state from which the remaining actions can be easily determined, so that the sequence still obtains a high overall probability. This also implies that, unlike beam search applied to sequence-to-sequence models, we cannot judge whether a transition sequence is good or bad solely by its overall score. In addition, for longer textual units such as paragraphs, human readers and writers may not follow the assumption of overall optimization; instead, human beings read and write sequentially, similar in nature to the greedy approach.
We also evaluate our approach on English discourse parsing using the widely used RST-DT dataset. Our model achieves F-scores of $85.0\%$, $58.8\%$, $69.9\%$, and $56.7\%$ in tree construction, rhetorical relation recognition, nuclearity labeling, and all subtasks, respectively. The overall performance is similar to that of the state-of-the-art model (Yu et al., 2018).
# 4 Conclusion
This work proposes a standalone, complete Chinese discourse parser. We integrate BERT, UDA, and a revised training procedure to construct a robust shift-reduce parser. Our model is compared with a number of previous models, and experimental results show that it achieves state-of-the-art performance and is highly competitive across different setups. In future work, we will explore cross-lingual transfer learning to support more languages.
# Acknowledgements
This research was partially supported by the Ministry of Science and Technology, Taiwan, under grants MOST-106-2923-E-002-012-MY3, MOST-109-2634-F-002-040-, MOST-109-2634-F-002-034-, MOST-108-2218-E-009-051-, and by Academia Sinica, Taiwan, under grant AS-TP-107-M05.
# References
Samuel R. Bowman, Jon Gauthier, Abhinav Rastogi, Raghav Gupta, Christopher D. Manning, and Christopher Potts. 2016. A fast unified model for parsing and sentence understanding. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1466-1477, Berlin, Germany. Association for Computational Linguistics.
Lynn Carlson, Daniel Marcu, and Mary Ellen Okurowski. 2001a. Building a discourse-tagged corpus in the framework of rhetorical structure theory. In Proceedings of the Second SIGdial Workshop on Discourse and Dialogue.
Lynn Carlson, Daniel Marcu, and Mary Ellen Okurowski. 2001b. Building a discourse-tagged corpus in the framework of rhetorical structure theory. In Proceedings of the Second SIGdial Workshop on Discourse and Dialogue (SIGDIAL'01), pages 1-10.
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: pre-training of deep bidirectional transformers for language understanding. CoRR, abs/1810.04805.
Fang Kong and Guodong Zhou. 2017. A CDT-styled end-to-end Chinese discourse parser. ACM Trans. Asian Low-Resour. Lang. Inf. Process., 16(4):26:1-26:17.
John D. Lafferty, Andrew McCallum, and Fernando C. N. Pereira. 2001. Conditional random fields: Probabilistic models for segmenting and labeling sequence data. In Proceedings of the Eighteenth International Conference on Machine Learning, ICML '01, pages 282-289, San Francisco, CA, USA. Morgan Kaufmann Publishers Inc.
Yancui Li, Wenhe Feng, Jing Sun, Fang Kong, and Guodong Zhou. 2014. Building Chinese discourse corpus with connective-driven dependency tree structure. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2105-2114, Doha, Qatar. Association for Computational Linguistics.
Chuan-An Lin, Hen-Hsen Huang, Zi-Yuan Chen, and Hsin-Hsi Chen. 2018. A unified RvNN framework for end-to-end Chinese discourse parsing. In Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations, pages 73-77, Santa Fe, New Mexico. Association for Computational Linguistics.
William C Mann and Sandra A Thompson. 1988. Rhetorical structure theory: Toward a functional theory of text organization. Text-Interdisciplinary Journal for the Study of Discourse, 8(3):243-281.
Mathieu Morey, Philippe Muller, and Nicholas Asher. 2018. A dependency perspective on RST discourse parsing and evaluation. Comput. Linguist., 44(2):197-235.
Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word representations. CoRR, abs/1802.05365.
Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Improving neural machine translation models with monolingual data. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 86-96, Berlin, Germany. Association for Computational Linguistics.
Cheng Sun and Fang Kong. 2018. A transition-based framework for Chinese discourse structure parsing. Journal of Chinese Information Processing, 32(12):48.
Yizhong Wang, Sujian Li, and Jingfeng Yang. 2018. Toward fast and accurate neural discourse segmentation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 962-967, Brussels, Belgium. Association for Computational Linguistics.
Sam Wiseman and Alexander M. Rush. 2016. Sequence-to-sequence learning as beam-search optimization. CoRR, abs/1606.02960.
Qizhe Xie, Zihang Dai, Eduard Hovy, Minh-Thang Luong, and Quoc V Le. 2019. Unsupervised data augmentation for consistency training. arXiv preprint arXiv:1904.12848.
Nan Yu, Meishan Zhang, and Guohong Fu. 2018. Transition-based neural RST parsing with implicit syntax features. In Proceedings of the 27th International Conference on Computational Linguistics, pages 559-570, Santa Fe, New Mexico, USA. Association for Computational Linguistics.
Yuping Zhou and Nianwen Xue. 2012. PDTB-style discourse annotation of Chinese text. In Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 69-77, Jeju Island, Korea. Association for Computational Linguistics.
acompleteshiftreducechinesediscourseparserwithrobustdynamicoracle/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ecd56d962d1bca65963e90aa37ce7cb513ecc23c354b33f5a36cc4165cb4eac4
size 188938
acompleteshiftreducechinesediscourseparserwithrobustdynamicoracle/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:04fe14d0944d134d79eb9e1c25932db41a569ad25b2205c240585c6108090cd0
size 201247
acomprehensiveanalysisofpreprocessingforwordrepresentationlearninginaffectivetasks/bab6b959-5d76-4e05-a32d-ea7723b78eaa_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0f45c616890a5c521336bab6bb036d8315bdf9e55980cba08b72ebf2abbbc264
size 88396
acomprehensiveanalysisofpreprocessingforwordrepresentationlearninginaffectivetasks/bab6b959-5d76-4e05-a32d-ea7723b78eaa_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b93ac1fa00c7b6ba955d69d12a97e4a42f8fe942db5db63246da48272e6e7ed6
size 107254
acomprehensiveanalysisofpreprocessingforwordrepresentationlearninginaffectivetasks/bab6b959-5d76-4e05-a32d-ea7723b78eaa_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e03953c117d77f92249e459587c76200fc9efde458e3815856edcc3cd3abd4f
size 513390
acomprehensiveanalysisofpreprocessingforwordrepresentationlearninginaffectivetasks/full.md
ADDED
@@ -0,0 +1,280 @@
# A Comprehensive Analysis of Preprocessing for Word Representation Learning in Affective Tasks
Nastaran Babanejad, Ameeta Agrawal, Aijun An, Manos Papagelis
Department of Electrical Engineering and Computer Science,
York University, Toronto, Canada
{nasba, ameeta, aan, papaggel}@eecs.yorku.ca
# Abstract
Affective tasks such as sentiment analysis, emotion classification and sarcasm detection have been popular in recent years due to an abundance of user-generated data, accurate computational linguistic models, and a broad range of relevant applications in various domains. At the same time, many studies have highlighted the importance of text preprocessing as an integral step in any natural language processing prediction model and downstream task. While preprocessing in affective systems is well-studied, preprocessing in word vector-based models applied to affective systems is not. To address this limitation, we conduct a comprehensive analysis of the role of preprocessing techniques in affective analysis based on word vector models. Our analysis is the first of its kind and provides useful insights into the importance of each preprocessing technique when applied at the training phase, commonly ignored in pretrained word vector models, and/or at the downstream task phase.
# 1 Introduction
Affective tasks such as sentiment analysis, emotion classification and sarcasm detection have enjoyed great popularity in recent years. This success can be largely attributed to the fundamental and straightforward nature of the methods employed, the availability of vast amounts of user-generated natural language data, and the wide range of useful applications, spanning from hate speech detection to monitoring the sentiment of financial markets and news recommendation (Djuric et al., 2015; Babanejad et al., 2019). Most early models of affect analysis employed pretrained word embeddings that have been obtained under the assumption of the distributional hypothesis (Mikolov et al., 2013; Devlin et al., 2018). The distributional hypothesis suggests that two words occurring frequently in similar linguistic contexts tend to be more semantically similar, and therefore should be represented closer to one another in the embedding space. However, while such embeddings are useful for several natural language processing (NLP) downstream tasks, they are known to be less suitable for affective tasks in particular (Tang et al., 2014; Agrawal et al., 2018). Although some authors claim that there is a need for post-processing word embeddings for affective tasks, others find that off-the-shelf vectors are very powerful for affective lexicon learning (Lison and Kutuzov, 2017). For example, word2vec (Mikolov et al., 2013) estimates the pair of words 'happy' and 'sad' to be more similar than the pair of words 'happy' and 'joy', which is counterintuitive, and might affect the accuracy performance of the models that depend on it.
Figure 1: Framework of applying preprocessing in different stages in affective systems; (a) Pre, (b) Post.
To address the limitations of traditional word embeddings, several techniques have been proposed, including task-specific fine-tuning (Devlin et al., 2018), retrofitting (Faruqui et al., 2014), representing emotion with vectors using a multi-task training framework (Xu et al., 2018) and generating affective word embeddings (Felbo et al., 2017), to name a few. Other attempts to overcome the limitations of word vectors include optimization of hyperparameters (Levy et al., 2015), as well as fine-tuned preprocessing strategies tailored to different NLP tasks. While these strategies have demonstrated evidence of improving accuracy in tasks such as word similarity, word analogy, and others (Lison and Kutuzov, 2017), their effect on affective tasks has not received considerable attention and remains less explored. Our work is motivated by the observation that preprocessing factors such as stemming, stopwords removal and many others make up an integral part of nearly every improved text classification model, and affective systems in particular (Danisman and Alpkocak, 2008; Patil and Patil, 2013). However, little work has been done towards understanding the role of preprocessing techniques applied to word embeddings in different stages of affective systems. To address this limitation, the overarching goal of this research is to perform an extensive and systematic assessment of the effect of a range of linguistic preprocessing factors pertaining to three affective tasks: sentiment analysis, emotion classification and sarcasm detection. Towards that end, we systematically analyze the effectiveness of applying preprocessing to large training corpora before learning word embeddings, an approach that has largely been overlooked by the community. We investigate the following research questions: (i) what is the effect of integrating preprocessing techniques earlier into word embedding models, instead of later in downstream classification models? (ii) which preprocessing techniques yield the most benefit in affective tasks? (iii) does preprocessing of word embeddings provide any improvement over state-of-the-art pretrained word embeddings, and if so, how much?
Figure 1 illustrates the difference between a) preprocessing word embeddings pipeline (Pre) vs. b) preprocessing classification dataset pipeline (Post), where preprocessing techniques in (a) are applied to the training corpus of the model and in (b) only to the classification dataset. In brief, the main contributions of our work are as follows:
- We conduct a comprehensive analysis of the role of preprocessing techniques in affective tasks (including sentiment analysis, emotion classification and sarcasm detection), employing different models, over nine datasets;
- We perform a comparative analysis of the accuracy performance of word vector models when preprocessing is applied at the training phase (training data) and/or at the downstream task phase (classification dataset). Interestingly, we obtain the best results when preprocessing is applied only to the training corpus or when it is applied to both the training corpus and the classification dataset of interest.
- We evaluate the performance of our best preprocessed word vector model against state-of-the-art pretrained word embedding models;
- We make source code and data publicly available to encourage reproducibility of results<sup>1</sup>.
The rest of the paper is organized as follows: Section 2 presents an overview of the related work. Section 3 elaborates on the preprocessing techniques employed in the evaluation of models. Section 4 describes the experimental evaluation framework. In Section 5 a comprehensive analysis of the results is provided. Section 6 concludes the paper with key insights of the research.
# 2 Related Work
In this section, we present an overview of related work on preprocessing classification datasets and preprocessing word embeddings, and how our work aims to bridge the gap between those efforts.
# 2.1 Preprocessing Classification Datasets
Preprocessing is a vital step in text mining and therefore, evaluation of preprocessing techniques has long been a part of many affective systems. Saif et al. (2014) indicated that, despite its popular use in Twitter sentiment analysis, the use of precompiled stopwords has a negative impact on the classification performance. Angiani et al. (2016) analyzed various preprocessing methods such as stopwords removal, stemming, negation, emoticons, and so on, and found stemming to be most effective for the task of sentiment analysis. Similarly, Symeonidis et al. (2018) found that lemmatization increases accuracy. Jianqiang and Xiaolin (2017) observed that removing stopwords, numbers, and URLs can reduce noise but does not affect performance, whereas replacing negation and expanding acronyms can improve the classification accuracy.
Preprocessing techniques such as punctuation and negation (Rose et al., 2018) or post-tagging and negation (Seal et al., 2020) make up a common component of many emotion classification models (Kim et al., 2018; Patil and Patil, 2013). One of the earliest works (Danisman and Alpkocak, 2008) preserved emotion words and negative verbs during stopwords removal, replaced punctuation with descriptive new words, replaced negative short forms with long forms, and concatenated negative words with emotion words to create new words (e.g., not happy $\rightarrow$ NOThappy). Although stemming may remove the emotional meaning from some words, it has been shown to improve classification accuracy (Danisman and Alpkocak, 2008; Agrawal and An, 2012). Negations have also been found beneficial, whereas considering intensifiers and diminishers did not lead to any improvements (Strohm, 2017).
Pecar et al. (2018) also highlight the importance of preprocessing when using user-generated content, with emoticon processing being the most effective technique. Along the same lines, while Gratian and Haid (2018) found pos-tags to be useful, Boiy et al. (2007) ignored pos-tagging because of its effect of reducing the classification accuracy.
The aforementioned works describe preprocessing techniques as applied directly to evaluation datasets in affective systems. In contrast, we examine the effectiveness of directly incorporating these known effective preprocessing techniques further "upstream" into the training corpus of word embeddings, which are widely used across a number of downstream tasks.
# 2.2 Preprocessing Word Embeddings
Through a series of extensive experiments, particularly those related to context window size and dimensionality, Levy et al. (2015) indicate that seemingly minor variations can have a large impact on the success of word representation methods in similarity and analogy tasks, stressing the need for more analysis of often-ignored preprocessing settings. Lison and Kutuzov (2017) also present a systematic analysis of context windows based on a set of four hyperparameters, including window position and stopwords removal, where the right window was found to be better than the left for the English similarity task, and stopwords removal substantially benefited the analogy task but not similarity.
A general space of hyperparameters and preprocessing factors, such as context window size (Hershcovich et al., 2019; Melamud et al., 2016), dimensionality (Melamud et al., 2016), and syntactic dependencies (Levy and Goldberg, 2014; Vulić et al., 2020), and their effect on NLP tasks, including word similarity (Hershcovich et al., 2019), tagging, parsing, relatedness, and entailment (Hashimoto et al., 2017), and biomedical applications (Chiu et al., 2016), has been studied extensively in the literature. The main conclusion of these studies, however, is that these factors are heavily task-specific. Therefore, in this work we explore preprocessing factors for generating word embeddings specifically tailored to affective tasks, which have received little attention.
A recent study investigated the role of tokenizing, lemmatizing, lowercasing and multiword grouping (Camacho-Collados and Pilehvar, 2018) as applied to sentiment analysis and found simple tokenization to be generally adequate. In the task of emotion classification, Mulki et al. (2018) examined the role of four preprocessing techniques as applied to a vector space model based on tf-idf trained on a small corpus of tweets, and found stemming, lemmatization and emoji tagging to be the most effective factors.
Distinct from prior works, we examine a much larger suite of preprocessing factors grounded in insights derived from numerous affective systems, trained over two different corpora, using three different word embedding models. We evaluate the effect of the preprocessed word embeddings in three distinct affective tasks including sentiment analysis, emotion classification and sarcasm detection.
# 3 Preprocessing in Affective Systems
This section describes the preprocessing factors applied to the training corpus that is then used to generate word representations, as well as the order in which these factors need to be applied to the corpus.
# 3.1 Preprocessing Factors
Basic: A group of common text preprocessing steps applied at the very beginning, such as removing HTML tags, removing numbers, and lowercasing. This step also removes all common punctuation from text, such as “@%*=(/ +”, using the NLTK regexp tokenizer<sup>2</sup>.
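A minimal sketch of this basic step with NLTK (HTML-tag and number removal omitted for brevity):

```python
from nltk.tokenize import RegexpTokenizer

tokenizer = RegexpTokenizer(r"\w+")  # keeps word tokens, drops punctuation

def basic_clean(text: str):
    return tokenizer.tokenize(text.lower())

# basic_clean("Great movie @user!!")  ->  ['great', 'movie', 'user']
```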
Spellcheck (spell): A case can be made for either correcting misspellings and typos or leaving them as is, assuming they represent natural language text and its associated complexities. In this step, we identify words that may have been misspelled and correct them<sup>3</sup>. As unambiguous spell corrections are not very common and in most cases there are multiple options for correction, we built our own custom dictionary to suggest replacements by parsing the ukWac corpora<sup>4</sup> to retrieve a word-frequency list. A misspelled word that has multiple replacements is replaced with the suggested word that has the maximum frequency in the corpora.
Negation (neg): Negation is a mechanism that transforms a positive argument into its inverse rejection (Benamara et al., 2012). Specifically in the task of affective analysis, negation plays a critical role as negation words can affect the word or sentence polarity causing the polarity to invert in many cases. Our negation procedure is as follows:
(i) Compilation of an antonym dictionary: The first stage involves compiling an antonym dictionary using the WordNet corpus (Miller, 1995). For every synset, there are three possibilities: finding no antonym, one antonym, or multiple antonyms. The first two cases are trivial (unambiguous replacements). In the third case (ambiguous replacement), which is the most common, we consider, among the many choices, the antonym with the maximum frequency in the ukWac corpus, as described in the previous section; finally, the antonym of a word is picked at random from one of its senses in our antonym dictionary.
(ii) Negation handler: Next, we identify the negation words in the tokenized text<sup>5</sup>. If a negation word is found, the token following it (i.e., the negated word) is extracted and its antonym looked up in the antonym dictionary. If an antonym is found, the negation word and the negated word are replaced with it.
For example, consider the sentence "I am not happy today" in its tokenized form ['I', 'am', 'not', 'happy', 'today']. First, we identify any negation words (i.e., 'not') and their corresponding negated words (i.e., 'happy'). Then, we look up the antonym of 'happy' in the antonym dictionary (i.e., 'sad') and replace the phrase 'not happy' with the word 'sad', resulting in the new sentence "I am sad today".
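A sketch of the handler over tokenized text; the tiny antonym dictionary here stands in for the WordNet/ukWac-derived one described above.

```python
NEGATION_WORDS = {"not", "no", "never"}
ANTONYMS = {"happy": "sad", "good": "bad"}  # illustrative entries only

def handle_negation(tokens):
    out, i = [], 0
    while i < len(tokens):
        nxt = tokens[i + 1] if i + 1 < len(tokens) else None
        if tokens[i] in NEGATION_WORDS and nxt in ANTONYMS:
            out.append(ANTONYMS[nxt])  # "not happy" -> "sad"
            i += 2
        else:
            out.append(tokens[i])
            i += 1
    return out

# handle_negation(['I', 'am', 'not', 'happy', 'today'])
# -> ['I', 'am', 'sad', 'today']
```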
Parts-of-Speech (pos): Four parts-of-speech classes, namely nouns, verbs, adjectives and adverbs, have been shown to be more informative with regard to affect than the other classes. Thus, using the NLTK pos-tagger, for each sentence in the corpus we retain only the words belonging to one of these four classes, i.e., $\mathrm{NN^{*}}$, $\mathrm{JJ^{*}}$, $\mathrm{VB^{*}}$, and $\mathrm{RB^{*}}$.
Stopwords (stop): Stopwords are generally the most common words in a language and are typically filtered out before classification tasks. We remove all stopwords using the NLTK library.
Stemming (stem): Stemming, which reduces a word to its root form, is an essential preprocessing technique in NLP tasks. We use the NLTK Snowball stemmer for stemming our training corpus.
# 3.2 Order of Preprocessing Factors
While some preprocessing techniques can be applied independently of each other (e.g., removing stopwords and removing punctuation), others require more careful consideration of the sequence in which they are applied in order to obtain a stable result. For instance, pos-tagging should be applied before stemming in order for the tagger to work well, and negation should be handled prior to removing stopwords. To this end, we apply the preprocessing factors in the following order: spellchecking, negation handling, pos filtering, stopwords removal, and stemming.
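A sketch of the combined pipeline in this order, using NLTK; `spellcheck` and `negate` are stubs standing in for the dictionary-based components described above.

```python
import nltk  # requires nltk.download("averaged_perceptron_tagger", "stopwords")
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer

KEEP_TAGS = ("NN", "JJ", "VB", "RB")  # nouns, adjectives, verbs, adverbs
STOPWORDS = set(stopwords.words("english"))
STEMMER = SnowballStemmer("english")

def preprocess(tokens, spellcheck, negate):
    tokens = [spellcheck(t) for t in tokens]                    # 1. spellchecking
    tokens = negate(tokens)                                     # 2. negation handling
    tagged = nltk.pos_tag(tokens)                               # 3. pos filtering
    tokens = [w for w, tag in tagged if tag.startswith(KEEP_TAGS)]
    tokens = [w for w in tokens if w.lower() not in STOPWORDS]  # 4. stopwords removal
    return [STEMMER.stem(w) for w in tokens]                    # 5. stemming
```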
# 4 Experimental Evaluation Framework
# 4.1 Training Corpora
Table 1 summarizes the details of our two training corpora with regard to their vocabulary and corpus sizes after applying various preprocessing settings. For some preprocessing steps such as pos filtering (pos) and stopwords removal (stop), the corpus size reduces dramatically, in some cases by more than $50\%$, without any significant loss in vocabulary (as indicated by the $\%$ ratio of preprocessed to Basic), a nontrivial implication with regard to training time.
News: This corpus consists of 142,546 articles from 15 American publications, spanning from 2013 to early 2018<sup>6</sup>.
Wikipedia: Comparatively a much larger corpus than the News, this corpus consists of 23,046,187 articles from Wikipedia<sup>7</sup>.
<table><tr><td rowspan="2">Corpus</td><td rowspan="2">Processing</td><td colspan="2">Vocab</td><td colspan="2">Corpus</td></tr><tr><td>size</td><td>%</td><td>size</td><td>%</td></tr><tr><td rowspan="14">News</td><td>Basic</td><td>155K</td><td>100</td><td>123.2M</td><td>100</td></tr><tr><td>spell</td><td>149K</td><td>96</td><td>123.2M</td><td>100</td></tr><tr><td>stem</td><td>137K</td><td>88</td><td>123.2M</td><td>100</td></tr><tr><td>punc</td><td>147K</td><td>95</td><td>111.0M</td><td>90</td></tr><tr><td>neg</td><td>152K</td><td>98</td><td>90.7M</td><td>73</td></tr><tr><td>stop</td><td>150K</td><td>97</td><td>75.6M</td><td>61</td></tr><tr><td>pos</td><td>154K</td><td>99</td><td>70.7M</td><td>57</td></tr><tr><td>All - punc</td><td>151K</td><td>97</td><td>93.7M</td><td>76</td></tr><tr><td>All - pos</td><td>140K</td><td>90</td><td>90.5M</td><td>73</td></tr><tr><td>All - stop</td><td>150K</td><td>97</td><td>75.3M</td><td>61</td></tr><tr><td>All</td><td>110K</td><td>71</td><td>55.2M</td><td>49</td></tr><tr><td>All - stem</td><td>110K</td><td>71</td><td>58.1M</td><td>47</td></tr><tr><td>All - spell</td><td>110K</td><td>71</td><td>56.4M</td><td>46</td></tr><tr><td>All - neg</td><td>110K</td><td>71</td><td>54.3M</td><td>44</td></tr><tr><td rowspan="8">Wikipedia</td><td>Basic</td><td>5.1M</td><td>100</td><td>8.1B</td><td>100</td></tr><tr><td>All - punc</td><td>4.9M</td><td>96</td><td>7.2B</td><td>89</td></tr><tr><td>All - pos</td><td>4.8M</td><td>94</td><td>7.0B</td><td>86</td></tr><tr><td>All - stop</td><td>4.9M</td><td>96</td><td>6.8B</td><td>84</td></tr><tr><td>All - stem</td><td>4.3M</td><td>84</td><td>6.4B</td><td>79</td></tr><tr><td>All - spell</td><td>4.6M</td><td>90</td><td>6.1B</td><td>75</td></tr><tr><td>All</td><td>4.6M</td><td>90</td><td>5.6B</td><td>69</td></tr><tr><td>All - neg</td><td>4.6M</td><td>90</td><td>5.0B</td><td>62</td></tr></table>
Table 1: Details of training corpora
<table><tr><td>Dataset</td><td>Genre</td><td>Task</td><td>Total</td></tr><tr><td>IMDB</td><td>reviews</td><td>sentiment</td><td>50,000</td></tr><tr><td>SemEval</td><td>tweets</td><td>sentiment</td><td>14,157</td></tr><tr><td>Airline</td><td>tweets</td><td>sentiment</td><td>11,541</td></tr><tr><td>ISEAR</td><td>narratives</td><td>emotions</td><td>5,477</td></tr><tr><td>Alm</td><td>fairy tales</td><td>emotions</td><td>1,206</td></tr><tr><td>SSEC</td><td>tweets</td><td>emotions</td><td>1,017</td></tr><tr><td>Onion</td><td>headlines</td><td>sarcasm</td><td>28,619</td></tr><tr><td>IAC</td><td>response</td><td>sarcasm</td><td>3,260</td></tr><tr><td>Reddit</td><td>comments</td><td>sarcasm</td><td>1,010,826</td></tr></table>
Table 2: Details of evaluation datasets
# 4.2 Word Embedding Models
We obtain our preprocessed word representations through three models: (i) CBOW (Continuous Bag-of-Words); (ii) Skip-gram: while CBOW takes the context of each word as the input and tries to predict the word corresponding to the context, Skip-gram reverses the use of target and context words: the target word is fed to the input, and the output layer of the neural network is replicated multiple times to accommodate the chosen number of context words (Mikolov et al., 2013). We train both models on both training corpora, using a minimum count of 5 for News and 100 for Wikipedia, with window sizes of 5 and 10, respectively, and dimensionality set to 300.
(iii) BERT (Bidirectional Encoder Representations from Transformers): BERT is an unsupervised method of pretraining contextualized language representations (Devlin et al., 2018). We train the model using the BERT-large uncased architecture (24-layer, 1024-hidden, 16-heads, 340M parameters) with the same parameter settings as the original paper.
We train each of the three models (CBOW, Skip-gram and BERT) 8 times using 16 TPUs (64 TPU chips) with TensorFlow 1.15 and 1TB memory on Google Cloud, and two 32-GPU clusters of V100/RTX 2080 Ti with 1TB memory using the Microsoft CNTK parallelization algorithm$^{8}$ on an Amazon server. For a large model such as BERT, each training run takes up to 4-5 days.
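As a concrete example of the word2vec configurations above, a gensim sketch follows (gensim version 4 or later; earlier versions use `size` instead of `vector_size`):

```python
from gensim.models import Word2Vec

def train_news_embeddings(sentences):
    # News-corpus settings from the text: min count 5, window 5, 300 dimensions;
    # for Wikipedia the text uses min count 100 and window 10 instead.
    cbow = Word2Vec(sentences, vector_size=300, window=5, min_count=5, sg=0)
    skipgram = Word2Vec(sentences, vector_size=300, window=5, min_count=5, sg=1)
    return cbow, skipgram
```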
# 4.3 Evaluation Datasets
We conduct our evaluation on three tasks, namely sentiment analysis, emotion classification and sarcasm detection. Table 2 presents the details of our evaluation datasets, and some illustrative examples of text are shown in Table 3.
Sentiment Analysis: This popular task involves classifying text as positive or negative, and we use the following three datasets for evaluation: (i) IMDB: This dataset<sup>9</sup> includes 50,000 movie reviews for sentiment analysis, consisting of 25,000 negative and 25,000 positive reviews (Maas et al., 2011). (ii) SemEval 2016: This sentiment-analysis-in-Twitter dataset<sup>10</sup> consists of 14,157 tweets, of which 10,076 are positive and 4,081 negative (Nakov et al., 2016). (iii) Airlines: This sentiment analysis dataset<sup>11</sup> consists of 11,541 tweets about six U.S. airlines from February 2015, with 9,178 tweets labeled as positive and 2,363 negative.
Emotion Classification: A multiclass classification task, this involves classifying text into a number of emotion categories such as happy, sad, and so on. The following datasets are used in our evaluation: (i) SSEC: The Stance Sentiment Emotion Corpus (Schuff et al., 2017) is the re-annotation of the SemEval 2016 Twitter stance and sentiment corpus (Mohammad et al., 2017) with emotion labels including anger, joy, sadness, fear, and surprise<sup>12</sup>. (ii) ISEAR: This dataset contains narratives of personal experiences evoking emotions (Wallbott and Scherer, 1986). We use a subset of the data consisting of five categories: sadness, anger, disgust, fear, joy.
<table><tr><td>Text</td><td>Label</td><td>Dataset</td></tr><tr><td>·I must admit that this is one of the worst movies I’ve ever seen. I thought Dennis Hopper had a little more taste than to appear in this kind of yeeecchh... [truncated]</td><td>negative</td><td>IMDB</td></tr><tr><td>·everything was fine until you lost my bag.</td><td>negative</td><td>Airline</td></tr><tr><td>·At work, when an elderly man complained unjustifiably about me and distrusted me.</td><td>anger</td><td>ISEAR</td></tr><tr><td>·The ladies danced and clapped their hands for joy.</td><td>happy</td><td>Alm</td></tr><tr><td>·if this heat is killing me i don’t wanna know what the poor polar bears are going through</td><td>sadness</td><td>SSEC</td></tr><tr><td>·ford develops new suv that runs purely on gasoline</td><td>sarcastic</td><td>Onion</td></tr><tr><td>·Been saying that ever since the first time I heard about creationism</td><td>not-sarcastic</td><td>IAC</td></tr><tr><td>·Remember, it’s never a girl’s fault, it’s always the man’s fault.</td><td>sarcastic</td><td>Reddit</td></tr></table>
Table 3: Examples of text instances in the evaluation datasets
(iii) Alm: This dataset contains sentences from fairy tales marked with one of five emotion categories: angry-disgusted, fearful, happy, sad and surprised (Cecilia and Ovesdotter, 2008).
Sarcasm Detection: Detecting sarcasm from text, a challenging task due to the sophisticated nature of sarcasm, involves labeling text as sarcastic or not. We use the following three datasets: (i) Onion: This news headlines dataset<sup>13</sup> collects sarcastic versions of current events from The Onion and non-sarcastic news headlines from HuffPost (Misra and Arora, 2019), resulting in a total of 28,619 records. (ii) IAC: A subset of the Internet Argument Corpus (Oraby et al., 2016), this dataset contains response utterances annotated for sarcasm; we extract 3,260 instances of the general sarcasm type<sup>14</sup>. (iii) Reddit: The Self-Annotated Reddit Corpus (SARC)<sup>15</sup> is a collection of Reddit posts where sarcasm is labeled by the author, in contrast to other datasets where the data is typically labeled by independent annotators (Khodak et al., 2017).
# 4.4 Classification Setup
For classification, we employ the LSTM model as it works well with sequential data such as text. For binary classification, such as sentiment analysis and sarcasm detection, the loss function used is the binary cross-entropy along with sigmoid activation:
$$
\xi = -\frac{1}{N} \sum_{i=1}^{N} \left[ y_{i} \log(p(y_{i})) + (1 - y_{i}) \log(1 - p(y_{i})) \right]
$$
where $y$ is the binary representation of the true label, $p(y)$ is the predicted probability, and $i$ denotes the $i^{\mathrm{th}}$ training sample.
For multiclass emotion classification, the loss function used is categorical cross-entropy loss over a batch of $N$ instances and $k$ classes, along with softmax activation:
$$
\xi = -\frac{1}{N} \sum_{i=1}^{N} \sum_{j=1}^{k} y_{ij} \log(p(y_{ij}))
$$
where $p(y)$ is the predicted probability distribution and $p(y_{ij}) \in [0,1]$.
The optimizer is Adam (Kingma and Ba, 2014); all loss functions are sample-wise, and we take the mean over all samples (epochs = 5, 10; batch size = 64, 128). All sentiment and sarcasm datasets are split into training/testing using $80\% / 20\%$, with $10\%$ of the training data held out for validation. For the smaller and imbalanced emotion datasets, we use stratified 5-fold cross-validation. We use a dropout layer to prevent overfitting by ignoring randomly selected neurons during training, and early stopping when the validation loss stops improving (patience $= 3$, min-delta $= 0.0001$). The results are reported in terms of weighted F-score (as some emotion datasets are highly imbalanced), where F-score $= 2\frac{p \cdot r}{p + r}$, with $p$ denoting precision and $r$ recall.
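A sketch of the binary setup with TensorFlow 2/Keras; the embedding matrix would be built from the preprocessed word vectors, and the layer sizes here are illustrative.

```python
import tensorflow as tf

def build_classifier(vocab_size, embedding_matrix, seq_len):
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(
            vocab_size, 300, input_length=seq_len, trainable=False,
            embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix)),
        tf.keras.layers.LSTM(128),
        tf.keras.layers.Dropout(0.5),  # ignore random neurons during training
        tf.keras.layers.Dense(1, activation="sigmoid"),
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy")
    return model

early_stop = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss", patience=3, min_delta=0.0001)
# model.fit(x_train, y_train, validation_split=0.1, epochs=10,
#           batch_size=64, callbacks=[early_stop])
```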
# 5 Discussion and Analysis
We analyze the impact of preprocessing techniques in word representation learning on affect analysis.
# 5.1 Effect of Preprocessing Factors
A primary goal of this work is to identify the most effective preprocessing factors for training word embeddings for affective tasks. Table 4 details the results of our experiments comparing the performance of individual preprocessing factors as well as those of ablation studies (i.e., including all the factors but one).
Observing the performance of the individual factors on the News corpus (Table 4), we note that even a single simple preprocessing technique can bring improvements, thereby validating our intuition of incorporating preprocessing into the training corpora of word representations. Second, negation (neg) processing appears to be consistently the most effective factor across all 9 datasets, indicating its importance in affective classification, followed by parts-of-speech (pos) processing, where we retained only words belonging to one of four classes. On the other hand, removing stopwords (stop), spellchecking (spell) and stemming (stem) yield little improvement and mixed results. Interestingly, applying all the preprocessing factors is barely better than, and in some cases (Onion, Reddit and SSEC) even worse than, applying just negation. Finally, the best performance comes from combining all the preprocessing factors except stemming (All - stem).
<table><tr><td>Models</td><td>Processing</td><td>IMDB</td><td>Semeval</td><td>Airline</td><td>IAC</td><td>Onion</td><td>Reddit</td><td>Alm</td><td>ISEAR</td><td>SSEC</td></tr><tr><td rowspan="14">CBOW</td><td>Basic</td><td>83.99</td><td>55.69</td><td>60.73</td><td>65.74</td><td>68.23</td><td>59.42</td><td>36.81</td><td>55.43</td><td>51.76</td></tr><tr><td>stop</td><td>84.43</td><td>55.72</td><td>61.37</td><td>66.03</td><td>68.17</td><td>59.27</td><td>36.81</td><td>56.01</td><td>52.33</td></tr><tr><td>spell</td><td>86.20</td><td>55.93</td><td>61.96</td><td>66.00</td><td>69.57</td><td>60.00</td><td>36.88</td><td>56.41</td><td>52.14</td></tr><tr><td>stem</td><td>86.92</td><td>55.72</td><td>61.86</td><td>65.89</td><td>68.49</td><td>59.72</td><td>36.94</td><td>55.84</td><td>51.89</td></tr><tr><td>punc</td><td>86.99</td><td>56.41</td><td>62.08</td><td>65.93</td><td>69.85</td><td>60.28</td><td>36.94</td><td>56.89</td><td>52.03</td></tr><tr><td>pos</td><td>85.66</td><td>56.83</td><td>62.75</td><td>66.32</td><td>70.25</td><td>60.63</td><td>37.02</td><td>57.04</td><td>53.19</td></tr><tr><td>neg</td><td>88.98</td><td>57.29</td><td>63.81</td><td>66.87</td><td>71.12</td><td>60.91</td><td>37.22</td><td>57.39</td><td>54.15</td></tr><tr><td>All</td><td>89.96</td><td>57.82</td><td>64.58</td><td>67.23</td><td>70.90</td><td>60.84</td><td>37.43</td><td>57.72</td><td>53.71</td></tr><tr><td>All - neg</td><td>84.67</td><td>55.00</td><td>61.58</td><td>66.02</td><td>69.73</td><td>59.94</td><td>36.91</td><td>55.89</td><td>51.94</td></tr><tr><td>All - pos</td><td>85.69</td><td>56.31</td><td>64.29</td><td>66.97</td><td>70.48</td><td>60.15</td><td>37.19</td><td>56.27</td><td>52.16</td></tr><tr><td>All - punc</td><td>86.41</td><td>56.88</td><td>63.01</td><td>66.75</td><td>70.01</td><td>60.00</td><td>37.01</td><td>57.19</td><td>52.43</td></tr><tr><td>All - spell</td><td>88.23</td><td>56.41</td><td>63.87</td><td>67.23</td><td>70.83</td><td>60.27</td><td>37.22</td><td>57.41</td><td>53.41</td></tr><tr><td>All - stop</td><td>90.01</td><td>60.82</td><td>66.84</td><td>67.20</td><td>72.49</td><td>62.11</td><td>38.96</td><td>59.28</td><td>55.00</td></tr><tr><td>All - stem</td><td>88.12</td><td>60.82</td><td>67.12</td><td>69.25</td><td>72.13</td><td>61.73</td><td>38.00</td><td>59.00</td><td>55.42</td></tr><tr><td rowspan="14">Skip-gram</td><td>Basic</td><td>83.07</td><td>54.23</td><td>61.47</td><td>65.51</td><td>68.01</td><td>59.75</td><td>35.87</td><td>55.64</td><td>51.49</td></tr><tr><td>stop</td><td>83.23</td><td>55.47</td><td>62.00</td><td>65.62</td><td>68.00</td><td>59.84</td><td>35.94</td><td>55.76</td><td>51.62</td></tr><tr><td>spell</td><td>85.90</td><td>55.48</td><td>62.00</td><td>65.61</td><td>69.76</td><td>60.28</td><td>36.10</td><td>55.93</td><td>52.30</td></tr><tr><td>stem</td><td>86.00</td><td>55.33</td><td>61.89</td><td>65.60</td><td>68.72</td><td>59.50</td><td>36.00</td><td>55.69</td><td>51.40</td></tr><tr><td>punc</td><td>86.68</td><td>55.79</td><td>62.38</td><td>65.89</td><td>70.00</td><td>60.44</td><td>36.41</td><td>56.81</td><td>52.71</td></tr><tr><td>pos</td><td>85.91</td><td>56.28</td><td>63.25</td><td>66.24</td><td>69.81</td><td>60.85</td><td>36.44</td><td>56.23</td><td>52.94</td></tr><tr><td>neg</td><td>87.28</td><td>56.89</td><td>63.72</td><td>66.87</td><td>70.59</td><td>61.27</td><td>36.87</td><td>57.34</td><td>53.10</td></tr><tr><td>All</td><td>88.36</td><td>57.04</td><td>64.91</td><td>66.94</td><td>70.73</td><td>61.12</td><td>37.10</td><td>57.92</td><td>53.58</td></tr><tr><td>All - neg</td><td>83.26</td><td>54.00</td><td>61.95</td><td>66.00</td><td>69.88</td><td>60.00</td><td>36.94</td><td>55.97</td><td>51.89</td></tr><tr><td>All - pos</td><td>86.21</td><td>55.22</td><td>65.12</td><td>66.06</td><td>69.88</td><td>61.00</td><td>37.00</td><td>56.42</td><td>52.10</td></tr><tr><td>All - punc</td><td>85.57</td><td>55.99</td><td>64.29</td><td>66.29</td><td>70.00</td><td>60.98</td><td>37.01</td><td>57.02</td><td>52.53</td></tr><tr><td>All - spell</td><td>86.00</td><td>56.98</td><td>65.00</td><td>66.25</td><td>70.25</td><td>0.61</td><td>37.04</td><td>57.69</td><td>52.86</td></tr><tr><td>All - stop</td><td>88.74</td><td>60.93</td><td>67.00</td><td>68.57</td><td>72.20</td><td>62.02</td><td>38.92</td><td>59.18</td><td>55.18</td></tr><tr><td>All - stem</td><td>88.42</td><td>60.67</td><td>67.39</td><td>69.08</td><td>72.00</td><td>62.36</td><td>37.44</td><td>59.48</td><td>55.23</td></tr></table>
|
| 173 |
+
|
| 174 |
+
Table 4: F-score results of evaluating the effect of preprocessing factors using CBOW and Skip-gram on the News corpus. The overall best results are in bold. The best result using any single preprocessing setting is underlined.
|
| 175 |
+
|
| 176 |
+
effective factor across all nine datasets, indicating its importance in affective classification, followed by part-of-speech (pos) processing, where we retained only words belonging to one of four classes. On the other hand, removing stopwords (stop), spellchecking (spell), and stemming (stem) yield little improvement and mixed results. Interestingly, applying all the preprocessing factors is barely better than, and in some cases (Onion, Reddit, and SSEC) even worse than, applying negation alone. Finally, the best performance comes from combining all the preprocessing factors except stemming (All - stem). Moreover, Table 5 details the ablation studies on the Wikipedia corpus for all three models, where the best performance for the CBOW model again comes from combining all the preprocessing factors except stemming (All - stem), whereas for the Skip-gram and BERT models, the best results are obtained by applying all the preprocessing factors except stopword removal (All - stop). Considering that the Wikipedia corpus is almost 160 times bigger than the News corpus, it is unsurprising that word embeddings obtained from the former yield considerably better results, consistently across all nine datasets.
|
| 177 |
+
|
| 178 |
+
# 5.2 Evaluating Preprocessing Training Corpora for Word Vectors vs. Preprocessing Classification Data
|
| 179 |
+
|
| 180 |
+
We investigate the difference between applying preprocessing to the training corpora for generating word embeddings (Pre) and applying preprocessing to the classification datasets (Post). As an example, during Pre, we first apply the preprocessing techniques (e.g., all but stemming) to the training corpus (e.g., Wikipedia), then generate word embeddings, then convert a classification dataset (e.g., IMDB) into word embedding representation, and finally classify using LSTM. Conversely, for Post, we first generate word embeddings from a training corpus (e.g., Wikipedia), then apply the preprocessing techniques (e.g., all but stemming) to the classification dataset (e.g., IMDB), which is then converted to word vector representation, and finally classified using LSTM<sup>16</sup>.
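As a sketch of the two conditions (assuming gensim for word2vec; the toy `preprocess` below stands in for the paper's full set of factors, and the LSTM classification step is omitted):

```python
from gensim.models import Word2Vec

STOPWORDS = {"the", "a", "an", "of"}  # toy stand-in for one factor

def preprocess(sentences):
    """Stand-in for the paper's factors (neg, pos, punc, spell, stem, stop)."""
    return [[t for t in toks if t not in STOPWORDS] for toks in sentences]

corpus = [["the", "movie", "was", "not", "good"], ["a", "great", "film"]]

# Pre: preprocess the *training corpus*, then induce embeddings;
# the classification dataset is left untouched.
emb_pre = Word2Vec(preprocess(corpus), vector_size=50, min_count=1, sg=0)

# Post: induce embeddings from the raw corpus; preprocessing is instead
# applied later, to the classification dataset, before vectorizing it.
emb_post = Word2Vec(corpus, vector_size=50, min_count=1, sg=0)
```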
|
| 181 |
+
|
| 182 |
+
The results of this experiment are presented in Table 6.
|
| 183 |
+
|
| 184 |
+
<table><tr><td>Models</td><td>Processing</td><td>IMDB</td><td>Semeval</td><td>Airline</td><td>IAC</td><td>Onion</td><td>Reddit</td><td>Alm</td><td>ISEAR</td><td>SSEC</td></tr><tr><td rowspan="8">CBOW</td><td>Basic</td><td>84.91</td><td>56.89</td><td>68.11</td><td>69.15</td><td>71.02</td><td>63.58</td><td>45.22</td><td>59.73</td><td>55.84</td></tr><tr><td>All</td><td>88.41</td><td>60.25</td><td>71.39</td><td>71.57</td><td>73.61</td><td>65.27</td><td>48.81</td><td>62.48</td><td>57.42</td></tr><tr><td>All - neg</td><td>83.02</td><td>56.03</td><td>69.28</td><td>69.55</td><td>70.25</td><td>64.18</td><td>46.00</td><td>60.42</td><td>55.93</td></tr><tr><td>All - pos</td><td>85.69</td><td>57.21</td><td>71.00</td><td>70.08</td><td>72.29</td><td>64.82</td><td>47.53</td><td>62.28</td><td>56.25</td></tr><tr><td>All - punc</td><td>84.00</td><td>57.36</td><td>70.46</td><td>70.01</td><td>72.02</td><td>65.00</td><td>47.68</td><td>61.84</td><td>56.64</td></tr><tr><td>All - spell</td><td>86.19</td><td>58.26</td><td>70.98</td><td>70.59</td><td>72.85</td><td>65.00</td><td>47.29</td><td>61.63</td><td>57.00</td></tr><tr><td>All - stop</td><td>91.10</td><td>61.00</td><td>73.00</td><td>72.31</td><td>74.50</td><td>68.20</td><td>52.39</td><td>64.29</td><td>58.46</td></tr><tr><td>All - stem</td><td>88.76</td><td>62.19</td><td>73.25</td><td>72.36</td><td>75.69</td><td>68.53</td><td>50.28</td><td>65.33</td><td>59.28</td></tr><tr><td rowspan="8">Skip-gram</td><td>Basic</td><td>84.00</td><td>55.94</td><td>68.36</td><td>69.20</td><td>71.68</td><td>63.74</td><td>45.01</td><td>59.45</td><td>55.62</td></tr><tr><td>All</td><td>87.00</td><td>59.99</td><td>71.29</td><td>71.25</td><td>73.82</td><td>65.67</td><td>48.51</td><td>65.02</td><td>57.13</td></tr><tr><td>All - neg</td><td>84.97</td><td>56.11</td><td>69.00</td><td>70.17</td><td>70.04</td><td>64.55</td><td>46.28</td><td>60.54</td><td>55.86</td></tr><tr><td>All - pos</td><td>86.21</td><td>57.62</td><td>70.25</td><td>70.85</td><td>73.22</td><td>65.47</td><td>47.49</td><td>63.44</td><td>56.00</td></tr><tr><td>All - punc</td><td>85.00</td><td>57.20</td><td>70.00</td><td>70.77</td><td>72.00</td><td>65.00</td><td>47.10</td><td>61.72</td><td>56.49</td></tr><tr><td>All - spell</td><td>85.75</td><td>58.49</td><td>70.26</td><td>70.89</td><td>72.63</td><td>65.18</td><td>47.14</td><td>61.25</td><td>56.84</td></tr><tr><td>All - stop</td><td>89.76</td><td>61.74</td><td>72.19</td><td>72.00</td><td>75.69</td><td>68.29</td><td>52.01</td><td>64.00</td><td>58.14</td></tr><tr><td>All - stem</td><td>89.66</td><td>60.28</td><td>73.66</td><td>71.98</td><td>75.24</td><td>68.72</td><td>51.39</td><td>63.44</td><td>59.01</td></tr><tr><td rowspan="8">BERT</td><td>Basic</td><td>90.11</td><td>70.82</td><td>90.23</td><td>71.19</td><td>76.30</td><td>59.74</td><td>57.81</td><td>65.70</td><td>65.39</td></tr><tr><td>All</td><td>91.86</td><td>71.76</td><td>91.73</td><td>73.66</td><td>78.72</td><td>62.60</td><td>59.74</td><td>67.80</td><td>67.49</td></tr><tr><td>All - neg</td><td>90.33</td><td>70.52</td><td>91.04</td><td>72.00</td><td>77.07</td><td>61.44</td><td>58.14</td><td>66.59</td><td>66.10</td></tr><tr><td>All - pos</td><td>91.01</td><td>71.20</td><td>91.66</td><td>73.31</td><td>78.45</td><td>62.04</td><td>59.01</td><td>66.25</td><td>68.13</td></tr><tr><td>All - punc</td><td>91.59</td><td>71.50</td><td>91.60</td><td>73.18</td><td>78.54</td><td>62.27</td><td>59.60</td><td>67.25</td><td>67.27</td></tr><tr><td>All - spell</td><td>91.78</td><td>71.13</td><td>91.34</td><td>73.02</td><td>78.40</td><td>62.00</td><td>59.44</td><td>67.21</td><td>67.30</td></tr><tr><td>All - stop</td><td>94.18</td><td>73.81</td><td>94.85</td><td>75.80</td><td>79.10</td><td>65.39</td><td>60.73</td><td>69.33</td><td>69.81</td></tr><tr><td>All - stem</td><td>92.19</td><td>71.94</td><td>92.03</td><td>74.49</td><td>77.93</td><td>63.74</td><td>60.16</td><td>68.00</td><td>67.05</td></tr></table>
|
| 185 |
+
|
| 186 |
+
Table 5: F-score results of evaluating the effect of preprocessing factors using different models on Wikipedia corpus. The overall best results are shown in bold.
|
| 187 |
+
|
| 188 |
+
<table><tr><td>Models</td><td>Processing</td><td>IMDB</td><td>Semeval</td><td>Airline</td><td>IAC</td><td>Onion</td><td>Reddit</td><td>Alm</td><td>ISEAR</td><td>SSEC</td></tr><tr><td rowspan="3">CBOW</td><td>Post</td><td>87.49</td><td>59.33</td><td>71.28</td><td>69.87</td><td>74.20</td><td>67.13</td><td>47.19</td><td>62.00</td><td>56.27</td></tr><tr><td>Pre</td><td>88.76</td><td>62.19</td><td>73.25</td><td>72.36</td><td>75.69</td><td>68.53</td><td>50.28</td><td>65.33</td><td>59.28</td></tr><tr><td>Both</td><td>88.10</td><td>62.41</td><td>73.00</td><td>71.86</td><td>75.00</td><td>70.10</td><td>50.39</td><td>64.52</td><td>58.20</td></tr><tr><td rowspan="3">Skip-gram</td><td>Post</td><td>88.14</td><td>60.41</td><td>71.85</td><td>70.22</td><td>75.07</td><td>67.00</td><td>50.44</td><td>62.08</td><td>56.00</td></tr><tr><td>Pre</td><td>89.76</td><td>61.74</td><td>72.19</td><td>72.00</td><td>75.69</td><td>68.29</td><td>52.01</td><td>64.00</td><td>58.14</td></tr><tr><td>Both</td><td>89.33</td><td>61.25</td><td>73.58</td><td>71.62</td><td>75.48</td><td>68.74</td><td>51.68</td><td>65.29</td><td>58.03</td></tr><tr><td rowspan="3">BERT</td><td>Post</td><td>94.58</td><td>70.25</td><td>92.35</td><td>74.69</td><td>77.10</td><td>63.38</td><td>58.40</td><td>68.20</td><td>67.17</td></tr><tr><td>Pre</td><td>94.18</td><td>73.81</td><td>94.85</td><td>75.80</td><td>79.10</td><td>65.39</td><td>60.73</td><td>69.33</td><td>69.81</td></tr><tr><td>Both</td><td>94.63</td><td>72.41</td><td>93.00</td><td>75.19</td><td>78.69</td><td>65.17</td><td>60.33</td><td>69.06</td><td>68.43</td></tr></table>
|
| 189 |
+
|
| 190 |
+
Table 6: F-score results of evaluating the effect of preprocessing the word embedding training corpus (Pre) vs. preprocessing the evaluation datasets (Post).
|
| 191 |
+
|
| 192 |
+
Incorporating preprocessing into the training corpora before generating word vectors (Pre) outperforms preprocessing the classification datasets (Post) across all nine datasets of the three affective tasks. Interestingly, preprocessing both bodies of text (Both) yields little additional benefit over Pre alone, underscoring the importance of preprocessing the training corpora used to obtain word embeddings.
|
| 193 |
+
|
| 194 |
+
# 5.3 Evaluating Proposed Model against State-of-the-art Baselines
|
| 195 |
+
|
| 196 |
+
While not a primary focus of this paper, in this final experiment we compare the performance of our preprocessed word embeddings against five state-of-the-art pretrained word embeddings<sup>17</sup>.
|
| 197 |
+
|
| 198 |
+
(i) GloVe: Global vectors for word representation (Pennington et al., 2014) were trained on aggregated global word co-occurrences. We use the uncased GloVe6B vectors$^{18}$, trained on 6 billion words from Wikipedia and Gigaword. (ii) SSWE: Sentiment-Specific Word Embeddings (unified model)$^{19}$ were trained on a corpus of 10 million tweets to encode sentiment information into the continuous representation of words (Tang et al., 2014). (iii) FastText: These pretrained word vectors$^{20}$, based on sub-word character n-grams, were trained on Wikipedia using fastText (Bojanowski et al., 2017), an extension of the word2vec model.
|
| 199 |
+
|
| 200 |
+
<table><tr><td>Models</td><td>IMDB</td><td>Semeval</td><td>Airline</td><td>IAC</td><td>Onion</td><td>Reddit</td><td>Alm</td><td>ISEAR</td><td>SSEC</td></tr><tr><td>GloVe</td><td>85.64</td><td>70.29</td><td>70.21</td><td>70.19</td><td>71.39</td><td>63.57</td><td>56.21</td><td>65.30</td><td>58.40</td></tr><tr><td>SSWE</td><td>80.45</td><td>69.27</td><td>78.29</td><td>64.85</td><td>52.74</td><td>50.73</td><td>51.00</td><td>54.71</td><td>52.18</td></tr><tr><td>FastText</td><td>75.26</td><td>68.55</td><td>70.69</td><td>55.74</td><td>58.29</td><td>59.37</td><td>52.28</td><td>25.40</td><td>53.20</td></tr><tr><td>DeepMoji</td><td>69.79</td><td>62.10</td><td>71.03</td><td>65.67</td><td>70.90</td><td>53.08</td><td>46.33</td><td>58.20</td><td>58.90</td></tr><tr><td>EWE</td><td>71.28</td><td>60.27</td><td>67.81</td><td>67.43</td><td>70.06</td><td>55.02</td><td>58.33</td><td>66.09</td><td>58.94</td></tr><tr><td colspan="10">Our best results:</td></tr><tr><td>CBOW</td><td>91.10</td><td>62.19</td><td>73.25</td><td>72.36</td><td>75.69</td><td>68.53</td><td>52.39</td><td>65.33</td><td>59.28</td></tr><tr><td>Skip-gram</td><td>89.76</td><td>61.74</td><td>73.66</td><td>72.00</td><td>75.69</td><td>68.72</td><td>52.01</td><td>65.02</td><td>59.01</td></tr><tr><td>BERT</td><td>94.18</td><td>73.81</td><td>94.85</td><td>75.80</td><td>79.10</td><td>65.39</td><td>60.73</td><td>69.33</td><td>69.81</td></tr></table>
|
| 201 |
+
|
| 202 |
+
Table 7: F-score results of comparing against state-of-the-art word embeddings. The best score is highlighted in bold, and the second best result is underlined.
|
| 203 |
+
|
| 204 |
+
(iv) DeepMoji: These word embeddings$^{21}$ were trained using a BiLSTM on 1.2 billion tweets with emojis (Felbo et al., 2017). (v) EWE: Emotion-enriched Word Embeddings$^{22}$ were learned from a corpus of 200,000 Amazon product reviews using an LSTM model (Agrawal et al., 2018).
|
| 205 |
+
|
| 206 |
+
From the results in Table 7, we notice that BERT performs best on eight of the nine datasets, the sole exception being one sarcasm dataset (Reddit), while word2vec CBOW is second best on four datasets. Overall, our analysis suggests that preprocessing at the word embedding stage (Pre) works well for all three affective tasks.
|
| 207 |
+
|
| 208 |
+
# 5.4 Analyzing the Three Affective Tasks
|
| 209 |
+
|
| 210 |
+
Figure 2 summarizes the results obtained for all three tasks in terms of (a) absolute F-scores and (b) relative improvement (best preprocessing over Basic preprocessing). The IMDB dataset achieves the highest F-score overall, most likely because it consists of movie reviews, which are much longer than the texts from the other genres. As expected, the binary classification tasks of sentiment analysis and sarcasm detection achieve comparable results, while multiclass emotion classification typically yields much lower F-scores. The most interesting observation, however, appears in Fig. 2(b), where the emotion datasets show the highest relative improvement, indicating that multiclass classification tasks may benefit the most from applying preprocessing at the word embedding stage (Pre).
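For concreteness, assuming relative improvement is defined as the percentage gain of the best preprocessing setting over Basic (a sketch; the plotted values come from the paper's figures):

```python
def relative_improvement(best_f, basic_f):
    """Relative gain (%) of the best preprocessing setting over Basic."""
    return 100.0 * (best_f - basic_f) / basic_f

# Example with Table 5 values: IMDB with BERT, Basic 90.11 vs. All - stop 94.18.
print(round(relative_improvement(94.18, 90.11), 2))  # 4.52
```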
|
| 211 |
+
|
| 212 |
+
# 6 Conclusions
|
| 213 |
+
|
| 214 |
+
We systematically examined the role of preprocessing the training corpora used to induce word representations for affect analysis.
|
| 215 |
+
|
| 216 |
+

|
| 217 |
+
Figure 2: Absolute F-scores vs. relative improvement
|
| 218 |
+
|
| 219 |
+
While all preprocessing techniques improved performance to some extent, our analysis suggests that the most noticeable increase is obtained through negation processing (neg). The overall best performance is achieved by applying all the preprocessing techniques except stopword removal (All - stop). Interestingly, incorporating preprocessing into word representations appears to be far more beneficial than applying it downstream to the classification datasets. Moreover, while all three affective tasks (sentiment analysis, sarcasm detection, and emotion classification) benefit from our proposed preprocessing framework, our analysis reveals that the multiclass emotion classification task benefits the most. Exploring the space of subsets of our preprocessing factors might yield more interesting combinations; we leave this for future work.
|
| 220 |
+
|
| 221 |
+
# Acknowledgements
|
| 222 |
+
|
| 223 |
+
We thank the anonymous reviewers for their insightful comments. This work is funded by the Natural Sciences and Engineering Research Council of Canada (NSERC) and the Big Data Research, Analytics, and Information Network (BRAIN) Alliance established by the Ontario Research Fund Research Excellence Program (ORF-RE). In particular, we thank Majid Taghdimi from Questrade for providing us with computing resources and for his help with the parallelization algorithm. We would also like to thank Dr. Heidar Davoudi for helpful discussions and insights on this project.
|
| 224 |
+
|
| 225 |
+
# References
|
| 226 |
+
|
| 227 |
+
Ameeta Agrawal and Aijun An. 2012. Unsupervised emotion detection from text using semantic and syntactic relations. In Proceedings of the 2012 IEEE/WIC/ACM International Joint Conferences on Web Intelligence and Intelligent Agent Technology - Volume 01, pages 346-353. IEEE Computer Society.
|
| 228 |
+
Ameeta Agrawal, Aijun An, and Manos Papagelis. 2018. Learning emotion-enriched word representations. In Proceedings of the 27th International Conference on Computational Linguistics, pages 950-961.
|
| 229 |
+
Giulio Angiani, Laura Ferrari, Tomaso Fontanini, Paolo Fornacciari, Eleonora Iotti, Federico Magliani, and Stefano Manicardi. 2016. A comparison between preprocessing techniques for sentiment analysis in twitter. In Proceedings of the 2nd International Workshop on Knowledge Discovery on the WEB, KDWeb.
|
| 230 |
+
Nastaran Babanejad, Ameeta Agrawal, Heidar Davoudi, Aijun An, and Manos Papagelis. 2019. Leveraging emotion features in news recommendations. In Proceedings of the 7th International Workshop on News Recommendation and Analytics (INRA'19), in conjunction with RecSys'19, Copenhagen, Denmark, September 16-20, 2019.
|
| 231 |
+
Farah Benamara, Baptiste Chardon, Yannick Mathieu, Vladimir Popescu, and Nicholas Asher. 2012. How do negation and modality impact on opinions? In Proceedings of the Workshop on Extra-Propositional Aspects of Meaning in Computational Linguistics, ExProM '12, pages 10-18, Stroudsburg, PA, USA. Association for Computational Linguistics.
|
| 232 |
+
Erik Boiy, Pieter Hens, Koen Deschacht, and Marie-Francine Moens. 2007. Automatic sentiment analysis in on-line text. In Proceedings of the 11th International Conference on Electronic Publishing ELPUB2007.
|
| 233 |
+
Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Association for Computational Linguistics, 5:135-146.
|
| 234 |
+
Jose Camacho-Collados and Mohammad Taher Pilehvar. 2018. On the role of text preprocessing in neural network architectures: An evaluation study on text categorization and sentiment analysis. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP. Association for Computational Linguistics.
|
| 235 |
+
Ebba Cecilia Ovesdotter Alm. 2008. Affect in text and speech. ProQuest.
|
| 236 |
+
Billy Chiu, Gamal Crichton, Anna Korhonen, and Sampo Pyysalo. 2016. How to train good word embeddings for biomedical NLP. In Proceedings of the 15th Workshop on Biomedical Natural Language Processing, pages 166-174.
|
| 239 |
+
Taner Danisman and Adil Alpkocak. 2008. Feeler: Emotion classification of text using vector space model. In Proceedings of the AISB 2008 Symposium on Affective Language in Human and Machine, AISB 2008 Convention: Communication, Interaction and Social Intelligence, volume 1, page 53.
|
| 240 |
+
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.
|
| 241 |
+
Nemanja Djuric, Jing Zhou, Robin Morris, Mihajlo Grbovic, Vladan Radosavljevic, and Narayan Bhamidipati. 2015. Hate speech detection with comment embeddings. In Proceedings of the 24th International Conference on World Wide Web, WWW '15 Companion, page 29-30, New York, NY, USA. Association for Computing Machinery.
|
| 242 |
+
Manaal Faruqui, Jesse Dodge, Sujay K Jauhar, Chris Dyer, Eduard Hovy, and Noah A Smith. 2014. Retrofitting word vectors to semantic lexicons. arXiv preprint arXiv:1411.4166.
|
| 243 |
+
Bjarke Felbo, Alan Mislove, Anders Søgaard, Iyad Rahwan, and Sune Lehmann. 2017. Using millions of emoji occurrences to learn any-domain representations for detecting sentiment, emotion and sarcasm. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing (EMNLP).
|
| 244 |
+
Vachagan Gratian and Marina Haid. 2018. BrainT at IEST 2018: Fine-tuning multiclass perceptron for implicit emotion classification. In Proceedings of the 9th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis, pages 243-247.
|
| 245 |
+
Kazuma Hashimoto, Caiming Xiong, Yoshimasa Tsuruoka, and Richard Socher. 2017. A joint many-task model: Growing a neural network for multiple NLP tasks. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1923-1933, Copenhagen, Denmark. Association for Computational Linguistics.
|
| 246 |
+
Daniel Hershcovich, Assaf Toledo, Alon Halfon, and Noam Slonim. 2019. Syntactic interchangeability in word embedding models. arXiv preprint arXiv:1904.00669.
|
| 247 |
+
Zhao Jianqiang and Gui Xiaolin. 2017. Comparison research on text pre-processing methods on twitter sentiment analysis. IEEE Access, 5:2870-2879.
|
| 248 |
+
Mikhail Khodak, Nikunj Saunshi, and Kiran Vodrahalli. 2017. A large self-annotated corpus for sarcasm. arXiv preprint arXiv:1704.05579.
|
| 249 |
+
|
| 250 |
+
Yanghoon Kim, Hwanhee Lee, and Kyomin Jung. 2018. AttnConvnet at SemEval-2018 task 1: Attention-based convolutional neural networks for multi-label emotion classification. In Proceedings of The 12th International Workshop on Semantic Evaluation, pages 141-145, New Orleans, Louisiana. Association for Computational Linguistics.
|
| 251 |
+
Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.
|
| 252 |
+
Omer Levy and Yoav Goldberg. 2014. Dependency-based word embeddings. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics, volume 2, pages 302-308.
|
| 253 |
+
Omer Levy, Yoav Goldberg, and Ido Dagan. 2015. Improving distributional similarity with lessons learned from word embeddings. Transactions of the Association for Computational Linguistics, 3:211-225.
|
| 254 |
+
Pierre Lison and Andrey Kutuzov. 2017. Redefining context windows for word embedding models: An experimental study. In Proceedings of the 21st Nordic Conference on Computational Linguistics (NoDaLiDa), pages 284-288, Gothenburg, Sweden. Association for Computational Linguistics.
|
| 255 |
+
Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng, and Christopher Potts. 2011. Learning word vectors for sentiment analysis. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 142-150, Portland, Oregon, USA. Association for Computational Linguistics.
|
| 256 |
+
Oren Melamud, David McClosky, Siddharth Patwardhan, and Mohit Bansal. 2016. The role of context types and dimensionality in learning word embeddings. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1030-1040, San Diego, California. Association for Computational Linguistics.
|
| 257 |
+
Tomas Mikolov, Kai Chen, Gregory S. Corrado, and Jeffrey Dean. 2013. Efficient estimation of word representations in vector space. CoRR, abs/1301.3781.
|
| 258 |
+
George A. Miller. 1995. WordNet: A lexical database for English. Communications of the ACM, 38(11):39-41.
|
| 259 |
+
Rishabh Misra and Prahal Arora. 2019. Sarcasm detection using hybrid neural network. arXiv preprint arXiv:1908.07414.
|
| 260 |
+
Saif M Mohammad, Parinaz Sobhani, and Svetlana Kiritchenko. 2017. Stance and sentiment in tweets. ACM Transactions on Internet Technology (TOIT), 17(3):26.
|
| 261 |
+
|
| 262 |
+
Hala Mulki, Chedi Bechikh Ali, Hatem Haddad, and Ismail Babaoglu. 2018. Tw-StAR at SemEval-2018 task 1: Preprocessing impact on multi-label emotion classification. In Proceedings of The 12th International Workshop on Semantic Evaluation, pages 167-171.
|
| 263 |
+
Preslav Nakov, Alan Ritter, Sara Rosenthal, Veselin Stoyanov, and Fabrizio Sebastiani. 2016. SemEval 2016 task 4: Sentiment analysis in Twitter. In Proceedings of the 10th International Workshop on Semantic Evaluation, SemEval '16, San Diego, California. Association for Computational Linguistics.
|
| 264 |
+
Shereen Oraby, Vrindavan Harrison, Lena Reed, Ernesto Hernandez, Ellen Riloff, and Marilyn Walker. 2016. Creating and characterizing a diverse corpus of sarcasm in dialogue. In Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 31-41, Los Angeles. Association for Computational Linguistics.
|
| 265 |
+
Chaitali G. Patil and Sandip Patil. 2013. Use of Porter stemming algorithm and SVM for emotion extraction from news headlines. International Journal of Electronics, Communication and Soft Computing Science and Engineering.
|
| 266 |
+
Samuel Pecar, Michal Farkas, Marian Simko, Peter Lacko, and Maria Bielikova. 2018. NL-FIIT at IEST-2018: Emotion recognition utilizing neural networks and multi-level preprocessing. In Proceedings of the 9th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis, pages 217-223.
|
| 267 |
+
Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representation. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532-1543.
|
| 268 |
+
S Lovelyn Rose, R Venkatesan, Girish Pasupathy, and P Swaradh. 2018. A lexicon-based term weighting scheme for emotion identification of tweets. International Journal of Data Analysis Techniques and Strategies, 10(4):369-380.
|
| 269 |
+
Hassan Saif, Miriam Fernandez, Yulan He, and Harith Alani. 2014. On stopwords, filtering and data sparsity for sentiment analysis of twitter. In Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14), pages 810-817, Reykjavik, Iceland. European Language Resources Association (ELRA).
|
| 270 |
+
Hendrik Schuff, Jeremy Barnes, Julian Mohme, Sebastian Padó, and Roman Klinger. 2017. Annotation, modelling and analysis of fine-grained emotions on a stance and sentiment detection corpus. In Proceedings of the 8th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis, pages 13-23.
|
| 271 |
+
Dibyendu Seal, Uttam K Roy, and Rohini Basak. 2020. Sentence-level emotion detection from text based on semantic rules. In Information and Communication Technology for Sustainable Development, pages 423-430. Springer.
|
| 274 |
+
Florian Strohm. 2017. The impact of intensifiers, diminishers and negations on emotion expressions. B.S. thesis, University of Stuttgart.
|
| 275 |
+
Symeon Symeonidis, Dimitrios Effrosynidis, and Avi Arampatzis. 2018. A comparative evaluation of preprocessing techniques and their interactions for twitter sentiment analysis. Expert Systems with Applications, 110:298-310.
|
| 276 |
+
Duyu Tang, Furu Wei, Nan Yang, Ming Zhou, Ting Liu, and Bing Qin. 2014. Learning sentiment-specific word embedding for twitter sentiment classification. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1555-1565, Baltimore, Maryland. Association for Computational Linguistics.
|
| 277 |
+
|
| 278 |
+
Ivan Vulić, Simon Baker, Edoardo Maria Ponti, Ulla Petti, Ira Leviant, Kelly Wing, Olga Majewska, Eden Bar, Matt Malone, Thierry Poibeau, Roi Reichart, and Anna Korhonen. 2020. Multi-SimLex: A large-scale evaluation of multilingual and cross-lingual lexical semantic similarity.
|
| 279 |
+
Harald G Wallbott and Klaus R Scherer. 1986. How universal and specific is emotional experience? evidence from 27 countries on five continents. Information (International Social Science Council), 25(4):763-795.
|
| 280 |
+
Peng Xu, Andrea Madotto, Chien-Sheng Wu, Ji Ho Park, and Pascale Fung. 2018. Emo2Vec: Learning generalized emotion representation by multitask training. In Proceedings of the 9th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis, pages 292-298, Brussels, Belgium. Association for Computational Linguistics.
|
acomprehensiveanalysisofpreprocessingforwordrepresentationlearninginaffectivetasks/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f4ff1fe8bc1cc56acc12b2441740bd52772c2d1606d82a15acfc5526d7075e88
|
| 3 |
+
size 733379
|
acomprehensiveanalysisofpreprocessingforwordrepresentationlearninginaffectivetasks/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f1cd2804e1f495ce337b8bd03c3ce6130499f69eacc72bf197e2dfaaa97cad17
|
| 3 |
+
size 341165
|
acorpusforlargescalephonetictypology/214b2e1f-db8e-438f-b526-0b31ec7cc50b_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:38029115df555983564f933a252de2542a8cd98a73bb3246f88c6624540ca054
|
| 3 |
+
size 131512
|
acorpusforlargescalephonetictypology/214b2e1f-db8e-438f-b526-0b31ec7cc50b_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d7cf6682be56063bceb055aa445d7c2279583ff8c89aea098b7edff089893a77
|
| 3 |
+
size 154862
|
acorpusforlargescalephonetictypology/214b2e1f-db8e-438f-b526-0b31ec7cc50b_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a95eb2431178b15e2f4dafa23f76f5479e9d95b0aff595a848140825bf0f264c
|
| 3 |
+
size 6115725
|
acorpusforlargescalephonetictypology/full.md
ADDED
|
@@ -0,0 +1,373 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A Corpus for Large-Scale Phonetic Typology
|
| 2 |
+
|
| 3 |
+
Elizabeth Salesky$^{J}$ Eleanor Chodroff$^{Y}$ Tiago Pimentel$^{C}$ Matthew Wiesner$^{J}$ Ryan Cotterell$^{C,E}$ Alan W Black$^{M}$ Jason Eisner$^{J}$
|
| 4 |
+
|
| 5 |
+
<sup>J</sup>Johns Hopkins University <sup>Y</sup>University of York
|
| 6 |
+
<sup>C</sup>University of Cambridge <sup>E</sup>ETH Zürich <sup>M</sup>Carnegie Mellon University esalesky@jhu.edu eleanor.chodroff@york.ac.uk
|
| 7 |
+
|
| 8 |
+
# Abstract
|
| 9 |
+
|
| 10 |
+
A major hurdle in data-driven research on typology is having sufficient data in many languages to draw meaningful conclusions. We present VoxClamantis v1.0, the first large-scale corpus for phonetic typology, with aligned segments and estimated phoneme-level labels in 690 readings spanning 635 languages, along with acoustic-phonetic measures of vowels and sibilants. Access to such data can greatly facilitate investigation of phonetic typology at a large scale and across many languages. However, it is nontrivial and computationally intensive to obtain such alignments for hundreds of languages, many of which have few to no resources presently available. We describe the methodology to create our corpus, discuss caveats with current methods and their impact on the utility of this data, and illustrate possible research directions through a series of case studies on the 48 highest-quality readings. Our corpus and scripts are publicly available for non-commercial use at https://voxclamantisproject.github.io.
|
| 11 |
+
|
| 12 |
+
# 1 Introduction
|
| 13 |
+
|
| 14 |
+
Understanding the range and limits of cross-linguistic variation is fundamental to the scientific study of language. In speech and particularly phonetic typology, this involves exploring potentially universal tendencies that shape sound systems and govern phonetic structure. Such investigation requires access to large amounts of cross-linguistic data. Previous cross-linguistic phonetic studies have been limited to a small number of languages with available data (Disner, 1983; Cho and Ladefoged, 1999), or have relied on previously reported measures from many studies (Whalen and Levitt, 1995; Becker-Kristal, 2010; Gordon and Roettger, 2017; Chodroff et al., 2019).
|
| 15 |
+
|
| 16 |
+

|
| 17 |
+
Figure 1: The 635 languages of our corpus geo-located with mean Mel Cepstral Distortion (MCD) scores.
|
| 18 |
+
|
| 19 |
+
Existing multilingual speech corpora have similar restrictions, with data too limited for many tasks (Engstrand and Cunningham-Andersson, 1988; Ladefoged and Maddieson, 2007) or approximately 20 to 30 recorded languages (Ardila et al., 2020; Harper, 2011; Schultz, 2002).
|
| 20 |
+
|
| 21 |
+
The recently developed CMU Wilderness corpus (Black, 2019) constitutes an exception to this rule with over 600 languages. This makes it the largest and most typologically diverse speech corpus to date. In addition to its coverage, the CMU Wilderness corpus is unique in two additional aspects: cleanly recorded, read speech exists for all languages in the corpus, and the same content (modulo translation) exists across all languages.
|
| 22 |
+
|
| 23 |
+
However, this massively multilingual speech corpus is challenging to work with directly. Copyright, computational restrictions, and sheer size limit its accessibility. Due to copyright restrictions, the audio cannot be directly downloaded with the sentence and phoneme alignments. A researcher would need to download original audio MP3 and text through links to bible.is, then segment these with speech-to-text sentence alignments distributed in Black (2019). For phonetic research, subsequently identifying examples of specific phonetic segments in the audio is also a near-essential
|
| 24 |
+
|
| 25 |
+
step for extracting relevant acoustic-phonetic measurements. Carrying out this derivative step has allowed us to release a stable-access collection of token-level acoustic-phonetic measures to enable further research.
|
| 26 |
+
|
| 27 |
+
Obtaining such measurements requires several processing steps: estimating pronunciations, aligning them to the text, evaluating alignment quality, and finally, extracting phonetic measures. This work is further complicated by the fact that, for a sizable number of these languages, no linguistic resources currently exist (e.g., language-specific pronunciation lexicons). We adapt speech processing methods based on Black (2019) to accomplish these tasks, though not without noise: in §3.4, we identify three significant caveats when attempting to use our extended corpus for large-scale phonetic studies.
|
| 28 |
+
|
| 29 |
+
We release a comprehensive set of standoff markup of over 400 million labeled segments of continuous speech. For each segment, we provide an estimated phoneme-level label from the X-SAMPA alphabet, the preceding and following labels, and the start position and duration in the audio. Vowels are supplemented with formant measurements, and sibilants with standard measures of spectral shape.
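A record in this standoff markup can be modeled as follows (a sketch; the field names are illustrative, not the corpus's exact column names):

```python
from dataclasses import dataclass

@dataclass
class Segment:
    """One aligned segment from the standoff markup (illustrative fields)."""
    label: str        # estimated X-SAMPA phoneme-level label, e.g. "s"
    prev_label: str   # preceding label in the utterance
    next_label: str   # following label
    start: float      # start position in the audio, in seconds
    duration: float   # segment duration, in seconds

seg = Segment("i", "s", "t", start=12.48, duration=0.092)
```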
|
| 30 |
+
|
| 31 |
+
We present a series of targeted case studies illustrating the utility of our corpus for large-scale phonetic typology. These studies are motivated by potentially universal principles posited to govern phonetic variation: phonetic dispersion and phonetic uniformity. Our studies both replicate known results in the phonetics literature and also present novel findings. Importantly, these studies investigate current methodology as well as questions of interest to phonetic typology at a large scale.
|
| 32 |
+
|
| 33 |
+
# 2 Original Speech
|
| 34 |
+
|
| 35 |
+
The CMU Wilderness corpus (Black, 2019) consists of recorded readings of the New Testament of the Bible in many languages and dialects. Following the New Testament structure, these data are broken into 27 books, each with a variable number of chapters between 1 and 25. Bible chapters contain standardized verses (approximately sentence-level segments); however, the speech is originally split only by chapter. Each chapter
|
| 36 |
+
|
| 37 |
+

|
| 38 |
+
Figure 2: The extraction process for the measurements released in VoxClamantis v1.0.
|
| 39 |
+
|
| 40 |
+
has an average of 13 minutes of speech for a total of $\approx 20$ hours of speech and text per language. These recordings are clean, read speech with a sampling rate of $16\mathrm{kHz}$ . In most languages, they are non-dramatic readings with a single speaker; in some, they are dramatic multi-speaker readings with additive music. $^3$ The release from Black (2019) includes several resources for processing the corpus: scripts to download the original source data from bible.is, 'lexicons' created using grapheme-to-phoneme (G2P) conversion, and scripts to apply their generated sentence alignments, which facilitates downstream language processing tasks, including phoneme alignment.
|
| 41 |
+
|
| 42 |
+
# 3 The VoxClamantis v1.0 Corpus
|
| 43 |
+
|
| 44 |
+
Our VoxClamantis v1.0 corpus is derived from 690 audio readings of the New Testament of the Bible<sup>4</sup> in 635 languages.<sup>5</sup> We mark estimated speech
|
| 45 |
+
|
| 46 |
+
segments labeled with phonemic labels, and phonetic measures for the tokens that are vowels or sibilants. The extraction process is diagrammed in Figure 2. In the sections below, we detail our procedures for extracting labeled audio segments and their phonetic measures, in both high- and low-resource languages. We then outline important caveats to keep in mind when using this corpus.
|
| 47 |
+
|
| 48 |
+
# 3.1 Extracting Phoneme Alignments
|
| 49 |
+
|
| 50 |
+
We use a multi-pronged forced alignment strategy to balance broad language coverage (§3.1.1) with utilization of existing high-quality resources (§3.1.2). We assess the quality of our approaches in §3.1.3. We release the stand-off markup for our final alignments as both text files and Praat TextGrids (Boersma and Weenink, 2019).<sup>6</sup>
|
| 51 |
+
|
| 52 |
+
Using scripts and estimated boundaries from Black (2019), we first download and convert the audio MP3s to waveforms, and cut the audio and text into 'sentences' (hereafter called 'utterances' as they are not necessarily sentences). This step creates shorter-length speech samples to facilitate forced alignment; utterance boundaries do not change through our processing.
|
| 53 |
+
|
| 54 |
+
To extract labeled segments, we first require pronunciations for each utterance. A pronunciation is predicted from the text alone using some grapheme-to-phoneme (G2P) method. Each word's predicted pronunciation is a sequence of categorical labels, which are 'phoneme-level' in the sense that they are usually intended to distinguish the words of the language. We then align this predicted sequence of 'phonemes' to the corresponding audio.
|
| 55 |
+
|
| 56 |
+
# 3.1.1 All Languages
|
| 57 |
+
|
| 58 |
+
Most of our languages have neither existing pronunciation lexicons nor G2P resources. To provide coverage for all languages, we generate pronunciations using the simple 'universal' G2P system Unitran (Qian et al., 2010, as extended by Black, 2019), which deterministically expands each grapheme to a fixed sequence of phones in the Extended Speech Assessment Methods Phonetic Alphabet (X-SAMPA) (Wells, 1995/2000). This naive process is error-prone for languages with opaque orthographies, as we show in §3.1.3 below and discuss further in §3.4 (Caveat B). Even so, it provides a starting point for exploring low-resource languages: after some manual inspection, a linguist may be
|
| 59 |
+
|
| 60 |
+
able to correct the labels in a given language by a combination of manual and automatic methods.
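A minimal sketch of the deterministic expansion just described (the mapping table below is illustrative, not Unitran's actual table):

```python
# Naive 'universal' G2P: every grapheme expands to a fixed X-SAMPA
# sequence, with no language- or context-sensitivity.
G2P_TABLE = {"a": ["a"], "s": ["s"], "h": ["h"], "x": ["k", "s"]}  # illustrative

def unitran_like(word: str) -> list[str]:
    phones = []
    for g in word.lower():
        phones.extend(G2P_TABLE.get(g, []))  # unmapped graphemes dropped here
    return phones

# The digraph problem: <sh> comes out as two phones rather than one.
print(unitran_like("sash"))  # ['s', 'a', 's', 'h']
```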
|
| 61 |
+
|
| 62 |
+
For each reading, to align the pronunciation strings to the audio, we fit a generative acoustic model designed for this purpose: specifically, eHMM (Prahallad et al., 2006) as implemented in Festvox (Anumanchipalli et al., 2011) to run full Baum-Welch from a flat start for 15 to 30 iterations until the mean mel cepstral distortion score (see §3.1.3) converges. Baum-Welch does not change the predicted phoneme labels, but obtains a language-specific, reading-specific, contextual (triphone) acoustic model for each phoneme type in the language. We then use Viterbi alignment to identify an audio segment for each phoneme token.
|
| 63 |
+
|
| 64 |
+
# 3.1.2 High-Resource Languages
|
| 65 |
+
|
| 66 |
+
A subset of the languages in our corpus are supported by existing pronunciation resources. Two such resources are Epitran (Mortensen et al., 2018), a G2P tool based on language-specific rules, available in both IPA and X-SAMPA, and WikiPron (Lee et al., 2020), a collection of crowd-sourced pronunciations scraped from Wiktionary. These are mapped from IPA to X-SAMPA for label consistency across our corpus. Epitran covers 29 of our languages (39 readings), while WikiPron's 'phonemic' annotations provide partial coverage of 13 additional languages (18 readings). We use Epitran for languages with regular orthographies where it provides high-quality support, and WikiPron for other languages covered by WikiPron annotations. While Unitran and Epitran provide a single pronunciation for a word from the orthography, WikiPron may include multiple pronunciations. In such cases, Viterbi alignment (see below) chooses the pronunciation of each token that best fits the audio.
|
| 67 |
+
|
| 68 |
+
For most languages covered by WikiPron, most of our corpus words are out-of-vocabulary, as they do not yet have user-submitted pronunciations on Wiktionary. We train G2P models on WikiPron annotations to provide pronunciations for these words. Specifically, we use the WFST-based tool Phonetisaurus (Novak et al., 2016). Model hyperparameters are tuned on 3 WikiPron languages from SIGMORPHON 2020 (Gorman et al., 2020) (see Appendix C for details). In general, for languages that are not easily supported by Epitran-style G2P rules, training a G2P model on sufficiently many
|
| 69 |
+
|
| 70 |
+
<table><tr><td>ISO 639-3</td><td>tpi</td><td>ron</td><td>azj</td><td>msa</td><td>ceb</td><td>tur</td><td>tgl</td><td>spa</td><td>ilo</td><td>rus</td><td>hau</td><td>ind</td><td>tgk</td><td>jav</td><td>kaz</td></tr><tr><td># Types</td><td>1398</td><td>9746</td><td>18490</td><td>7612</td><td>8531</td><td>21545</td><td>9124</td><td>11779</td><td>15063</td><td>16523</td><td>4938</td><td>5814</td><td>12502</td><td>10690</td><td>20502</td></tr><tr><td>Unitran PER</td><td>18.4</td><td>21.3</td><td>26.9</td><td>30.1</td><td>30.1</td><td>31.2</td><td>34.4</td><td>34.4</td><td>35.0</td><td>37.4</td><td>37.6</td><td>38.8</td><td>39.8</td><td>49.9</td><td>46.8</td></tr><tr><td># Tokens</td><td>291k</td><td>169k</td><td>125k</td><td>157k</td><td>190k</td><td>125k</td><td>185k</td><td>168k</td><td>169k</td><td>130k</td><td>201k</td><td>170k</td><td>159k</td><td>177k</td><td>142k</td></tr><tr><td>Weighted PER</td><td>20.1</td><td>21.3</td><td>26.1</td><td>31.1</td><td>35.9</td><td>28.5</td><td>40.1</td><td>32.6</td><td>32.7</td><td>36.8</td><td>36.7</td><td>40.5</td><td>38.8</td><td>54.1</td><td>47.7</td></tr><tr><td>ISO 639-3</td><td>swe</td><td>kmr</td><td>som</td><td>tir</td><td>pol</td><td>hae</td><td>vie</td><td>tha</td><td>lao</td><td>ben</td><td>tel</td><td>hin</td><td>mar</td><td>tam</td><td></td></tr><tr><td># Types</td><td>8610</td><td>8127</td><td>14375</td><td>22188</td><td>18681</td><td>15935</td><td>2757</td><td>23338</td><td>31334</td><td>8075</td><td>23477</td><td>7722</td><td>17839</td><td>31642</td><td></td></tr><tr><td>Unitran PER</td><td>46.9</td><td>54.3</td><td>54.6</td><td>57.8</td><td>67.1</td><td>67.3</td><td>73.8</td><td>80.3</td><td>89.1</td><td>90.0</td><td>90.3</td><td>95.7</td><td>97.8</td><td>100.5</td><td></td></tr><tr><td># Tokens</td><td>165k</td><td>176k</td><td>156k</td><td>121k</td><td>141k</td><td>164k</td><td>211k</td><td>26k</td><td>36k</td><td>173k</td><td>124k</td><td>191k</td><td>159k</td><td>139k</td><td></td></tr><tr><td>Weighted PER</td><td>49.5</td><td>53.9</td><td>56.0</td><td>57.4</td><td>66.8</td><td>64.8</td><td>80.6</td><td>80.4</td><td>89.4</td><td>86.2</td><td>88.3</td><td>91.3</td><td>97.8</td><td>102.1</td><td></td></tr></table>
|
| 71 |
+
|
| 72 |
+
Table 1: Phoneme Error Rate (PER) for Unitran treating Epitran as ground-truth. 'Types' and 'Tokens' numbers reflect the number of unique word types and word tokens in each reading. We report PER calculated using word types for calibration with other work, as well as frequency-weighted PER reflecting occurrences in our corpus.
|
| 73 |
+
|
| 74 |
+
high-quality annotations may be more accurate.
|
| 75 |
+
|
| 76 |
+
We align the speech with the high-quality labels using a multilingual ASR model (see Wiesner et al., 2019). The model is trained in Kaldi (Povey et al., 2011) on 300 hours of data from the IARPA BABEL corpora (21 languages), a subset of Wall Street Journal (English), the Hub4 Spanish Broadcast news (Spanish), and a subset of the Voxforge corpus (Russian and French). These languages use a shared X-SAMPA phoneme label set which has high coverage of the labels of our corpus.
|
| 77 |
+
|
| 78 |
+
Our use of a pretrained multilingual model here contrasts with §3.1.1, where we had to train reading-specific acoustic models to deal with the fact that the same Unitran phoneme label may refer to quite different phonemes in different languages (see §3.4). We did not fine-tune our multilingual model to each language, as the cross-lingual ASR performance in previous work (Wiesner et al., 2019) suggests that this model is sufficient for producing phoneme-level alignments.
|
| 79 |
+
|
| 80 |
+
# 3.1.3 Quality Measures
|
| 81 |
+
|
| 82 |
+
Automatically generated phoneme-level labels and alignments inherently have some amount of noise, and this is particularly true for low-resource languages. The noise level is difficult to assess without gold-labeled corpora for either modeling or assessment. However, for the high-resource languages, we can evaluate Unitran against Epitran and WikiPron, pretending that the latter are ground truth. For example, Table 1 shows Unitran's phoneme error rates relative to Epitran. Appendix B gives several more detailed analyses with examples of individual phonemes.
|
| 83 |
+
|
| 84 |
+
Unitran pronunciations may have acceptable phoneme error rates for languages with transparent orthographies and one-to-one grapheme-to-phoneme mappings. Alas, without these conditions, they prove to be highly inaccurate.
|
| 85 |
+
|
| 86 |
+
That said, evaluating Unitran labels against Epitran or WikiPron may be unfair to Unitran, since some discrepancies are arguably not errors but mere differences in annotation granularity. For example, the 'phonemic' annotations in WikiPron are sometimes surprisingly fine-grained: WikiPron frequently uses /t̪/ in Cebuano where Unitran only uses /t/, though these refer to the same phoneme. These tokens are scored as incorrect. Moreover, there can be simple systematic errors: Unitran always maps grapheme $\langle a \rangle$ to label /a/, but in Tagalog, all such tokens should be /ɐ/. Such errors can often be fixed by remapping the Unitran labels, which in these cases would reduce PER from 30.1 to 6.8 (Cebuano) and from 34.4 to 7.8 (Tagalog). Such rules are not always this straightforward and should be created on a language-specific basis; we encourage rules created for languages outside of current Epitran support to be contributed back to the Epitran project.
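Such a remap is a simple post-hoc substitution over the predicted label sequence; a sketch (the rule below is invented for illustration; real rules must be written per language):

```python
# Language-specific remapping of Unitran's predicted labels.
REMAP = {"a": "6"}  # illustrative X-SAMPA rule, e.g. /a/ -> /6/

def remap(labels, table=REMAP):
    return [table.get(l, l) for l in labels]

print(remap(["t", "a", "o"]))  # ['t', '6', 'o']
```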
|
| 87 |
+
|
| 88 |
+
For those languages where we train a G2P system on WikiPron, we compute the PER of the G2P system on held-out WikiPron entries treated as ground truth. The results (Appendix C) range from excellent to mediocre.
|
| 89 |
+
|
| 90 |
+
We care less about the pronunciations themselves than about the segments that we extract by aligning these pronunciations to the audio. For high-resource languages, we can again compare the segments extracted by Unitran to the higher-quality ones extracted with better pronunciations. For each Unitran token, we evaluate its label and temporal boundaries against the high-quality token that is closest in the audio, as measured by the temporal distance between their midpoints (Appendix B).
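Reusing the illustrative `Segment` record sketched in §3, the nearest high-quality token can be found by midpoint distance (a sketch):

```python
def midpoint(seg):
    return seg.start + seg.duration / 2.0

def closest_token(unitran_seg, hq_segs):
    """High-quality segment whose midpoint is nearest in time."""
    return min(hq_segs, key=lambda s: abs(midpoint(s) - midpoint(unitran_seg)))
```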
|
| 91 |
+
|
| 92 |
+
Finally, the segmentation of speech and text into corresponding utterances is not perfect. We use the utterance alignments generated by Black (2019), in which the text and audio versions of a putative
|
| 93 |
+
|
| 94 |
+
utterance may have only partial overlap. Indeed, Black (2019) sometimes failed to align the Unitran pronunciation to the audio at all, and discarded these utterances. For each remaining utterance, he assessed the match quality using Mel Cepstral Distortion (MCD)—which is commonly used to evaluate synthesized spoken utterances (Kominek et al., 2008)—between the original audio and a resynthesized version of the audio based on the aligned pronunciation. Each segment's audio was resynthesized given the segment's phoneme label and the preceding and following phonemes, in a way that preserves its duration, using CLUSTERGEN (Black, 2006) with the same reading-specific eHMM model that we used for alignment. We distribute Black's per-utterance MCD scores with our corpus, and show the average score for each language in Appendix E. In some readings, the MCD scores are consistently poor.
|
| 95 |
+
|
| 96 |
+
# 3.2 Phonetic measures
|
| 97 |
+
|
| 98 |
+
Using the phoneme-level alignments described in §3.1, we automatically extract several standard acoustic-phonetic measures of vowels and sibilant fricatives that correlate with aspects of their articulation and abstract representation.
|
| 99 |
+
|
| 100 |
+
# 3.2.1 Vowel measures
|
| 101 |
+
|
| 102 |
+
Standard phonetic measurements of vowels include the formant frequencies and duration information. Formants are concentrations of acoustic energy at frequencies reflecting resonance points in the vocal tract during vowel production (Ladefoged and Johnson, 2014). The lowest two formants, F1 and F2, are considered diagnostic of vowel category identity and approximate tongue body height (F1) and backness (F2) during vowel production (Figure 3). F3 correlates with finer-grained aspects of vowel production such as rhoticity (/r/-coloring), lip rounding, and nasality (House and Stevens, 1956; Lindblom and Sundberg, 1971; Ladefoged et al., 1978), and F4 with high front vowel distinctions and speaker voice quality (Eek and Meister, 1994). Vowel duration can also signal vowel quality, and denotes lexical differences in many languages.
|
| 103 |
+
|
| 104 |
+
We extracted formant and duration information from each vowel using Praat (Boersma and Weenink, 2019). The first four formants (F1-F4) were measured at each quartile and decile of the vowel. Formant estimation was performed with the Burg algorithm in Praat with pre-emphasis from $50\mathrm{Hz}$ , a time window of $25~\mathrm{ms}$ , a time
|
| 105 |
+
|
| 106 |
+

|
| 107 |
+
Figure 3: Vowel Chart
|
| 108 |
+
|
| 109 |
+
step of $6.25\mathrm{ms}$ , a maximum of five formants permitted, and a formant ceiling of $5000\mathrm{Hz}$ which is the recommended value for a male vocal tract (Boersma and Weenink, 2019). Note that the speakers in this corpus are predominantly male.
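These settings map directly onto Praat's Burg tracker; a sketch with the parselmouth Python bindings (the file name and token boundaries are placeholders):

```python
import parselmouth  # Python interface to Praat

snd = parselmouth.Sound("reading.wav")  # placeholder path
formants = snd.to_formant_burg(
    time_step=0.00625,           # 6.25 ms
    max_number_of_formants=5,
    maximum_formant=5000.0,      # Hz; male vocal tract setting
    window_length=0.025,         # 25 ms
    pre_emphasis_from=50.0,      # Hz
)

# F1-F4 at the midpoint of one vowel token (placeholder boundaries).
start, end = 12.48, 12.57
mid = (start + end) / 2.0
f1_to_f4 = [formants.get_value_at_time(n, mid) for n in (1, 2, 3, 4)]
```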
|
| 110 |
+
|
| 111 |
+
# 3.2.2 Sibilant measures
|
| 112 |
+
|
| 113 |
+
Standard phonetic measurements of sibilant fricatives such as /s/, /z/, /ʃ/, and /ʒ/ include measures of spectral shape, and also segment duration. Measures of spectral shape frequently distinguish sibilant place of articulation: higher concentrations of energy generally reflect more anterior constriction locations (e.g., /s z/ are produced closer to the teeth than /ʃ ʒ/). Segment duration can also signal contrasts in voicing status (Jongman et al., 2000).
|
| 114 |
+
|
| 115 |
+
Our release contains the segment duration, spectral peak, the spectral moments of the frequency distribution (center of gravity: COG, variance, skewness, and kurtosis), as well as two measures of the mid-frequency peak determined by sibilant quality. These are the mid-frequency peak between 3000 and $7000\mathrm{Hz}$ for alveolar sibilants, and between 2000 and $6000\mathrm{Hz}$ for post-alveolar sibilants (Koenig et al., 2013; Shadle et al., 2016). The spectral information was obtained via multitaper spectral analysis (Rahim and Burr, 2017), with a time-bandwidth parameter $(nw)$ of 4 and 8 tapers $(k)$ over the middle $50\%$ of the fricative (Blacklock, 2004). Measurements were made using the methods described in Forrest et al. (1988) for spectral moments and Koenig et al. (2013) for spectral peak varieties.
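The moments themselves are straightforward once a spectrum is in hand (a sketch following Forrest et al. (1988); the multitaper estimation step with nw = 4 and k = 8 is not shown):

```python
import numpy as np

def spectral_moments(freqs, power):
    """COG, variance, skewness, and (excess) kurtosis of a power spectrum."""
    p = power / power.sum()            # treat the spectrum as a distribution
    cog = np.sum(freqs * p)
    var = np.sum((freqs - cog) ** 2 * p)
    sd = np.sqrt(var)
    skew = np.sum(((freqs - cog) / sd) ** 3 * p)
    kurt = np.sum(((freqs - cog) / sd) ** 4 * p) - 3.0
    return cog, var, skew, kurt

def mid_frequency_peak(freqs, power, lo=3000.0, hi=7000.0):
    """Peak frequency within a band (alveolar default: 3-7 kHz)."""
    band = (freqs >= lo) & (freqs <= hi)
    return freqs[band][np.argmax(power[band])]
```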
|
| 116 |
+
|
| 117 |
+
# 3.3 Computation times
|
| 118 |
+
|
| 119 |
+
Generating phoneme-level alignments and extracting subsequent phonetic measures takes significant time, computational resources, and domain knowledge. Our release enables the community to use this data directly without these prerequisites. Table 2 shows that the time to extract our resources,
|
| 120 |
+
|
| 121 |
+
<table><tr><td></td><td colspan="2">Computation Time</td></tr><tr><td>Resource</td><td>Per Language</td><td>Total Time</td></tr><tr><td>Utterance Alignments</td><td>30m</td><td>14d 13h</td></tr><tr><td>Phoneme Alignments</td><td>3d 3h 37m</td><td>6y 12d 16h</td></tr><tr><td>Vowel Measures</td><td>45m</td><td>21d 20h</td></tr><tr><td>Sibilant Measures</td><td>20m</td><td>9d 17h</td></tr><tr><td>Total</td><td>3d 5h 0m</td><td>6y 58d 19h</td></tr></table>
|
| 122 |
+
|
| 123 |
+
Table 2: Computation time to generate the full corpus.
|
| 124 |
+
|
| 125 |
+
once methods have been developed, was more than 6 CPU years, primarily for training eHMM models.
|
| 126 |
+
|
| 127 |
+
# 3.4 General caveats
|
| 128 |
+
|
| 129 |
+
We caution that our labeling and alignment of the corpus contains errors. In particular, it is difficult to responsibly draw firm linguistic conclusions from the Unitran-based segments (§3.1.1). In §5 we suggest future work to address these issues.
|
| 130 |
+
|
| 131 |
+
A Quality of Utterance Pairs: For some utterances, the speech does not correspond completely to the text, due to incorrect cosegmentation. In our phonetic studies, we threshold using reading-level MCD as a heuristic for overall alignment quality, and further threshold remaining readings using utterance-level MCD. We recommend others do so as well.
|
| 132 |
+
|
| 133 |
+
B Phoneme Label Consistency and Accuracy: Phoneme-level labels are predicted from text without the aid of audio using G2P methods. This may lead to systematic errors. In particular, Unitran relies on a 'universal' table that maps grapheme $\langle s \rangle$ (for example) to phoneme /s/ in every context and every language. This is problematic for languages that use $\langle s \rangle$ in some or all contexts to refer to other phonemes such as /z/ or /ʃ/, or use digraphs that contain $\langle s \rangle$, such as $\langle sh \rangle$ for /ʃ/. Thus, the predicted label /s/ may not consistently refer to the same phoneme within a language, nor to phonetically similar phonemes across languages. Even WikiPron annotations are user-submitted and may not be internally consistent (e.g., some words use /dʒ/ or /tʃ/ while others use /d͡ʒ/ or /t͡ʃ/), nor comparable across languages.
'Phoneme' inventories for Unitran and WikiPron have been implicitly chosen by whoever designed the language's orthography or its WikiPron pages; while this may reflect a reasonable folk phonology, it may not correspond to the inventory of underlying or surface phonemes that any linguist would be likely to posit.
C Label and Alignment Assessment: While alignment quality for languages with Epitran and WikiPron can be assessed and calibrated beyond this corpus, it cannot be for those languages with only Unitran alignments; the error rate on languages without resources to evaluate PER is unknown to us. The Unitran alignments should be treated as a first-pass alignment which may still be useful for a researcher who is willing to perform quality control and correction of the alignments using automatic or manual procedures. Our automatically generated alignments offer initial labels and boundary placements that we hope will facilitate downstream analysis.
D Corpus Representation: It is difficult to draw conclusions about 'average behavior' across languages. Some language families are better represented in the corpus than others, with more languages, more Bible readings per language, more hours of speech per reading, or more examples of a given phoneme of interest. Additionally, the recordings by language are largely single-speaker (and predominantly male). This means that we can often draw conclusions only about a particular speaker's idiolect, rather than the population of speakers of the language. Metadata giving the exact number of different speakers per recording do not exist.
# 4 Phonetic Case Studies
We present two case studies to illustrate the utility of our resource for exploration of cross-linguistic typology. Phoneticians have posited several typological principles that may structure phonetic systems. Though previous research has provided some indication as to the direction and magnitude of expected effects, many instances of the principles have not yet been explored at scale. Our case studies investigate how well they account for cross-linguistic variation and systematicity for our phonetic measures from vowels and sibilants. Below we present the data filtering methods for our case studies, followed by an introduction to and evaluation of phonetic dispersion and uniformity.
# 4.1 Data filtering
For quality, we use only the tokens extracted using high-resource pronunciations (Epitran and WikiPron) and only in languages with mean MCD lower than 8.0.$^{9}$ Furthermore, we only use those utterances with MCD lower than 6.0. The vowel analyses focus on F1 and F2 in ERB taken at the vowel midpoint (Zwicker and Terhardt, 1980; Glasberg and Moore, 1990).$^{10}$ The sibilant analyses focus on the mid-frequency peak of /s/ and /z/, also in ERB. Vowel tokens with F1 or F2 measures beyond two standard deviations from the label- and reading-specific mean were excluded, as were tokens for which Praat failed to find a measurable F1 or F2, or whose duration exceeded 300 ms. Sibilant tokens with mid-frequency peak or duration measures beyond two standard deviations from the label- and reading-specific mean were also excluded. When comparing realizations of two labels such as /i/–/u/ or /s/–/z/, we excluded readings that did not contain at least 50 tokens of each label. We show data representation with different filtering methods in Appendix D.
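As a rough illustration of this filtering, the Python sketch below applies the same thresholds to a hypothetical token table; the column names (reading, label, f1_erb, f2_erb, duration_ms, utt_mcd) are ours for illustration and are not the release's actual schema.

```python
import pandas as pd

def filter_vowel_tokens(df: pd.DataFrame) -> pd.DataFrame:
    """Apply the filtering criteria described above to a token table."""
    df = df[df["utt_mcd"] < 6.0]                 # utterance-level MCD threshold
    df = df[df["duration_ms"] <= 300]            # drop overlong vowels
    df = df.dropna(subset=["f1_erb", "f2_erb"])  # drop unmeasurable formants
    # Exclude tokens > 2 SD from the label- and reading-specific mean.
    for col in ("f1_erb", "f2_erb"):
        grp = df.groupby(["reading", "label"])
        z = (df[col] - grp[col].transform("mean")) / grp[col].transform("std")
        df = df[z.abs() <= 2]
    return df

def readings_with_min_tokens(df, labels=("i", "u"), n=50):
    """Keep readings with at least n tokens of each label in `labels`."""
    counts = (df[df["label"].isin(labels)]
              .groupby(["reading", "label"]).size().unstack(fill_value=0))
    ok = counts.index[(counts[list(labels)] >= n).all(axis=1)]
    return df[df["reading"].isin(ok)]
```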
After filtering, the vowel analyses included 48 readings covering 38 languages and 11 language families. The distribution of language families was 21 Indo-European, 11 Austronesian, 3 Creole/Pidgin, 3 Turkic, 2 Afro-Asiatic, 2 Tai-Kadai, 2 Uto-Aztecan, 1 Austro-Asiatic, 1 Dravidian, 1 Hmong-Mien, and 1 Uralic. Approximately 8.2 million vowel tokens remained, with a minimum of $\approx 31,000$ vowel tokens per reading. The sibilant analysis included 22 readings covering 18 languages and 6 language families. The distribution of language families was 10 Indo-European, 6 Austronesian, 3 Turkic, 1 Afro-Asiatic, 1 Austro-Asiatic, and 1 Creole/Pidgin. The decrease in total number of readings relative to the vowel analysis primarily reflects the infrequency of /z/ cross-linguistically. Approximately 385,000 /s/ and 83,000 /z/ tokens remained, with a minimum of $\approx 5,200$ tokens per reading.
# 4.2 Phonetic dispersion
Phonetic dispersion refers to the principle that contrasting speech sounds should be distinct from one another in phonetic space (Martinet, 1955; Jakobson, 1968; Flemming, 1995, 2004). Most studies investigating this principle have focused on its validity within vowel systems, as we do here. While languages tend to have seemingly well-dispersed vowel inventories such as $\{/i/, /a/, /u/\}$ (Joos, 1948; Stevens and Keyser, 2010), the actual phonetic realization of each vowel can vary substantially (Lindau and Wood, 1977; Disner, 1983). One prediction of dispersion is that the number of vowel categories in a language should be inversely related to the degree of per-category acoustic variation (Lindblom, 1986). Subsequent findings have cast doubt on this (Livijn, 2000; Recasens and Espinosa, 2009; Vaux and Samuels, 2015), but these studies have been limited by the number and diversity of languages investigated.
To investigate this, we measured the correlation between the number of vowel categories in a language and the degree of per-category variation, as measured by the joint entropy of (F1, F2) conditioned on the vowel category. We model $p(\mathrm{F1}, \mathrm{F2} \mid V)$ using a bivariate Gaussian for each vowel type $v$. We can then compute the joint conditional entropy under this model as $\mathrm{H}(\mathrm{F1}, \mathrm{F2} \mid V) = \sum_v p(v)\, \mathrm{H}(\mathrm{F1}, \mathrm{F2} \mid V = v) = \sum_v p(v)\, \frac{1}{2}\ln \det(2\pi e\Sigma_v)$, where $\Sigma_v$ is the covariance matrix for the model of vowel $v$.
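A minimal Python sketch of this estimator, assuming tokens arrive as (vowel label, F1, F2) triples for a single reading; details of the actual estimation may differ.

```python
import numpy as np

def conditional_entropy_nats(tokens):
    """H(F1, F2 | V) in nats under a per-vowel bivariate Gaussian model.
    `tokens` is an iterable of (vowel_label, f1, f2) triples."""
    data = {}
    for v, f1, f2 in tokens:
        data.setdefault(v, []).append((f1, f2))
    n_total = sum(len(xs) for xs in data.values())
    h = 0.0
    for v, xs in data.items():
        x = np.asarray(xs)
        p_v = len(xs) / n_total               # empirical p(v)
        cov = np.cov(x, rowvar=False)         # 2x2 covariance Sigma_v
        # Differential entropy of a bivariate Gaussian: (1/2) ln det(2*pi*e*Sigma)
        h += p_v * 0.5 * np.log(np.linalg.det(2 * np.pi * np.e * cov))
    return h
```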
Vowel inventory sizes per reading ranged from 4 to 20 vowels, with a median of 8. Both Spearman and Pearson correlations between entropy estimate and vowel inventory size across analyzed languages were small and not significant (Spearman $\rho = 0.11$, $p = 0.44$; Pearson $r = 0.11$, $p = 0.46$), corroborating previous accounts of the relationship described in Livijn (2000) and Vaux and Samuels (2015) with a larger number of languages—a larger vowel inventory does not necessarily imply more precision in vowel category production.$^{11}$
# 4.3 Phonetic uniformity
Previous work suggests that F1 is fairly uniform with respect to phonological height. Within a single language, the mean F1s of /e/ and /o/—which share a height—have been found to be correlated across speakers (Yorkshire English: Watt, 2000; French: Ménard et al., 2008; Brazilian Portuguese: Oushiro, 2019; Dutch, English, French, Japanese, Portuguese, Spanish: Schwartz and Ménard, 2019). Though it is physically possible for these vowels to differ in F1 realization, the correlations indicate a strong tendency for languages and individual speakers to yoke these two representations together.

(a) F1 of /i/-/u/ in ERB

(b) Mid-frequency peak of $/s/-/z/$ in ERB
Figure 4: Correlations of mean F1 (ERB) between /i/ and /u/ and of mean mid-frequency peak (ERB) between /s/ and /z/. The paired segments share a relevant phonological feature specification that is approximated by the acoustic-phonetic measurement: vowel height by F1 and sibilant place by mid-frequency peak. Each reading is represented by an ellipsoid, centered on the paired means and shaped by $\frac{1}{10}$ of their respective standard deviations. The solid line reflects the best-fit linear regression line with standard error in gray shading; the dashed line shows the line of equality. Marginal histograms show the range of variation in the segment-specific means.
Systematicity in the realization of sibilant place of articulation has also been observed across speakers of American English and Czech (Chodroff, 2017). Phonetic correlates of sibilant place strongly covary between /s/ and /z/, which share a [+anterior] place of articulation and are produced at the alveolar ridge, and between /ʃ/ and /ʒ/, which share a [-anterior] place of articulation and are produced behind the alveolar ridge.
A principle of uniformity may account for the above findings. Uniformity here refers to a principle in which a distinctive phonological feature should have a consistent phonetic realization, within a language or speaker, across different segments with that feature (Keating, 2003; Chodroff et al., 2019). Similar principles posited in the literature include Maximal Use of Available Controls, in which a control refers to an integrated perceptual and motor phonetic target (Ménard et al., 2008), as well as a principle of gestural economy (Maddieson, 1995). Phonetic realization refers to the mapping from the abstract distinctive feature to an abstract phonetic target. We approximate this phonetic target via an acoustic-phonetic measurement, but we emphasize that the acoustic measurement is not necessarily a direct reflection of an underlying phonetic target (which could be an articulatory gesture, auditory goal, or perceptuo-motor representation of the sound). We make the simplifying assumption that the acoustic-phonetic formants (F1, F2) directly correspond to phonetic targets linked to the vowel features of height and backness.
More precisely, uniformity of a phonetic measure with respect to a phonological feature means that any two segments sharing that feature will tend to have approximately equal measurements in a given language, even when that value varies across languages. We can observe whether this is true by plotting the measures of the two segments against each other by language (e.g., Figure 4).
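For concreteness, a small Python sketch of this comparison, reusing the hypothetical token table from the filtering sketch in §4.1; the function is illustrative, not the analysis code itself.

```python
from scipy.stats import pearsonr

def paired_means(df, label_a, label_b, measure):
    """Per-reading means of `measure` for two segment labels, keeping only
    readings that contain both labels."""
    means = df.groupby(["reading", "label"])[measure].mean().unstack()
    return means[[label_a, label_b]].dropna()

# Uniformity comparison, e.g. mean F1 of /i/ vs. /u/ across readings:
# pair = paired_means(vowels, "i", "u", "f1_erb")
# r, p = pearsonr(pair["i"], pair["u"])
```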
Vowels. As shown in Figure 4 and Table 3, the strongest correlations in mean F1 frequently reflected uniformity of height (e.g., high vowels /i/–/u/ $r = 0.79$, $p < 0.001$; mid vowels /e/–/o/ $r = 0.62$, $p < 0.01$).<sup>12</sup> Nevertheless, some vowel pairs that differed in height were also moderately correlated in mean F1 (e.g., /o/–/a/ $r = 0.66$, $p < 0.001$). Correlations of mean F1 were overall moderate in strength, regardless of the vowels' phonological specifications.
Correlations of mean F2 were also strongest among vowels with a uniform backness specification (e.g., back vowels /u/–/o/ $r = 0.69$, $p < 0.001$; front vowels /i/–/ε/ $r = 0.69$, $p < 0.05$; Table 4). The correlation between front tense vowels /i/ and /e/ was significant and in the expected direction, but also slightly weaker than the homologous back vowel pair ($r = 0.41$, $p < 0.05$). Vowels differing in backness frequently had negative correlations, which could reflect influences of category crowding or language-/speaker-specific differences in peripheralization. We leave further exploration of those relationships to future study.
The moderate to strong F1 correlations among vowels with a shared height specification are consistent with expectations based on previous studies, and also with predictions of uniformity. Similarly, we find the expected correlations of F2 means for vowels with a shared backness specification. Vowel pairs that were predicted to show significant correlations, but did not, tended to have small sample sizes (< 14 readings).
Nevertheless, the correlations are not perfect; nor are the patterns. For instance, the back vowel correlations of F2 are stronger than the front vowel correlations. While speculative, the apparent peripheralization of /i/ (as revealed in the negative F2 correlations) could have weakened the expected uniformity relation of /i/ with other front vowels. Future research should take into account additional influences of the vowel inventory composition, as well as articulatory or auditory factors for a more complete understanding of the structural forces in the phonetic realization of vowels.
Sibilants. The mean mid-frequency peak values for $/s/$ and $/z/$ each varied substantially across readings, and were also strongly correlated with one another $(r = 0.87, p < 0.001$ ; Figure 4). This finding suggests a further influence of uniformity on the realization of place for $/s/$ and $/z/$ , and the magnitude is comparable to previous correlations observed across American English and Czech speakers, in which $r$ was $\approx 0.90$ (Chodroff, 2017).
# 5 Directions for Future Work
We hope our corpus may serve as a touchstone for further improvements in phonetic typology research and methodology. Here we suggest potential steps forward for known areas (§3.4) where this corpus could be improved:
A Sentence alignments were generated using Unitran, and could be improved with higher-quality G2P and verse-level text segmentation to standardize utterances across languages.
B Consistent and comparable phoneme labels are the ultimate goal. Concurrent work on universal phone recognition (Li et al., 2020) addresses this issue through a universal phone inventory constrained by language-specific PHOIBLE inventories (Moran and McCloy, 2019). However, free-decoding phones from speech alone is challenging. One exciting possibility is to use the orthography and audio jointly to guide semi-supervised learning of per-language pronunciation lexicons (Lu et al., 2013; Zhang et al., 2017).
C Reliable quality assessment for current methods remains an outstanding research question for many languages. For covered languages, using a universal label set to map additional high quality lexicons (e.g., hand-annotated lexicons) to the same label space as ours would enable direct label and alignment assessment through precision, recall, and PER.
D Curating additional resources beyond this corpus would improve coverage and balance, such as contributing additional Epitran modules. Additional readings exist for many languages on the original bible.is site and elsewhere. Annotations with speaker information are not available, but improved unsupervised speaker clustering may also support better analysis.
# 6 Conclusion
VoxClamantis v1.0 is the first large-scale corpus for phonetic typology, with extracted phonetic features for 635 typologically diverse languages. We present two case studies illustrating both the research potential and the limitations of this corpus for investigating phonetic typology at a large scale. We discuss several caveats for the use of this corpus and areas for substantial improvement. Nonetheless, we hope that directly releasing our alignments and token-level features enables greater research accessibility in this area, and that this corpus will motivate and enable further developments in both phonetic typology and methodology for working with cross-linguistic speech corpora.
# Acknowledgments
The authors gratefully acknowledge Colin Wilson for his guidance and discussion on the topic, Florian Metze for resources, and Carlos Aguirre for helpful feedback.
# References
Gopala Krishna Anumanchipalli, Kishore Prahallad, and Alan W. Black. 2011. Festvox: Tools for creation and analyses of large speech corpora. In Workshop on Very Large Scale Phonetics Research, UPenn, Philadelphia.
Rosana Ardila, Megan Branson, Kelly Davis, Michael Henretty, Michael Kohler, Josh Meyer, Reuben Morais, Lindsay Saunders, Francis M. Tyers, and Gregor Weber. 2020. Common Voice: A massively-multilingual speech corpus. In Proceedings of the Twelfth International Conference on Language Resources and Evaluation (LREC 2020).
Roy Becker-Kristal. 2010. Acoustic typology of vowel inventories and Dispersion Theory: Insights from a large cross-linguistic corpus. Ph.D. thesis, University of California, Los Angeles.
Yoav Benjamini and Yosef Hochberg. 1995. Controlling the false discovery rate: A practical and powerful approach to multiple testing. Journal of the Royal Statistical Society: Series B (Methodological), 57(1):289-300.
Alan W. Black. 2006. CLUSTERGEN: A statistical parametric synthesizer using trajectory modeling. In Proceedings of INTERSPEECH.
Alan W. Black. 2019. CMU Wilderness Multilingual Speech Dataset. In ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5971-5975, Brighton, UK. IEEE.
Oliver Blacklock. 2004. Characteristics of Variation in Production of Normal and Disordered Fricatives, Using Reduced-Variance Spectral Methods. Ph.D. thesis, University of Southampton.
Paul Boersma and David Weenink. 2019. Praat: Doing phonetics by computer [computer program]. version 6.0.45.
Taehong Cho and Peter Ladefoged. 1999. Variation and universals in VOT: Evidence from 18 languages. Journal of Phonetics, 27(2):207-229.
Eleanor Chodroff. 2017. Structured Variation in Obstruent Production and Perception. Ph.D. thesis, Johns Hopkins University.
Eleanor Chodroff, Alessandra Golden, and Colin Wilson. 2019. Covariation of stop voice onset time across languages: Evidence for a universal constraint on phonetic realization. The Journal of the Acoustical Society of America, 145(1):EL109-EL115.
Sandra Ferrari Disner. 1983. Vowel Quality: The Relation between Universal and Language-specific Factors. Ph.D. thesis, UCLA.
David M. Eberhard, Gary F. Simons, and Charles D. Fennig, editors. 2020. *Ethnologue: Languages of the World*, 23rd edition. SIL International. Online version: http://www.ethnologue.com.
Arvo Eek and Einar Meister. 1994. Acoustics and perception of Estonian vowel types. *Phonetic Experimental Research*, XVIII:146-158.
Olle Engstrand and Una Cunningham-Andersson. 1988. Iris - a data base for cross-linguistic phonetic research.
Edward S. Flemming. 1995. Auditory Representations in Phonology. Ph.D. thesis, UCLA.
Edward S. Flemming. 2004. Contrast and perceptual distinctiveness. In Bruce Hayes, Robert Kirchner, and Donca Steriade, editors, The Phonetic Bases of Phonological Markedness, pages 232-276. Cambridge University Press, Cambridge.
Harvey Fletcher. 1923. Physical measurements of audition and their bearing on the theory of hearing. Journal of the Franklin Institute, 196(3):289-326.
Karen Forrest, Gary Weismer, Paul Milenkovic, and Ronald N. Dougall. 1988. Statistical analysis of word-initial voiceless obstruents: Preliminary data. The Journal of the Acoustical Society of America, 84(1):115-123.
Brian R. Glasberg and Brian C.J. Moore. 1990. Derivation of auditory filter shapes from notched-noise data. Hearing Research, 47(1-2):103-138.
Matthew Gordon and Timo Roettger. 2017. Acoustic correlates of word stress: A cross-linguistic survey. Linguistics Vanguard, 3(1).
Kyle Gorman, Lucas F.E. Ashby, Aaron Goyzueta, Arya D. McCarthy, Shijie Wu, and Daniel You. 2020. The SIGMORPHON 2020 shared task on multilingual grapheme-to-phoneme conversion. In Proceedings of the SIGMORPHON Workshop.
Mary Harper. 2011. The IARPA Babel multilingual speech database. Accessed: 2020-05-01.
Arthur S. House and Kenneth N. Stevens. 1956. Analog studies of the nasalization of vowels. The Journal of Speech and Hearing Disorders, 21(2):218-232.
Roman Jakobson. 1968. Child Language, Aphasia and Phonological Universals. Mouton Publishers.
Allard Jongman, Ratree Wayland, and Serena Wong. 2000. Acoustic characteristics of English fricatives. The Journal of the Acoustical Society of America, 108(3):1252-1263.
Martin Joos. 1948. Acoustic phonetics. Language, 24(2):5-136.
Patricia A. Keating. 2003. Phonetic and other influences on voicing contrasts. In Proceedings of the 15th International Congress of Phonetic Sciences, pages 20-23, Barcelona, Spain.
Laura Koenig, Christine H. Shadle, Jonathan L. Preston, and Christine R. Mooshammer. 2013. Toward improved spectral measures of /s/: Results from adolescents. Journal of Speech, Language, and Hearing Research, 56(4):1175-1189.
John Kominek, Tanja Schultz, and Alan W. Black. 2008. Synthesizer voice quality of new languages calibrated with mean mel cepstral distortion. In Spoken Language Technologies for Under-Resourced Languages.
Peter Ladefoged, Richard Harshman, Louis Goldstein, and Lloyd Rice. 1978. Generating vocal tract shapes from formant frequencies. The Journal of the Acoustical Society of America, 64(4):1027-1035.
Peter Ladefoged and Keith Johnson. 2014. A Course in Phonetics. Nelson Education.
Peter Ladefoged and Ian Maddieson. 2007. The UCLA phonetics lab archive.
Jackson L. Lee, Lucas F.E. Ashby, M. Elizabeth Garza, Yeonju Lee-Sikka, Sean Miller, Alan Wong, Arya D. McCarthy, and Kyle Gorman. 2020. Massively multilingual pronunciation mining with WikiPron. In Proceedings of the Twelfth International Conference on Language Resources and Evaluation (LREC 2020). European Language Resources Association (ELRA). Resources downloadable from https://github.com/kylebgorman/wikipron.
Xinjian Li, Siddharth Dalmia, Juncheng Li, Matthew Lee, Patrick Littell, Jiali Yao, Antonios Anastasopoulos, David R Mortensen, Graham Neubig, Alan W. Black, et al. 2020. Universal phone recognition with a multilingual allophone system. In Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 8249-8253. IEEE.
Mona Lindau and Patricia Wood. 1977. Acoustic vowel spaces. UCLA Working Papers in Phonetics, 38:41-48.
Björn Lindblom. 1986. Phonetic universals in vowel systems. In John J. Ohala and Jeri Jaeger, editors, Experimental Phonology, pages 13-44. Academic Press, Orlando.
Björn Lindblom and Johan Sundberg. 1971. Acoustical consequences of lip, tongue, jaw, and larynx movement. The Journal of the Acoustical Society of America, 50(4B):1166-1179.
Peder Livijn. 2000. Acoustic distribution of vowels in differently sized inventories: hot spots or adaptive dispersion? *Phonetic Experimental Research*, Institute of Linguistics, University of Stockholm (PERILUS), 11.
Liang Lu, Arnab Ghoshal, and Steve Renals. 2013. Acoustic data-driven pronunciation lexicon for large vocabulary speech recognition. In 2013 IEEE Workshop on Automatic Speech Recognition and Understanding, pages 374-379. IEEE.
Ian Maddieson. 1995. Gestural economy. In Proceedings of the 13th International Congress of Phonetic Sciences, Stockholm, Sweden.
André Martinet. 1955. Économie Des Changements Phonétiques: Traité de Phonologie Diachronique, volume 10. Bibliotheca Romanica.
Lucie Ménard, Jean-Luc Schwartz, and Jérôme Aubin. 2008. Invariance and variability in the production of the height feature in French vowels. Speech Communication, 50:14-28.
Steven Moran and Daniel McCloy, editors. 2019. PHOIBLE 2.0. Max Planck Institute for the Science of Human History, Jena.
David R. Mortensen, Siddharth Dalmia, and Patrick Littell. 2018. Epitran: Precision G2P for many languages. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Paris, France. European Language Resources Association (ELRA).
Terrance M. Nearey. 1977. *Phonetic Feature Systems for Vowels*. Ph.D. thesis, University of Alberta. Reprinted 1978 by Indiana University Linguistics Club.
Josef Robert Novak, Nobuaki Minematsu, and Keikichi Hirose. 2016. Phonetisaurus: Exploring grapheme-to-phoneme conversion with joint n-gram models in the WFST framework. Natural Language Engineering, 22(6):907-938.
Livia Oushiro. 2019. Linguistic uniformity in the speech of Brazilian internal migrants in a dialect contact situation. In Proceedings of the 19th International Congress of Phonetic Sciences, Melbourne, Australia 2019, pages 686-690, Melbourne, Australia. Canberra, Australia: Australasian Speech Science and Technology Association Inc.
Daniel Povey, Arnab Ghoshal, Gilles Boulianne, Lukas Burget, Ondrej Glembek, Nagendra Goel, Mirko Hannemann, Petr Motlicek, Yanmin Qian, Petr Schwarz, Jan Silovsky, Georg Stemmer, and Karel Vesely. 2011. The Kaldi speech recognition toolkit. In IEEE 2011 Workshop on Automatic Speech Recognition and Understanding. IEEE Signal Processing Society. IEEE Catalog No.: CFP11SRW-USB.
Kishore Prahallad, Alan W. Black, and Ravishankhar Mosur. 2006. Sub-phonetic modeling for capturing pronunciation variations for conversational speech synthesis. In Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), volume 1. IEEE.
Ting Qian, Kristy Hollingshead, Su-youn Yoon, Kyoung-young Kim, and Richard Sproat. 2010. A Python toolkit for universal transliteration. In Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC'10), Valletta, Malta. European Language Resources Association (ELRA).
Karim Rahim and Wesley S. Burr. 2017. multitaper: Multitaper spectral analysis. R package version 1.0-14.
Daniel Recasens and Aina Espinosa. 2009. Dispersion and variability in Catalan five and six peripheral vowel systems. Speech Communication, 51(3):240-258.
Tanja Schultz. 2002. GlobalPhone: A multilingual speech and text database developed at Karlsruhe University. In Seventh International Conference on Spoken Language Processing, pages 345-348, Denver, CO.
Jean-Luc Schwartz and Lucie Ménard. 2019. Structured idiosyncrasies in vowel systems. OSF Preprints.
Christine H. Shadle, Wei-rong Chen, and D. H. Whalen. 2016. Stability of the main resonance frequency of fricatives despite changes in the first spectral moment. The Journal of the Acoustical Society of America, 140(4):3219-3220.
Kenneth N. Stevens and Samuel J. Keyser. 2010. Quantal theory, enhancement and overlap. Journal of Phonetics, 38(1):10-19.
Andreas Stolcke. 2002. SRILM - an extensible language modeling toolkit. In Seventh International Conference on Spoken Language Processing, pages 901-904.
Bert Vaux and Bridget Samuels. 2015. Explaining vowel systems: Dispersion theory vs natural selection. Linguistic Review, 32(3):573-599.
Dominic J. L. Watt. 2000. Phonetic parallels between the close-mid vowels of Tyneside English: Are they internally or externally motivated? Language Variation and Change, 12(1):69-101.
John C. Wells. 1995/2000. Computer-coding the IPA: A proposed extension of SAMPA.
D.H. Whalen and Andrea G. Levitt. 1995. The universality of intrinsic F0 of vowels. Journal of Phonetics, 23:349-366.
Matthew Wiesner, Oliver Adams, David Yarowsky, Jan Trmal, and Sanjeev Khudanpur. 2019. Zero-shot pronunciation lexicons for cross-language acoustic model transfer. In Proceedings of IEEE Association for Automatic Speech Recognition and Understanding (ASRU).
Xiaohui Zhang, Vimal Manohar, Daniel Povey, and Sanjeev Khudanpur. 2017. Acoustic data-driven lexicon learning based on a greedy pronunciation selection framework. arXiv preprint arXiv:1706.03747.
Eberhard Zwicker and Ernst Terhardt. 1980. Analytical expressions for critical-band rate and critical bandwidth as a function of frequency. The Journal of the Acoustical Society of America, 68(5):1523-1525.
# A Pairwise Correlations between Vowel Formant Measures (§4 Case Studies)
Table 3 and Table 4 respectively show Pearson correlations of mean F1 and mean F2 in ERB between vowels that appear in at least 10 readings. As formalized in the present analysis, phonetic uniformity predicts strong correlations of mean F1 among vowels with a shared height specification, and strong correlations of mean F2 among vowels with a shared backness specification. The respective “Height” and “Backness” columns in Table 3 and Table 4 indicate whether the vowels in each pair match in their respective specifications. $p$ -values are corrected for multiple comparisons using the Benjamini-Hochberg correction and a false discovery rate of 0.25 (Benjamini and Hochberg, 1995). Significance is assessed at $\alpha = 0.05$ following the correction for multiple comparisons; rows that appear in gray have correlations that are not significant according to this threshold.
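For reference, the sketch below is a standard Python implementation of Benjamini-Hochberg adjusted p-values (not necessarily the exact script used to produce these tables).

```python
import numpy as np

def bh_adjust(pvals):
    """Benjamini-Hochberg adjusted p-values (step-up procedure).
    Reject hypothesis i at false discovery rate q iff adjusted p_i <= q."""
    p = np.asarray(pvals, dtype=float)
    m = len(p)
    order = np.argsort(p)
    ranked = p[order] * m / np.arange(1, m + 1)     # p_(i) * m / i
    # Enforce monotonicity from the largest p-value downward.
    adj = np.minimum.accumulate(ranked[::-1])[::-1]
    out = np.empty(m)
    out[order] = np.clip(adj, 0.0, 1.0)
    return out
```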
<table><tr><td>V1</td><td>V2</td><td>Height</td><td># Readings</td><td>r</td><td>p</td></tr><tr><td>/i/</td><td>/i:/</td><td>✓</td><td>12</td><td>0.81</td><td>0.006</td></tr><tr><td>/e:/</td><td>/o:/</td><td>✓</td><td>10</td><td>0.81</td><td>0.015</td></tr><tr><td>/i/</td><td>/u/</td><td>✓</td><td>40</td><td>0.79</td><td>0.000</td></tr><tr><td>/ε/</td><td>/ɔ/</td><td>✓</td><td>11</td><td>0.68</td><td>0.053</td></tr><tr><td>/o/</td><td>/a/</td><td></td><td>37</td><td>0.66</td><td>0.000</td></tr><tr><td>/i:/</td><td>/o:/</td><td></td><td>11</td><td>0.65</td><td>0.070</td></tr><tr><td>/i:/</td><td>/u:/</td><td>✓</td><td>12</td><td>0.64</td><td>0.061</td></tr><tr><td>/e/</td><td>/o/</td><td>✓</td><td>35</td><td>0.62</td><td>0.001</td></tr><tr><td>/e/</td><td>/u/</td><td></td><td>36</td><td>0.59</td><td>0.001</td></tr><tr><td>/e/</td><td>/a/</td><td></td><td>34</td><td>0.58</td><td>0.002</td></tr><tr><td>/u/</td><td>/ə/</td><td></td><td>12</td><td>0.58</td><td>0.105</td></tr><tr><td>/i:/</td><td>/e:/</td><td></td><td>11</td><td>0.58</td><td>0.118</td></tr><tr><td>/i/</td><td>/e/</td><td></td><td>38</td><td>0.54</td><td>0.002</td></tr><tr><td>/ε/</td><td>/a/</td><td></td><td>12</td><td>0.54</td><td>0.127</td></tr><tr><td>/u/</td><td>/o/</td><td></td><td>38</td><td>0.49</td><td>0.007</td></tr><tr><td>/ε/</td><td>/u/</td><td></td><td>14</td><td>0.49</td><td>0.135</td></tr><tr><td>/i/</td><td>/o/</td><td></td><td>39</td><td>0.46</td><td>0.011</td></tr><tr><td>/e/</td><td>/ε/</td><td>✓</td><td>12</td><td>0.46</td><td>0.204</td></tr><tr><td>/u/</td><td>/a/</td><td></td><td>37</td><td>0.42</td><td>0.027</td></tr><tr><td>/i:/</td><td>/e/</td><td></td><td>11</td><td>0.42</td><td>0.288</td></tr><tr><td>/u/</td><td>/u:/</td><td>✓</td><td>10</td><td>0.41</td><td>0.334</td></tr><tr><td>/i:/</td><td>/u/</td><td>✓</td><td>11</td><td>0.33</td><td>0.430</td></tr><tr><td>/i:/</td><td>/a/</td><td></td><td>11</td><td>0.28</td><td>0.496</td></tr><tr><td>/i/</td><td>/a/</td><td></td><td>39</td><td>0.27</td><td>0.173</td></tr><tr><td>/i/</td><td>/ε/</td><td></td><td>14</td><td>0.24</td><td>0.496</td></tr><tr><td>/i:/</td><td>/o/</td><td></td><td>13</td><td>0.19</td><td>0.624</td></tr><tr><td>/i/</td><td>/ə/</td><td></td><td>13</td><td>0.10</td><td>0.785</td></tr><tr><td>/u/</td><td>/ɔ/</td><td></td><td>12</td><td>0.09</td><td>0.785</td></tr><tr><td>/ε/</td><td>/o/</td><td>✓</td><td>13</td><td>-0.09</td><td>0.785</td></tr><tr><td>/e/</td><td>/ɔ/</td><td>✓</td><td>10</td><td>-0.12</td><td>0.785</td></tr><tr><td>/u:/</td><td>/o/</td><td></td><td>10</td><td>-0.12</td><td>0.785</td></tr><tr><td>/i/</td><td>/ɔ/</td><td></td><td>11</td><td>-0.42</td><td>0.288</td></tr><tr><td>/o/</td><td>/ə/</td><td>✓</td><td>11</td><td>-0.51</td><td>0.173</td></tr><tr><td>/ə/</td><td>/a/</td><td></td><td>11</td><td>-0.90</td><td>0.001</td></tr></table>
Table 3: Pearson correlations $(r)$ of mean F1 in ERB between vowel categories.
<table><tr><td>V1</td><td>V2</td><td>Backness</td><td># Readings</td><td>r</td><td>p</td></tr><tr><td>/e/</td><td>/ε/</td><td>✓</td><td>12</td><td>0.77</td><td>0.019</td></tr><tr><td>/u/</td><td>/u:/</td><td>✓</td><td>10</td><td>0.77</td><td>0.037</td></tr><tr><td>/i/</td><td>/i:/</td><td>✓</td><td>12</td><td>0.70</td><td>0.038</td></tr><tr><td>/u/</td><td>/o/</td><td>✓</td><td>38</td><td>0.69</td><td>0.000</td></tr><tr><td>/i/</td><td>/ε/</td><td>✓</td><td>14</td><td>0.69</td><td>0.031</td></tr><tr><td>/u:/</td><td>/o/</td><td>✓</td><td>10</td><td>0.62</td><td>0.130</td></tr><tr><td>/u/</td><td>/ə/</td><td></td><td>12</td><td>0.60</td><td>0.107</td></tr><tr><td>/u/</td><td>/ɔ/</td><td>✓</td><td>12</td><td>0.52</td><td>0.168</td></tr><tr><td>/i/</td><td>/e/</td><td>✓</td><td>38</td><td>0.41</td><td>0.038</td></tr><tr><td>/ε/</td><td>/a/</td><td></td><td>12</td><td>0.32</td><td>0.519</td></tr><tr><td>/o/</td><td>/a/</td><td></td><td>37</td><td>0.30</td><td>0.159</td></tr><tr><td>/e:/</td><td>/o:/</td><td></td><td>10</td><td>0.27</td><td>0.666</td></tr><tr><td>/e/</td><td>/a/</td><td></td><td>34</td><td>0.24</td><td>0.339</td></tr><tr><td>/o/</td><td>/ə/</td><td></td><td>11</td><td>0.21</td><td>0.724</td></tr><tr><td>/ə/</td><td>/a/</td><td>✓</td><td>11</td><td>0.16</td><td>0.830</td></tr><tr><td>/i:/</td><td>/e/</td><td>✓</td><td>11</td><td>0.11</td><td>0.911</td></tr><tr><td>/i/</td><td>/a/</td><td></td><td>39</td><td>0.06</td><td>0.911</td></tr><tr><td>/i:/</td><td>/e:/</td><td>✓</td><td>11</td><td>0.06</td><td>0.965</td></tr><tr><td>/e/</td><td>/o/</td><td></td><td>35</td><td>0.01</td><td>0.965</td></tr><tr><td>/u/</td><td>/a/</td><td></td><td>37</td><td>0.00</td><td>0.985</td></tr><tr><td>/ε/</td><td>/ɔ/</td><td></td><td>11</td><td>-0.03</td><td>0.965</td></tr><tr><td>/i:/</td><td>/a/</td><td></td><td>11</td><td>-0.04</td><td>0.965</td></tr><tr><td>/ε/</td><td>/o/</td><td></td><td>13</td><td>-0.04</td><td>0.965</td></tr><tr><td>/e/</td><td>/u/</td><td></td><td>36</td><td>-0.12</td><td>0.666</td></tr><tr><td>/ε/</td><td>/u/</td><td></td><td>14</td><td>-0.22</td><td>0.666</td></tr><tr><td>/i/</td><td>/ə/</td><td></td><td>13</td><td>-0.23</td><td>0.666</td></tr><tr><td>/i:/</td><td>/o:/</td><td></td><td>11</td><td>-0.42</td><td>0.345</td></tr><tr><td>/i/</td><td>/o/</td><td></td><td>39</td><td>-0.48</td><td>0.017</td></tr><tr><td>/i:/</td><td>/o/</td><td></td><td>13</td><td>-0.52</td><td>0.149</td></tr><tr><td>/i/</td><td>/u/</td><td></td><td>40</td><td>-0.55</td><td>0.003</td></tr><tr><td>/i/</td><td>/ɔ/</td><td></td><td>11</td><td>-0.63</td><td>0.107</td></tr><tr><td>/e/</td><td>/ɔ/</td><td></td><td>10</td><td>-0.65</td><td>0.107</td></tr><tr><td>/i:/</td><td>/u/</td><td></td><td>11</td><td>-0.80</td><td>0.019</td></tr><tr><td>/i:/</td><td>/u:/</td><td></td><td>12</td><td>-0.83</td><td>0.009</td></tr></table>
Table 4: Pearson correlations $(r)$ of mean F2 in ERB between vowel categories.
# B Distributions of Unitran Segment Accuracy (§3.1.3 Quality Measures)
Here we evaluate the quality of the Unitran dataset in more detail. The goal is to explore the variation in the quality of the labeled Unitran segments across different languages and phoneme labels. This evaluation includes only readings in high-resource languages, where we have not only the aligned Unitran pronunciations but also aligned high-resource pronunciations (Epitran or WikiPron) against which to evaluate them. The per-token statistics used to calculate these plots are included in the corpus release to enable closer investigation of individual phonemes than is possible here.
# B.1 Unitran Pronunciation Accuracy
First, in Figures 5 and 6, we consider whether Unitran's utterance pronunciations are accurate without looking at the audio. For each utterance, we compute the unweighted Levenshtein alignment between the Unitran pronunciation of the utterance and the high-resource pronunciation. For each reading, we then score the percentage of Unitran 'phoneme' tokens that were aligned to high-resource 'phoneme' tokens with exactly the same label.$^{14}$ We can see in Figure 6 that many labels are highly accurate in many readings while being highly inaccurate in many others. Some labels are noisy in some readings.$^{15}$
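A minimal Python sketch of this scoring, assuming pronunciations are given as lists of phoneme label strings; it illustrates the metric rather than reproducing the evaluation code.

```python
def levenshtein_alignment(a, b):
    """Unweighted Levenshtein alignment of two phoneme sequences.
    Returns pairs (i, j) of aligned positions (matches/substitutions only)."""
    n, m = len(a), len(b)
    d = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        d[i][0] = i
    for j in range(m + 1):
        d[0][j] = j
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            d[i][j] = min(d[i - 1][j] + 1,                          # deletion
                          d[i][j - 1] + 1,                          # insertion
                          d[i - 1][j - 1] + (a[i - 1] != b[j - 1])) # sub/match
    pairs, i, j = [], n, m
    while i > 0 and j > 0:  # trace back one optimal alignment path
        if d[i][j] == d[i - 1][j - 1] + (a[i - 1] != b[j - 1]):
            pairs.append((i - 1, j - 1)); i, j = i - 1, j - 1
        elif d[i][j] == d[i - 1][j] + 1:
            i -= 1
        else:
            j -= 1
    return pairs[::-1]

def label_accuracy(unitran, high_res):
    """Fraction of Unitran tokens aligned to an identically labeled token;
    tokens aligned to insertions/deletions count as incorrect."""
    pairs = levenshtein_alignment(unitran, high_res)
    return sum(unitran[i] == high_res[j] for i, j in pairs) / len(unitran)
```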

Figure 5: Unitran pronunciation accuracy per language, evaluated by Levenshtein alignment to WikiPron pronunciations (hatched bars) or Epitran pronunciations (plain bars). Where a language has multiple readings, error bars show the min and max across those readings.

Figure 6: Unitran pronunciation accuracy per language, for selected phonemes. Accuracy is evaluated by Levenshtein alignment as in Figure 5. Each curve is a kernel density plot with integral 1. For the $/z/$ curve, the integral between $80\%$ and $100\%$ (for example) is the estimated probability that in a high-resource language drawn uniformly at random, the fraction of Unitran $/z/$ segments that align to high-resource $/z/$ segments falls in that range. The 'all' curve is the same, but now the uniform draw is from all pairs of (high-resource language, Unitran phoneme used in that language).
# B.2 Unitran Segment Label Accuracy
In Figures 7 and 8, we ask the same question again, but making use of the audio data. The match for each Unitran segment is now found not by Levenshtein alignment, but more usefully by choosing the high-resource segment with the closest midpoint. For each reading, we again score the percentage of Unitran 'phoneme' tokens whose aligned high-resource 'phoneme' tokens have exactly the same label. Notice that phonemes that typically had high accuracy in Figure 6, such as /p/ and /b/, now have far more variable accuracy in Figure 8, suggesting difficulty in aligning the Unitran pronunciations to the correct parts of the audio.
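The midpoint-based variant can be sketched as follows, assuming each segment is a (label, start, end) triple; again this is an illustration rather than the actual evaluation code.

```python
def midpoint_label_accuracy(unitran_segs, high_res_segs):
    """For each Unitran segment, find the high-resource segment whose
    temporal midpoint is closest, and score label agreement."""
    def mid(seg):
        label, start, end = seg
        return (start + end) / 2.0
    correct = 0
    for seg in unitran_segs:
        match = min(high_res_segs, key=lambda h: abs(mid(h) - mid(seg)))
        correct += seg[0] == match[0]
    return correct / len(unitran_segs)
```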

Figure 7: Unitran pronunciation accuracy per language, as in Figure 5 but with audio midpoint alignment in place of Levenshtein alignment.

Figure 8: Unitran pronunciation accuracy per language, for selected phonemes, as in Figure 6 but with audio midpoint alignment in place of Levenshtein alignment.
# B.3 Unitran Segment Boundary Accuracy
Finally, in Figures 9 and 10, we measure whether Unitran segments with the "correct" label also have the "correct" time boundaries, where "correctness" is evaluated against the corresponding segments obtained using Epitran or WikiPron+G2P.
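Under the same assumed (label, start, end) representation as above, the per-segment boundary error is simply:

```python
def boundary_error(unitran_seg, matched_seg):
    """Absolute offset of the left boundary plus absolute offset of the
    right boundary, for a Unitran segment and its midpoint-matched
    Epitran/WikiPron segment, as in Figure 9."""
    _, u_start, u_end = unitran_seg
    _, h_start, h_end = matched_seg
    return abs(u_start - h_start) + abs(u_end - h_end)
```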

Figure 9: Mean error per language in the temporal boundaries of Unitran segments. Each Unitran segment is evaluated against the WikiPron segment (hatched bars) or Epitran segment (plain bars) with the closest midpoint, as if the latter were truth. The error of a segment is the absolute offset of the left boundary plus the absolute offset of the right boundary. Only segments where the Unitran label matches the Epitran/WikiPron label are included in the average. Where a language has multiple readings, error bars show the min and max across those readings.

Figure 10: Mean error per language in the temporal boundaries of Unitran segments, for selected phonemes. Each curve is a kernel density plot with integral 1. For the /z/ curve, the integral between 50ms and 100ms (for example) is the estimated probability that in a high-resource language drawn uniformly at random, the Unitran /z/ segments whose corresponding Epitran or WikiPron segments are also labeled with /z/ have mean boundary error in that range. Small bumps toward the right correspond to individual languages where the mean error of /z/ is unusually high. The 'all' curve is the same, but now the uniform draw is from all pairs of (high-resource language, Unitran phoneme used in that language). The boundary error of a segment is evaluated as in Figure 9.
# C WikiPron Grapheme-to-Phoneme (G2P) Accuracy (§3.1.3 Quality Measures)
For each language where we used WikiPron, Table 5 shows the phoneme error rate (PER) of Phonetisaurus G2P models trained on WikiPron entries, as evaluated on held-out WikiPron entries. This is an estimate of how accurate our G2P-predicted pronunciations are on out-of-vocabulary words, insofar as those are distributed similarly to the in-vocabulary words. (It is possible, however, that out-of-vocabulary words such as Biblical names are systematically easier or harder for the G2P system to pronounce, depending on how they were transliterated.)
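PER here is the usual edit-distance metric; a compact Python sketch, assuming list-of-labels inputs, is shown below.

```python
def phoneme_error_rate(predicted, reference):
    """PER: Levenshtein edit distance between predicted and reference
    phoneme sequences, normalized by the reference length."""
    n, m = len(predicted), len(reference)
    d = list(range(m + 1))  # single-row dynamic programming table
    for i in range(1, n + 1):
        prev, d[0] = d[0], i
        for j in range(1, m + 1):
            prev, d[j] = d[j], min(d[j] + 1,        # deletion
                                   d[j - 1] + 1,    # insertion
                                   prev + (predicted[i - 1] != reference[j - 1]))
    return d[m] / m
```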
The same G2P configuration was used for all languages, with the hyperparameter settings shown in Table 6 (seq1_max and seq2_max describe how many tokens in the grapheme and phoneme sequences can align to each other). These settings were tuned on SIGMORPHON 2020 Task 1 French, Hungarian, and Korean data (Gorman et al., 2020), using 20 random 80/20 splits.
<table><tr><td>ISO 639-3</td><td>fin</td><td>lat</td><td>nhx</td><td>srn</td><td>mah</td><td>por-po</td><td>mfe</td><td>mww</td><td>por-bz</td><td>eng</td><td>khm</td><td>mlg</td><td>ori</td><td>ban</td><td>urd</td></tr><tr><td>Train size</td><td>41741</td><td>34181</td><td>126</td><td>157</td><td>813</td><td>9633</td><td>203</td><td>227</td><td>10077</td><td>54300</td><td>3016</td><td>114</td><td>211</td><td>172</td><td>704</td></tr><tr><td>PER</td><td>0.8</td><td>2.4</td><td>4.1</td><td>4.6</td><td>9.6</td><td>10.1</td><td>10.7</td><td>10.8</td><td>11.4</td><td>14.5</td><td>15.5</td><td>15.8</td><td>16.1</td><td>19.5</td><td>26.7</td></tr><tr><td></td><td>±0.02</td><td>±0.04</td><td>±1.02</td><td>±0.76</td><td>±0.41</td><td>±0.11</td><td>±1.2</td><td>±1.29</td><td>±0.16</td><td>±0.06</td><td>±0.38</td><td>±1.44</td><td>±1.13</td><td>±1.35</td><td>±0.60</td></tr></table>
Table 5: WikiPron G2P Phone Error Rate (PER) calculated treating WikiPron annotations as ground-truth. We perform 20 trials with random 80/20 splits per language, and report PER averaged across trials with $95\%$ confidence intervals for each language.
<table><tr><td rowspan="2">Phonetisaurus Alignment Hyperparameters</td><td>seq1_max</td><td>seq2_max</td><td>seq1_del</td><td>seq2_del</td><td>grow</td><td>max EM iterations</td><td></td></tr><tr><td>1</td><td>3</td><td>True</td><td>True</td><td>True</td><td>11</td><td></td></tr><tr><td rowspan="2">Graphone Language Model Hyperparameters</td><td>n-gram order</td><td>LM type</td><td>discounting</td><td>gt2min</td><td>gt3min</td><td>gt4min</td><td>gt5min</td></tr><tr><td>5</td><td>max-ent</td><td>Kneser-Ney</td><td>2</td><td>2</td><td>3</td><td>4</td></tr></table>
Table 6: Table of final G2P hyperparameter settings. Alignment parameters not listed here for phonetisaurus-align use the default values. The language model was trained using SRILM (Stolcke, 2002) ngram-count using default values except for those listed above.
# D Retention Statistics (§4.1 Data Filtering)
Table 7 shows what percentage of tokens would be retained after various methods are applied to filter out questionable tokens from the readings used in §4.1. In particular, the rightmost column shows the filtering that was actually used in §4.1. We compute statistics for each reading separately; in each column we report the minimum, median, mean, and maximum statistics over the readings. The top half of the table considers vowel tokens (for the vowels in Appendix A); the bottom half considers sibilant tokens (/s/ and /z/).
On the left side of the table, we consider three filtering techniques for Unitran alignments. Midpoint retains only the segments whose labels are "correct" according to the midpoint-matching methods of Appendix B. MCD retains only those utterances with $\mathrm{MCD} < 6$. Outlier removes tokens that are outliers according to the criteria described in §4.1. Finally, AGG. is the aggregate retention rate after all three methods are applied in order.
On the right side of the table, we consider the same filtering techniques for the high-resource alignments that we actually use, with the exception of Midpoint, as here we have no higher-quality annotation to match against.
<table><tr><td colspan="2"></td><td colspan="5">Unitran Alignments</td><td colspan="4">High-Resource Alignments</td></tr><tr><td colspan="2"></td><td># Tokens</td><td>Midpoint</td><td>MCD</td><td>Outlier</td><td>AGG.</td><td># Tokens</td><td>MCD</td><td>Outlier</td><td>AGG.</td></tr><tr><td rowspan="4">Vowels</td><td>Min</td><td>50,132</td><td>2%</td><td>42%</td><td>83%</td><td>1%</td><td>61,727</td><td>42%</td><td>84%</td><td>37%</td></tr><tr><td>Median</td><td>215,162</td><td>23%</td><td>88%</td><td>90%</td><td>16%</td><td>232,059</td><td>88%</td><td>90%</td><td>79%</td></tr><tr><td>Mean</td><td>239,563</td><td>25%</td><td>81%</td><td>89%</td><td>20%</td><td>223,815</td><td>81%</td><td>90%</td><td>73%</td></tr><tr><td>Max</td><td>662,813</td><td>65%</td><td>100%</td><td>93%</td><td>60%</td><td>468,864</td><td>100%</td><td>93%</td><td>93%</td></tr><tr><td></td><td># Readings</td><td>49</td><td>46</td><td>48</td><td>49</td><td>45</td><td>49</td><td>48</td><td>49</td><td>48</td></tr><tr><td rowspan="4">Sibilants</td><td>Min</td><td>7,198</td><td>10%</td><td>42%</td><td>89%</td><td>13%</td><td>7,184</td><td>44%</td><td>91%</td><td>43%</td></tr><tr><td>Median</td><td>28,690</td><td>70%</td><td>87%</td><td>97%</td><td>59%</td><td>27,569</td><td>87%</td><td>97%</td><td>85%</td></tr><tr><td>Mean</td><td>30,025</td><td>63%</td><td>80%</td><td>95%</td><td>56%</td><td>27,083</td><td>81%</td><td>96%</td><td>79%</td></tr><tr><td>Max</td><td>63,573</td><td>89%</td><td>100%</td><td>98%</td><td>79%</td><td>45,290</td><td>100%</td><td>99%</td><td>96%</td></tr><tr><td></td><td># Readings</td><td>36</td><td>26</td><td>35</td><td>36</td><td>19</td><td>25</td><td>22</td><td>25</td><td>22</td></tr></table>
Table 7: Summary of quality measure retention statistics for vowels and sibilants over unique readings with reading-level MCD $< 8$ for Unitran and high-resource alignments.
# E All VoxClamantis v1.0 Languages
All 635 languages from 690 readings are presented here with their language family, ISO 639-3 code, and mean utterance alignment quality in Mel Cepstral Distortion (MCD) from Black (2019). Languages for which we release Epitran and/or WikiPron alignments in addition to Unitran alignments are marked with $e$ and $w$ respectively. MCD ranges from purple (low), through blue-green (mid), to yellow (high). Lower MCD typically corresponds to better audio-text utterance alignments and higher quality speech synthesis, but judgments regarding distinctions between languages may be subjective. ISO 639-3 is not intended to provide identifiers for dialects or other sub-language variations, which may be present here where there are multiple readings for one ISO 639-3 code. We report the most up-to-date language names from the ISO 639-3 schema (Eberhard et al., 2020). Language names and codes in many schemas could be pejorative and outdated, but where language codes cannot be easily updated, language names can and often are.
<table><tr><td colspan="2">NIGER-CONGO: 159</td></tr><tr><td>Abidji abi</td><td>6.3</td></tr><tr><td>Adele ade</td><td>6.9</td></tr><tr><td>Adioukrou adj</td><td>7.4</td></tr><tr><td>Akan aka</td><td>7.8</td></tr><tr><td>Akebu keu</td><td>7.0</td></tr><tr><td>Akoose bss</td><td>7.2</td></tr><tr><td>Anufo cko</td><td>6.9</td></tr><tr><td>Avatime avn</td><td>6.3</td></tr><tr><td>Bafut bfd</td><td>7.3</td></tr><tr><td>Bandial bqj</td><td>7.0</td></tr><tr><td>Bekwarra bkv</td><td>7.3</td></tr><tr><td>Bete-Bendi btt</td><td>9.1</td></tr><tr><td>Biali beh</td><td>7.6</td></tr><tr><td>Bimoba bim</td><td>7.0</td></tr><tr><td>Bokobaru bus</td><td>6.9</td></tr><tr><td>Bomu bmq</td><td>7.0</td></tr><tr><td>Buamu box</td><td>8.1</td></tr><tr><td>Buli (Ghana) bwu</td><td>7.3</td></tr><tr><td>Bum bmv</td><td>6.4</td></tr><tr><td>Cameroon Mambila mcu</td><td>7.6</td></tr><tr><td>Central-Eastern Niger fuq</td><td>7.1</td></tr><tr><td>Cerma cme</td><td>8.5</td></tr><tr><td>Cerma cme</td><td>6.1</td></tr><tr><td>Chopi cce</td><td>6.3</td></tr><tr><td>Chumburung ncu</td><td>7.3</td></tr><tr><td>Delo ntr</td><td>8.0</td></tr><tr><td>Denya anv</td><td>6.7</td></tr><tr><td>Ditammari tbz</td><td>7.7</td></tr><tr><td>Djimini Senoufo dyi</td><td>7.1</td></tr><tr><td>Duruma dug</td><td>6.7</td></tr><tr><td>Eastern Karaboro xrb</td><td>8.1</td></tr><tr><td>Ekajuk eka</td><td>7.5</td></tr><tr><td>Ewe ewe</td><td>6.3</td></tr><tr><td>Ewe ewe</td><td>6.7</td></tr><tr><td>Farefare gur</td><td>8.1</td></tr><tr><td>Farefare gur</td><td>8.3</td></tr><tr><td>Fon fon</td><td>8.7</td></tr><tr><td>Gikyode acd</td><td>7.7</td></tr><tr><td>Giryama nyf</td><td>6.8</td></tr><tr><td>Gitonga toh</td><td>6.8</td></tr><tr><td>Gogo gog</td><td>7.0</td></tr><tr><td>Gokana gkn</td><td>8.0</td></tr><tr><td>Gourmanchégaux</td><td>7.3</td></tr><tr><td>Gwere gwv</td><td>6.1</td></tr><tr><td>Hanga hag</td><td>7.2</td></tr><tr><td>Haya hay</td><td>7.1</td></tr><tr><td>Ifé ife</td><td>7.8</td></tr><tr><td>Ivbie North-Okpela-Ar atg</td><td>7.7</td></tr><tr><td>Izere izr</td><td>6.8</td></tr><tr><td>Jola-Fonyi dyo</td><td>7.1</td></tr><tr><td>Jola-Kasa esk</td><td>7.5</td></tr><tr><td>Jukun Takum jhu</td><td>7.9</td></tr><tr><td>Kabiyè kbp</td><td>7.4</td></tr><tr><td>Kagulu kki</td><td>6.6</td></tr><tr><td>Kako kkj</td><td>7.9</td></tr><tr><td>Kasem xsm</td><td>7.7</td></tr><tr><td>Kasem xsm</td><td>8.0</td></tr><tr><td>Kenyang ken</td><td>7.4</td></tr><tr><td>Kim kia</td><td>6.8</td></tr><tr><td>Kim kia</td><td>6.3</td></tr><tr><td>Koma kmy</td><td>7.3</td></tr><tr><td>Konkombaxon</td><td>7.8</td></tr><tr><td>Kono (Sierra Leone) kno</td><td>8.1</td></tr></table>
<table><tr><td>Koonzime ozm</td><td>8.0</td></tr><tr><td>Kouya kyf</td><td>8.2</td></tr><tr><td>Kukele kez</td><td>7.8</td></tr><tr><td>Kunda kdn</td><td>6.4</td></tr><tr><td>Kuo xuo</td><td>6.7</td></tr><tr><td>Kusaal kus</td><td>7.0</td></tr><tr><td>Kutep kub</td><td>6.9</td></tr><tr><td>Kutu kdc</td><td>5.7</td></tr><tr><td>Kuwataay cwt</td><td>7.4</td></tr><tr><td>Kwere cwe</td><td>7.5</td></tr><tr><td>Lama (Togo) las</td><td>7.9</td></tr><tr><td>Lelemi lef</td><td>7.3</td></tr><tr><td>Lobi lob</td><td>7.0</td></tr><tr><td>Lokaa yaz</td><td>6.6</td></tr><tr><td>Lukpa dop</td><td>8.0</td></tr><tr><td>Lyélé lee</td><td>8.1</td></tr><tr><td>Machame jmc</td><td>6.8</td></tr><tr><td>Mada (Nigeria) mda</td><td>6.6</td></tr><tr><td>Makaa mcp</td><td>6.9</td></tr><tr><td>Makhwuva vmw</td><td>6.8</td></tr><tr><td>Malawi Lomwe lon</td><td>5.8</td></tr><tr><td>Malba Birifor bfo</td><td>6.5</td></tr><tr><td>Mamara Senoufo myk</td><td>8.0</td></tr><tr><td>Mampruli maw</td><td>7.6</td></tr><tr><td>Mankanya knf</td><td>6.6</td></tr><tr><td>Masaaba myx</td><td>6.1</td></tr><tr><td>Meta'mgo</td><td>6.4</td></tr><tr><td>Miyobe soy</td><td>7.2</td></tr><tr><td>Moba mtq</td><td>8.1</td></tr><tr><td>Moba mtq</td><td>7.2</td></tr><tr><td>Mochi old</td><td>6.9</td></tr><tr><td>Mossi mos</td><td>7.2</td></tr><tr><td>Mossi mos</td><td>7.5</td></tr><tr><td>Mumuye zmm</td><td>7.7</td></tr><tr><td>Mundani mnf</td><td>6.8</td></tr><tr><td>Mwan moa</td><td>7.8</td></tr><tr><td>Mwani wmv</td><td>6.5</td></tr><tr><td>Mündü muh</td><td>8.4</td></tr><tr><td>Nafaanra nfr</td><td>6.8</td></tr><tr><td>Nande nb</td><td>7.2</td></tr><tr><td>Nateni ntm</td><td>7.4</td></tr><tr><td>Nawdm nzm</td><td>8.3</td></tr><tr><td>Ndogo ndz</td><td>6.9</td></tr><tr><td>Ngangam gng</td><td>8.0</td></tr><tr><td>Nigeria Mambila mzK</td><td>6.9</td></tr><tr><td>Nilamba nim</td><td>6.7</td></tr><tr><td>Ninzo nin</td><td>5.9</td></tr><tr><td>Nkonya nko</td><td>6.8</td></tr><tr><td>Noone nhu</td><td>7.2</td></tr><tr><td>Northern Dagara dgi</td><td>7.3</td></tr><tr><td>Ntcham bud</td><td>8.8</td></tr><tr><td>Nyabwa nwB</td><td>7.7</td></tr><tr><td>Nyakyusa-Ngonde nyy</td><td>6.7</td></tr><tr><td>Nyankole nyn</td><td>8.0</td></tr><tr><td>Nyaturu rim</td><td>6.7</td></tr><tr><td>Nyole nuj</td><td>5.9</td></tr><tr><td>Nyoro nyo</td><td>7.1</td></tr><tr><td>Nzima nzi</td><td>7.2</td></tr><tr><td>Obolo ann</td><td>8.5</td></tr><tr><td>Oku oku</td><td>8.3</td></tr><tr><td>Pasaal sig</td><td>7.5</td></tr><tr><td>Plapo Krumen ktj</td><td>7.0</td></tr><tr><td>Pokomo pkb</td><td>6.5</td></tr><tr><td>Pular fuf</td><td>7.6</td></tr></table>
<table><tr><td rowspan="32"></td><td>Rigwe iri</td><td>7.3</td></tr><tr><td>Rundi run</td><td>8.3</td></tr><tr><td>Saamia lsm</td><td>6.8</td></tr><tr><td>Sango sag</td><td>6.7</td></tr><tr><td>Sekpele lip</td><td>6.6</td></tr><tr><td>Selee snw</td><td>6.5</td></tr><tr><td>Sena seh</td><td>6.6</td></tr><tr><td>Shambala ksb</td><td>6.4</td></tr><tr><td>Sissala sld</td><td>7.6</td></tr><tr><td>Siwu akp</td><td>6.3</td></tr><tr><td>Soga xog</td><td>6.9</td></tr><tr><td>South Fali fal</td><td>7.7</td></tr><tr><td>Southern Birifor biv</td><td>7.6</td></tr><tr><td>Southern Bobo Madaré bwc</td><td>7.6</td></tr><tr><td>Southern Dagaare dga</td><td>6.5</td></tr><tr><td>Southern Nuni nww</td><td>7.6</td></tr><tr><td>Southwest Gbaya gso</td><td>7.6</td></tr><tr><td>Supyire Senoufo spp</td><td>8.3</td></tr><tr><td>Talinga-Bwisi tij</td><td>6.5</td></tr><tr><td>Tampulma tpm</td><td>7.1</td></tr><tr><td>Tharaka thk</td><td>7.8</td></tr><tr><td>Tikar tik</td><td>7.8</td></tr><tr><td>Timne tem</td><td>7.2</td></tr><tr><td>Toura (Côte d'Ivoire) neb</td><td>6.8</td></tr><tr><td>Tsonga tso</td><td>5.2</td></tr><tr><td>Tumulung Sisaala sil</td><td>8.0</td></tr><tr><td>Tuwuli bov</td><td>6.2</td></tr><tr><td>Tyap kcg</td><td>7.5</td></tr><tr><td>Vengo bay</td><td>6.7</td></tr><tr><td>Vunjo vun</td><td>6.5</td></tr><tr><td>West-Central Limba lia</td><td>7.3</td></tr><tr><td>Yocoboué Dida gud</td><td>7.0</td></tr><tr><td colspan="3">AUSTRONESIAN: 106</td></tr><tr><td></td><td>Achinese ace</td><td>6.5</td></tr><tr><td></td><td>Agutaynen agn</td><td>6.1</td></tr><tr><td></td><td>Alangan alj</td><td>5.9</td></tr><tr><td></td><td>Alune alp</td><td>6.3</td></tr><tr><td></td><td>Ambai amk</td><td>5.4</td></tr><tr><td></td><td>Amganad Ifugao ifa</td><td>5.9</td></tr><tr><td></td><td>Aralle-Tabulahan atq</td><td>6.7</td></tr><tr><td></td><td>Arop-Lokep apr</td><td>6.2</td></tr><tr><td></td><td>Arosi aia</td><td>5.6</td></tr><tr><td></td><td>Bada (Indonesia) bhz</td><td>5.4</td></tr><tr><td></td><td>Balantak blz</td><td>6.1</td></tr><tr><td></td><td>Balinese ban</td><td>6.4</td></tr><tr><td></td><td>Bambam ptu</td><td>5.8</td></tr><tr><td></td><td>Batad Ifugao ifb</td><td>6.2</td></tr><tr><td></td><td>Batak Dairi btd</td><td>6.1</td></tr><tr><td></td><td>Batak Karo btx</td><td>6.2</td></tr><tr><td></td><td>Batak Simalungun bts</td><td>6.4</td></tr><tr><td></td><td>Besoap beq</td><td>6.4</td></tr><tr><td></td><td>Brooke's Point Palawa plw</td><td>6.2</td></tr><tr><td></td><td>Caribbean Javanese jyn</td><td>6.8</td></tr><tr><td></td><td>Cebuano ceb</td><td>6.9</td></tr><tr><td></td><td>Central Bikol bel</td><td>6.5</td></tr><tr><td></td><td>Central Malay pse</td><td>6.6</td></tr><tr><td></td><td>Central Mnong cmo</td><td>6.0</td></tr><tr><td></td><td>Central Sama sml</td><td>6.7</td></tr><tr><td></td><td>Da'a Kaili kzf</td><td>6.5</td></tr><tr><td></td><td>Duri mvp</td><td>6.9</td></tr><tr><td></td><td>Fataleka far</td><td>6.3</td></tr><tr><td></td><td>Fijian fij</td><td>7.6</td></tr><tr><td></td><td>Fordata frd</td><td>5.3</td></tr><tr><td></td><td>Gilbertese gil</td><td>7.0</td></tr></table>
<table><tr><td>Gorontalo gor</td><td>6.2</td></tr><tr><td>Hanunoo hnn</td><td>6.0</td></tr><tr><td>Hiligaynon hil</td><td>6.7</td></tr><tr><td>Iban iba</td><td>6.5</td></tr><tr><td>e Iloko ilo</td><td>6.5</td></tr><tr><td>e Indonesian ind</td><td>7.2</td></tr><tr><td>e Indonesian ind</td><td>6.8</td></tr><tr><td>e Indonesian ind</td><td>6.4</td></tr><tr><td>Itawit itv</td><td>6.6</td></tr><tr><td>e Javanese jav</td><td>6.3</td></tr><tr><td>Kadazan Dusun dtp</td><td>8.5</td></tr><tr><td>Kagayanen cgc</td><td>6.2</td></tr><tr><td>Kalagan kqe</td><td>5.9</td></tr><tr><td>Kankanaey kne</td><td>5.7</td></tr><tr><td>Keley-I Kallahan ify</td><td>6.2</td></tr><tr><td>Khehek tlx</td><td>9.1</td></tr><tr><td>Kilivila kij</td><td>6.2</td></tr><tr><td>Kinaray-A krj</td><td>6.3</td></tr><tr><td>Kisar kje</td><td>6.3</td></tr><tr><td>Koronadal Blaan bpr</td><td>6.4</td></tr><tr><td>Lampung Api ljp</td><td>6.4</td></tr><tr><td>Lauje law</td><td>6.4</td></tr><tr><td>Ledo Kaili lew</td><td>7.0</td></tr><tr><td>Luang lex</td><td>6.1</td></tr><tr><td>Lundayeh lnd</td><td>6.5</td></tr><tr><td>Ma'anyan mhy</td><td>6.4</td></tr><tr><td>Madurese mad</td><td>7.4</td></tr><tr><td>Mag-antsi Aytasgb</td><td>6.4</td></tr><tr><td>Makasar mak</td><td>6.4</td></tr><tr><td>Malagasy mlg</td><td>8.8</td></tr><tr><td>Malagasy mlg</td><td>7.3</td></tr><tr><td>Malagasy mlg</td><td>6.3</td></tr><tr><td>Malay (macrolanguage) msa</td><td>6.3</td></tr><tr><td>e Malay (macrolanguage) msa</td><td>6.0</td></tr><tr><td>Mamasam qmj</td><td>6.3</td></tr><tr><td>Manado Malay xmm</td><td>5.2</td></tr><tr><td>Mapos Buang bzh</td><td>5.8</td></tr><tr><td>Marano mrw</td><td>6.0</td></tr><tr><td>Marshallese mah</td><td>7.9</td></tr><tr><td>Matigsalug Manobo mbt</td><td>6.4</td></tr><tr><td>Mayoyao Ifugao ifu</td><td>6.6</td></tr><tr><td>Mentawai mww</td><td>6.6</td></tr><tr><td>Minangkabau min</td><td>6.3</td></tr><tr><td>Misima-Panaeati mpx</td><td>6.3</td></tr><tr><td>Mongondow mog</td><td>6.7</td></tr><tr><td>Muna mnb</td><td>6.3</td></tr><tr><td>Napun py</td><td>6.7</td></tr><tr><td>Ngaju nij</td><td>7.3</td></tr><tr><td>Nias nia</td><td>6.8</td></tr><tr><td>Obo Manobo obo</td><td>5.8</td></tr><tr><td>Owa stn</td><td>6.3</td></tr><tr><td>Palauan pau</td><td>6.7</td></tr><tr><td>Pamona pmf</td><td>6.3</td></tr><tr><td>Pampanga pam</td><td>6.6</td></tr><tr><td>Pangasinan pag</td><td>6.7</td></tr><tr><td>Paranan prf</td><td>6.2</td></tr><tr><td>Rejiang rej</td><td>6.0</td></tr><tr><td>Roviana rug</td><td>5.7</td></tr><tr><td>Sambal xsb</td><td>6.0</td></tr><tr><td>Sambal xsb</td><td>6.0</td></tr><tr><td>Samoan smo</td><td>6.0</td></tr><tr><td>Sangir sxn</td><td>7.7</td></tr><tr><td>Sarangani Blaan bps</td><td>6.5</td></tr><tr><td>Sasak sas</td><td>6.3</td></tr></table>
<table><tr><td>Sudest tgo</td><td>6.1</td><td>Huastec hus</td><td>6.1</td><td>eRomanian ron</td><td>6.8</td><td>Yue Chinese yue</td><td>8.0</td></tr><tr><td>Sundanese sun</td><td>6.9</td><td>Ixil ixl</td><td>5.8</td><td>eRussian rus</td><td>5.6</td><td>Zyphé Chin zyp</td><td>7.2</td></tr><tr><td>eTagalog tgl</td><td>6.5</td><td>Ixil ixl</td><td>6.5</td><td>Sinte Romani rmo</td><td>6.6</td><td>QUECHUAN: 22</td><td></td></tr><tr><td>Tangoa tgp</td><td>7.1</td><td>Ixil ixl</td><td>7.6</td><td>eSpanish spa</td><td>6.2</td><td>Ayacucho Quechua quy</td><td>7.2</td></tr><tr><td>Termanu twu</td><td>6.1</td><td>K'iche' quc</td><td>6.6</td><td>eSpanish spa</td><td>7.9</td><td>Cajamarca Quechua qvc</td><td>7.8</td></tr><tr><td>Tombonus tx</td><td>7.2</td><td>K'iche' quc</td><td>6.6</td><td>eSpanish spa</td><td>7.8</td><td>Cañar Highland Quichu qxr</td><td>5.6</td></tr><tr><td>Toraja-Sa'dan sda</td><td>6.3</td><td>K'iche' quc</td><td>6.4</td><td>eSpanish spa</td><td>7.9</td><td>Cusco Quechua quz</td><td>6.8</td></tr><tr><td>Tuwali Ifugao ifk</td><td>6.7</td><td>K'iche' quc</td><td>6.3</td><td>eSpanish spa</td><td>6.7</td><td>Huallaga Huánuco Queq bub</td><td>7.1</td></tr><tr><td>Uma ppk</td><td>6.7</td><td>K'iche' quc</td><td>6.4</td><td>eSwedish swe</td><td>6.9</td><td>Huanalies-Dos de Mayo qvh</td><td>6.2</td></tr><tr><td>Western Bukidnon Mano mbb</td><td>6.6</td><td>K'iche' quc</td><td>7.1</td><td>eSwedish swe</td><td>6.1</td><td>Huaylas Ancash Quechua qwh</td><td>6.6</td></tr><tr><td>Western Tawbuid twb</td><td>6.0</td><td>Kaqchikel cak</td><td>6.1</td><td>eTajik tkg</td><td>6.8</td><td>Huaylla Wanca Quechua qvw</td><td>6.7</td></tr><tr><td>AFRO-ASIATIC: 45</td><td></td><td>Kaqchikel cak</td><td></td><td>Urdu urd</td><td>6.6</td><td>Inga inb</td><td>6.8</td></tr><tr><td>Bana bcw</td><td>7.2</td><td>Kaqchikel cak</td><td>5.5</td><td>Vlax Romani rmy</td><td>6.8</td><td>Lambayeque Quechua quf</td><td>6.9</td></tr><tr><td>Daasanach dsh</td><td>6.5</td><td>Kaqchikel cak</td><td>6.8</td><td>OTO-MANGUEAN: 27</td><td></td><td>Margos-Yarowilca-Laur qvm</td><td>6.1</td></tr><tr><td>Daba dbq</td><td></td><td>Kaqchikel cak</td><td>7.0</td><td>Atatláhuca Mixtec mib</td><td>6.2</td><td>Napo Lowland Quechua qvo</td><td>6.4</td></tr><tr><td>Dangaléat daa</td><td>7.0</td><td>Kaqchikel cak</td><td>7.9</td><td>Ayutla Mixtec mily</td><td>6.1</td><td>North Bolivian Quechua qul</td><td>6.7</td></tr><tr><td>Dawro dwr</td><td>8.3</td><td>Kekchi kek</td><td>6.5</td><td>Central Mazahuá maz</td><td>7.0</td><td>North Junin Quechua qvn</td><td>7.3</td></tr><tr><td>Eastern Oromo hae</td><td>6.5</td><td>Kekchi kek</td><td>6.3</td><td>Chihuaxtla Triqui trs</td><td>6.0</td><td>Northern Conchucos An qxn</td><td>5.9</td></tr><tr><td>Egyptian Arabic arz</td><td>7.4</td><td>Mam mam</td><td>6.3</td><td>Dixui-Tilantongo Mix xtd</td><td>6.5</td><td>Northern Pastaza Quic qyz</td><td>6.1</td></tr><tr><td>Gamo gmv</td><td>7.2</td><td>Mam mam</td><td>6.7</td><td>Jalapa De Diaz Mazate maj</td><td>8.3</td><td>Panao Huánco Quechua qxh</td><td>8.2</td></tr><tr><td>Gen gej</td><td>7.3</td><td>Mam mam</td><td>7.3</td><td>Jamiltepec Mixtec mxt</td><td>7.4</td><td>San Martin Quechua qvs</td><td>6.8</td></tr><tr><td>Gofa gof</td><td>6.5</td><td>Mam mam</td><td>7.1</td><td>Lalana Chinantec cnl</td><td>7.4</td><td>South Bolivian Quechua quh</td><td>6.5</td></tr><tr><td>Gofa gof</td><td>8.2</td><td>Mopán Maya mop</td><td>7.0</td><td>Lealao Chinantec cle</td><td>6.6</td><td>South Bolivian Quechua quh</td><td>7.0</td></tr><tr><td>Gude 
gde</td><td>7.3</td><td>Popti' jac</td><td>7.1</td><td>Magdalena Peñasco Mix xtm</td><td>5.6</td><td>Southern Pastaza Quec qup</td><td>6.1</td></tr><tr><td>Hamer-Banna amf</td><td>6.5</td><td>Popti' jac</td><td>6.3</td><td>Mezquital Otomi ote</td><td>6.8</td><td>Tena Lowland Quichua quw</td><td>6.2</td></tr><tr><td>Hausa hau</td><td>5.7</td><td>Poqomchi' poh</td><td>6.5</td><td>Nopal Chaatin cya</td><td>8.8</td><td>EASTERN SUDANIC: 19</td><td></td></tr><tr><td>Hdi xed</td><td>7.5</td><td>Poqomchi' poh</td><td>5.3</td><td>Ozumacin Chinantech chz</td><td>7.7</td><td>Acoli ach</td><td>6.8</td></tr><tr><td>Iraqw irk</td><td>8.4</td><td>Q'anjob'al kjb</td><td>6.8</td><td>Peñoles Mixtec mil</td><td>6.7</td><td>Adhola adh</td><td>6.5</td></tr><tr><td>Kabyle kab</td><td>7.4</td><td>Tektititeko ttc</td><td>6.0</td><td>Pinotepa Nacional Mix mio</td><td>6.0</td><td>Alur alz</td><td>7.3</td></tr><tr><td>Kafa kbr</td><td>7.3</td><td>Tz'utujil tzj</td><td>6.8</td><td>San Jerónimo Tecóatl maa</td><td>7.7</td><td>Bari bfa</td><td>5.2</td></tr><tr><td>Kambaata ktb</td><td>6.9</td><td>Tzeltal tzh</td><td>6.0</td><td>San Jerónimo Tecóatl maa</td><td>7.9</td><td>Datooga tcc</td><td>6.9</td></tr><tr><td>Kamwe hig</td><td>7.8</td><td>Tzeltal tzh</td><td>6.5</td><td>San Juan Atzingo Popo poe</td><td>6.5</td><td>Kakwa keo</td><td>6.7</td></tr><tr><td>Kera ker</td><td>7.3</td><td>Tzotzil tzo</td><td>6.2</td><td>San Marcos Tlacoyalco pls</td><td>5.9</td><td>Karamojong kdj</td><td>6.5</td></tr><tr><td>Kimré kqp</td><td>6.7</td><td>Tzotzil tzo</td><td>7.1</td><td>San Pedro Amuzgos Amu agz</td><td>7.2</td><td>Kumam kdi</td><td>6.2</td></tr><tr><td>Konso kxc</td><td>6.6</td><td>Western Kanjobal knj</td><td>6.8</td><td>Santa Maria Zacatepec mza</td><td>6.3</td><td>Kupsabiny Kpz</td><td>6.7</td></tr><tr><td>Koorete kcy</td><td>7.2</td><td>Yucateco yua</td><td>7.0</td><td>Sochiapam Chinantec cso</td><td>6.1</td><td>Lango (Uganda) laj</td><td>7.8</td></tr><tr><td>Lele (Chad) ln</td><td>6.8</td><td>INDO-EUROPEAN: 40</td><td></td><td>Southern Puebla Mixte mit</td><td>6.5</td><td>Luwo lwo</td><td>8.4</td></tr><tr><td>Male (Ethiopia) mdy</td><td>6.6</td><td>Albanian sqi</td><td>7.0</td><td>Tepetotutla Chinantec cnt</td><td>7.3</td><td>Mabaan mfz</td><td>6.7</td></tr><tr><td>Marba mpg</td><td>7.7</td><td>Awadi awa</td><td>7.4</td><td>Tezoatlán Mixtec mxb</td><td>6.0</td><td>Markweeta enb</td><td>7.3</td></tr><tr><td>Mbuk mq</td><td>7.9</td><td>Bengali ben</td><td>8.1</td><td>Usila Chinantec cue</td><td>6.7</td><td>Murle mur</td><td>7.8</td></tr><tr><td>Merey meq</td><td>8.1</td><td>Bengali ben</td><td>8.1</td><td>Yosondúa Mixtec mpm</td><td>6.7</td><td>Nuer nus</td><td>6.9</td></tr><tr><td>Mesopotamian Arabic acm</td><td>8.3</td><td>Bengali ben</td><td>8.1</td><td>SINO-TIBETAN: 24</td><td></td><td>Sabaot spi</td><td>8.1</td></tr><tr><td>Mofu-Gudur mif</td><td>8.0</td><td>Caribbean Hindustani hns</td><td>7.0</td><td>Achang acn</td><td>6.1</td><td>Shilluk shk</td><td>6.9</td></tr><tr><td>Muyang muy</td><td>6.6</td><td>Chhattisgarhi hne</td><td>6.6</td><td>Akeu aeu</td><td>6.9</td><td>Southwestern Dinka dif</td><td>7.5</td></tr><tr><td>Mwaghavul sur</td><td>7.1</td><td>Dari prs</td><td>6.9</td><td>Akha ahk</td><td>7.0</td><td>Teso teo</td><td>7.1</td></tr><tr><td>North Mofu mfk</td><td>7.0</td><td>English eng</td><td>6.9</td><td>Bawn Chin bgr</td><td>6.8</td><td>TURKIC: 18</td><td></td></tr><tr><td>Parkwa pbi</td><td>6.9</td><td>Fiji Hindi hif</td><td>6.9</td><td>Eastern Tamang taj</td><td>6.1</td><td>Bashkir 
bak</td><td>6.0</td></tr><tr><td>Péví lme</td><td>7.7</td><td>French fra</td><td>8.2</td><td>Falam Chin cfm</td><td>6.7</td><td>Chuvash chv</td><td>7.3</td></tr><tr><td>Sebat Bet Gurage sgw</td><td>6.6</td><td>French fra</td><td>8.5</td><td>Hakka Chinese hak</td><td>6.3</td><td>Crimean Tatar crh</td><td>5.4</td></tr><tr><td>eSomali som</td><td>8.3</td><td>Hindi hin</td><td>6.5</td><td>Kachin kac</td><td>6.3</td><td>Gagauz gag</td><td>5.3</td></tr><tr><td>Standard Arabic arb</td><td>7.9</td><td>Iranian Persian pes</td><td>7.3</td><td>Khumi Chin cnc</td><td>6.2</td><td>Gagauz gag</td><td>5.6</td></tr><tr><td>Sudanese Arabic apd</td><td>8.0</td><td>Latin lat</td><td>5.8</td><td>Kulung (Nepal) kle</td><td>6.0</td><td>Kara-Kalpak kaa</td><td>6.7</td></tr><tr><td>Tachelhit shi</td><td>5.0</td><td>Magahi mag</td><td>6.4</td><td>Lahu lhu</td><td>6.7</td><td>Karachay-Balkar krc</td><td>6.9</td></tr><tr><td>Tamasheq taq</td><td>7.1</td><td>Maithili mai</td><td>7.2</td><td>Lashi lsi</td><td>7.6</td><td>Kazakh kaz</td><td>6.8</td></tr><tr><td>eTigrinya tir</td><td>6.6</td><td>Malvi mup</td><td>6.5</td><td>Lolopo ycl</td><td>7.2</td><td>Khakas kjh</td><td>5.4</td></tr><tr><td>Tumak tmc</td><td>6.6</td><td>Marathi mar</td><td>6.8</td><td>Mandarin Chinese cmm</td><td>7.7</td><td>Kumyk kum</td><td>6.5</td></tr><tr><td>Wandala mfi</td><td>7.9</td><td>Northern Kurdish kmr</td><td>7.0</td><td>Maru mhx</td><td>7.6</td><td>Nogai nog</td><td>5.4</td></tr><tr><td>MAYAN: 42</td><td></td><td>Oriya (macrolanguage) ori</td><td>7.6</td><td>Min Nan Chinese nan</td><td>6.8</td><td>eNorth Azerbaijani azj</td><td>6.8</td></tr><tr><td>Achi acr</td><td>6.2</td><td>Ossetian oss</td><td>6.3</td><td>Mro-Khimi Chin cmr</td><td>7.3</td><td>Southern Altai alt</td><td>7.2</td></tr><tr><td>Aguacateco agu</td><td>5.8</td><td>Polish pol</td><td>7.7</td><td>Newari new</td><td>6.1</td><td>Tatar tat</td><td>7.4</td></tr><tr><td>Chol ctu</td><td>7.0</td><td>Portuguese por</td><td>7.2</td><td>Pwo Northern Karen pww</td><td>5.5</td><td>Turkish tur</td><td>7.8</td></tr><tr><td>Chorti caa</td><td>6.4</td><td>Portuguese por</td><td>7.6</td><td>Sherpa xsr</td><td>7.4</td><td>Turkish tur</td><td>8.6</td></tr><tr><td>Chuj cac</td><td>7.5</td><td>Portuguese por</td><td>8.2</td><td>Sunwar suz</td><td>6.6</td><td>Tuvinian tvv</td><td>6.2</td></tr><tr><td>Chuj cac</td><td>6.7</td><td>Portuguese por</td><td>7.9</td><td>Tedim Chin ctd</td><td>6.6</td><td>Uighur uig</td><td>6.2</td></tr></table>
acorpusforlargescalephonetictypology/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ac0a6fe5527d5c76fbfdc50900adbc9cda0d5b4f0a91fa34a2ddd95c14e5ad13
size 1815426
acorpusforlargescalephonetictypology/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:16e4501718b62fafce6a6676965bf211581fb3421f7b5d448effe80e521cae63
size 502915
activeimitationlearningwithnoisyguidance/1cd987d1-2f57-45e1-806b-b3fb3e73ee83_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94166a3edd0ab82f61b8cee2ca77a0adffaaa4979295aa1cc9ac8e7815ec41bc
size 80811
activeimitationlearningwithnoisyguidance/1cd987d1-2f57-45e1-806b-b3fb3e73ee83_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:62f0958d60a90e78ee4c55aa0743c5450b6116230dc41b68c6ba7c4f736914a9
size 99133
activeimitationlearningwithnoisyguidance/1cd987d1-2f57-45e1-806b-b3fb3e73ee83_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:28d58e7dd5a6203fe67bac90886c1cf483c1f7a8480250f7d448a32aab03f0e7
size 1266317
activeimitationlearningwithnoisyguidance/full.md
ADDED
@@ -0,0 +1,404 @@
# Active Imitation Learning with Noisy Guidance

Kianté Brantley
University of Maryland
kdrbrant@cs.umd.edu

Amr Sharaf
University of Maryland
amr@cs.umd.edu

Hal Daumé III
University of Maryland
Microsoft Research
me@hal3.name

# Abstract

Imitation learning algorithms provide state-of-the-art results on many structured prediction tasks by learning near-optimal search policies. Such algorithms assume training-time access to an expert that can provide the optimal action at any queried state; unfortunately, the number of such queries is often prohibitive, frequently rendering these approaches impractical. To combat this query complexity, we consider an active learning setting in which the learning algorithm has additional access to a much cheaper noisy heuristic that provides noisy guidance. Our algorithm, LEAQI, learns a difference classifier that predicts when the expert is likely to disagree with the heuristic, and queries the expert only when necessary. We apply LEAQI to three sequence labeling tasks, demonstrating significantly fewer queries to the expert and comparable (or better) accuracy relative to a passive approach.

# 1 Introduction

Structured prediction methods learn models to map inputs to complex outputs with internal dependencies, typically requiring a substantial amount of expert-labeled data. To minimize annotation cost, we focus on a setting in which an expert provides labels for pieces of the input, rather than the complete input (e.g., labeling at the level of words, not sentences). A natural starting point for this is imitation learning-based "learning to search" approaches to structured prediction (Daumé et al., 2009; Ross et al., 2011; Bengio et al., 2015; Leblond et al., 2018). In imitation learning, training proceeds by incrementally producing structured outputs one piece at a time and, at every step, asking the expert "what would you do here?" and learning to mimic that choice. This interactive model comes at a substantial cost: the expert demonstrator must be continuously available and must be able to answer a potentially large number of queries.

We reduce this annotation cost by only asking an expert for labels that are truly needed; our algorithm, Learning to Query for Imitation (LEAQI, /ˈliːtʃiː/) achieves this by capitalizing on two factors. First, as is typical in active learning (see §2), LEAQI only asks the expert for a label when it is uncertain. Second, LEAQI assumes access to a noisy heuristic labeling function (for instance, a rule-based model, dictionary, or inexpert annotator) that can provide low-quality labels. LEAQI operates by always asking this heuristic for a label, and only querying the expert when it thinks the expert is likely to disagree with this label. It trains, simultaneously, a difference classifier (Zhang and Chaudhuri, 2015) that predicts disagreements between the expert and the heuristic (see Figure 1).

The challenge in learning the difference classifier is that it must learn based on one-sided feedback: if it predicts that the expert is likely to agree with the heuristic, the expert is not queried and the classifier cannot learn that it was wrong. We address this one-sided feedback problem using the Apple Tasting framework (Helmbold et al., 2000), in which errors (in predicting which apples are tasty) are only observed when a query is made (an apple is tasted). Learning in this way is particularly important in the general case where the heuristic is likely not just to have high variance with respect to the expert, but is also statistically biased.

Experimentally (§4.5), we consider three structured prediction settings, each using a different type of heuristic feedback. We apply LEAQI to: English named entity recognition, where the heuristic is a rule-based recognizer using gazetteers (Khashabi et al., 2018); English scientific keyphrase extraction, where the heuristic is an unsupervised method (Florescu and Caragea, 2017); and Greek part-of-speech tagging, where the heuristic is a small dictionary compiled from the training data (Zesch et al., 2008; Haghighi and Klein, 2006).

Figure 1: A named entity recognition example (from the Wikipedia page for Clarence Ellis). $\pmb{x}$ is the input sentence and $\pmb{y}$ is the (unobserved) ground truth. The predictor $\pi$ operates left-to-right and, in this example, is currently at state $s_{10}$ to tag the 10th word; the state $s_{10}$ (highlighted in purple) combines $\pmb{x}$ with $\hat{\pmb{y}}_{1:9}$. The heuristic makes two errors at $t = 4$ and $t = 6$. The heuristic label at $t = 10$ is $y_{10}^{h} = \mathsf{ORG}$. Under Hamming loss, the cost at $t = 10$ is minimized for $a = \mathsf{ORG}$, which is therefore the expert action (if it were queried). The label that would be provided for $s_{10}$ to the difference classifier is 0 because the two policies agree.

In all three settings, the expert is a simulated human annotator. We train LEAQI on all three tasks using fixed BERT (Devlin et al., 2019) features, training only the final layer (because we are in the regime of small labeled data). The goal in all three settings is to minimize the number of words the expert annotator must label. In all settings, we're able to establish the efficacy of LEAQI, showing that it can indeed provide significant label savings over using the expert alone and over several baselines and ablations that establish the importance of both the difference classifier and the Apple Tasting paradigm.

# 2 Background and Related Work

We review first the use of imitation learning for structured prediction, then online active learning, and finally applications of active learning to structured prediction and imitation learning problems.

# 2.1 Learning to Search

The learning to search approach to structured prediction casts the joint prediction problem of producing a complex output as a sequence of smaller classification problems (Ratnaparkhi, 1996; Collins and Roark, 2004; Daumé et al., 2009). For instance, in the named entity recognition example from Figure 1, an input sentence $x$ is labeled one word at a time, left-to-right. At the depicted state $(s_{10})$, the model has labeled the first nine words and must next label the tenth word. Learning to search approaches assume access to an oracle policy $\pi^{\star}$, which provides the optimal label at every position.

In (interactive) imitation learning, we aim to imitate the behavior of the expert policy, $\pi^{\star}$, which provides the true labels. The learning to search view allows us to cast structured prediction as a (degenerate) imitation learning task, where states

Algorithm 1 DAgger($\Pi$, $N$, $\langle \beta_i \rangle_{i=0}^{N}$, $\pi^\star$)
1: initialize dataset $D = \{\}$
2: initialize policy $\hat{\pi}_1$ to any policy in $\Pi$
3: for $i = 1 \ldots N$ do
4: $\triangleright$ stochastic mixture policy
5: Let $\pi_i = \beta_i \pi^\star + (1 - \beta_i) \hat{\pi}_i$
6: Generate a $T$-step trajectory using $\pi_i$
7: Accumulate data $D \gets D \cup \{(s, \pi^\star(s))\}$ for all $s$ in those trajectories
8: Train classifier $\hat{\pi}_{i+1} \in \Pi$ on $D$
9: end for
10: return best (or random) $\hat{\pi}_i$

are (input, prefix) pairs, actions are operations on the output, and the horizon $T$ is the length of the sequence. States are denoted $s \in S$, actions are denoted $a \in [K]$, where $[K] = \{1, \ldots, K\}$, and the policy class is denoted $\Pi \subseteq [K]^S$. The goal in learning is to find a policy $\pi \in \Pi$ with small loss on the distribution of states that it, itself, visits.

A popular imitation learning algorithm, DAgger (Ross et al., 2011), is summarized in Alg 1. In each iteration, DAgger executes a mixture policy and, at each visited state, queries the expert's action. This produces a classification example, where the input is the state and the label is the expert's action. At the end of each iteration, the learned policy is updated by training it on the accumulation of all generated data so far. DAgger is effective in practice and enjoys appealing theoretical properties; for instance, if the number of iterations $N$ is $\tilde{O}(T^2\log(1/\delta))$ then with probability at least $1 - \delta$, the generalization error of the learned policy is $O(1/T)$ (Ross et al., 2011, Theorem 4.2).
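
To make the loop concrete, here is a minimal Python sketch of Alg 1 for sequence labeling; the `predict`/`fit`/`expert` interfaces and the (sentence, prefix) state encoding are our own illustrative assumptions, not the paper's implementation.

```python
import random

def dagger(predict, fit, expert, sentences, betas):
    """Minimal sketch of the DAgger loop (Alg 1) for sequence labeling.

    predict(state) -> action : the current learned policy
    fit(dataset)   -> predict: trains a classifier, returns its predict fn
    expert(state)  -> action : the oracle policy pi-star
    These three interfaces are assumptions made for illustration.
    """
    dataset = []
    for beta in betas:                       # one episode per iteration
        x = random.choice(sentences)
        prefix = []
        for _ in range(len(x)):
            state = (x, tuple(prefix))
            # stochastic mixture: follow the expert with probability beta
            action = expert(state) if random.random() < beta else predict(state)
            dataset.append((state, expert(state)))  # label every visited state
            prefix.append(action)
        predict = fit(dataset)               # retrain on all data so far
    return predict
```

Note that every visited state triggers an expert call; that per-word query cost is exactly what LEAQI is designed to cut.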

# 2.2 Active Learning

Active learning has been considered since at least the 1980s, often under the name "selective sampling" (Rendell, 1986; Atlas et al., 1990). In agnostic online active learning for classification, a learner operates in rounds (e.g. Balcan et al., 2006; Beygelzimer et al., 2009, 2010). At each round, the learning algorithm is presented an example $x$ and must predict a label; the learner must decide whether to query the true label. An effective margin-based approach for online active learning is provided by Cesa-Bianchi et al. (2006) for linear models. Their algorithm defines a sampling probability $\rho = b / (b + z)$, where $z$ is the margin on the current example, and $b > 0$ is a hyperparameter that controls the aggressiveness of sampling. With probability $\rho$, the algorithm requests the label and performs a perceptron-style update.
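
The sampling rule is compact enough to sketch. Below is a minimal illustration of this style of margin-based selective sampling for a linear binary classifier; the streaming `(x, get_label)` interface is an assumption made for the example.

```python
import numpy as np

def selective_sampling_perceptron(stream, dim, b=1.0, seed=0):
    """Margin-based selective sampling, in the spirit of Cesa-Bianchi
    et al. (2006). `stream` yields (x, get_label) pairs, where calling
    get_label() simulates querying the annotator; that interface is an
    assumption for illustration."""
    rng = np.random.default_rng(seed)
    w = np.zeros(dim)
    queries = 0
    for x, get_label in stream:
        z = abs(float(w @ x))              # margin = certainty on x
        if rng.random() < b / (b + z):     # query more when the margin is small
            y = get_label()                # label in {-1, +1}
            queries += 1
            if y * float(w @ x) <= 0:      # perceptron update on a mistake
                w = w + y * x
    return w, queries
```

Larger `b` makes the learner query more aggressively; as `b` grows, the sampling probability approaches one for every example.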

Our approach is inspired by Zhang and Chaudhuri's (2015) setting, where two labelers are available: a free weak labeler and an expensive strong labeler. Their algorithm minimizes queries to the strong labeler by learning a difference classifier that predicts, for each example, whether the weak and strong labelers are likely to disagree. Their algorithm trains this difference classifier using an example-weighting strategy to ensure that its Type II error is kept small, establishing statistical consistency and bounding its sample complexity.

This type of learning from one-sided feedback falls in the general framework of partial-monitoring games, a framework for sequential decision making with imperfect feedback. Apple Tasting is a type of partial-monitoring game (Littlestone and Warmuth, 1989), where, at each round, a learner is presented with an example $\pmb{x}$ and must predict a label $\hat{y} \in \{-1, +1\}$. After this prediction, the true label is revealed only if the learner predicts $+1$. This framework has been applied in several settings, such as spam filtering and document classification with minority class distributions (Sculley, 2007). Sculley (2007) also conducts a thorough comparison of two methods that can be used to address the one-sided feedback problem: label-efficient online learning (Cesa-Bianchi et al., 2006) and margin-based learning (Vapnik, 1982).

# 2.3 Active Imitation & Structured Prediction

In the context of structured prediction for natural language processing, active learning has been considered both for requesting full structured outputs (e.g. Thompson et al., 1999; Culotta and McCallum, 2005; Hachey et al., 2005) and for requesting only pieces of outputs (e.g. Ringger et al., 2007; Bloodgood and Callison-Burch, 2010). For sequence labeling tasks, Haertel et al. (2008) found that labeling effort depends on both the number of words labeled (which we model) and a fixed cost for reading (which we do not).

In the context of imitation learning, active approaches have also been considered for at least three decades, often called "learning with an external critic" and "learning by watching" (Whitehead, 1991). More recently, Judah et al. (2012) describe RAIL, an active learning-for-imitation-learning algorithm akin to our ACTIVEDAGGER baseline, but which in principle would operate with any underlying i.i.d. active learning algorithm (not just our specific choice of uncertainty sampling).

# 3 Our Approach: LEAQI

Our goal is to learn a structured prediction model with minimal human expert supervision, effectively by combining human annotation with a noisy heuristic. We present LEAQI to achieve this. As a concrete example, return to Figure 1: at $s_{10}$, $\pi$ must predict the label of the tenth word. If $\pi$ is confident in its own prediction, LEAQI can avoid any query, similar to traditional active learning. If $\pi$ is not confident, then LEAQI considers the label suggested by a noisy heuristic (here: ORG). LEAQI predicts whether the true expert label is likely to disagree with the noisy heuristic. Here, it predicts no disagreement and avoids querying the expert.

# 3.1 Learning to Query for Imitation

Our algorithm, LEAQI, is specified in Alg 2. As input, LEAQI takes a policy class $\Pi$, a hypothesis class $\mathcal{H}$ for the difference classifier (assumed to be symmetric and to contain the "constant one" function), a number of episodes $N$, an expert policy $\pi^{\star}$, a heuristic policy $\pi^{\mathrm{h}}$, and a confidence parameter $b > 0$. The general structure of LEAQI follows that of DAgger, but with three key differences:

(a) roll-in (line 7) is according to the learned policy (not mixed with the expert, as that would require additional expert queries),
(b) actions are queried only if the current policy is uncertain at $s$ (line 12), and
(c) the expert $\pi^{\star}$ is only queried if it is predicted to disagree with the heuristic $\pi^{\mathrm{h}}$ at $s$ by the difference classifier, or if the apple tasting method flips the difference classifier's prediction (line 15; see §3.2).

Algorithm 2 LEAQI($\Pi$, $\mathcal{H}$, $N$, $\pi^{\star}$, $\pi^{\mathrm{h}}$, $b$)
1: initialize dataset $D = \{\}$
2: initialize policy $\pi_1$ to any policy in $\Pi$
3: initialize difference dataset $S = \{\}$
4: initialize difference classifier $h_1(s) = 1$ $(\forall s)$
5: for $i = 1 \ldots N$ do
6: Receive input sentence $x$
7: $\triangleright$ roll-in: generate a $T$-step trajectory using $\pi_i$
8: Generate output $\hat{y}$ using $\pi_i$
9: for each $s$ in $\hat{y}$ do
10: $\triangleright$ draw Bernoulli random variable
11: $Z_i \sim \mathrm{Bern}\left(\frac{b}{b + \mathrm{certainty}(\pi_i, s)}\right)$; see §3.3
12: if $Z_i = 1$ then
13: $\triangleright$ set difference classifier prediction
14: $\hat{d}_i = h_i(s)$
15: if AppleTaste$(s, \pi^{\mathrm{h}}(s), \hat{d}_i)$ then
16: $\triangleright$ predicted agree: query heuristic
17: $D \gets D \cup \{(s, \pi^{\mathrm{h}}(s))\}$
18: else
19: $\triangleright$ predicted disagree: query expert
20: $D \gets D \cup \{(s, \pi^{\star}(s))\}$
21: $d_i = \mathbb{1}[\pi^{\star}(s) \neq \pi^{\mathrm{h}}(s)]$
22: $S \gets S \cup \{(s, \pi^{\mathrm{h}}(s), \hat{d}_i, d_i)\}$
23: end if
24: end if
25: end for
26: Train policy $\pi_{i+1} \in \Pi$ on $D$
27: Train difference classifier $h_{i+1} \in \mathcal{H}$ on $S$ to minimize Type II errors (see §3.2)
28: end for
29: return best (or random) $\pi_i$

In particular, at each state visited by $\pi_{i}$, LEAQI estimates $z$, the certainty of $\pi_{i}$'s prediction at that state (see §3.3). A sampling probability $\rho$ is set to $b / (b + z)$, so that $\rho$ tends to one when the model is very uncertain and to zero when it is very certain, following Cesa-Bianchi et al. (2006). With probability $\rho$, LEAQI will collect some label.
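
To make the control flow concrete, here is a minimal sketch of the per-state querying logic (lines 10-24 of Alg 2); the callable arguments mirror quantities in the paper ($\mathrm{certainty}$, $h_i$, AppleTaste, $\pi^{\star}$, $\pi^{\mathrm{h}}$), but their concrete signatures are illustrative assumptions.

```python
import random

def label_for_state(state, certainty, h_diff, apple_taste,
                    expert, heuristic, b=1.0):
    """One pass through lines 10-24 of Alg 2 for a single state.

    Returns a (state, action) training pair, or None when no label is
    collected. All callables are assumed interfaces for illustration.
    """
    z = certainty(state)
    if random.random() >= b / (b + z):
        return None                        # policy is confident: no query
    d_hat = h_diff(state)                  # 1 = disagreement predicted
    if apple_taste(state, heuristic(state), d_hat):
        return state, heuristic(state)     # "agree" branch: cheap label
    a_star = expert(state)                 # "disagree" branch: expert query
    # here (state, heuristic(state), d_hat, [a_star != heuristic(state)])
    # would also be appended to the difference dataset S
    return state, a_star
```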

When a label is collected (line 12), the difference classifier $h_i$ is queried on state $s$ to predict if $\pi^{\star}$ and $\pi^{\mathrm{h}}$ are likely to disagree on the correct action. (Recall that $h_1$ always predicts disagreement per line 4.) The difference classifier's prediction, $\hat{d}_i$, is passed to an apple tasting method in line 15. Intuitively, most apple tasting procedures (including the one we use, STAP; see §3.2) return $\hat{d}_i$, unless the difference classifier is making many Type II errors, in which case it may return $\neg \hat{d}_i$.

A target action is set to $\pi^{\mathrm{h}}(s)$ if the apple tasting

Algorithm 3 AppleTaste_STAP($S$, $a_i^{\mathrm{h}}$, $\hat{d}_i$)
1: $\triangleright$ count examples whose heuristic action is $a_{i}^{\mathrm{h}}$
2: let $t = \sum_{(\cdot, a, \cdot, \cdot) \in S} \mathbb{1}[a_{i}^{\mathrm{h}} = a]$
3: $\triangleright$ count mistakes made on action $a_{i}^{\mathrm{h}}$
4: let $m = \sum_{(\cdot, a, \hat{d}, d) \in S} \mathbb{1}[\hat{d} \neq d \wedge a_{i}^{\mathrm{h}} = a]$
5: $w = \frac{t}{|S|}$ $\triangleright$ fraction of the time $a_{i}^{\mathrm{h}}$ was seen
6: if $w < 1$ then
7: $\triangleright$ skew distribution
8: draw $r \sim \mathrm{Beta}(1 - w, 1)$
9: else
10: draw $r \sim \mathrm{Uniform}(0, 1)$
11: end if
12: return $(\hat{d}_i = 1) \wedge (r \leq \sqrt{(m + 1) / t})$

algorithm returns "agree" (line 17), and the expert $\pi^{\star}$ is only queried if disagreement is predicted (line 20). The state and target action (either heuristic or expert) are then added to the training data. If the expert was queried, a new item is also added to the difference dataset, consisting of the state, the heuristic action at that state, the difference classifier's prediction, and the ground-truth difference label: whether the expert and heuristic actually disagree at $s$. Finally, $\pi_{i + 1}$ is trained on the accumulated action data, and $h_{i + 1}$ is trained on the difference dataset (details in §3.3).

There are several things to note about LEAQI:

$\diamond$ If the current policy is already very certain, an expert annotator is never queried.
$\diamond$ If a label is queried, the expert is queried only if the difference classifier predicts disagreement with the heuristic, or the apple tasting procedure flips the difference classifier prediction.
$\diamond$ Due to apple tasting, most errors the difference classifier makes will cause it to query the expert unnecessarily; this is the "safe" type of error (increasing sample complexity but not harming accuracy), versus a Type II error (which leads to biased labels).
$\diamond$ The difference classifier is only trained on states where the policy is uncertain, which is exactly the distribution on which it is run.

# 3.2 Apple Tasting for One-Sided Learning

The difference classifier $h \in \mathcal{H}$ must be trained (line 27) based on one-sided feedback (it only observes errors when it predicts "disagree") to minimize Type II errors (it should only very rarely predict "agree" when the truth is "disagree"). This helps keep the labeled data for the learned policies unbiased. The main challenge here is that the feedback to the difference classifier is one-sided: that is, if it predicts "disagree" then it gets to see the truth, but if it predicts "agree" it never finds out if it was wrong. We use one of Helmbold et al.'s (2000) algorithms, STAP (see Alg 3), which works by randomly sampling from apples that are predicted not to be tasted and tasting them anyway (line 12). Formally, STAP tastes apples that are predicted to be bad with probability $\sqrt{(m + 1) / t}$, where $m$ is the number of mistakes, and $t$ is the number of apples tasted so far.

We adapt the apple tasting algorithm STAP to our setting, controlling the number of Type II errors made by the difference classifier, as follows. First, because some heuristic actions are much more common than others, we run a separate apple tasting scheme per heuristic action (in the sense that we count the number of errors on that heuristic action rather than globally). Second, when there is significant action imbalance<sup>2</sup> we find it necessary to skew the distribution from STAP more in favor of querying. We achieve this by sampling from a Beta distribution (generalizing the uniform), whose mean is shifted toward zero for more frequent heuristic actions. This increases the chance that apple tasting will find bad apples for each action (thereby keeping the error rate low for predicting disagreement).
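
As a concrete illustration of this per-action variant, the sketch below transcribes Alg 3's bookkeeping, including the Beta-skewed draw; the class and method names are ours, and the return value feeds the branch on line 15 of Alg 2 exactly as printed there.

```python
import random
from collections import defaultdict

class PerActionSTAP:
    """Sketch of the per-action STAP variant: statistics are kept per
    heuristic action over the difference dataset S, mirroring Alg 3.
    This is an illustration, not the released implementation."""

    def __init__(self):
        self.t = defaultdict(int)   # examples in S with this heuristic action
        self.m = defaultdict(int)   # difference-classifier mistakes on it
        self.size = 0               # |S|

    def update(self, a_h, d_hat, d_true):
        """Record a new difference example (added when the expert is queried)."""
        self.size += 1
        self.t[a_h] += 1
        if d_hat != d_true:
            self.m[a_h] += 1

    def apple_taste(self, a_h, d_hat):
        """Decision used on line 15 of Alg 2, following Alg 3 as printed."""
        t = max(self.t[a_h], 1)     # guard against division by zero early on
        w = self.t[a_h] / self.size if self.size else 0.0
        # Beta(1 - w, 1) pushes r toward 0 for frequent actions (w near 1)
        r = random.betavariate(1 - w, 1) if w < 1 else random.random()
        return d_hat == 1 and r <= ((self.m[a_h] + 1) / t) ** 0.5
```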

# 3.3 Measuring Policy Certainty

In step 11, LEAQI must estimate the certainty of $\pi_{i}$ on $s$. Following Cesa-Bianchi et al. (2006), we implement this using a margin-based criterion. To achieve this, we consider $\pi$ as a function that maps actions to scores and then chooses the action with the largest score. The certainty measure is then the difference in scores between the highest and second highest scoring actions:

$$
\operatorname{certainty}(\pi, s) = \max_{a} \pi(s, a) - \max_{a' \neq a} \pi(s, a')
$$
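
In code, this margin is simply the gap between the two largest scores; a minimal sketch, assuming the policy exposes a vector of per-action scores:

```python
import numpy as np

def certainty(scores):
    """Margin certainty: gap between the top two per-action scores."""
    s = np.sort(np.asarray(scores, dtype=float))
    return float(s[-1] - s[-2])

print(certainty([0.1, 2.5, 0.3]))  # large margin: rho = b/(b+z) is small
print(certainty([1.1, 1.0, 0.9]))  # small margin: the state is often sampled
```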

# 3.4 Analysis

Theoretically, the main result for LEAQI is an interpretation of the main DAgger result(s). Formally, let $d_{\pi}$ denote the distribution of states visited by $\pi$, $C(s,a) \in [0,1]$ be the immediate cost of performing action $a$ in state $s$, $C_{\pi}(s) = \mathbb{E}_{a \sim \pi(s)} C(s,a)$, and the total expected cost of $\pi$ be $J(\pi) = T\,\mathbb{E}_{s \sim d_{\pi}} C_{\pi}(s)$, where $T$ is the length of trajectories. $C$ is not available to a learner in an imitation setting; instead the algorithm observes an expert and minimizes a surrogate loss $\ell(s,\pi)$ (e.g., $\ell$ may be the zero/one loss between $\pi$ and $\pi^{\star}$). We assume $\ell$ is strongly convex and bounded in $[0,1]$ over $\Pi$.

Given this setup and these assumptions, let $\epsilon_{\mathrm{pol\text{-}approx}} = \min_{\pi \in \Pi}\frac{1}{N}\sum_{i = 1}^{N}\mathbb{E}_{s\sim d_{\pi_i}}\ell (s,\pi)$ be the true loss of the best policy in hindsight, and let $\epsilon_{\mathrm{dc\text{-}approx}} = \min_{h\in \mathcal{H}}\frac{1}{N}\sum_{i = 1}^{N}\mathbb{E}_{s\sim d_{\pi_i}}\mathrm{err}(s, h, \mathbb{1}[\pi^{\star}(s)\neq \pi^{\mathrm{h}}(s)])$ be the true error of the best difference classifier in hindsight. Assuming that the regret of the policy learner is bounded by $\mathrm{reg}_{\mathrm{pol}}(N)$ after $N$ steps, Ross et al. (2011) show the following:

Theorem 1 (Thm 4.3 of Ross et al. (2011)). After $N$ episodes each of length $T$, under the assumptions above, with probability at least $1 - \delta$ there exists a policy $\pi \in \pi_{1:N}$ such that:

$$
\mathbb{E}_{s \sim d_{\pi}} \ell(s, \pi) \leq \epsilon_{\mathrm{pol\text{-}approx}} + \mathrm{reg}_{\mathrm{pol}}(N) + \sqrt{(2 / N) \log(1 / \delta)}
$$

This holds regardless of how $\pi_{1:N}$ are trained (line 26). The question of how well LEAQI performs becomes a question of how well the combination of uncertainty-based sampling and the difference classifier learns. So long as those do a good job on their individual classification tasks, DAgger guarantees that the policy will do a good job. This is formalized below, where $Q^{\star}(s,a)$ is the best possible cumulative cost (measured by $C$) starting in state $s$ and taking action $a$:

Theorem 2 (Theorem 2.2 of Ross et al. (2011)). Let $u$ be such that $Q^{\star}(s,a) - Q^{\star}(s,\pi^{\star}(s))\leq u$ for all $a$ and all $s$ with $d_{\pi}(s) > 0$; then for some $\pi \in \pi_{1:N}$, as $N\to \infty$:

$$
J(\pi) \leq J(\pi^{\star}) + u\,T\,\epsilon_{\mathrm{pol\text{-}approx}}
$$

Here, $u$ captures the most long-term impact a single decision can have; for example, for average Hamming loss, it is straightforward to see that $u = \frac{1}{T}$

<table><tr><td>Task</td><td>Named Entity Recognition</td><td>Keyphrase Extraction</td><td>Part of Speech Tagging</td></tr><tr><td>Language</td><td>English (en)</td><td>English (en)</td><td>Modern Greek (el)</td></tr><tr><td>Dataset</td><td>CoNLL'03 (Tjong Kim Sang and De Meulder, 2003)</td><td>SemEval 2017 Task 10 (Augenstein et al., 2017)</td><td>Universal Dependencies (Nivre, 2018)</td></tr><tr><td># Ex</td><td>14,987</td><td>2,809</td><td>1,662</td></tr><tr><td>Avg. Len</td><td>14.5</td><td>26.3</td><td>25.5</td></tr><tr><td># Actions</td><td>5</td><td>2</td><td>17</td></tr><tr><td>Metric</td><td>Entity F-score</td><td>Keyphrase F-score</td><td>Per-tag accuracy</td></tr><tr><td>Features</td><td>English BERT (Devlin et al., 2019)</td><td>SciBERT (Beltagy et al., 2019)</td><td>M-BERT (Devlin et al., 2019)</td></tr><tr><td>Heuristic</td><td>String matching against an offline gazetteer of entities from Khashabi et al. (2018)</td><td>Output from an unsupervised keyphrase extraction model (Florescu and Caragea, 2017)</td><td>Dictionary from Wiktionary, similar to Zesch et al. (2008) and Haghighi and Klein (2006)</td></tr><tr><td>Heur Quality</td><td>P 88%, R 27%, F 41%</td><td>P 20%, R 44%, F 27%</td><td>10% coverage, 67% acc</td></tr></table>

Table 1: An overview of the three tasks considered in experiments.

because any single mistake can increase the number of mistakes by at most 1. For precision, recall and F-score, $u$ can be as large as one in the (rare) case that a single decision switches from one true positive to no true positives.

# 4 Experiments

The primary research questions we aim to answer experimentally are:

Q1 Does uncertainty-based active learning achieve lower query complexity than passive learning in the learning to search setting?
Q2 Does learning a difference classifier improve query efficiency over active learning alone?
Q3 Does Apple Tasting successfully handle the problem of learning from one-sided feedback?
Q4 Is the approach robust to cases where the noisy heuristic is uncorrelated with the expert?
Q5 Is casting the heuristic as a policy more effective than using its output as features?

To answer these questions, we conduct experiments on three tasks (see Table 1): English named entity recognition, English scientific keyphrase extraction, and part of speech tagging on Modern Greek (el), selected as a low-resource setting.

# 4.1 Algorithms and Baselines

In order to address the research questions above, we compare LEAQI to several baselines. The baselines below compare our approach to previous methods:

DAGGER. Passive DAgger (Alg 1).

ACTIVEDAGGER. An active variant of DAgger that asks for labels only when uncertain. (This is equivalent to LEAQI, but with neither the difference classifier nor apple tasting.)

DAGGER+FEAT. DAGGER with the heuristic policy's output appended as an input feature.

ACTIVEDAGGER+FEAT. ACTIVEDAGGER with the heuristic policy as a feature.

The next set of comparisons are explicit ablations:

LEAQI+NOAT. LEAQI with no apple tasting.

LEAQI+NOISYHEUR. LEAQI, but where the heuristic returns a label uniformly at random.

The baselines and LEAQI form a natural progression. DAGGER is the base algorithm underlying all of the above, but it is very query-inefficient with respect to an expert annotator. ACTIVEDAGGER introduces active learning to make DAGGER more query efficient; the delta to the previous addresses Q1. LEAQI+NOAT introduces the difference classifier; the delta addresses Q2. LEAQI adds apple tasting to deal with one-sided learning; the delta addresses Q3. Finally, LEAQI+NOISYHEUR. (vs LEAQI) addresses Q4, and the +FEAT variants address Q5.

# 4.2 Data and Representation

For named entity recognition, we use training, validation, and test data from CoNLL'03 (Tjong Kim Sang and De Meulder, 2003), consisting of IO tags instead of BIO tags (the "B" tag is almost never used in this dataset, so we never attempt to predict it) over four entity types: Person, Organization, Location, and Miscellaneous. For part of speech tagging, we use training and test data from the Modern Greek portion of the Universal Dependencies (UD) treebanks (Nivre, 2018), consisting of 17 universal tags<sup>4</sup>. For keyphrase extraction, we use training, validation, and test data from SemEval 2017 Task 10 (Augenstein et al., 2017), consisting of IO tags (we use one "I" tag for all three keyphrase types).

In all tasks, we implement both the policy and difference classifier by fine-tuning the last layer of a BERT embedding representation (Devlin et al., 2019). More specifically, for a sentence of length $T$, $w_{1},\ldots ,w_{T}$, we first compute BERT embeddings for each word, $x_{1},\ldots ,x_{T}$, using the appropriate BERT model: English BERT and M-BERT for named entity and part-of-speech, respectively, and SciBERT (Beltagy et al., 2019) for keyphrase extraction. We then represent the state at position $t$ by concatenating the word embedding at that position with a one-hot representation of the previous action: $s_t = [x_t;\mathrm{onehot}(a_{t - 1})]$. This feature representation is used both for learning the labeling policy and for learning the difference classifier.
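
A minimal sketch of this featurization (the -1 convention for "no previous action" is our assumption):

```python
import numpy as np

def make_state(x_t, prev_action, n_actions):
    """Build s_t = [x_t; onehot(a_{t-1})]. `x_t` is the (precomputed)
    BERT embedding of the current word; `prev_action` is an id in
    0..n_actions-1, or -1 at t = 0 (an illustrative convention)."""
    onehot = np.zeros(n_actions)
    if prev_action >= 0:
        onehot[prev_action] = 1.0
    return np.concatenate([x_t, onehot])

# e.g., a 768-dimensional BERT vector and 5 NER actions give a 773-d state
s = make_state(np.random.randn(768), prev_action=2, n_actions=5)
assert s.shape == (773,)
```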

# 4.3 Expert Policy and Heuristics

In all experiments, the expert $\pi^{\star}$ is a simulated human annotator who annotates one word at a time. The expert returns the optimal action for the relevant evaluation metric (F-score for named entity recognition and keyphrase extraction, and accuracy for part-of-speech tagging). We take the annotation cost to be the total number of words labeled.

The heuristic we implement for named entity recognition is a high-precision gazetteer-based string matching approach. We construct this by taking a gazetteer from Wikipedia using the CogComp framework (Khashabi et al., 2018), and use FlashText (Singh, 2017) to label the dataset. This heuristic achieves a precision of 0.88, recall of 0.27 and F-score of 0.41 on the training data.

The keyphrase extraction heuristic is the output of an "unsupervised keyphrase extraction" approach (Florescu and Caragea, 2017). This graph-based system constructs word-level graphs that incorporate the positions of all word occurrences, then uses PageRank to score words and phrases. This heuristic achieves a precision of 0.20, recall of 0.44 and F-score of 0.27 on the training data.

The part of speech tagging heuristic is based on a small dictionary compiled from Wiktionary. Following Haghighi and Klein (2006) and Zesch et al. (2008), we extract this dictionary as follows: for each word $w$ in our training data, we find the part of speech $y$ by querying Wiktionary. If $w$ is in Wiktionary, we convert the Wiktionary part of speech tag to a Universal Dependencies tag (see §A.1); if $w$ is not in Wiktionary, we use a default label of "X". Furthermore, if word $w$ has multiple parts of speech, we select the first part of speech tag in the list. The label "X" is chosen 90% of the time. For the remaining 10%, the heuristic achieves an accuracy of 0.67 on the training data.
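
A minimal sketch of this dictionary heuristic, assuming precompiled `wiktionary` and `wikt_to_ud` mappings (the toy entries below are illustrative; the conjunction mapping is taken from the table in §A.1):

```python
def pos_heuristic(word, wiktionary, wikt_to_ud):
    """Dictionary heuristic for POS tagging, following the procedure
    above. `wiktionary` maps a word to its list of Wiktionary POS
    strings; `wikt_to_ud` maps those strings to UD tags (§A.1)."""
    tags = wiktionary.get(word)
    if not tags:
        return "X"                       # out-of-dictionary default
    return wikt_to_ud.get(tags[0], "X")  # first listed POS wins

# toy usage with a two-entry dictionary
wikt = {"σπίτι": ["noun"], "και": ["conjunction"]}
mapping = {"noun": "NOUN", "conjunction": "PART"}
print(pos_heuristic("σπίτι", wikt, mapping))    # NOUN
print(pos_heuristic("άγνωστη", wikt, mapping))  # X
```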

# 4.4 Experimental Setup

Our experimental setup is online active learning. We make a single pass over a dataset, and the goal is to achieve an accurate system as quickly as possible. We measure performance (accuracy or F-score) after every 1000 words ($\approx$ 50 sentences) on held-out test data, and produce error bars by averaging across three runs and reporting standard deviations.

Hyperparameters for DAGGER are optimized using grid search on the named entity recognition training data and evaluated on development data. We then fix the DAGGER hyperparameters for all other experiments and models. The difference classifier hyperparameters are subsequently optimized in the same manner and then fixed for all other experiments.

# 4.5 Experimental Results

The main results are shown in the top two rows of Figure 2; ablations of LEAQI are shown in Figure 3.

Figure 2: Empirical evaluation on three tasks: (left) named entity recognition, (middle) keyphrase extraction and (right) part of speech tagging. The top row shows performance (F-score or accuracy) with respect to the number of queries to the expert. The bottom row shows the number of queries as a function of the number of words seen.

In Figure 2, the top row shows traditional learning curves (performance vs number of queries), and the bottom row shows the number of queries made to the expert as a function of the total number of words seen.

Active vs Passive (Q1). In all cases, we see that the active strategies improve on the passive strategies; this difference is largest in keyphrase extraction, middling for part of speech tagging, and small for NER. While not surprising given previous successes of active learning, this confirms that it is also a useful approach in our setting. As expected, the active algorithms query far less than the passive approaches, and LEAQI queries the least.

Heuristic as Features vs Policy (Q5). We see that while adding the heuristic's output as a feature can be modestly useful, it is not uniformly useful and, at least for keyphrase extraction and part of speech tagging, it is not as effective as LEAQI. For named entity recognition, it is not effective at all, but this is also a case where all algorithms perform essentially the same. Indeed, here, LEAQI learns quickly with few queries, but never quite reaches the performance of ActiveDAgger. This is likely due to the difference classifier becoming overly confident too quickly, especially on the "O" label, given the (relatively well-known) mismatch between development data and test data on this dataset.

Difference Classifier Efficacy (Q2). Turning to the ablations (Figure 3), we can address Q2 by comparing the ActiveDAgger curve to the LeaQI+NoAT curve. Here, we see that on NER and keyphrase extraction, adding the difference classifier without adding apple tasting results in a far worse model: it learns very quickly but plateaus much lower than the best results. The exception is part of speech tagging, where apple tasting does not seem necessary (but also does not hurt). Overall, this essentially shows that without controlling Type II errors, the difference classifier on its own does not fulfill its goals.

Apple Tasting Efficacy (Q3). Also considering the ablation study, we can compare LeaQI+NoAT with LeaQI. In the case of part of speech tagging, there is little difference: using apple tasting to combat issues of learning from one-sided feedback neither helps nor hurts performance. However, for both named entity recognition and keyphrase extraction, removing apple tasting leads to faster learning, but substantially lower final performance (accuracy or F-score). This is somewhat expected:

Figure 3: Ablation results on (left) named entity recognition, (middle) keyphrase extraction and (right) part of speech tagging. In addition to LEAQI and DAgger (copied from Figure 2), these graphs also show LEAQI+NOAT (apple tasting disabled) and LEAQI+NOISYHEUR. (a heuristic that produces labels uniformly at random).

without apple tasting, the training data that the policy sees is likely to be highly biased, and so it gets stuck in a low accuracy regime.

Robustness to Poor Heuristic (Q4). We compare LeaQI+NoisyHeur to ActiveDAgger. Because the heuristic here is useless, the main hope is that it does not degrade performance below ActiveDAgger. Indeed, that is what we see in all three cases: the difference classifier is able to learn quite quickly to essentially ignore the heuristic and only rely on the expert.

# 5 Discussion and Limitations

In this paper, we considered the problem of reducing the number of queries to an expert labeler for structured prediction problems. We took an imitation learning approach and developed an algorithm, LEAQI, which leverages a source of low-quality labels: a heuristic policy that is suboptimal but free. To use this heuristic as a policy, we learn a difference classifier that effectively tells LEAQI when it is safe to treat the heuristic's action as if it were optimal. We showed empirically, across named entity recognition, keyphrase extraction and part of speech tagging tasks, that the active learning approach improves significantly on passive learning, and that leveraging a difference classifier improves on that. That said, LEAQI has limitations:

1. In some settings, learning a difference classifier may be as hard as or harder than learning the structured predictor; for instance, if the task is binary sequence labeling (e.g., word segmentation), which limits its usefulness.
2. The true labeling cost is likely more complicated than simply the number of individual actions queried to the expert.

Despite these limitations, we hope that LEAQI provides a useful (and relatively simple) bridge that can enable using rule-based systems, heuristics, and unsupervised models as building blocks for more complex supervised learning systems. This is particularly attractive in settings where we have very strong rule-based systems, ones which often outperform the best statistical systems, like coreference resolution (Lee et al., 2011), information extraction (Riloff and Wiebe, 2003), and morphological segmentation and analysis (Smit et al., 2014).

# Acknowledgements

We thank Rob Schapire, Chicheng Zhang, and the anonymous ACL reviewers for very helpful comments and insights. This material is based upon work supported by the National Science Foundation under Grant No. 1618193 and an ACM SIGHPC/Intel Computational and Data Science Fellowship to KB. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the National Science Foundation nor of the ACM.
|
| 317 |
+
|
| 318 |
+
# References
|
| 319 |
+
|
| 320 |
+
Les E Atlas, David A Cohn, and Richard E Ladner. 1990. Training connectionist networks with queries and selective sampling. In NeurIPS.
|
| 321 |
+
Isabelle Augenstein, Mrinal Das, Sebastian Riedel, Lakshmi Vikraman, and Andrew McCallum. 2017. Semeval 2017 task 10: Scienceie - extracting keyphrases and relations from scientific publications. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017).
|
| 322 |
+
|
| 323 |
+
Nina Balcan, Alina Beygelzimer, and John Langford. 2006. Agnostic active learning. In ICML.
|
| 324 |
+
Iz Beltagy, Kyle Lo, and Arman Cohan. 2019. Scibert: Pretrained language model for scientific text. In EMNLP.
|
| 325 |
+
Samy Bengio, Oriol Vinyals, Navdeep Jaitly, and Noam Shazeer. 2015. Scheduled sampling for sequence prediction with recurrent neural networks. In NeurIPS.
|
| 326 |
+
Alina Beygelzimer, Sanjoy Dasgupta, , and John Langford. 2009. Importance weighted active learning. In ICML.
|
| 327 |
+
Alina Beygelzimer, Daniel Hsu, John Langford, and Tong Zhang. 2010. Agnostic active learning without constraints. In NeurIPS.
|
| 328 |
+
Michael Bloodgood and Chris Callison-Burch. 2010. Bucking the trend: Large-scale cost-focused active learning for statistical machine translation. In ACL.
|
| 329 |
+
Nicolò Cesa-Bianchi, Claudio Gentile, and Luca Zaniboni. 2006. Worst-case analysis ofselective sampling for linear classification. JMLR.
|
| 330 |
+
Michael Collins and Brian Roark. 2004. Incremental parsing with the perceptron algorithm. In ACL.
|
| 331 |
+
Aron Culotta and Andrew McCallum. 2005. Reducing labeling effort for structured prediction tasks. In AAAI.
|
| 332 |
+
Hal Daumé, III, John Langford, and Daniel Marcu. 2009. Search-based structured prediction. Machine Learning Journal.
|
| 333 |
+
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In *NAACL*.
|
| 334 |
+
Corina Florescu and Cornelia Caragea. 2017. Position-Rank: An unsupervised approach to keyphrase extraction from scholarly documents. In ACL.
|
| 335 |
+
Ben Hachey, Beatrice Alex, and Markus Becker. 2005. Investigating the effects of selective sampling on the annotation task. In CoNLL.
|
| 336 |
+
Robbie Haertel, Eric K. Ringger, Kevin D. Seppi, James L. Carroll, and Peter McClanahan. 2008. Assessing the costs of sampling methods in active learning for annotation. In ACL.
|
| 337 |
+
Aria Haghighi and Dan Klein. 2006. Prototype-driven learning for sequence models.
|
| 338 |
+
David P. Helmbold, Nicholas Littlestone, and Philip M. Long. 2000. Apple tasting. Information and Computation.
|
| 339 |
+
Kshitij Judah, Alan Paul Fern, and Thomas Glenn Dietterich. 2012. Active imitation learning via reduction to iid active learning. In AAAI.
|
| 340 |
+
|
| 341 |
+
Daniel Khashabi, Mark Sammons, Ben Zhou, Tom Redman, Christos Christodoulopoulos, Vivek Srikumar, Nicholas Rizzolo, Lev Ratinov, Guanheng Luo, Quang Do, Chen-Tse Tsai, Subhro Roy, Stephen Mayhew, Zhili Feng, John Wieting, Xiaodong Yu, Yangqiu Song, Shashank Gupta, Shyam Upadhyay, Naveen Arivazhagan, Qiang Ning, Shaoshi Ling, and Dan Roth. 2018. CogCompNLP: Your swiss army knife for NLP. In LREC.
|
| 342 |
+
Rémi Leblond, Jean-Baptiste Alayrac, Anton Osokin, and Simon Lacoste-Julien. 2018. SEARNN: Training RNNs with global-local losses. In ICLR.
|
| 343 |
+
Heeyoung Lee, Yves Peirsman, Angel Chang, Nathanael Chambers, Mihai Surdeanu, and Dan Jurafsky. 2011. Stanford's multi-pass sieve coreference resolution system at the CoNLL-2011 shared task. In Proceedings of the Fifteenth Conference on Computational Natural Language Learning: Shared Task.
|
| 344 |
+
N. Littlestone and M. K. Warmuth. 1989. The weighted majority algorithm. In Proceedings of the 30th Annual Symposium on Foundations of Computer Science.
|
| 345 |
+
Joakim Nivre et al. 2018. Universal Dependencies v2.5. LINDAT/CLARIN digital library at the Institute of Formal and Applied Linguistics, Charles University.
|
| 346 |
+
Adwait Ratnaparkhi. 1996. A maximum entropy model for part-of-speech tagging. In EMNLP.
|
| 347 |
+
Larry Rendell. 1986. A general framework for induction and a study of selective induction. Machine Learning Journal.
|
| 348 |
+
Ellen Riloff and Janyce Wiebe. 2003. Learning extraction patterns for subjective expressions. In EMNLP.
|
| 349 |
+
Eric Ringger, Peter McClanahan, Robbie Haertel, George Busby, Marc Carmen, James Carroll, Kevin Seppi, and Deryle Lonsdale. 2007. Active learning for part-of-speech tagging: Accelerating corpus annotation. In Proceedings of the Linguistic Annotation Workshop.
|
| 350 |
+
Stéphane Ross, Geoff J. Gordon, and J. Andrew Bagnell. 2011. A reduction of imitation learning and structured prediction to no-regret online learning. In AISTATS.
|
| 351 |
+
David Sculley. 2007. Practical learning from one-sided feedback. In KDD.
|
| 352 |
+
Vikash Singh. 2017. Replace or retrieve keywords in documents at scale. CoRR, abs/1711.00046.
|
| 353 |
+
Peter Smit, Sami Virpioja, Stig-Arne Grönroos, and Mikko Kurimo. 2014. Morfessor 2.0: Toolkit for statistical morphological segmentation. In EACL.
|
| 354 |
+
Cynthia A. Thompson, Mary Elaine Califf, and Raymond J. Mooney. 1999. Active learning for natural language parsing and information extraction. In ICML.
|
| 355 |
+
|
| 356 |
+
Erik F. Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition. In NAACL/HLT.
|
| 357 |
+
Vladimir Vapnik. 1982. Estimation of Dependencies Based on Empirical Data. Springer Series in Statistics. Springer-Verlag, Berlin, Heidelberg.
|
| 358 |
+
Steven Whitehead. 1991. A study of cooperative mechanisms for faster reinforcement learning. Technical report, University of Rochester.
|
| 359 |
+
Torsten Zesch, Christof Müller, and Iryna Gurevych. 2008. Extracting lexical semantic knowledge from Wikipedia and Wiktionary. In LREC.
|
| 360 |
+
Chicheng Zhang and Kamalika Chaudhuri. 2015. Active learning from weak and strong labelers. In NeurIPS.
|
| 361 |
+
|
| 362 |
+
# Supplementary Material For: Active Imitation Learning with Noisy Guidance
|
| 363 |
+
|
| 364 |
+
# A Experimental Details
|
| 365 |
+
|
| 366 |
+
# A.1 Wiktionary to Universal Dependencies
|
| 367 |
+
|
| 368 |
+
<table><tr><td>Greek, Modern (el) Wiktionary</td><td>Universal Dependencies</td></tr><tr><td>adjective</td><td>ADJ</td></tr><tr><td>adposition</td><td>ADP</td></tr><tr><td>preposition</td><td>ADP</td></tr><tr><td>adverb</td><td>ADV</td></tr><tr><td>auxiliary</td><td>AUX</td></tr><tr><td>coordinating conjunction</td><td>CCONJ</td></tr><tr><td>determiner</td><td>DET</td></tr><tr><td>interjection</td><td>INTJ</td></tr><tr><td>noun</td><td>NOUN</td></tr><tr><td>numeral</td><td>NUM</td></tr><tr><td>particle</td><td>PART</td></tr><tr><td>pronoun</td><td>PRON</td></tr><tr><td>proper noun</td><td>PROPN</td></tr><tr><td>punctuation</td><td>PUNCT</td></tr><tr><td>subordinating conjunction</td><td>SCONJ</td></tr><tr><td>symbol</td><td>SYM</td></tr><tr><td>verb</td><td>VERB</td></tr><tr><td>other</td><td>X</td></tr><tr><td>article</td><td>DET</td></tr><tr><td>conjunction</td><td>PART</td></tr></table>
|
| 369 |
+
|
| 370 |
+
Table 2: Conversion between Greek, Modern (el) Wiktionary POS tags and Universal Dependencies POS tags.
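For reuse, Table 2 transcribes directly into a lookup dictionary. This is our transcription of the table, not code shipped with the paper:

```python
# Greek (el) Wiktionary POS tag -> Universal Dependencies POS tag (Table 2).
WIKTIONARY_TO_UD = {
    "adjective": "ADJ", "adposition": "ADP", "preposition": "ADP",
    "adverb": "ADV", "auxiliary": "AUX",
    "coordinating conjunction": "CCONJ", "determiner": "DET",
    "interjection": "INTJ", "noun": "NOUN", "numeral": "NUM",
    "particle": "PART", "pronoun": "PRON", "proper noun": "PROPN",
    "punctuation": "PUNCT", "subordinating conjunction": "SCONJ",
    "symbol": "SYM", "verb": "VERB", "other": "X",
    "article": "DET", "conjunction": "PART",
}

ud_tag = WIKTIONARY_TO_UD.get("proper noun", "X")  # -> "PROPN"
```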
|
| 371 |
+
|
| 372 |
+
# A.2 Hyperparameters
|
| 373 |
+
|
| 374 |
+
Here we provide a table of all hyperparameters we considered for LEAQI and the baseline models (see Section 4.4).
|
| 375 |
+
|
| 376 |
+
Table 3: Hyperparameters
|
| 377 |
+
|
| 378 |
+
<table><tr><td>Hyperparameter</td><td>Values Considered</td><td>Final Value</td></tr><tr><td>Policy learning rate</td><td>$10^{-3}$, $10^{-4}$, $10^{-5}$, $10^{-6}$, $5.5 \cdot 10^{-6}$, $10^{-6}$</td><td>$10^{-6}$</td></tr><tr><td>Difference classifier ($h$) learning rate</td><td>$10^{-1}$, $10^{-2}$, $10^{-3}$, $10^{-4}$</td><td>$10^{-2}$</td></tr><tr><td>Confidence parameter ($b$)</td><td>$5.0 \cdot 10^{-1}$, $10 \cdot 10^{-1}$, $15 \cdot 10^{-1}$</td><td>$5.0 \cdot 10^{-1}$</td></tr></table>
|
| 379 |
+
|
| 380 |
+
# A.3 Ablation Study: Difference Classifier Learning Rate (see Figure 4)
|
| 381 |
+
|
| 382 |
+
# A.4 Ablation Study: Confidence Parameter $b$ (see Figure 5)
|
| 383 |
+
|
| 384 |
+

|
| 385 |
+
|
| 386 |
+

|
| 387 |
+
|
| 388 |
+

|
| 389 |
+
|
| 390 |
+

|
| 391 |
+
Figure 4: (top-row) English keyphrase extraction and (bottom-row) low-resource language part of speech tagging on Greek, Modern (el). We show the performance of using different learning rates for the difference classifier $h$. These plots indicate that there is little difference in performance across difference classifier learning rates.
|
| 392 |
+
|
| 393 |
+

|
| 394 |
+
|
| 395 |
+

|
| 396 |
+
|
| 397 |
+

|
| 398 |
+
|
| 399 |
+

|
| 400 |
+
|
| 401 |
+

|
| 402 |
+
Figure 5: (top-row) English keyphrase extraction and (bottom-row) low-resource language part of speech tagging on Greek, Modern (el). We show the performance of using different confidence parameters $b$. These plots indicate that our model is robust to the choice of confidence parameter.
|
| 403 |
+
|
| 404 |
+

|
activeimitationlearningwithnoisyguidance/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8b0236bc25c8668ea76bfe416229edab8f9eee4cb90284350a6d7fe9404b9cd6
|
| 3 |
+
size 939572
|
activeimitationlearningwithnoisyguidance/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9d5b921f3737eabc66103ca5458c043365726adeea706a6512bafd41a26a85de
|
| 3 |
+
size 505696
|
activelearningforcoreferenceresolutionusingdiscreteannotation/97c9749f-2ce8-4b80-9725-60e8e57f6363_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:28a8212d394f9a369b80bcd2d95ec4a16ceaa2830b62329e4398ffbabec2adf6
|
| 3 |
+
size 70603
|
activelearningforcoreferenceresolutionusingdiscreteannotation/97c9749f-2ce8-4b80-9725-60e8e57f6363_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:25e8eb8d264408d84ddcaae63fda8a0578bdd910e1d0ff7e3ce334c1c604c6ac
|
| 3 |
+
size 81934
|
activelearningforcoreferenceresolutionusingdiscreteannotation/97c9749f-2ce8-4b80-9725-60e8e57f6363_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ed6463d7b804195cd1e6f9b429af7b02a48c445810b7778c0293a6fb46c48189
|
| 3 |
+
size 925801
|
activelearningforcoreferenceresolutionusingdiscreteannotation/full.md
ADDED
|
@@ -0,0 +1,362 @@
|
| 1 |
+
# Active Learning for Coreference Resolution using Discrete Annotation
|
| 2 |
+
|
| 3 |
+
Belinda Z. Li†* Gabriel Stanovsky\* Luke Zettlemoyer\*
|
| 4 |
+
\* University of Washington, Allen Institute for AI, Facebook. belindali@fb.com, {gabis,lsz}@cs.washington.edu
|
| 5 |
+
|
| 6 |
+
# Abstract
|
| 7 |
+
|
| 8 |
+
We improve upon pairwise annotation for active learning in coreference resolution, by asking annotators to identify mention antecedents if a presented mention pair is deemed not coreferent. This simple modification, when combined with a novel mention clustering algorithm for selecting which examples to label, is much more efficient in terms of the performance obtained per annotation budget. In experiments with existing benchmark coreference datasets, we show that the signal from this additional question leads to significant performance gains per human-annotation hour. Future work can use our annotation protocol to effectively develop coreference models for new domains. Our code is publicly available. $^1$
|
| 9 |
+
|
| 10 |
+
# 1 Introduction
|
| 11 |
+
|
| 12 |
+
Coreference resolution is the task of resolving anaphoric expressions to their antecedents (see Figure 1). It is often required in downstream applications such as question answering (Dasigi et al., 2019) or machine translation (Stanovsky et al., 2019). Exhaustively annotating coreference is an expensive process as it requires tracking coreference chains across long passages of text. In news stories, for example, important entities may be referenced many paragraphs after their introduction.
|
| 13 |
+
|
| 14 |
+
Active learning is a technique which aims to reduce costs by annotating samples which will be most beneficial for the learning process, rather than fully labeling a large fixed training set. Active learning consists of two components: (1) a task-specific learning algorithm, and (2) an iterative sample selection algorithm, which examines the performance of the model trained at the previous iteration and selects samples to add to the annotated
|
| 15 |
+
|
| 16 |
+
A volcano in Mexico, known to locals as Po-po, just started spewing molten rock. Q: Are the two mentions coreferent? A: No. Q: What is the first appearance of the entity that the yellow-highlighted text refers to? A: A volcano in Mexico.
|
| 17 |
+
|
| 18 |
+
Figure 1: Discrete annotation. The annotator is shown the document, a span (yellow), and the span's predicted antecedent (blue). In case the answer to the coreference question is negative (i.e., the spans are not coreferring), we present a follow-up question ("what is the first appearance of the entity?"), providing additional cost-effective signal. Our annotation interface can be seen in Figure 5 in the Appendix.
|
| 19 |
+
|
| 20 |
+
training set. This method has proven successful for various tasks in low-resource domains (Garrette and Baldridge, 2013; Kholghi et al., 2015; Syed et al., 2016, 2017).
|
| 21 |
+
|
| 22 |
+
Sachan et al. (2015) showed that active learning can be employed for the coreference resolution task. They used gold data to simulate pairwise human annotations, where two entity mentions are annotated as either coreferring or not (see the first question in Figure 1).
|
| 23 |
+
|
| 24 |
+
In this paper, we propose two improvements to active learning for coreference resolution. First, we introduce the notion of discrete annotation (Section 3), which augments pairwise annotation by introducing a simple additional question: if the user deems the two mentions non-coreferring, they are asked to mark the first occurrence of one of the mentions (see second question in Figure 1). We show that this simple addition has several positive implications. The feedback is relatively easy for annotators to give, and provides meaningful signal which dramatically reduces the number of annotations needed to fully label a document.
|
| 25 |
+
|
| 26 |
+
Second, we introduce mention clustering (Section 4). When selecting the next mention to label, we take into account aggregate model predictions
|
| 27 |
+
|
| 28 |
+
for all antecedents which belong to the same cluster. This avoids repeated labeling that would come with separately verifying every mention pair within the same cluster, as done in previous methods.
|
| 29 |
+
|
| 30 |
+
We conduct experiments across several sample selection algorithms using existing gold data for user labels and show that both of our contributions significantly improve performance on the CoNLL-2012 dataset (Pradhan et al., 2012). Overall, our active learning method presents a superior alternative to pairwise annotation for coreference resolution, achieving better performing models for a given annotation budget.
|
| 31 |
+
|
| 32 |
+
# 2 Background
|
| 33 |
+
|
| 34 |
+
Our work relies on two main components: a coreference resolution model and a sample selection algorithm.
|
| 35 |
+
|
| 36 |
+
Coreference resolution model We use the span ranking model introduced by Lee et al. (2017), and later implemented in the AllenNLP framework (Gardner et al., 2018). This model computes span embeddings for all possible spans $i$ in a document, and uses them to compute a probability distribution $P(y = \mathrm{ant}(i))$ over the set of all candidate antecedents $\mathcal{Y}(i) = \{K \text{ previous mentions in the document}\} \cup \{\epsilon\}$, where $\epsilon$ is a dummy antecedent signifying that span $i$ has no antecedent. This model does not require additional resources, such as syntactic dependencies or named entity recognition, and is thus well-suited for active learning scenarios for low-resource domains.
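As a rough illustration of this output layer, here is our own sketch, not the paper's code: the function name, the plain-Python softmax, and the convention of giving the dummy antecedent $\epsilon$ a fixed score of zero (following Lee et al. (2017)) are our assumptions.

```python
import math

def antecedent_distribution(pair_scores):
    """Turn unnormalized coreference scores s(i, y) for the K candidate
    antecedents of span i into P(y = ant(i)) over Y(i) = {K candidates, eps}.
    Index 0 of the result is the dummy antecedent eps, scored 0 by convention."""
    scores = [0.0] + list(pair_scores)        # prepend the dummy antecedent
    mx = max(scores)                          # numerically stable softmax
    exps = [math.exp(s - mx) for s in scores]
    z = sum(exps)
    return [e / z for e in exps]

# Example: three candidates; most mass goes to the highest-scoring one.
probs = antecedent_distribution([2.0, -1.0, 0.5])
```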
|
| 37 |
+
|
| 38 |
+
Sample selection algorithm Previous approaches for the annotation of coreference resolution have used mostly pairwise selection, where pairs of mentions are shown to a human annotator who marks whether they are co-referring (Gasperin, 2009; Laws et al., 2012; Zhao and Ng, 2014; Sachan et al., 2015). To incorporate these binary annotations into their clustering coreference model, Sachan et al. (2015) introduced the notion of must-link and cannot-link penalties, which we describe and extend in Section 4.
|
| 39 |
+
|
| 40 |
+
# 3 Discrete Annotation
|
| 41 |
+
|
| 42 |
+
In discrete annotation, as exemplified in Figure 1, we present the annotator with a document where the least certain span $i$ ("Po-po", in the example) and $i$ 's model-predicted antecedent, $A(i)$ ("locals"), are
|
| 43 |
+
|
| 44 |
+
highlighted. Similarly to pairwise annotation, annotators are first asked whether $i$ and $A(i)$ are coreferent. If they answer positively, we move on to the next sample. Otherwise, we deviate from pairwise sampling and ask the annotator to mark the antecedent for $i$ ("A volcano in Mexico") as the follow-up question. The annotator can abstain from answering the follow-up question in case $i$ is not a valid mention or if it does not have an antecedent in the document. See Figure 5 in the Appendix for more example annotations.
|
| 45 |
+
|
| 46 |
+
In Section 5, we show that discrete annotation is superior to the classic pairwise annotation in several aspects. First, it makes better use of human annotation time, as often an annotator needs to resolve the antecedent of the presented mention to answer the first question. For example, identifying that "Po-po" refers to the volcano, and not the locals. Second, we find that discrete annotation is a better fit for mention ranking models (Lee et al., 2017), which assign the most-likely antecedent to each mention, just as an annotator does in discrete annotation.
|
| 47 |
+
|
| 48 |
+
# 4 Mention Clustering
|
| 49 |
+
|
| 50 |
+
We experiment with three selection techniques by applying popular active learning selectors like entropy or query-by-committee (Settles, 2010) to clusters of spans. Because our model outputs antecedent probabilities and predictions, we would like to aggregate these outputs, such that we have only one probability per mention cluster rather than one per antecedent. We motivate this with an example: suppose span $i$'s top two most likely antecedents are $y_{1}$ and $y_{2}$. In scenario 1, $y_{1}$ and $y_{2}$ are predicted to be clustered together, and in scenario 2, they are predicted to be clustered apart. Span $i$ should have a "higher certainty" in scenario 1 (and thus be less likely to be picked by active learning), because its two most likely antecedents both imply the same clustering, whereas in scenario 2, picking $y_{1}$ vs. $y_{2}$ results in a different downstream clustering. Thus, rather than simply using the raw probability that $i$ refers to a particular antecedent, we use the probability that $i$ belongs to a certain cluster. This implies modelling $y_{1}$ and $y_{2}$ "jointly" in scenario 1, and separately in scenario 2.
|
| 51 |
+
|
| 52 |
+
Formally, we compute the probability that a span $i$ belongs in a cluster $C$ by summing $P(\operatorname{ant}(i) = y)$
|
| 53 |
+
|
| 54 |
+
for all $y$ that belong to cluster $C$, since $i$ having an antecedent in a cluster necessarily implies that $i$ is in that cluster as well. This allows us to convert the predicted antecedent probabilities to in-cluster probabilities:
|
| 55 |
+
|
| 56 |
+
$$
|
| 57 |
+
P(i \in C) = \sum_{y \in C \cap \mathcal{Y}(i)} P(\operatorname{ant}(i) = y) \tag{1}
|
| 58 |
+
$$
|
| 59 |
+
|
| 60 |
+
Similarly, for query-by-committee, we aggregate predictions such that we have one vote per cluster rather than one vote per antecedent:
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
V(i \in C) = \sum_{y \in C \cap \mathcal{Y}(i)} V(A(i) = y) \tag{2}
|
| 64 |
+
$$
|
| 65 |
+
|
| 66 |
+
where $V(A(i) = y)\in \{0,1,\dots ,\mathcal{M}\}$ refers to the number of models that voted $y$ to be the antecedent of $i$ .
|
| 67 |
+
|
| 68 |
+
The cluster information $(y\in C\cap \mathcal{Y}(i))$ we use in Equations 1 and 2 is computed from a combination of model-predicted labels and labels queried through active learning. Antecedents which were not predicted to be in clusters are treated as singleton clusters.
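To make Equations 1 and 2 concrete, here is a minimal dictionary-based sketch of ours, under assumed data structures: `ant_probs` maps each candidate antecedent of a span to its predicted probability, `cluster_of` maps spans to cluster ids (singletons get their own id), and `committee_predictions` lists each committee member's predicted antecedent.

```python
from collections import defaultdict

def cluster_probabilities(ant_probs, cluster_of):
    """Equation 1: P(i in C) sums P(ant(i) = y) over candidates y in C."""
    p = defaultdict(float)
    for y, prob in ant_probs.items():
        p[cluster_of[y]] += prob
    return dict(p)

def cluster_votes(committee_predictions, cluster_of):
    """Equation 2: V(i in C) counts committee members whose predicted
    antecedent for i falls in cluster C."""
    v = defaultdict(int)
    for y in committee_predictions:
        v[cluster_of[y]] += 1
    return dict(v)
```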
|
| 69 |
+
|
| 70 |
+
Additionally, to respect user annotations during the selection process, we must keep track of all prior annotations. To do this, we use the concept of must-link (ML; if two mentions are judged coreferent) and cannot-link (CL; if two mentions are judged non-coreferent) relations between mentions introduced by Sachan et al. (2015), and adapt it for our purposes. Specifically, in our discrete setting, we build the links as follows: if the user deems the pair coreferent, it is added to ML. Otherwise, it is added to CL, while the user-corrected pair (from the second question) is always added to ML.
|
| 71 |
+
|
| 72 |
+
In addition, we use these links to guide how we select the next mention to query. For example, if a CL relation exists between spans $m_{1}$ and $m_{2}$, we will be less likely to query for $m_{1}$, since we are slightly more certain about what $m_{1}$'s antecedent should be (not $m_{2}$). Formally, we revise probabilities and votes $P(i \in C)$ and $V(i \in C)$ in accordance with our link relations, which affects the selector uncertainty scores. $^{3}$
|
| 73 |
+
|
| 74 |
+
Finally, following Sachan et al. (2015), we impose transitivity constraints, which allow us to model links beyond what has been explicitly
|
| 75 |
+
|
| 76 |
+
pointed out during annotation:
|
| 77 |
+
|
| 78 |
+
$$
|
| 79 |
+
ML(m_i, m_j) \wedge ML(m_j, m_k) \rightarrow ML(m_i, m_k) \tag{3}
|
| 80 |
+
$$
|
| 81 |
+
|
| 82 |
+
$$
|
| 83 |
+
CL(m_i, m_j) \wedge ML(m_i, m_k) \rightarrow CL(m_j, m_k) \tag{4}
|
| 84 |
+
$$
|
| 85 |
+
|
| 86 |
+
However, recomputing these closures after each active learning iteration can be extremely inefficient. Instead, we build up the closure incrementally by adding only the minimum number of necessary links to maintain the closure every time a new link is added.
|
| 87 |
+
|
| 88 |
+
We experiment with the following clustered selection techniques:
|
| 89 |
+
|
| 90 |
+
Clustered entropy We compute entropy over cluster probabilities and select the mention with the highest clustered entropy:
|
| 91 |
+
|
| 92 |
+
$$
|
| 93 |
+
E(i) = -\sum_{C \in \text{all clusters}} P(i \in C) \cdot \log P(i \in C) \tag{5}
|
| 94 |
+
$$
|
| 95 |
+
|
| 96 |
+
Where $P(i \in C)$ is defined as in Equation 1.
|
| 97 |
+
|
| 98 |
+
Clustered query-by-committee We train $\mathcal{M}$ models (with different random seeds) and select the mention with the highest cluster vote entropy:
|
| 99 |
+
|
| 100 |
+
$$
|
| 101 |
+
\mathrm{VE}(i) = -\sum_{C \in \text{all clusters}} \frac{V(i \in C)}{\mathcal{M}} \cdot \log \frac{V(i \in C)}{\mathcal{M}} \tag{6}
|
| 102 |
+
$$
|
| 103 |
+
|
| 104 |
+
Using votes counted over clusters, as defined in Equation 2.
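Both entropy-based selectors then reduce to a few lines over these aggregated quantities. Again a sketch of ours, consuming the outputs of the `cluster_probabilities` and `cluster_votes` helpers above:

```python
import math

def clustered_entropy(p_in_cluster):
    """Equation 5, over the in-cluster distribution of one span."""
    return -sum(p * math.log(p) for p in p_in_cluster.values() if p > 0)

def vote_entropy(v_in_cluster, num_models):
    """Equation 6, over the committee's per-cluster vote fractions."""
    return -sum((v / num_models) * math.log(v / num_models)
                for v in v_in_cluster.values() if v > 0)

# The selector queries the span whose score is highest, e.g.:
# next_span = max(spans, key=lambda i: clustered_entropy(probs_of[i]))
```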
|
| 105 |
+
|
| 106 |
+
Least coreferent clustered mentions / Most coreferent unclustered mentions (LCC/MCU) We aim to select a subset of spans for which the model was least confident in its prediction. For each span $i$ which was assigned a cluster $C_i$, we compute a score $s_C(i) = P(i \in C_i)$, and choose $n$ spans with the smallest $s_C(i)$. For each singleton $j$, we give an "unclustered" score $s_U(j) = \max_{C \in \text{all clusters}} P(j \in C)$ and choose $m$ spans with the largest $s_U(j)$. $P(i \in C_i)$ and $P(j \in C)$ are computed with Equation 1.
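A sketch of LCC/MCU selection under the same assumed structures (`assigned_cluster[i]` is `None` for singletons; `cluster_prob[i]` is the Equation 1 output for span `i`). This is our rendering, not the released implementation:

```python
def lcc_mcu(spans, cluster_prob, assigned_cluster, n, m):
    """Pick n least-coreferent clustered spans and m most-coreferent
    unclustered spans."""
    clustered = [i for i in spans if assigned_cluster[i] is not None]
    singletons = [j for j in spans if assigned_cluster[j] is None]
    # LCC: smallest P(i in C_i) among clustered spans.
    lcc = sorted(clustered,
                 key=lambda i: cluster_prob[i][assigned_cluster[i]])[:n]
    # MCU: largest max_C P(j in C) among singletons.
    mcu = sorted(singletons,
                 key=lambda j: max(cluster_prob[j].values(), default=0.0),
                 reverse=True)[:m]
    return lcc + mcu
```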
|
| 107 |
+
|
| 108 |
+
# 5 Evaluation
|
| 109 |
+
|
| 110 |
+
We compare discrete versus pairwise annotation using the English CoNLL-2012 coreference dataset (Pradhan et al., 2012). Following Sachan et al. (2015), we conduct experiments where user judgments are simulated from gold labels.
|
| 111 |
+
|
| 112 |
+

|
| 113 |
+
Figure 2: Comparing various selectors for discrete versus pairwise annotation (dashed orange line).
|
| 114 |
+
|
| 115 |
+
<table><tr><td>Set</td><td># labels/doc</td><td>Active learning iteration</td><td># docs</td><td># ?s</td></tr><tr><td rowspan="4">A</td><td>20</td><td>1st (retrained 0x)</td><td>5</td><td>15</td></tr><tr><td>20</td><td>7th (retrained 6x)</td><td>5</td><td>15</td></tr><tr><td>200</td><td>2nd (retrained 1x)</td><td>5</td><td>15</td></tr><tr><td>200</td><td>8th (retrained 7x)</td><td>5</td><td>15</td></tr><tr><td rowspan="4">B</td><td>20</td><td>2nd (retrained 1x)</td><td>5</td><td>15</td></tr><tr><td>20</td><td>8th (retrained 7x)</td><td>5</td><td>15</td></tr><tr><td>200</td><td>1st (retrained 0x)</td><td>5</td><td>15</td></tr><tr><td>200</td><td>7th (retrained 6x)</td><td>5</td><td>15</td></tr></table>
|
| 117 |
+
|
| 118 |
+
Annotation time estimation To compare annotation times between pairwise and discrete questions, we collected eight 30-minute sessions from 7 in-house annotators with a background in NLP. Annotators were asked to answer as many instances as they could during those 30 minutes. We additionally asked 1 annotator to annotate only discrete questions for 30 minutes. To be as representative as possible, the active learning queries for these experiments were sampled from various stages of active learning (see Table 1). On average, an annotator completed about 67 questions in a single session, half of which were answered negatively, requiring the additional discrete question. Overall, these estimates rely on 826 annotated answers. Our annotation interface is publicly available, $^{4}$ see examples in Figure 5 in the Appendix.
|
| 119 |
+
|
| 120 |
+
Timing results are shown in Table 2. Answering
|
| 121 |
+
|
| 122 |
+
Table 1: Timing experiments sampling. For each of the 2 datasets, we collected 60 total active learning questions from 20 documents. We collected 5 documents and 15 questions for each of the 4 categories: trained with many/few labels per document, and early/late in active learning process. The 15 questions were sampled randomly from within an iteration.
|
| 123 |
+
|
| 124 |
+
<table><tr><td>Question type</td><td>Avg. time per question</td></tr><tr><td>Initial question</td><td>15.96s</td></tr><tr><td>Follow-up question</td><td>15.57s</td></tr><tr><td>Only follow-up questions</td><td>28.01s</td></tr></table>
|
| 125 |
+
|
| 126 |
+
Table 2: Average annotation time for the initial pairwise question, the discrete followup question, and the discrete question on its own.
|
| 127 |
+
|
| 128 |
+
the discrete question after the initial pairwise question takes about the same time as answering the first question (about $16s$ ). Furthermore, answering only discrete questions took $28.01s$ per question, which confirmed that having an initial pairwise question indeed saves annotator time if answered positively.
|
| 129 |
+
|
| 130 |
+
In the following experiments, we use these measurements to calibrate pairwise and discrete followup questions when computing total annotation times.
|
| 131 |
+
|
| 132 |
+
Baselines We implement a baseline for pairwise annotation with an entropy selector. We also implement two discrete annotation baselines with random selection. The partially-labelled baseline follows the standard active learning training loop, but selects the next mention to label at random. The fully-labelled baseline creates a subset of the training data by taking as input an annotation time $t$ and selecting at random a set of documents that the user can fully label in $t$ hours using ONLY discrete annotation. By comparing the fully-labelled baseline against our active learning results, we can determine whether active learning is more effective than labelling documents exhaustively.
|
| 133 |
+
|
| 134 |
+
Hyperparameters We use the model hyperparameters from the AllenNLP implementation of Lee et al. (2017). We train up to 20 epochs with a patience of 2 before adding labels. After all documents have been added, we retrain from scratch. We use a query-by-committee of $\mathcal{M} = 3$ models, due to memory constraints. For LCC/MCU, given $L$ annotations per document, we split the annotations equally between clusters and singletons.
|
| 135 |
+
|
| 136 |
+
Results Figure 2 plots the performance of discrete annotation with the various selectors from Section 4, against the performance of pairwise annotation, calibrated according to our timing experiments. In all figures, we report MUC, B$^3$, and CEAF$_e$ as an averaged F1 score.
|
| 137 |
+
|
| 138 |
+
The three non-random active learning frameworks outperform the fully-labelled baseline, showing
|
| 139 |
+
|
| 140 |
+

|
| 141 |
+
Figure 3: Mention detection accuracy (in document-micro F1) for pairwise versus discrete selection per human annotation time.
|
| 142 |
+
|
| 143 |
+
that active learning is more effective for coreference resolution when the annotation budget is limited.
|
| 144 |
+
|
| 145 |
+
Most notably, Figure 2 shows that every nonrandom discrete selection protocol outperforms pairwise annotation. Where the gap in performance is the largest ( $>15$ minutes per document), we consistently improve by $\sim 4\%$ absolute $F1$ over pairwise selection.
|
| 146 |
+
|
| 147 |
+
# 6 Analysis
|
| 148 |
+
|
| 149 |
+
A major reason discrete annotation outperforms the pairwise baseline is sheer annotation volume: the number of pairwise annotations needed to fully label a document is much larger than the number of discrete annotations. In an average development document with 201 candidate mentions, the number of pairwise queries needed to fully label a document is 15,050, while the maximum number of discrete queries is only 201 (i.e., asking for the antecedent of every mention). Thus, the average document can be fully annotated via discrete annotation in only $2.6\%$ of the time it takes to fully label it with pairwise annotation, suggesting that our framework is also a viable exhaustive annotation scheme.
|
| 150 |
+
|
| 151 |
+
Further analysis shows that the improvement in discrete selection stems in part from better use of annotation time for mention detection accuracy (Figure 3) and pronoun resolution (Figure 4), in which we measure performance only on clusters with pronouns, as identified automatically by the spaCy tagger (Honnibal and Montani, 2017).
|
| 152 |
+
|
| 153 |
+
Finally, Table 3 shows ablations on our discrete annotation framework, showing the contribution of each component of our paradigm.
|
| 154 |
+
|
| 155 |
+

|
| 156 |
+
Figure 4: Pronoun resolution accuracy (average F1) for pairwise versus discrete selection per human annotation time.
|
| 157 |
+
|
| 158 |
+
<table><tr><td></td><td>F1 score</td></tr><tr><td>Discrete annotation</td><td>57.08</td></tr><tr><td>—clustered probabilities</td><td>56.49</td></tr><tr><td>—incremental link closures</td><td>56.98</td></tr><tr><td>Pairwise annotation</td><td>54.27</td></tr></table>
|
| 159 |
+
|
| 160 |
+
Table 3: Ablations over the different model elements, at a single point ( $\sim$ 315 annotation hours). Entropy selector was used for all experiments.
|
| 161 |
+
|
| 162 |
+
# 7 Discussion and Conclusion
|
| 163 |
+
|
| 164 |
+
We presented discrete annotation, an attractive alternative to pairwise annotation in active learning of coreference resolution in low-resource domains. By adding a simple question to the annotation interface, we obtained significantly better models per human-annotation hour. In addition, we introduced a clustering technique which further optimizes sample selection during the annotation process. More broadly, our work suggests that improvements in annotation interfaces can elicit responses which are more efficient in terms of the obtained performance versus the invested annotation time.
|
| 165 |
+
|
| 166 |
+
# Acknowledgements
|
| 167 |
+
|
| 168 |
+
We would like to thank Christopher Clark, Terra Blevins, and the anonymous reviewers for their helpful feedback, and Aaron Jaech, Mason Kamb, Madian Khabsa, Kaushal Mangipudi, Nayeon Lee, and Anisha Uppugonduri for their participation in our timing experiments.
|
| 169 |
+
|
| 170 |
+
# References
|
| 171 |
+
|
| 172 |
+
Pradeep Dasigi, Nelson F. Liu, Ana Marasovic, Noah A. Smith, and Matt Gardner. 2019. Quoref: A reading comprehension dataset with questions requiring coreferential reasoning. Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP).
|
| 173 |
+
Matt Gardner, Joel Grus, Mark Neumann, Oyvind Tafjord, Pradeep Dasigi, Nelson F. Liu, Matthew E. Peters, Michael Schmitz, and Luke S. Zettlemoyer. 2018. AllenNLP: A deep semantic natural language processing platform. CoRR, abs/1803.07640.
|
| 174 |
+
Dan Garrette and Jason Baldridge. 2013. Learning a part-of-speech tagger from two hours of annotation. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 138-147, Atlanta, Georgia. Association for Computational Linguistics.
|
| 175 |
+
Caroline Gasperin. 2009. Active learning for anaphora resolution. In Proceedings of the NAACL HLT 2009 Workshop on Active Learning for Natural Language Processing, HLT '09, pages 1-8, Stroudsburg, PA, USA. Association for Computational Linguistics.
|
| 176 |
+
Matthew Honnibal and Ines Montani. 2017. spaCy 2: Natural language understanding with Bloom embeddings, convolutional neural networks and incremental parsing. To appear.
|
| 177 |
+
Mahnoosh Kholghi, Laurianne Sitbon, Guido Zuccon, and Anthony Nguyen. 2015. Active learning: a step towards automating medical concept extraction. Journal of the American Medical Informatics Association, 23(2):289-296.
|
| 178 |
+
Florian Laws, Florian Heimerl, and Hinrich Schütze. 2012. Active learning for coreference resolution. In Proceedings of the 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 508-512, Montréal, Canada. Association for Computational Linguistics.
|
| 179 |
+
Kenton Lee, Luheng He, Mike Lewis, and Luke S. Zettlemoyer. 2017. End-to-end neural coreference resolution. ArXiv, abs/1707.07045.
|
| 180 |
+
Sameer Pradhan, Alessandro Moschitti, Nianwen Xue, Olga Uryupina, and Yuchen Zhang. 2012. CoNLL-2012 shared task: Modeling multilingual unrestricted coreference in OntoNotes. In Joint Conference on EMNLP and CoNLL-Shared Task, pages 1-40. Association for Computational Linguistics.
|
| 181 |
+
Mrinmaya Sachan, Eduard Hovy, and Eric P. Xing. 2015. An active learning approach to coreference resolution. In Proceedings of the 24th International Conference on Artificial Intelligence, IJCAI'15, pages 1312-1318. AAAI Press.
|
| 182 |
+
|
| 183 |
+
Burr Settles. 2010. Active learning literature survey. University of Wisconsin, Madison, 52(55-66):11.
|
| 184 |
+
Gabriel Stanovsky, Noah A. Smith, and Luke Zettlemoyer. 2019. Evaluating gender bias in machine translation. In ACL, page (to appear), Florence, Italy. Association for Computational Linguistics.
|
| 185 |
+
A. R. Syed, A. Rosenberg, and E. Kislal. 2016. Supervised and unsupervised active learning for automatic speech recognition of low-resource languages. In 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5320-5324.
|
| 186 |
+
A. R. Syed, A. Rosenberg, and M. Mandel. 2017. Active learning for low-resource speech recognition: Impact of selection size and language modeling data. In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5315-5319.
|
| 187 |
+
Shanheng Zhao and Hwee Tou Ng. 2014. Domain adaptation with active learning for coreference resolution. In Proceedings of the 5th International Workshop on Health Text Mining and Information Analysis (Louhi), pages 21-29, Gothenburg, Sweden. Association for Computational Linguistics.
|
| 188 |
+
|
| 189 |
+
# A Appendix
|
| 190 |
+
|
| 191 |
+
# A.1 Timing Experiment Details and Computations.
|
| 192 |
+
|
| 193 |
+
In order to properly calibrate the results from discrete and pairwise querying, we conducted experiments (eight 30-minute sessions) to time how long annotators take to answer discrete and pairwise questions. See Figure 5 for the interface we designed for our experiments.
|
| 194 |
+
|
| 195 |
+
The questions we ask for the experiment are all sampled from real queries from full runs of our active learning simulations. To obtain representative times, we sampled a diverse selection of active learning questions: at various stages of active learning (first iteration before retraining vs. after retraining $n$ times) and with various numbers of annotations per document (20 vs. 200). For each document, we randomly selected between 1 and 5 questions (of the total 20 or 200) to ask the annotator. Full details on how we sampled our queries can be found in Table 1. Note that we divided our samples into two datasets. We ran four 30-minute sessions with Dataset A before Dataset B and four 30-minute sessions with Dataset B before Dataset A, for a total of eight 30-minute sessions across 7 annotators (1 annotator completed a 1-hour session).
|
| 196 |
+
|
| 197 |
+
Since pairwise annotation is the same as answering only the initial question under the discrete setting, we run a single discrete experiment for each annotation session and use the time taken to answer an initial question as a proxy for pairwise annotation time. Our results show that answering the initial question took an average of $15.96s$ whereas answering the follow-up question took $15.57s$ . Thus, we derive the following formulas to compute the time it takes for pairwise and discrete annotation:
|
| 198 |
+
|
| 199 |
+
$$
|
| 200 |
+
t = 15.96p \tag{7}
|
| 201 |
+
$$
|
| 202 |
+
|
| 203 |
+
$$
|
| 204 |
+
t = 15.96 d_{c} + 15.57 d_{nc} \tag{8}
|
| 205 |
+
$$
|
| 206 |
+
|
| 207 |
+
where $p$ is the number of pairwise instances, and $d_{c}$, $d_{nc}$ are the numbers of discrete instances for which the initial pair was judged "coreferent" ($d_{c}$) and "not coreferent" ($d_{nc}$), respectively. We also compute the number of pairwise examples $p$ we can query in the same time it takes to query $d_{c} + d_{nc}$ discrete examples:
|
| 208 |
+
|
| 209 |
+
$$
|
| 210 |
+
15.96p = 15.96 d_{c} + 15.57 d_{nc}
|
| 211 |
+
$$
|
| 212 |
+
|
| 213 |
+
$$
|
| 214 |
+
p = d_{c} + 0.976 d_{nc} \tag{9}
|
| 215 |
+
$$
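In code, the calibration reduces to a direct transcription of Equations 7 to 9 (the function names are ours):

```python
def pairwise_time(p):                 # Equation 7
    return 15.96 * p

def discrete_time(d_c, d_nc):         # Equation 8
    return 15.96 * d_c + 15.57 * d_nc

def pairwise_equivalent(d_c, d_nc):   # Equation 9
    return d_c + 0.976 * d_nc

# Example: 100 discrete queries, half judged coreferent, cost about as
# much annotator time as ~98.8 pairwise queries.
assert round(pairwise_equivalent(50, 50), 1) == 98.8
```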
|
| 216 |
+
|
| 217 |
+
We additionally conducted a single 30-minute experiment to determine how long it takes
|
| 218 |
+
|
| 219 |
+
to answer only discrete questions (without the initial pairwise step). We find that it takes $28.01s$ per question under the only-discrete setting. This is longer than the time it takes to answer a pairwise question, thus confirming that having an initial pairwise question indeed saves time if the pair is coreferent. Moreover, this also shows that answering the initial pairwise question significantly helps with answering the follow-up discrete question.
|
| 220 |
+
|
| 221 |
+
# A.2 Additional Model Adaptations
|
| 222 |
+
|
| 223 |
+
Adapting Link Relations for our Model We use must-link and cannot-link relations between mentions to guide our active learning selector. We revise probabilities and model outputs (from which the model computes uncertainty scores for entropy, QBC, and LCC/MCU) in accordance with the following rules:
|
| 224 |
+
|
| 225 |
+
1. Clustered entropy. For every $CL(a, b)$ relationship, we set $P(\mathrm{ant}(a) = b) = 0$ and re-normalize the probabilities of all other candidate antecedents. This decreases the probability that the active learning selector chooses $a$. Moreover, for every $ML(a, b)$ relationship, we set $P(\mathrm{ant}(a) = b) = 1$ and $P(\mathrm{ant}(a) = c) = 0$ for all $c \neq b$. If there are multiple $ML$ relationships involving $a$, we choose only one of $a$'s antecedents to set to 1 (to maintain the integrity of the probability distribution). This guarantees that the active learning selector will never select $a$, as any ML link out of $a$ means we have already queried for $a$ (see the sketch after this list).
|
| 226 |
+
2. Clustered query-by-committee. To ensure we do not choose a mention we have already queried for, after each user judgment, for every $ML(a,b)$ relation, we set $V(A(a) = b) = \mathcal{M}$ , and $V(A(a) = c) = 0$ for all other $c \neq b$ . Moreover, for every $CL(a,b)$ relation, we set $V(A(a) = b) = 0$ , which decreases the vote entropy of $a$ , making it less likely for the selector to choose $a$ .
|
| 227 |
+
3. LCC/MCU. We revise the probabilities in the same way as in clustered entropy and add the constraint that, when choosing MCU spans $j$ , we disregard those that already have probability 1 (signifying that we have already queried for them).
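The probability revision in rule 1 might look as follows. This is our own sketch; representing ML and CL as sets of (span, antecedent) pairs is an assumption:

```python
def revise_probabilities(ant_probs, span, ML, CL):
    """Rule 1: zero out cannot-linked antecedents and renormalize; if the
    span already has a must-linked antecedent, put all mass on one of them."""
    ml_targets = [y for y in ant_probs if (span, y) in ML]
    if ml_targets:
        chosen = ml_targets[0]        # already queried: certainty 1
        return {y: (1.0 if y == chosen else 0.0) for y in ant_probs}
    revised = {y: (0.0 if (span, y) in CL else p)
               for y, p in ant_probs.items()}
    z = sum(revised.values())
    return {y: p / z for y, p in revised.items()} if z > 0 else revised
```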
|
| 228 |
+
|
| 229 |
+
Incremental Closures Algorithm We introduce an algorithm to compute link closures incrementally. Instead of re-computing and re-adding the
|
| 230 |
+
|
| 231 |
+

|
| 232 |
+
Figure 5: Timing experiments interface. Top: The initial pairwise question. Bottom: The user is presented with the discrete question when they click "No". They are asked to select the appropriate tokens in the text representing the first occurrence of the yellow entity in the text.
|
| 233 |
+
|
| 234 |
+
entire set of closures (based on a set of all prior human annotations that we keep track of) each time we query for a new mention, we add the minimum set of necessary links. See Algorithm 1.
|
| 235 |
+
|
| 236 |
+
To determine how much time our incremental closure algorithm saves over recomputing closures from scratch, we simulated annotations on a single document with 1600 mentions, and recorded how long it took to re-compute the closure after each annotation. Our experiments show that recomputing from scratch takes progressively longer as more labels get added: at 1600 labels, our incremental algorithm is 556 times faster than recomputing from scratch (1630ms vs. 2.93ms).
|
| 237 |
+
|
| 238 |
+
Figure 6 plots the runtime of our incremental closure algorithm ("incremental closure") against the run-time of recomputing closures from scratch ("closure") using Equations 3 and 4. In the latter case, we keep track of the set of user-added edges which we update after each annotation, and re-compute the closures from that set.
|
| 239 |
+
|
| 240 |
+
# A.3 Additional Analysis
|
| 241 |
+
|
| 242 |
+
Computing the time to fully-label a document under discrete and pairwise annotation. First, we compute the maximum number of pairwise questions we can ask. We consider the setup of Lee et al. (2017)'s model. This model considers only the spans with the highest mention scores (the "top spans"), and considers at most $K$ antecedents per top span. Thus, for a document with $m$ top spans, we can ask up to
|
| 243 |
+
|
| 244 |
+
$$
|
| 245 |
+
\frac{K(K-1)}{2} + (m - K)K \tag{10}
|
| 246 |
+
$$
|
| 247 |
+
|
| 248 |
+
pairwise questions. The first term $\frac{K(K - 1)}{2}$ comes from considering the first $K$ spans in the document. For each of these spans $i = 1\cdots K$, we can ask about the first $i - 1$ spans. The second term $(m - K)K$ comes from considering the spans after the $K$-th span. For each of these $m - K$ spans in the document, we can only consider up to $K$ antecedents. Using statistics for the average document $(m = 201)$ and the standard hyper-parameter settings $(K = 100)$, we plug into Equation 10 to
|
| 249 |
+
|
| 250 |
+

|
| 251 |
+
Figure 6: Under each closure algorithm, the time to compute the closure after the next annotation is added, as # of existing annotations increases.
|
| 252 |
+
|
| 253 |
+
get 15,050 overall pairwise questions needed to fully label a document (in worst-case). Meanwhile, the maximum number of discrete questions we can ask is only 201 (i.e., asking for the antecedent of every mention). Using timing Equations 7 and 8, we compute that it takes at most $6337.53s$ to answer 201 discrete questions in the worst-case scenario, and $240198s$ to answer 15050 pairwise questions. Thus, in the worst-case scenario for both discrete and pairwise selection, discrete selection will take only $2.64\%$ of the time it takes pairwise selection to fully label a document.
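These counts and worst-case times can be checked directly. The snippet below is a transcription of Equation 10 and the timing formulas; the variable names are ours:

```python
K, m = 100, 201                                # max antecedents, avg. top spans
pairwise_max = K * (K - 1) // 2 + (m - K) * K  # Equation 10
discrete_max = m                               # one antecedent query per top span
assert pairwise_max == 15050 and discrete_max == 201

# Worst-case times via Equations 7 and 8 (every discrete pair judged
# "not coreferent", so every query needs the follow-up question):
assert round(15.96 * pairwise_max, 2) == 240198.0
assert round((15.96 + 15.57) * discrete_max, 2) == 6337.53
```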
|
| 254 |
+
|
| 255 |
+
Quantifying "Information Gain" from Discrete and Pairwise Annotation. Let $\overline{D_U}$ be the set of training documents we are annotating for in a given round of active learning. To better quantify how much information discrete and pairwise annotation can supply in the same amount of time, we define $\Delta F1$ as the change in the $F1$ score on $\overline{D_U}$, before and after model predictions are supplemented with user annotation.
|
| 256 |
+
|
| 257 |
+
Figure 7 shows average $\Delta F1$ as annotation time increases for discrete and pairwise annotation. Across the 10 annotation times we recorded, discrete annotation results in an average $\Delta F1$ that is more than twice that of pairwise, in the same annotation time.
|
| 258 |
+
|
| 259 |
+
# A.4 Hyperparameters
|
| 260 |
+
|
| 261 |
+
Model. We preserve the hyperparameters from the AllenNLP implementation of Lee et al. (2017)'s model. The AllenNLP implementation mostly maintains the original hyperparameters, except it sets the maximum number of antecedents considered to $K = 100$ , and excludes speaker features
|
| 262 |
+
|
| 263 |
+

|
| 264 |
+
Figure 7: Comparing F1 score improvement on $\overline{D_U}$ for discrete vs. pairwise annotation.
|
| 265 |
+
|
| 266 |
+
and variational dropout, due to machine memory limitations.
|
| 267 |
+
|
| 268 |
+
Training. We use a 700/2102 fully-labelled/unlabelled initial split of the training data, and actively label 280 documents at a time. We train to convergence each round. Before all documents have been added, we train up to 20 epochs with a patience of 2 before we add more training documents. After all documents have been added, we retrain from scratch and use the original training hyperparameters from Lee et al. (2017).
|
| 269 |
+
|
| 270 |
+
Operators. For query-by-committee, we use a committee of $\mathcal{M} = 3$ models. We were not able to experiment with more due to memory constraints.
|
| 271 |
+
|
| 272 |
+
For LCC/MCU, given $L$ annotations per document, we allocate $n$ annotations to least-coreferent clustered mentions and the remaining $m$ to most-coreferent unclustered mentions. We use $n = \min(L/2, \text{number of clustered spans})$ , and $m = \min(L - n, \text{number of un-clustered spans})$ .
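As a two-line sketch (ours), with integer division standing in for $L/2$:

```python
def split_budget(L, num_clustered, num_singletons):
    n = min(L // 2, num_clustered)   # LCC share
    m = min(L - n, num_singletons)   # MCU gets the remainder
    return n, m

# e.g. split_budget(20, 8, 100) == (8, 12): unused LCC budget flows to MCU.
```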
|
| 273 |
+
|
| 274 |
+
# A.5 Active Learning Training Setup Full Details
|
| 275 |
+
|
| 276 |
+
In our active learning setup, we begin by training our model on a 700-document subset of the full training set. We discard the labels of the remaining 2102 documents. In each round of active learning, we choose 280 unlabelled documents, and query up to $Q$ annotations per document. We then add these documents to the labelled set and continue training our model on this set (now with new documents). After all documents have been labelled, we retrain our model on the full document set from scratch, resetting all model and trainer parameters.
|
| 277 |
+
|
| 278 |
+
In Algorithm 2, we show our main training loop for active learning using discrete selection. This is the training loop we use for our clustered entropy and LCC/MCU selectors, and our partially-labelled random baseline. In Algorithm 3, we modify that loop for the clustered query-by-committee selector.
|
| 279 |
+
|
| 280 |
+
In Algorithm 1, we show our incremental closures algorithm, which builds up the transitive closure incrementally by adding only the minimum number of necessary links to maintain the closure each time a new link is added.
|
| 281 |
+
|
| 282 |
+
# Algorithm 1: Incremental Link Closures Algorithm
|
| 283 |
+
|
| 284 |
+
Let $(a,b) =$ link pair being added, $A = a$'s old cluster before the pair is added, $B = b$'s old cluster before the pair is added, $\overline{A} =$ set of elements $a$ has a CL relationship to before the pair is added, $\overline{B} =$ set of elements $b$ has a CL relationship to before the pair is added.
|
| 285 |
+
|
| 286 |
+
1. If pair $(a, b)$ was added to must-link, both must-link and cannot-link need to be updated.
|
| 287 |
+
|
| 288 |
+
First, resolve the MLs by adding an ML relationship between every element in $A$ and every element in $B$:
|
| 289 |
+
|
| 290 |
+
$$
|
| 291 |
+
\forall a', b' \quad (ML(a, a') \wedge ML(b, b')) \rightarrow (ML(a, b') \wedge ML(a', b) \wedge ML(a', b'))
|
| 292 |
+
$$
|
| 293 |
+
|
| 294 |
+
Next, resolve the CLs by adding a CL relationship between every element of $A$ and $\overline{B}$ , and every element of $B$ and $\overline{A}$ :
|
| 295 |
+
|
| 296 |
+
$$
|
| 297 |
+
\forall a', \hat{b} \quad (ML(a, a') \wedge CL(b, \hat{b})) \rightarrow (CL(a, \hat{b}) \wedge CL(a', \hat{b}))
|
| 298 |
+
$$
|
| 299 |
+
|
| 300 |
+
$$
|
| 301 |
+
\forall b', \hat{a} \quad (ML(b, b') \wedge CL(a, \hat{a})) \rightarrow (CL(b, \hat{a}) \wedge CL(b', \hat{a}))
|
| 302 |
+
$$
|
| 303 |
+
|
| 304 |
+
2. If pair $(a, b)$ was added to cannot-link, only cannot-link needs to be updated. Add a CL relationship between every element of $A$ and every element of $B$ :
|
| 305 |
+
|
| 306 |
+
$$
|
| 307 |
+
\forall a', b' \quad (ML(a, a') \wedge ML(b, b')) \rightarrow (CL(a, b') \wedge CL(a', b) \wedge CL(a', b'))
|
| 308 |
+
$$
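A compact sketch of Algorithm 1 follows. This is our rendering; representing must-link clusters as shared Python sets and cannot-links as a set of frozenset pairs is an assumption:

```python
def add_link(kind, a, b, ml_cluster, cl):
    """Incrementally maintain the ML/CL closure when link (a, b) is added.
    ml_cluster[x] is the set of mentions must-linked to x (initially {x});
    cl is a set of frozenset({x, z}) cannot-link pairs."""
    A, B = ml_cluster[a], ml_cluster[b]
    if kind == "ML":
        merged = A | B
        # Everything any merged element was cannot-linked to is now
        # cannot-linked to every merged element.
        outside = {z for pair in cl if pair & merged for z in pair - merged}
        cl |= {frozenset((x, z)) for x in merged for z in outside}
        for x in merged:
            ml_cluster[x] = merged
    else:  # "CL": cross-link every element of A with every element of B.
        cl |= {frozenset((x, z)) for x in A for z in B}

# Usage: ml_cluster = {m: {m} for m in mentions}; cl = set()
```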
|
| 309 |
+
|
| 310 |
+
# Algorithm 2: Training loop for active learning
|
| 311 |
+
|
| 312 |
+
$D_F = \{\text{fully-labelled docs}\}$ , $D_U = \{\text{unlabelled docs}\}$ , $D_A = \{\text{docs labelled through active learning}\}$ , $M = \text{model}$ , $ML = \text{must-link pairs}$ , $CL = \text{cannot-link pairs}$ ;
|
| 313 |
+
|
| 314 |
+
Init: $D_F = \{\text{first 700 docs}\}$ , $D_U = \{\text{remaining docs}\}$ , $D_A = \emptyset$ , $ML = CL = \emptyset$ ;
|
| 315 |
+
|
| 316 |
+
while $D_U$ is not empty do
|
| 317 |
+
|
| 318 |
+
train $M$ to convergence on data $D_F\cup D_A$; $\overline{D_U} = 280$-document subset of $D_U$.
|
| 319 |
+
for $D\in \overline{D_U}$ do
|
| 320 |
+
$\mathcal{P}_D,\mathcal{L}_D,\mathcal{C}_D =$ run $M$ on $D$; $\mathcal{P}_D =$ model-outputted probabilities $= \{P(y = \mathrm{ant}(i))\mid y\in \mathcal{Y}(i), i\in$ top_spans(D)$\}$; $\mathcal{L}_D =$ model-outputted antecedent labels $= \{(i,A(i))\mid i\in$ top_spans(D)$\}$; $\mathcal{C}_D =$ model-outputted clusters from $\mathcal{L}_D$
|
| 321 |
+
while num_queried $<$ num_to_query do
|
| 322 |
+
$m =$ choose next-mention-to-query $(\mathcal{P}_D,\mathcal{C}_D)$ [Section 4]
|
| 323 |
+
$a = \arg\max_{y\in \mathcal{Y}(m)\backslash \epsilon}P(y = \mathrm{ant}(m))$; if user deems $m$ and $a$ coreferent then $ML = ML\cup (a,m)$; $\mathcal{L}_D = \mathcal{L}_D\cup (a,m)$; add $(a,m)$ to $\mathcal{C}_D$.
|
| 324 |
+
else $\hat{a} =$ user-selected antecedent for $m$; $CL = CL\cup (a,m)$; $ML = ML\cup (\hat{a},m)$; $\mathcal{L}_D = (\mathcal{L}_D\backslash (a,m))\cup (\hat{a},m)$; remove $(a,m)$ and add $(\hat{a},m)$ to $\mathcal{C}_D$.
|
| 325 |
+
end
|
| 326 |
+
$ML,CL =$ compute-link-closures; [Algorithm 1]
|
| 327 |
+
$\mathcal{P}_D =$ update-based-on-links(ML,CL); [Section A.2]
|
| 328 |
+
end
|
| 329 |
+
Label $D$ with $\mathcal{C}_D$.
|
| 330 |
+
end
|
| 331 |
+
$D_A = D_A\cup \overline{D_U};D_U = D_U\backslash \overline{D_U};$
|
| 332 |
+
|
| 333 |
+
end
|
| 334 |
+
|
| 335 |
+
# Algorithm 3: Training loop for active learning with QBC selector (Differences from Algorithm 2 are highlighted)
|
| 336 |
+
|
| 337 |
+
```latex
|
| 338 |
+
$D_F = \{$ fully-labelled docs\}, $D_U = \{$ unlabelled docs\}, $D_A = \{$ docs labelled through active learning\}, $\widehat{M} =$ ensemble model of submodels $\{M_1,\dots ,M_{\mathcal{M}}\}$ , $ML =$ must-link pairs, $CL =$ cannot-link pairs; Init: $D_{F} = \{$ first 700 docs\}, $D_U = \{$ remaining docs\}, $D_A = \emptyset$ , $ML = CL = \emptyset$ ;
|
| 339 |
+
while $D_U$ is not empty do
|
| 340 |
+
train all $M_1,\dots ,M_{\mathcal{M}}$ to convergence on data $D_F\cup D_A$; $\overline{D_U} = 280$-document subset of $D_U$
|
| 341 |
+
for $D\in \overline{D_U}$ do
|
| 342 |
+
$\{\mathcal{P}_{D,i}\} ,\{\mathcal{L}_{D,i}\} ,\mathcal{P}_D,\mathcal{L}_D,\mathcal{C}_D = \text{run}\widehat{M}$ on $D$ $\mathcal{P}_{D,i} =$ submodel $i$ 's output probabilities
|
| 343 |
+
$\mathcal{L}_{D,i} =$ submodel $i$ 's output antecedent labels
|
| 344 |
+
$\mathcal{P}_D =$ ensembled (averaged) output probabilities from each submodel
|
| 345 |
+
$\mathcal{L}_D =$ ensembled antecedent labels computed from $\mathcal{P}_D$ $\mathcal{C}_D =$ ensembled clusters computed from $\mathcal{L}_D$
|
| 346 |
+
while num_queried < num_to_query do
|
| 347 |
+
$m =$ choose next-mention-to-query( $\{\mathcal{L}_{D,i}\}, \mathcal{C}_D$ ); [[Section 4]]
|
| 348 |
+
$a = \arg\max_{y\in \mathcal{Y}(m)\backslash \epsilon}P(y = \operatorname {ant}(m))$ ; [[Probabilities from $\mathcal{P}_D$ ]]
|
| 349 |
+
if user deems m and a coreferent then
|
| 350 |
+
$ML = ML\cup (a,m)$ ;
|
| 351 |
+
Add $(a,m)$ to $\mathcal{C}_D$
|
| 352 |
+
else
|
| 353 |
+
$\hat{a} =$ user-selected antecedent for m;
|
| 354 |
+
$CL = CL\cup (a,m)$ ; $ML = ML\cup (\hat{a},m)$ ; Remove $(a,m)$ and add $(\hat{a},m)$ to $\mathcal{C}_D$
|
| 355 |
+
end
|
| 356 |
+
$ML,CL =$ compute-link-closures $(ML,CL)$ ; [[Algorithm 1]]
|
| 357 |
+
$\mathcal{L}_{D,i} =$ update-based-on-links $(ML,CL)$ ; [[Section A.2]]
|
| 358 |
+
end
|
| 359 |
+
Label $D$ with $\mathcal{C}_D$
|
| 360 |
+
end
|
| 361 |
+
$D_A = D_A\cup \overline{D_U}; D_U = D_U\backslash \overline{D_U}$ ;
|
| 362 |
+
```
|
activelearningforcoreferenceresolutionusingdiscreteannotation/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:50033a4d1e9ea7ca50cf5cd0e36260fd80d4f49512d608b76b38decb4c75b8a4
|
| 3 |
+
size 420120
|
activelearningforcoreferenceresolutionusingdiscreteannotation/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:10043a32cdb25a2044f180a902a1189034471ad47a6dab1b8060a64b1ab016ae
|
| 3 |
+
size 478114
|
adaptivecompressionofwordembeddings/616ae366-0520-4bac-92fe-f6a591154b5a_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3a5c5a6482da404988e983742b673d5bc6a036385efed5654cfcedd6c85c9846
|
| 3 |
+
size 67877
|
adaptivecompressionofwordembeddings/616ae366-0520-4bac-92fe-f6a591154b5a_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fc7e92eccb5d1d68c94ad5a43485e20d6778c1683b459937fc11e6cfbb49cb4c
|
| 3 |
+
size 83192
|