| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:27:28.273647Z" |
| }, |
| "title": "Computational Linguistics & Chinese Language Processing Aims and Scope", |
| "authors": [ |
| { |
| "first": "\u5ed6\u5bb6\u8abc", |
| "middle": [ |
| "\uf02a" |
| ], |
| "last": "\u3001\u6797\u4e9e\u5ba3\u3001\u6797\u51a0\u6210\u3001\u5f35\u5bb6\u744b", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Jia-Yi", |
| "middle": [], |
| "last": "Liao", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Ya-Hsuan", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Kuan-Cheng", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Jia-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "jiaweichang.gary@gmail.com" |
| }, |
| { |
| "first": "Ching-Wen", |
| "middle": [], |
| "last": "Hsu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Hsuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Jheng-Long", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "jlwu@gm.scu.edu.tw" |
| }, |
| { |
| "first": "Chao-Chun", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "ccliang@iis.sinica.edu.tw" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Michigan", |
| "location": { |
| "addrLine": "Ann Arbor", |
| "region": "Michigan", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Meng-Tse", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Hsin-Min", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Keh-Yih", |
| "middle": [], |
| "last": "Su", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "kysu@iis.sinica.edu.tw" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Emotion is an important attribute in music information retrieval. Deep learning methods have been widely used in the automatic recognition of music emotion. Most of the studies focus on the audio data, the role of lyrics in music emotion classification remains under-appreciated. Due to the richness of English language resources, most previous studies were based on English lyrics but rarely in Chinese. This study proposes an approach without specific training for the Chinese lyrics", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Emotion is an important attribute in music information retrieval. Deep learning methods have been widely used in the automatic recognition of music emotion. Most of the studies focus on the audio data, the role of lyrics in music emotion classification remains under-appreciated. Due to the richness of English language resources, most previous studies were based on English lyrics but rarely in Chinese. This study proposes an approach without specific training for the Chinese lyrics", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "\u97f3\u6a02\u548c\u4eba\u985e\u60c5\u7dd2\u76f8\u4e92\u5f71\u97ff\uff0c\u5728\u751f\u6d3b\u4e2d\u626e\u6f14\u4e0d\u53ef\u6216\u7f3a\u7684\u89d2\u8272\u3002\u97f3\u6a02\u7684\u641c\u5c0b\u901a\u5e38\u4ee5\u6b4c\u66f2\u6a19\u984c\u3001", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u7dd2\u8ad6 (Introduction)", |
| "sec_num": "1." |
| }, |
| { |
| "text": "\u904e\u53bb\u6587\u672c\u60c5\u7dd2\u5206\u6790\u662f\u4f7f\u7528\u57fa\u65bc\u7d71\u8a08\u7684\u8a5e\u888b\u6a21\u578b\u548c\u975c\u614b\u7279\u5fb5\u7684\u8a5e\u5411\u91cf\u6a21\u578b\u5c07\u6587\u672c\u8f49\u70ba\u5411\u91cf \u7279\u5fb5 (Barry, 2017; Han et al., 2013) \uff0c\u4f46\u9019\u4e9b\u65b9\u6cd5\u6703\u9047\u5230\u7121\u6cd5\u89e3\u8b80\u591a\u7fa9\u8a5e\u7684\u74f6\u9838\u3002\u6b4c\u8a5e\u88ab\u8996 \u70ba\u662f\u6558\u4e8b\u800c\u975e\u5f7c\u6b64\u7368\u7acb\u7684\u53e5\u5b50\uff0c\u9700\u8981\u6355\u6349\u4e0a\u4e0b\u6587\u7684\u4f9d\u8cf4\u95dc\u4fc2\uff0c\u5728\u6b4c\u8a5e\u7684\u97f3\u6a02\u60c5\u7dd2\u5206\u985e\u4efb \u52d9\u4e0a\uff0c\u82e5\u57fa\u65bc\u50b3\u7d71\u8a5e\u5178\u9032\u884c\u6548\u679c\u6709\u9650 (Hu & Downie, 2010; Hu et al., 2009) \uff0cAbdillah et al. (2020) \u904b \u7528 \u80fd \u6355 \u6349 \u6642 \u5e8f\u95dc \u4fc2 \u7684 \u96d9 \u5411\u9577 \u77ed \u671f \u8a18 \u61b6 (Long Short-Term Memory \uff0c LSTM)\u5c0d MoodyLyrics \u8cc7\u6599\u96c6(\u00c7ano & Morisio, 2017b)\u9032\u884c\u6b4c\u8a5e\u7684\u60c5\u7dd2\u5206\u985e\uff0c\u4f46\u905e\u6b78\u67b6\u69cb\u96e3\u4ee5\u5177\u5099 \u5e73\u884c\u904b\u7b97\u7684\u80fd\u529b\u3002Transformer (Vaswani et al., 2017) \u5247\u6539\u8b8a\u904e\u53bb\u5e8f\u5217\u7db2\u8def\u7684\u505a\u6cd5\uff0c\u81ea\u6ce8\u610f \u529b\u6a5f\u5236\u85c9\u7531 Scaled dot-product Attention \u8b93\u8cc7\u6599\u5f97\u4ee5\u5e73\u884c\u904b\u7b97\uff0c \u8003\u616e\u8a5e\u5728\u4e0d\u540c\u7a7a\u9593\u6620\u5c04 \u7684\u91cd\u8981\u6027\uff0c\u5141\u8a31 BERT (Devlin et al., 2018) Agrawal, Y., Shanker, R. G. R., & Alluri, V. (2021). Transformer-based approach towards music emotion recognition from lyrics. arXiv preprint arXiv:2101.02051. Barry, J. (2017) . Sentiment Analysis of Online Reviews Using Bag-of-Words and LSTM Approaches. In AICS, 272-274. Chen, S., Ma, K., & Zheng, Y. (2019) . Med3d: Transfer learning for 3d medical image analysis. arXiv preprint arXiv:1904.00625.", |
| "cite_spans": [ |
| { |
| "start": 42, |
| "end": 55, |
| "text": "(Barry, 2017;", |
| "ref_id": null |
| }, |
| { |
| "start": 56, |
| "end": 73, |
| "text": "Han et al., 2013)", |
| "ref_id": null |
| }, |
| { |
| "start": 155, |
| "end": 174, |
| "text": "(Hu & Downie, 2010;", |
| "ref_id": null |
| }, |
| { |
| "start": 175, |
| "end": 191, |
| "text": "Hu et al., 2009)", |
| "ref_id": null |
| }, |
| { |
| "start": 359, |
| "end": 381, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 469, |
| "end": 490, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": null |
| }, |
| { |
| "start": 651, |
| "end": 667, |
| "text": "Barry, J. (2017)", |
| "ref_id": null |
| }, |
| { |
| "start": 765, |
| "end": 801, |
| "text": "Chen, S., Ma, K., & Zheng, Y. (2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u57fa\u65bcTransformer\u4e4b\u6a21\u578b (Transformer-based Model)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u7d93\u9077\u79fb\u5b78\u7fd2 CVAT \u6a21\u578b\u8207\u672a\u7d93\u9077\u79fb\u5b78\u7fd2 CVAT \u6a21\u578b\u7684\u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u7d50\u679c\uff0c\u5982\u8868 6 \u6240 \u793a\uff0c\u7d93\u9077\u79fb\u5b78\u7fd2\u7684 CVAT \u6a21\u578b\u5728\u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u7684\u6e96\u78ba\u5ea6\u70ba 0.71\uff0c\u6a19\u7c64 Q1 \u548c Q4 \u7684 F1- score \u8f03\u4f4e\uff0c\u5206\u5225\u70ba 0.69 \u548c 0.51\uff0c\u800c Q2 \u548c Q3 \u7684 F1-score \u8f03\u9ad8\uff0c\u5206\u5225\u70ba 0.83 \u548c 0.72\u3002\u672a \u7d93\u9077\u79fb\u5b78\u7fd2\u7684 CVAT \u6a21\u578b\u5728\u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u7684\u6e96\u78ba\u5ea6\u70ba 0.50\uff0c\u540c\u6a23\u662f\u6a19\u7c64 Q1 \u548c Q4 \u7684 F1- score \u8f03\u4f4e\uff0c\u5206\u5225\u70ba 0.41 \u548c 0.29\uff0c\u800c Q2 \u548c Q3 \u7684 F1-score \u8f03\u9ad8\uff0c\u5206\u5225\u70ba 0.64 \u548c 0.55\u3002\u6bd4 \u8f03\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u6a21\u578b\u8207\u672a\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u6a21\u578b\uff0c\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u6a21\u578b\u4e2d\u6bcf\u4e00\u500b\u60c5\u7dd2\u6a19\u7c64\u7684\u5206 \u985e\u7d50\u679c\u90fd\u512a\u65bc\u672a\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u6a21\u578b\uff0c\u53ef\u5f97\u77e5\u5230\u5728\u8a13\u7df4\u968e\u6bb5 CVAT \u6a21\u578b\u5b78\u7fd2\u6548\u679c\u8f03\u4f73\u7684\u6a21 \u578b \uff0c \u61c9 \u7528 \u5728 \u6b4c \u8a5e \u7684 \u60c5 \u7dd2 \u5206 \u985e \u4e5f \u80fd \u5f97 \u5230 \u8f03 \u4f73 \u7684 \u7d50 \u679c \uff0c \u8868 \u793a \u7d93 \u9077 \u79fb \u5b78 \u7fd2 \u7684 \u6a21 \u578b \u5728 CVAW+CVAP \u8cc7\u6599\u96c6\u4e2d\u6240\u5b78\u7fd2\u5230\u7684\u4e2d\u6587\u60c5\u7dd2\u7279\u5fb5\uff0c\u6709\u52a9\u65bc\u63d0\u5347\u6a21\u578b\u5728\u6b4c\u8a5e\u6587\u672c\u7684\u60c5\u7dd2\u8fa8 \u8b58\u80fd\u529b\u3002 \u8868 5. \u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u4e4b\u6df7\u80b4\u77e9\u9663\uff1a\u7d93\u9077\u79fb\u5b78\u7fd2\u8207\u672a\u7d93\u9077\u79fb\u5b78\u7fd2 [", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u57fa\u65bcTransformer\u4e4b\u6a21\u578b (Transformer-based Model)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Devlin, J., Chang, M. W., Lee, K., & Toutanova, K. (2018) . Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.", |
| "cite_spans": [ |
| { |
| "start": 26, |
| "end": 57, |
| "text": "Lee, K., & Toutanova, K. (2018)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u57fa\u65bcTransformer\u4e4b\u6a21\u578b (Transformer-based Model)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Fawaz, H. I., Forestier, G., Weber, J., Idoumghar, L., & Muller, P. A. (2018) . Transfer learning for time series classification. arXiv e-prints, arXiv:1811 .01533.", |
| "cite_spans": [ |
| { |
| "start": 40, |
| "end": 77, |
| "text": "Idoumghar, L., & Muller, P. A. (2018)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u57fa\u65bcTransformer\u4e4b\u6a21\u578b (Transformer-based Model)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Han, B. J., Rho, S., Dannenberg, R. B., & Hwang, E. (2009) ", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 58, |
| "text": "Rho, S., Dannenberg, R. B., & Hwang, E. (2009)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u57fa\u65bcTransformer\u4e4b\u6a21\u578b (Transformer-based Model)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Due to the rapid rise of new media, streaming platforms and video providers have increased. According to one report, 68% of people prefer watching a video rather than reading a long product manual to acquire information. People change the way of their entertainment even daily habitual. No one wants to be tied to a TV schedule, so people nowadays favor subscribing to streaming video services, such as Netflix or YouTube, to enjoy watching videos anytime and anywhere. Also, mobile phone viewers or smartphone viewers have increased astonishingly. YouTube reports that mobile video consumption is rising with an impressive rate of 100 percent every year. The large amount of data captured by video platforms provides insights for video streaming apps, and video stream services make recommendations based on audience's viewing profiles. Because of High-speed internet connectivity, more and more people have been allowed to become YouTubers and create large volumes of high-quality videos. YouTube has 16 million active users in Taiwan monthly, and nearly 93% of users have visited YouTube. It seems that YouTube has played an increasingly important role in modern life and entertainment. Therefore, we aim to analyze audience's habitual preferences on consuming information and entertainment on YouTube.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "According to the audience's watching records, YouTube can create customized recommended content, which means consumers' interests have been collected and analyzed by YouTube. On the contrary, YouTubers, who upload videos to the YouTube platform, also want to check their videos' performance. YouTube has provided several analysis functions such as average view duration, browsing history, variance in audience's demographics, Etc for YouTubers to check their channel's performance. However, it lacks sentiment analysis on the audience's comments. It is verified that public views, comments, and attitudes towards many events can be analyzed through social media (Heredia et al., 2016) . Public reviews on Amazon were used to evaluate users' opinions and determine the audience's preference by classifying opinions into negative, positive, and neutral (Bhatt et al., 2015) . Therefore, we deduced that YouTube could also serve as a sentiment analysis platform because it provides an increasing number of comments.", |
| "cite_spans": [ |
| { |
| "start": 662, |
| "end": 684, |
| "text": "(Heredia et al., 2016)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 851, |
| "end": 871, |
| "text": "(Bhatt et al., 2015)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "We utilized comments to monitor YouTube viewers' emotions in the previous task by designing three sentiment indicators, YouTuber preference, video preferences, and excitement level. In this task, we are not changing sentiment indicators but aim to optimize the result of sentiment detection, hoping to get higher overall accuracy to analyze audience's feelings. We not only use comments to monitor emotions, but we also consider characteristics in YouTube channels as an additional feature. Before restarting the experiment, we trained YouTubers' correlation and established YouTuber embeddings, a critical vector in determining what characteristic YouTubers shared between each other. Also, the similarity between different channels can be calculated by placing similar YouTubers close together in the embedding space.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "The social sentiment is excellent in providing a better understanding of how their audience perceives the YouTuber channel or brand. In general, sentiment analysis focuses on determining positive, negative, or neutral emotions (Cunha et al., 2019) . Therefore, before this task, we also conducted some experiments that used YouTube comments to identify users' positive, negative, or neutral emotions and how strong those emotions are. Unlike previous tasks, we change our method in the experiment stage. We combine comments and use our established YouTuber word embedding. Not only to capture emotions behind everything social viewers but also to measure YouTubers intimately by translating YouTubers' features into a relatively low-dimensional space. The analyzing result may help video loaders who want to identify their viewers' depth of feeling and provide a chance for YouTubers to engage with their viewers directly.", |
| "cite_spans": [ |
| { |
| "start": 227, |
| "end": 247, |
| "text": "(Cunha et al., 2019)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Improving Sentiment Classification of YouTube Comments", |
| "sec_num": null |
| }, |
| { |
| "text": "By modifying the structure of models that contain pre-trained YouTuber word embeddings as part of the sentence input, we expect a better model's performance than the previous tasks, not containing pre-trained YouTuber word embedding. Anticipate that YouTuber word embedding can provide additional information when analyzing sentiment tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Improving Sentiment Classification of YouTube Comments", |
| "sec_num": null |
| }, |
| { |
| "text": "Various models deal with text-based sentiment classification tasks. Machine learning-based models are used to address the text classification task (Zhang & Zheng, 2016) . Other deep learning models have been used for sentiment analysis and obtained acceptable performances (Hassan & Mahmood, 2017) . Recently, it has refreshed the best performance of using pretrained language models, such as Bidirectional Encoder Representations from the Transformers (BERT) because its pre-trained method has captured linguistic structure from learning and detecting different tasks. Sun et al. (2019) have explored BERT pre-trained structure to deal with classification task and achieve excellent performance through the way of fine-tuning in the downstream tasks. In our previous task, we also fine-tuning BERT model to detect a multidimensional aspect of the audience's comments. Although the experiment results outperformed machine learning-based classifiers and even had similar outcomes in the deep learning-based classifier, it may lack task-specific knowledge and domain-related knowledge to further improve the BERT model's performance. Considering viewers may present different passion intensities through many kinds of channels they watch, so we take channels' information, which means the types and features of YouTubers, into consideration. For example, YouTubers who always share ironic videos, their viewers may reflect stronger emotions than educational videos. Peters et al. (2018) realized that word representations are key component in many neural language understanding models, so they introduced a new type of word representations which can deal with syntax and semantics. However, our way of dealing with complex characteristics of word use is adding YouTubers' information into each comment. We proposed pre-trained YouTuber embeddings to fully present domain-related knowledge in YouTube, so we can confirm whether characteristic of YouTubers can improve models' comprehension. Specifically, we concatenate the original sentence embedding and YouTuber embeddings which serve as additional features when analyzing comments' emotion tendency.", |
| "cite_spans": [ |
| { |
| "start": 147, |
| "end": 168, |
| "text": "(Zhang & Zheng, 2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 273, |
| "end": 297, |
| "text": "(Hassan & Mahmood, 2017)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1464, |
| "end": 1484, |
| "text": "Peters et al. (2018)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Compared with the limited dataset for training a relatedness between terms, more researchers have focused on using a pre-trained word embedding to understand semantic relatedness and similarity between terms in recent years. Zhu et al. (2017) show that increasing the size of datasets can identify more relations of biomedical terms even though it does not guarantee models' better precision. As a result, because of the small size of dataset, researcher often have to use pre-trained word embeddings to better capture meaningful vectors. Rezaeinia et al. (2017) have increased the accuracy on sentiment analysis research by using pre-trained word embeddings. Their method is experience different word representation methods, such as Part-of-Speech (POS) tagging techniques, lexicon-based approaches, and Word2Vec/Glove methods to compare their effectiveness.", |
| "cite_spans": [ |
| { |
| "start": 225, |
| "end": 242, |
| "text": "Zhu et al. (2017)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 539, |
| "end": 562, |
| "text": "Rezaeinia et al. (2017)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Recently word embeddings methods have been widely applied in downstream models. Aydo\u011fan & Karci (2020) used Word2Vec method on a large corpus of approximately 11 billion words to train word vectors, then applied to deep neural networks. The result did show that embedding method affected the rate of accuracy. Another research used pre-trained word embedding as a critical component for its downstream models. (Miyato et al., 2017) . Cited from the above experiences, initially, we decided to utilize word vectors from the 2021 Wikipedia Chinese corpus to represent YouTuber similarity because of the large size of corpuses. However, we only focus on capturing the strong connection between each YouTuber and extracting characteristic behind YouTubers. According to our selected 25 YouTuber's channels, we select comments beneath each channel latest ten videos. Then, we filter these substantial comments by checking whether comments involve different YouTubers' names. Comments that up to standard are remained to train YouTubers embeddings. To compare whether the sentiment detection tasks can perform better by adding generating exact vectors, we propose a novel method, concatenating comments with YouTubers embeddings, to apply on classifiers.", |
| "cite_spans": [ |
| { |
| "start": 80, |
| "end": 102, |
| "text": "Aydo\u011fan & Karci (2020)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 410, |
| "end": 431, |
| "text": "(Miyato et al., 2017)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Usherwood & Smit (2019) focus on comparing BERT and top classical machine learning approaches on a trinary sentiment classification task. Their task aims to verify whether BERT can perform state-of-the-art result when one only has 100-1000 labelled examples per class. As the result, BERT outperformed top classical machine learning algorithms even when training with 100 examples per class. Another research shows the superiority of BERT and support to use BERT as a default technique in NLP problems (Gonz\u00e1lez-Carvajal & Garrido-Merch\u00e1n 2020). With similar task, we apply our own generated word vectors and go on the previous algorithms to determine if these approaches may represent the better result or even both BERT and machine learning-based methods are valid options. Figure 1 shows the proposed method for sentiment analysis and classification processes. Firstly, we collected the audience's comments from the YouTube platform and subsequently labeled these comments according to our designed three sentiment indicators. Data preprocessing works include transferring emojis to texts and establishing a YouTube-based dictionary for tokenization. Next, all comments are converted into vectors, and YouTuber embedding is prepared to concatenate in the proper layer according to models. Finally, by the experiment stage, we evaluate the performance of each classifier in three detection tasks and discuss a comparative study. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 776, |
| "end": 784, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2." |
| }, |
| { |
| "text": "To cover the diversity of YouTube channels, we generated our dataset by selecting different types of YouTube channels. The composition of the selected videos' film creation types with game 1%, education 4%, DIY with 4%, science and technology with 5%, comedy 9%, entertainment with 28%, and blog with 49%. Through these 25 selected channels, we then filter five videos from each channel that have been highly popular or controversial since 2019 because people imminently show their interest in new tread and debatable topics. Therefore, the data source contains a total of 125 videos. In this way, we collected more controversial and polarizing comments, and it becomes easier for annotators to determine the sentimental tendency of comments. However, to avoid different accumulated numbers of comments in each video, we randomly remain 100 pieces of comments from each video. Thus, a total of 12500 pieces of comments is taken into consideration.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comment Collection", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "YouTube has provided a discussion function for audiences to express their opinion by clicking like or dislike bottom under the videos. However, positive or negative sentiment classifications cannot explain why the audience does not like the videos and what reason keeps the audience subscribing to a specific channel. There is no noticeable analysis of likes and dislikes opinion, so we design three indicators, YouTube preference, video preference, and excitement level, to investigate different aspects of the audience's comment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Definition of Sentiment Indicators", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "YouTuber preference: In the indicator of YouTuber preference, comments can roughly divide into non-relative and relative towards YouTubers. Excluding non-relative comments, the rest comments that talk about YouTubers' names or affairs can continue to dig into positive, negative, and neutral attitudes according to their comments' content.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\uf0b7", |
| "sec_num": null |
| }, |
| { |
| "text": "Video preference: The indicator categories are the same as YouTuber preference. Nonrelative, unlike, neutral, and like are four categories used to judge Video preference. For example, comments that do not talk about video content will be labeled as non-relative comments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\uf0b7", |
| "sec_num": null |
| }, |
| { |
| "text": "Excitement level: This indicator is designed into five categories, from barely excited to hyper excited. We classify the audience's speaking tone from no emotion to extreme emotion state step by step. In addition, we consider emojis a judgment in this indicator because people tend to use emojis as their comments. For example, the second level of Excited level means the audience can speak confidently and contain two types of emojis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\uf0b7", |
| "sec_num": null |
| }, |
| { |
| "text": "The main drawback of using own data sources is having to label our dataset. Therefore, the main objective is to address semantic comprehension gaps between annotators. We introduce some guidelines to properly annotate our comments. For example, watching videos before annotation is required because it might resonate powerfully with the audience's opinions. During the annotation process, we eliminate some non-relative comments, such as advertisements, comments that not using Mandarin, comments that post links to external web pages, and merely timestamps in the comments, to optimize the availability of the dataset. In the last part, we use the majority decision to filter out inconsistent labels unless each comment annotation is marked as the same point.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Indicator Labeling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In Table 1 , we use three methods to calculate agreement scores after labeling comments, which include Krippendorff's Alpha, Fleiss's Kappa, and Cronbach's Alpha. With", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 1", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentiment Indicator Labeling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Krippendorff's Alpha method, due to the reason that values smaller than 0.667 represent as discard data, so our three indicators are shown not up to the standard. Fleiss's Kappa method stands for fair and moderate data because values between 0.21 to 0.6 are considered acceptable levels. Cronbach's Alpha method evaluates three indicators as outstanding labeling work because a value higher than 0.7 may show annotation agreement, let alone we get 0.9 on Excitement level. Therefore, two of the methods were qualified as acceptance results, and thus we provide an adequately labeled dataset to train and assess a given model. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Improving Sentiment Classification of YouTube Comments", |
| "sec_num": null |
| }, |
| { |
| "text": "We consider emojis as part of emotional expressions. The first step of text processing is to transfer emojis to text, so dealing with rich emojis is our priority. We transfer emojis to text by the package called \"emojiswitch.\" Then we establish a user-defined dictionary to recognize specific words, such as YouTubers' names and the texts transferred from emojis. In this way, we go through word tokenization, and thus now we can accurately determine unique objects from a user-defined dictionary. After these two parts, we go through word tokenization using the current state-of-the-art word tokenization tool created by the Chinese Knowledge and Information Processing (CKIP) Group. This tool is available for dealing with tokenization in Mandarin. In previous task, after completing all these above steps, we can start model training and evaluating.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Text Preprocessing", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "There are various sentiment analysis techniques, but recently, word embeddings have been widely used in sentiment classification tasks. Word2Vec and GloVe are among the most accurate and usable word embedding methods to convert words into meaningful vectors. Therefore, we trained YouTubers embedding, a dense vector representation of words that capture something about their meaning, to present meaningful vectors to understand the relationship between YouTubers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training YouTuber Embedding", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "To have the best results when using the generated embeddings, we selected ten newly released videos, due to October 2021, from 25 YouTube channels that are the primary data source in this task. The comments' contents are selected based on having YouTubers' names, whether lead actors/actresses or supporting actors/actresses. A total of 175,000 pieces of comments remains and applied to train YouTuber correlations. As a result, we present a YouTuber embedding dictionary that stores YouTubers' names and their corresponding 300dimensional vector. This step aims to retrieve information about the audience's perceptions of different YouTubers because YouTubers' attitudes or behavior can stand for the character of the channel. In this way, the similarity between YouTubers has been predicted and presented in a low-dimensional vector. After training YouTuber embedding, we can use this vectorial representation to replace the YouTuber variable and obtain the corresponding vector from each comment. For example, we use each comment as a key to finding which YouTuber's channel is, and the YouTuber information can continue to map with its 300-dimensional vector. The next step is to apply this embedding; the input may be comment vectors after an additional YouTuber embedding to automatically train on classification models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training YouTuber Embedding", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "We propose a BERT-based model via constructing an additional embedding layer before calculating the probability distributions over categorical labels. In the beginning, we did not change the input; we sum the position embeddings, word embeddings, and segmentation embeddings for each token. Then we add YouTuber embeddings to each sequence after extracting the hidden state vector. Finally, using a SoftMax classifier to determine over categorical labels. Figure 2 shows the modified structure of BERT model. We only used comments to detect audience's emotions and did not change the structure of pretrained BERT model in the previous task. This time, we still remain comments and incorporate YouTube domain knowledge by adding YouTuber embedding to detect emotion variance more precisely.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 456, |
| "end": 464, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training Classifiers", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "Besides the BERT-based model, machine learning-based models: RandomForest, Xgboost, and SVM, are also used as a classifier to deal with dimensional sentiment analysis tasks. We transform comments into numerical vectors using TF-IDF, greatly improving the more basic methods like word counts in text analysis with machine learning. TF-IDF gives us a way to associate each word in a document with a number that represents how relevant each word is in that document. In the previous task, the TF-IDF score was fed to algorithms. However, we add a 300-dimensional vector, which stands for Youtubers' information, after retrieving the TF-IDF score of each comment at this time. Simply put, each comment may find their corresponded YouTubes' channel at first. Then, each channel can be mapped with our pre-trained YouTube word embeddings. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Classifiers", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "Sentiment analysis is a fast-growing area and one of the well-known tasks of research in natural language processing (NLP) and text classifications. To better capture wide emotion variance on the audience's comment, we use three sentiment indicators and five modified models to train classifiers and analyze five targets, T1 to T5 in this task. The following elaborates the meaning of five tasks for our experiment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification Tasks", |
| "sec_num": "3.7" |
| }, |
| { |
| "text": "T1: Whether comments are related to YouTubers is a binary classification task. The data sources are generated from the result of the indicator, YouTubers preferences. By rearranging the category of the labeled datasets, we merge the annotation result of unlike, neutral, and like comments into related comments. In contrast, non-related comments remain to be. This classification task is aims to discover the motivation behind watching videos. If comments are talking about YouTubers' affairs, audience might pay attention to YouTubers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\uf0b7", |
| "sec_num": null |
| }, |
| { |
| "text": "T2: Audience's sentiment towards YouTubers is an extended issue from an indicator of YouTuber preference. Exclude non-relative comments; we extract unlike, neutral, and like comments from the annotation result. Like to dislike can serve as an indicator for YouTubers to check the followers of his or her channel. Also, YouTubers can know what attractive they own or what causes them to make a nuisance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\uf0b7", |
| "sec_num": null |
| }, |
| { |
| "text": "T3: Whether comments are related to videos, also be rearranged from the indicator, video preference. We duplicate the same techniques for whether comments are related to YouTubers but present a completely different meaning. This task may explain whether the contents of the video arouse discussion or become no interest to the audience. If the topic interested to the audience, it may show more relative comments towards video.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\uf0b7", |
| "sec_num": null |
| }, |
| { |
| "text": "T4: Audience's sentiment towards videos excludes non-relative comments from the indicator, video preference; The rest of the comments can deal with the audience's sentiment towards video. Even if watching the same channel, the different themes will captivate and engage different audiences. Therefore, this task may help YouTubers understand their audience's preferences within a specific channel.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\uf0b7", |
| "sec_num": null |
| }, |
| { |
| "text": "T5: Corresponding to the indicator of excitement level, T5 aims to analyze the audience's emotional ups and downs from barely excited to hyper excited, which can firmly confirm the degree of support from different audiences and affirm the audience's attitude towards specific issues.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\uf0b7", |
| "sec_num": null |
| }, |
| { |
| "text": "Moving to the composition of annotated comments according to three indicators. We applied three indicators to five analysis tasks, so comments have also been rearranged into five datasets. When analyzing the target, whether comments are related to Youtubers, the proportion of the non-relative comments to the relative comments is three to one. It presented that audiences prefer talking about video content rather than YouTubers' affairs. At the same time, it comes out that the most significant piece of comments was labeled as like in audience's sentiment towards YouTubers, which is extracted from the above relative comments. This composition made sense because if people do not like someone, they may not notice their condition, even watching their channel. Next, relative comments in whether comments are related to video account for the majority in the task, and 60 percent of comments with a neutral attitude talked about the video's content. This proportion presented that the audience does not frequently present animosity on the YouTube platform within our selected channels. The fifth analyzing task, emotional ups and downs, revealed that the audience could express their health and happiness by commenting. The following table shows the proportion of data to our five tasks. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "This section presents multiple models in Table 4 that we experiment with. Except for BERT models that we followed it pre-trained parameters, other models have experimented with different parameters. We configure the best parameters on each model through experiments and then apply them to analyze different aspects of sentiment tasks. Also, we use 5-fold crossvalidation to ensure the performance for all models. By fixedly setting k=5 to our dataset, 80% of data will be randomly selected for training and 20% for testing in each fold. In M1, M2, M3, M4, we set the number of epochs as 10 through the entire training dataset to make sure that the BERT model can have enough time to learn the pattern from social comments. After conducting experiments, we evaluate and interpret the performances of different models through the suitable metrics used for classification problems: overall accuracy. The results of social sentiment analysis are shown in the next section.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 41, |
| "end": 48, |
| "text": "Table 4", |
| "ref_id": "TABREF18" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment Design", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Ching-Wen Hsu et al. Figure 3 and Figure 4 are the result of predicting the target, whether comments are related to YouTubers. The result shows that adding YouTuber embedding machine learning-based classifiers can better detect relative or non-relative comments towards YouTubers. On the contrary, after adding YouTuber embedding, the BERT model does not show better performances in the prediction result. We can also notice that M6 performed the worst in the previous task. However, it improved to become M9 and serve as the best classifier in the end. Figure 6 show the result of the audience's sentiment towards YouTubers. The data in this detection task is comment about YouTubers' affairs, so we expected that adding YouTuber embedding after each comments can increase overall accuracy and F1-score. Machine learning-based classifiers proved the same result with our exception. The models 'performances have at least increased 7% in overall accuracy and 8% in F1-score. However, BERT, the variance seen from M2 to M4 surprisingly decrease. Figure 8 are the result of predicting the target, whether comments are related to videos. We notice that overall accuracy in all models is upscale to nearly 90%. However, the improvement in F1-score is limited, only increasing smaller than 3% or even regressing in BERT method when adding YouTuber embedding. We deduce the small amount of increment or even getting worse because YouTubers' information has little relationship with determining relative or non-relative comments towards videos. Although data in this detection task is comments that discuss video content, the experiment result show that machine learning-based methods improved the predicted result after adding YouTuber embedding. In comparison, M4 and M4 do less well than before, decreasing from 5% to 10% and becoming the worst classifier. Figure 12 shows the result of predicting the audience's emotional ups and downs from their leaving comments. Compared with adding YouTuber embedding and without YouTuber embedding, the former method can improve model performance in machine learningbased methods. We deduce that the improvement may result from different types of YouTubers having different audiences. The more controversial YouTuber, the more excitement level may show in their audience's comments. For example, a YouTuber who prefers talking about political issues may vary their audience emotional variance than educational channels. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 21, |
| "end": 29, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 34, |
| "end": 42, |
| "text": "Figure 4", |
| "ref_id": "FIGREF4" |
| }, |
| { |
| "start": 554, |
| "end": 562, |
| "text": "Figure 6", |
| "ref_id": "FIGREF7" |
| }, |
| { |
| "start": 1045, |
| "end": 1053, |
| "text": "Figure 8", |
| "ref_id": null |
| }, |
| { |
| "start": 1853, |
| "end": 1862, |
| "text": "Figure 12", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "28", |
| "sec_num": null |
| }, |
| { |
| "text": "In summary, there are three findings after we conducted experiments (1) Within machine learning-based models, the experiment results validate that adding YouTuber embedding is an effective way to identify audiences' emotions and depth of feeling. Also, we notice that YouTuber embedding is significantly helpful when detecting audience sentiment towards YouTubers. This result explains that we successfully trained YouTuber word embedding by using many comments with YouTubers' or guests' names who are invited on YouTuber's channel. 2We notice that BERT neither improves the prediction score nor goes backward, a nearly ten percent decrease when predicting T1, T2, and T3. However, when predicting T5, two kinds of BERT (M3 and M4) do not regress their performance but remain top ranking. This result explains that BERT's model construction is more suitable for addressing multidimensional classification tasks. (3) Except for BERT models that performance well in determining audience's emotional ups and downs, BERT cannot perfectly deal with the polarity classificational tasks after adding YouTubers embedding. We also discover two characters that social media users own on the YouTube streaming platform. People prefer to discuss videos' content rather than YouTubes' affairs. In addition, people do not frequently present animosity in their comments; most people present their comments as neutral or barely excited attitudes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "This paper focuses on improving the over-all accuracy and F1-score on dimensional sentiment classification task. This time, we combine comments with YouTuber embeddings to train on the all classifiers. In machine learning-based classifiers, we use TF-IDF as sentence vectors and concatenate YouTuber Embedding in the last layer to fit in RandomForest, Xgboost, and SVM. On the contrary, we add YouTuber embeddings to the hidden state vector of BERT model. After that, we compare the above experiments' result with the previous tasks that only utilize comments as our data sources. Although BERT does not present a better prediction score on sentiment polarity problems, it perfectly deals with a muti-dimensional problem, the task of predicting the audience's excitement level. This result proves the superiority of BERT by achieving at least 10 % more in overall accuracy and F1-score than other classifiers. In comparison to the traditional machine learning classifiers, we identify that although machine learning models cannot perform as well as BERT before adding YouTuber embeddings, the performances of the machine learning-based classifiers can be dramatically improved after our proposed method which concatenating comments text with trained YouTubers embeddings to these classifier.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5." |
| }, |
| { |
| "text": "Analyzing the public's perception of YouTubers and the influence of their videos is a challenging task for researchers so far. Much work has been done in this paper, but it still has a long way to overcome some problems. In this research, we have emphasized the following Improving Sentiment Classification of YouTube Comments problems in order to make our results improve. In the future, we could explore more information on YouTube, such as combining videos' cover photo as features, to optimize multipledimensional sentiment analysis tasks. In this way, even if imbalanced dataset, models may identify feature represented on the picture and capture different aspects of information that cannot present in context only. In addition, with the recent emergence of deep learning, an increasing number of researchers have started to use deep neural networks to deal with sentiment analysis, we may explore deep leering techniques to automated detect the audience's preference on social media. Last but not least, others indicators, such as whether the comments contain an ironic statement or whether the comments contain an erotic statement, can be added for analyzing other aspects of the audience's comments. The latter proposed indicator may serve as a guard for children's users, and the former indicator may prevent YouTubers from getting into conflict with their fans.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5." |
| }, |
| { |
| "text": "Sun, C., Qiu, X., Xu, Y. & Huang, X. (2019) (Wang, 2005; Srinivasan et al., 2006) \u3001\u7406\u60f3\u6bd4\u4f8b\u906e\u7f69 (ideal ratio mask, IRM) (Srinivasan et al., 2006) ", |
| "cite_spans": [ |
| { |
| "start": 9, |
| "end": 43, |
| "text": "Qiu, X., Xu, Y. & Huang, X. (2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 44, |
| "end": 56, |
| "text": "(Wang, 2005;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 57, |
| "end": 81, |
| "text": "Srinivasan et al., 2006)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 114, |
| "end": 139, |
| "text": "(Srinivasan et al., 2006)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5." |
| }, |
| { |
| "text": "(\u5373\u52a0\u6b0a\u904b\u7b97)\u3002\u4e8c\u8005\u5404\u64c5\u52dd\u5834\uff0c\u4f46\u8fd1\u5e74\u4f86\u4f3c\u4e4e\u662f\u4ee5\u906e\u7f69\u5f0f\u7684\u8a9e\u97f3\u5f37\u5316\u66f4\u53d7\u91cd\u8996\u8207 \u767c \u5c55 \uff0c \u76f8 \u95dc \u7684 \u6f14 \u7b97 \u6cd5 \u5305 \u62ec \u4e86 \u7406 \u60f3 \u4e8c \u5143 \u906e \u7f69 (ideal binary mask, IBM)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5." |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u5728\u672c\u7814\u7a76\u4e2d\uff0c\u6211\u5011\u9078\u64c7\u52a0\u4ee5\u7814\u7a76\u6539\u9032\u7684\u662f\u7406\u60f3\u6bd4\u4f8b\u906e\u7f69(ideal ratio mask, IRM)\u6cd5\uff0c\u6b64\u6cd5\u901a \u5e38\u662f\u6c42\u53d6\u8a9e\u97f3\u4e4b\u4e00\u822c\u6642\u983b\u5716 (spectrogram) \u6216\u8033\u8778\u6642\u983b\u5716 (cochleagram) \u5c0d\u61c9\u7684\u7406\u60f3\u906e \u7f69\u503c\uff1a , | , | | , | | , | ,", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Conclusion", |
| "sec_num": "5." |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "X \u22ef (2) \u5176\u5c3a\u5bf8\u70ba \u3002 \u6b65\u9a5f\u4e8c\uff1a\u4e0a\u8ff0\u4e4b\u7279\u5fb5\u77e9\u9663X\u7684\u4efb\u4e00\u7b2c \u500b\u6a6b\u5217\u5411\u91cf X , X , . . . X ,", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Conclusion", |
| "sec_num": "5." |
| }, |
| { |
| "text": "In recent years, speech synthesis system can generate speech with high speech quality. However, multi-speaker text-to-speech (TTS) system still require large amount of speech data for each target speaker. In this study, we would like to construct a multi-speaker TTS system by incorporating two sub modules into artificial neural network-based speech synthesis system to alleviate this problem. First module is to add the speaker embedding into encoding module of the end-toend TTS framework while using small amount of the speech data of the training speakers. For speaker embedding method, in our study, two speaker embedding methods, namely speaker verification embedding and voice conversion embedding, are compared for deciding which one is suitable for the personalized TTS system. Besides, we substituted the conventional post-net module, which is conventionally adopted to enhance the output spectrum sequence, to a post-filter network, which is further improving the speech quality of the generated speech utterance. Finally, experiment results showed that the speaker embedding is useful by adding it into encoding module and the resultant speech utterance indeed perceived as the target speaker. Also, the post-filter network not only improving the speech quality and also enhancing the speaker similarity of the generated speech utterances. The constructed TTS system can generate a speech utterance of the target speaker in fewer than 2 seconds. In the future, other feature such as prosody information will be incorporated to help the TTS framework to improve the performance. Table \u4f86\u4f7f\u6a21\u578b\u64f4\u5c55\u5230\u6c92\u770b\u904e\u7684\u8a9e\u8005\u3002\u5728\u672c\u6b21\u7814\u7a76\u4e2d\uff0c\u6211\u5011\u5c07\u6bd4\u8f03\u5206\u5225\u4f7f\u7528\u8a9e \u97f3\u8f49\u63db\u548c\u8a9e\u8005\u8fa8\u8b58\u9019\u5169\u7a2e\u4efb\u52d9\u6240\u8a2d\u8a08\u7684\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u4f5c\u70ba\u6211\u5011 TTS \u7cfb\u7d71\u4e2d\u8a9e\u8005\u7684\u8868\u793a\u65b9 \u5f0f\uff0c\u4e26\u6bd4\u8f03\u4f55\u8005\u5c0d\u65bc\u6211\u5011\u63d0\u51fa\u7684\u67b6\u69cb\u66f4\u5408\u9069\u3002 \u6211\u5011\u7684 TTS \u67b6\u69cb\u662f\u57fa\u65bc Google \u6240\u63d0\u51fa\u7684\u81ea\u56de\u6b78\u6a21\u578b Tacotron 2 (Shen et al., 2018) \uff0c \u5b83\u7531\u4e09\u500b\u795e\u7d93\u7db2\u8def\u5340\u584a\u7d44\u6210\uff0c\u6bcf\u500b\u5340\u584a\u90fd\u6709\u660e\u78ba\u7684\u76ee\u7684\u4ee5\u4fbf\u6211\u5011\u9032\u884c\u6539\u52d5\uff1a Tacotron 2 \u6574\u9ad4\u67b6\u69cb\u5c0d\u65bc\u76ee\u524d\u795e\u7d93\u7db2\u8def\u7684\u6280\u8853\u4f86\u8aaa\u662f\u76f8\u5c0d\u820a\u7684\uff0c\u96a8\u8457 Self-Attention (Vaswani et al., 2017) \u5927\u91cf\u88ab\u904b\u7528\u65bc\u8a9e\u97f3\u5408\u6210\u7684\u4efb\u52d9\u4e0a\uff0c\u6539\u5584\u4e86\u5982 Tacotron 2 \u56e0\u4f7f\u7528 RNN \u795e \u7d93 \u7db2 \u8def \u9700 \u8981 \u4f9d \u7167 \u9806 \u5e8f \u50b3 \u64ad \u7684 \u5927 \u91cf \u8a08 \u7b97 \uff0c \u5982 Transformer TTS (Li et al., 2019) \u548c Fastspeech 2 (Ren et al., 2020) \uff1b\u4e5f\u6709\u8457\u5404\u7a2e\u6ce8\u610f\u529b\u6a5f\u5236\u7684\u65b9\u6cd5\u88ab\u63d0\u51fa\uff0c\u4ee5\u6539\u5584 Tacotron 2 \u820a\u6709\u6ce8\u610f\u529b\u6a5f\u5236\u8a13\u7df4\u901f\u5ea6\u6162\u6216\u662f\u8f03\u9577\u7684\u53e5\u5b50\u6703\u767c\u751f\u6f0f\u5b57\u6216\u91cd\u8907\u767c\u97f3\u7684\u554f\u984c\uff0c\u5982 Forward Attention (Zhang et al., 2018) \u53ca Dynamic Convolution Attention (Battenberg et al., 2020) ", |
| "cite_spans": [ |
| { |
| "start": 1734, |
| "end": 1753, |
| "text": "(Shen et al., 2018)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 1842, |
| "end": 1864, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 1958, |
| "end": 1975, |
| "text": "(Li et al., 2019)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 1991, |
| "end": 2009, |
| "text": "(Ren et al., 2020)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 2096, |
| "end": 2116, |
| "text": "(Zhang et al., 2018)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 2149, |
| "end": 2174, |
| "text": "(Battenberg et al., 2020)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1591, |
| "end": 1628, |
| "text": "Table \u4f86\u4f7f\u6a21\u578b\u64f4\u5c55\u5230\u6c92\u770b\u904e\u7684\u8a9e\u8005\u3002\u5728\u672c\u6b21\u7814\u7a76\u4e2d\uff0c\u6211\u5011\u5c07\u6bd4\u8f03\u5206\u5225\u4f7f\u7528\u8a9e", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| }, |
| { |
| "text": "\uf0b7 \u7de8\u78bc\u5668: \u5c07\u8f38\u5165\u7684\u6587\u5b57\u7de8\u78bc\u6210\u4e00\u7a2e\u6f5b\u5728\u8868\u793a\uff0c\u901a\u5e38\u70ba\u4e86\u4f7f\u6a21\u578b\u64f4\u5c55\u5230\u591a\u8a9e\u8005\uff0c\u6703\u5c07\u6587\u5b57 \u6f5b\u5728\u8868\u793a\u8207\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u4e32\u63a5\u3002 \uf0b7 \u89e3 \u78bc \u5668 : \u65bc \u8a13 \u7df4 \u671f \u9593 \uff0c \u5c07 \u6587 \u5b57 \u6f5b \u5728 \u8868 \u793a \u8207 \u76ee \u6a19 \u983b \u8b5c \u7684 \u6bcf \u500b \u97f3 \u6846 \u5efa \u7acb \u6ce8 \u610f \u529b \u5c0d \u9f4a (Chorowski et al., 2015)\uff0c\u65bc\u63a8\u8ad6\u671f\u9593\uff0c\u4f9d\u64da\u7576\u524d\u97f3\u6846\u8207\u6587\u5b57\u6f5b\u5728\u8868\u793a\u63a8\u6e2c\u51fa\u4e0b\u4e00\u500b\u97f3\u6846 \u7684\u503c\uff0c\u76f4\u81f3\u6ce8\u610f\u529b\u6a5f\u5236\u5c0d\u9f4a\u5230\u505c\u6b62\u7b26\u865f (\u4f8b\u5982\uff1a\u6587\u5b57\u4e2d\u7684\u53e5\u9ede)\u70ba\u6b62\u3002 \uf0b7 Post-Net: \u63d0\u5347\u6574\u9ad4\u983b\u8b5c\u7684\u54c1\u8cea\u3002 Tacotron 2 \u7684\u6a21\u578b\u67b6\u69cb\u5982\u5716 1 \u6240\u793a\uff1a", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| }, |
| { |
| "text": "\u9996\u5148\uff0c\u6211\u5011\u4f7f\u7528 HiFiGAN \u5f9e\u5716 11 \u7684\u7d05\u7dda\u5206\u6bb5\u8655\uff0c\u6211\u5011\u767c\u73fe Bahdanau Attention \u63d0\u4f9b\u4e86\u6bcf\u6bb5\u8a9e\u97f3\u5927\u6982\u7684\u97f3\u6846\u7bc4\u570d\uff0c \u5716\u4e2d\u865b\u7dda\u5de6\u53f3\u8655\u5206\u5225\u662f\"qi4\"\u8ddf\"hen2\"\u7684\u767c\u97f3\uff0c\u7531\u65bc\u5b83\u5011\u4e3b\u8981\u90fd\u662f\u6c23\u97f3\uff0c\u5c0e\u81f4\u5206\u6bb5\u6c92 \u6709\u5f88\u660e\u986f\uff0c\u800c\u6700\u5de6\u5074\u53ca\u6700\u53f3\u5074\u5c0d\u7a31\u6027\u7684\u689d\u7d0b\u53ef\u4ee5\u5224\u65b7\u70ba\u7a7a\u683c\u8cc7\u8a0a\uff0c\u5373\u8a72\u7247\u6bb5\u662f\u975c\u97f3\u7684\u3002 ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u5be6\u9a57\u8a2d\u7f6e (Experimental Setups)", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u6211 \u5011 \u9084 \u4f7f \u7528 Resemblyzer \u5206 \u6790 \u5668 \u8a08 \u7b97 \u4e0d \u540c \u6027 \u5225 \u5728 \u8a9e \u97f3 \u8f49 \u63db \u4e0a \u7684 \u8a9e \u8005 \u7a7a \u9593 \uff0c Resemblyzer \u662f\u4e00\u500b\u900f\u904e\u795e\u7d93\u7db2\u8def\u4f86\u6bd4\u8f03\u6216\u5206\u6790\u8a9e\u97f3\u7684 Python \u5957\u4ef6\u3002\u7814\u7a76\u4e2d\uff0c \u7537\u6027\u8207\u5973 \u6027\u6bcf\u4f4d\u8a9e\u8005\u7686\u5408\u6210 10 \u53e5 Post-Filter \u548c Post-Net \u500b\u97f3\u6a94\u8207\u539f\u8a9e\u8005\u6bd4\u8f03\uff0c\u5176\u7d50\u679c\u5982\u5716 8 \u548c\u5716 9\uff0c\u6211\u5011\u53ef\u4ee5\u5f9e\u9019\u5169\u5f35\u5716\u4e2d\u767c\u73fe\uff0c\u5728\u5167\u90e8\u8a9e\u8005\u4e2d\uff0c\u5408\u6210\u7684\u5973\u6027\u97f3\u6a94\u90fd\u5f88\u63a5\u8fd1\u539f\u97f3\u6a94\uff0c\u5728\u7537 \u6027\u97f3\u6a94\u4e2d\u5247\u53ef\u4ee5\u767c\u73fe Diffwave \u8f03 Post-Net \u63a5\u8fd1\u539f\u59cb\u97f3\u6a94\uff0c\u4e0d\u7ba1\u662f\u8a9e\u97f3\u5408\u6210\u6216\u8a9e\u8005\u8fa8\u8b58\u6548 \u679c\u7686\u76f8\u4f3c\uff1b\u5728\u5916\u90e8\u8a9e\u8005\u4e2d\uff0c\u53ef\u4ee5\u767c\u73fe\u8a9e\u97f3\u5408\u6210\u7684\u8a9e\u8005\u7a7a\u9593\u8f03\u70ba\u96c6\u4e2d\uff0c\u800c\u8a9e\u8005\u8fa8\u8b58\u7684\u8a9e\u8005 \u7a7a\u9593\u8f03\u70ba\u767c\u6563\uff0c\u4e26\u4e14 Diffwave \u6bd4 Post-Net \u7a0d\u5fae\u63a5\u8fd1\u539f\u59cb\u97f3\u6a94\u3002\u6211\u5011\u53ef\u4ee5\u65b7\u5b9a\u8a9e\u97f3\u5408\u6210\u4efb \u52d9\u4ee5\u53ca\u91dd\u5c0d Post-Net \u6240\u63d0\u51fa\u7684 Diffwave \u67b6\u69cb\u5c0d\u65bc\u6211\u5011\u7684\u591a\u8a9e\u8005 TTS \u7cfb\u7d71\u4f86\u8aaa\u662f\u66f4\u6709\u5e6b \u52a9\u7684\u3002", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u5be6\u9a57\u8a2d\u7f6e (Experimental Setups)", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u5728\u672c\u6b21\u7814\u7a76\u4e2d\uff0c\u6211\u5011\u6539\u9032\u4e86\u591a\u8a9e\u8005 Tacotron 2 \u7684\u67b6\u69cb\uff0c\u900f\u904e\u52a0\u5165\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u4fbf\u53ef\u5408\u6210 \u672a\u77e5\u8a9e\u8005\u7684\u8a9e\u97f3\uff0c\u4e26\u4e14\u6bd4\u8f03\u8a9e\u97f3\u8f49\u63db\u8207\u8a9e\u8005\u8fa8\u8b58\u9019\u5169\u500b\u4e0d\u540c\u4efb\u52d9\u7684\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u7528\u65bc TTS \u7684\u6210\u6548\uff0c\u7531\u5be6\u9a57\u7d50\u679c\u5f97\u77e5\u8a9e\u97f3\u8f49\u63db\u7684\u6548\u679c\u662f\u512a\u65bc\u8a9e\u8005\u8fa8\u8b58\u7684\uff0c\u4f7f\u7528 Post-Filter \u4f86\u63d0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u7d50\u8ad6 (Conclusion)", |
| "sec_num": "5." |
| }, |
| { |
| "text": "Machine reading comprehension (MRC) is a challenge for AI research, and is frequently adopted to seek desired information from knowledge sources such as company document collections, Wikipedia or the Web for a given question. To evaluate the capability of a MRC system, different test forms have been adopted in the literature (Qiu et al., 2019; such as binary choice, multiple choice (MC), multiple selection (MS), and cloze. Which test form to adopt usually depends on the format of the given benchmark/dataset. In this paper, (3) \u9694\u4ee3\u6559\u990a\u5bb6\u5ead (4) \u5bc4\u990a\u5bb6\u5ead", |
| "cite_spans": [ |
| { |
| "start": 327, |
| "end": 345, |
| "text": "(Qiu et al., 2019;", |
| "ref_id": "BIBREF65" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "(1) \u4e09\u4ee3\u540c\u5802\u5bb6\u5ead we solve MC questions about traditional Chinese primary school social studies. In this Chinese Social Studies MC (CSSMC) QA task, the system selects the correct answer from several candidate options based on a given question and its associated lesson manually constructed by Taiwan book publishers. Table 1 shows an example of CSSMC, where the passage is the corresponding supporting evidence (SE).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 310, |
| "end": 317, |
| "text": "Table 1", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Answer", |
| "sec_num": null |
| }, |
| { |
| "text": "Previous work on answering MC questions can be divided into statistics-based approaches (Kouylekov & Magnini, 2005; Heilman & Smith, 2010) and neural-network-based approaches (Parikh et al., 2016; Chen et al., 2017) . Recent pre-trained language models such as BERT (Devlin et al., 2019) , XLNET (Yang et al., 2019) , RoBERTa , and ALBERT (Lan et al., 2019) show excellent performance on different RC MC tasks. As BERT shows excellent performance on various English datasets (e.g., SQuAD 1.1 (Rajpurkar et al., 2016) , GLUE , etc.), it is adopted as our baseline. Table 6 shows its performance given the gold SE.", |
| "cite_spans": [ |
| { |
| "start": 88, |
| "end": 115, |
| "text": "(Kouylekov & Magnini, 2005;", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 116, |
| "end": 138, |
| "text": "Heilman & Smith, 2010)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 175, |
| "end": 196, |
| "text": "(Parikh et al., 2016;", |
| "ref_id": "BIBREF63" |
| }, |
| { |
| "start": 197, |
| "end": 215, |
| "text": "Chen et al., 2017)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 266, |
| "end": 287, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 296, |
| "end": 315, |
| "text": "(Yang et al., 2019)", |
| "ref_id": "BIBREF71" |
| }, |
| { |
| "start": 339, |
| "end": 357, |
| "text": "(Lan et al., 2019)", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 492, |
| "end": 516, |
| "text": "(Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF66" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 564, |
| "end": 571, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Answer", |
| "sec_num": null |
| }, |
| { |
| "text": "After analyzing error cases, we observed that BERT handles the following question types poorly: (1) Negation questions, that is, questions with negation phrases such as \u4e0d\u53ef\u80fd (unlikely). For this type of question, BERT selects the same answer for \"\u5c0f\u654f\u7684\u5abd\u5abd\u76ee\u524d\u5728 \u90f5\u5c40\u670d\u52d9\uff0c\u8acb\u554f\u5c0f\u654f\u7684\u5abd\u5abd\u53ef\u80fd\u6703\u70ba\u5c45\u6c11\u63d0\u4f9b\u4ec0\u9ebc\u670d\u52d9\uff1f (Xiaomin's mother serves at the post office. What kind of services could Xiaomin's mother provide to the residents?)\" and \"\u5c0f \u654f\u7684\u5abd\u5abd\u76ee\u524d\u5728\u90f5\u5c40\u670d\u52d9\uff0c\u8acb\u554f\u5c0f\u654f\u7684\u5abd\u5abd\u4e0d\u53ef\u80fd\u6703\u70ba\u5c45\u6c11\u63d0\u4f9b\u4ec0\u9ebc\u670d\u52d9\uff1f (Xiaomin's mother serves at the post office. What kind of service could not Xiaomin's mother provide to the residents?)\" (which differ only in the negation word \u4e0d (not)). BERT evidently pays no special attention to negative words; however, any one of them would change the desired answer;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer", |
| "sec_num": null |
| }, |
| { |
| "text": "(2) All-of-the-above (\u4ee5\u4e0a\u7686\u662f) and none-of-the-above (\u4ee5\u4e0a\u7686\u975e) questions, choices for which include either All of the above or None of the above. In both cases, the answer cannot be handled by simply by selecting the most likely choice without preprocessing (1)\u8001\u4eba (2)\u5c0f\u5b69 (3)\u9752\u58ef\u5e74 (4)\u4ee5\u4e0a\u7686\u975e the given choices. Table 2 shows an example of these question types.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 298, |
| "end": 305, |
| "text": "Table 2", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Answer", |
| "sec_num": null |
| }, |
| { |
| "text": "The above phenomenon was also observed by Wu & Su (2020) , who reported that BERT achieves superior results mainly by utilizing surface features, and that its performance degrades significantly when the dataset involves negation words. Moreover, it is difficult for BERT to learn the semantic meaning of all-of-the-above and none-of-the-above questions, which suggests that the listed candidate options are all correct or all incorrect, with a small amount of data.", |
| "cite_spans": [ |
| { |
| "start": 42, |
| "end": 56, |
| "text": "Wu & Su (2020)", |
| "ref_id": "BIBREF69" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer", |
| "sec_num": null |
| }, |
| { |
| "text": "However, it is difficult to pinpoint the sources of the problem and then find corresponding remedies within BERT, due to its complicated architecture (even its basic version includes 12 heads and 12 stacked layers). We thus prefer to keep its implementation untouched if the problem can be fixed by coupling BERT with external modules. Accordingly, we here propose a framework that cascades BERT with a preprocessor module and an answer-picker/selector module. The preprocessor module revises the choices for all-of-the-above and none-of-the-above questions, and the answer-picker/selector module (a postprocessor) determines the appropriate choices under the cases mentioned above. The above approach is inspired by Lin & Su (2021) , who demonstrate that BERT learns natural language inference inefficiently, even for simple binary prediction; however, they also point out that task-related features and domain knowledge significantly help to improve BERT's learning efficiency.", |
| "cite_spans": [ |
| { |
| "start": 717, |
| "end": 732, |
| "text": "Lin & Su (2021)", |
| "ref_id": "BIBREF58" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer", |
| "sec_num": null |
| }, |
| { |
| "text": "For negation-type questions, instead of picking the highest-scoring choice as usual, the answer-picker/selector module selects the candidate with the lowest score. On the other hand, for all-of-the-above or none-of-the-above questions, we use a decision tree to select the answer, as illustrated in Figure 2 . In these cases, the preprocessor module first replaces the original \"all of the above\" or \"none of the above\" choices with a new choice generated by concatenating all other choices together (before those candidates are sent to BERT). Take for example the second last row in Table 2 : we replace \"\u4ee5\u4e0a\u7686\u662f (all of the above)\", the original last choice, with \"\u5236\u5b9a\u8001\u4eba\u798f\u5229\u653f\u7b56^\u63d0\u4f9b\u826f\u597d\u7684\u5b89\u990a\u7167\u9867^\u5efa\u7acb\u5065\u5168\u7684\u91ab\u7642\u9ad4\u7cfb (Make welfare policies for elderly people^ Provide good nursing care^ Establish a sound medical system)\".", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 299, |
| "end": 307, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 584, |
| "end": 591, |
| "text": "Table 2", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Answer", |
| "sec_num": null |
| }, |
| { |
| "text": "We evaluate the proposed framework on a CSSMC dataset. The experimental results show the proposed approaches outperform the pure BERT model. This thus constitutes a new way to supplement BERT with additional modules. We believe the same strategy could be applied to other DNN models, which -despite good overall performance -are too complicated to customize for specific problems.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer", |
| "sec_num": null |
| }, |
| { |
| "text": "In summary, in this paper we make the following contributions: (1) We propose several novel approaches to supplement BERT to solve negation, all-of-the-above, and none-of-the-above questions. (2) Experimental results show that the proposed approach effectively improves performance, and thus demonstrate the feasibility of supplementing BERT with additional modules to fix given problems. 3We construct and release a new Traditional Chinese Machine Reading Question and Answering dataset to assess the performance of RC MC models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer", |
| "sec_num": null |
| }, |
| { |
| "text": "In comparison with our previous conference version (Lee et al., 2020) , this article describes additional \"Separately Judge then Select\" and \"Separately Judge Concatenation then Select\" experiments, which adopt a BERT entailment prediction model to handle each candidate option separately (details are provided in Sections 2.2.1 and 2.2.2) instead of jointly processing all candidate options together. We have also added Section 3 to describe the construction of the CSSMC dataset, which we adopt to compare different approaches.", |
| "cite_spans": [ |
| { |
| "start": 51, |
| "end": 69, |
| "text": "(Lee et al., 2020)", |
| "ref_id": "BIBREF57" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer", |
| "sec_num": null |
| }, |
| { |
| "text": "Given a social studies problem Q and its corresponding supporting evidence SE, our goal is to find the most likely answer from the given candidate set A = {A 1 , A 2 , \u2026 A n }, where n is the total number of available choices or candidates, and A i denotes the i-th answer candidate. This task is formulated as follows, where \u00c2 is the answer to be chosen. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Three different approaches are proposed in which we use entailment prediction (Dagan et al., 2005) to determine whether the candidate option is the correct answer to the question:", |
| "cite_spans": [ |
| { |
| "start": 78, |
| "end": 98, |
| "text": "(Dagan et al., 2005)", |
| "ref_id": "BIBREF46" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Models", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "(1) Separately judge then select (SJS), which considers each individual candidate option separately and then selects the final answer based on their output scores;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Models", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "(2) Separately judge with concatenation then select (SJCS), which adopts the framework of the first approach but first replaces the all-of-the-above (\u4ee5\u4e0a\u7686\u662f) and none-of-the-above (\u4ee5\u4e0a\u7686\u975e) answer choices with the concatenation of all the other remaining candidate options before entailment judgment;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Models", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "(3) Jointly judge then select (JJS), which jointly considers all candidate options to make the final decision. Details are provided below. Figure 1 shows the architecture of the proposed SJS approach, which consists of two main components: (1) the YN-BERT module, a fine-tuned BERT entailment prediction model (where YN denotes its output is a yes-no binary entailment judgment), and (2) the answer-picker module, which determines the final answer given the entailment judgment scores from four different YN-BERT modules. The input sequence is the concatenation of the associated supporting evidence, a given question, and a specific individual answer candidate/option. For each answer candidate, YN-BERT outputs an entailment judgment score used to select either Entail or Not-entail (i.e., the judgment is Entail if the score exceeds 0.5, and Not-entail otherwise). Entail implies that the given answer candidate is entailed by the combination of the question and its associated supporting evidence. The answer-picker module considers the entailment judgment scores of the various choices and selects the most appropriate one based on the decision tree shown in Figure 2 . Note that this decision tree is used only by the answer picker to make the final decision and is not involved in BERT's fine-tuning process.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 139, |
| "end": 147, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1164, |
| "end": 1172, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Proposed Models", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "A given question is classified as negative-type if it includes a negation word within a pre-specified negation word list, which is obtained from the CSSMC training data, and currently consists of {\"\u4e0d\u6703 (will not)\", \"\u4e0d\u80fd (cannot)\", \"\u4e0d\u5f97 (not allow)\", \"\u4e0d\u662f (is not)\", \"\u4e0d\u61c9\u8a72 (should not)\", \"\u4e0d\u53ef\u80fd (unlikely)\", \"\u4e0d\u9700 (do not need)\", \"\u4e0d\u5fc5 (do not need)\", \"\u4e0d\u7528 (do not need)\", \"\u6c92\u6709 (without)\"}. Since the proposed approaches aim to supplement BERT, these negation words are manually picked from the error cases in the training data-set, on which BERT model make mistakes. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Separately Judge then Select (SJS)", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "Another approach adopts the framework of the first approach but first recasts \"\u4ee5\u4e0a\u7686\u662f (all of the above)\" and \"\u4ee5\u4e0a\u7686\u975e (none of the above)' answer candidates as the concatenation of all of the other options. Take for example the last row in Table 2 : we replace \"\u4ee5\u4e0a\u7686\u975e\", the original last choice, with \"\u8001\u4eba^\u5c0f\u5b69^\u9752\u58ef\u5e74 (elderly people^children^young people)\".", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 236, |
| "end": 243, |
| "text": "Table 2", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Separately Judge with Concatenation then Select (SJCS)", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "Afterwards, the answer-picker module selects the most appropriate choice based on the following rule: For negation questions, we select the answer candidate with the lowest entailment score; otherwise, we select that with the highest entailment score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Separately Judge with Concatenation then Select (SJCS)", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "Shown in Figure 4 , the system architecture of the JJS approach consists of three main components: (1) the preprocessor, which recasts \"\u4ee5\u4e0a\u7686\u662f (all of the above)\" and \"\u4ee5\u4e0a\u7686 \u975e (none of the above)\" answer candidates as the concatenation of the other options (associated with the same question), as shown above, before inputting the question-choice-evidence combination into the BERT model; (2) the BERT-MC model, a typical fine-tuned BERT multiple-choice prediction model (Xu et al., 2019) described in Section 4.1; and (3) the answer selector, a candidate re-selector which for negation-type questions picks that answer candidate with the lowest score as opposed to that with the highest score (as for other question types). ", |
| "cite_spans": [ |
| { |
| "start": 467, |
| "end": 484, |
| "text": "(Xu et al., 2019)", |
| "ref_id": "BIBREF70" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 9, |
| "end": 17, |
| "text": "Figure 4", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Jointly Judge then Select (JJS)", |
| "sec_num": "2.2.3" |
| }, |
| { |
| "text": "To evaluate the proposed approaches, we constructed a Chinese Social Studies Machine Reading and Question Answering (CSSMRQA) dataset, which is a superset of the CSSMC dataset mentioned above, to assess the capability of different Q&A systems (not just MC questions). This dataset consists of three question types: (1) yes/no questions, which ask whether the given question is a correct statement judged from the supporting evidence; (2) multiple-choice (MC) questions, which include four answer choices from which the correct one is to be chosen (here, this is the CSSMC dataset adopted in this paper); and (3) multiple-selection (MS) questions, which are similar to the multiple-choice questions but can contain more than one correct answer. Below we describe how they are constructed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chinese Social Studies MRQA Dataset Construction", |
| "sec_num": "3." |
| }, |
| { |
| "text": "We first collected lessons for grades 3 to 6 from elementary-school social studies textbooks published in Taiwan. For each lesson, we collected relevant questions from leading publishing houses in Taiwan. We thus obtained 14,103 yes/no questions, 5347 MC questions, and 340 MS questions from a total of 255 lessons. We then annotated the supporting evidence to indicate what information is needed to answer each question. This is described in detail below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus Collection", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We hired two annotators to annotate the supporting evidence for each question. Supporting evidence is the content in the lesson (associated with the given question) which contains just the information necessary to answer the question. In the CSSMRQA dataset, each lesson comprises several paragraphs, and each paragraph comprises several sentences. Supporting evidence consists of one or more sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Supporting Evidence (SE) Annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We used Doccano (Nakayama et al., 2018) , an open-source text annotation tool, as the platform for annotation. Doccano allows the user to highlight supporting words in the text (i.e., those words that provide hints to find the related passage). Given a question and its corresponding answer (also the lesson associated with the question), the annotators highlighted supporting words necessary to answer the question. Usually, these supporting words were words within the given question. Annotators were not allowed to annotate supporting words across sentence splitters or delimiters. Nonetheless, some questions lack suitable supporting evidence in the lesson. For example, students may rely on common sense (instead of textbook context) to answer the question, \"\u73ed\u4e0a\u540c\u5b78\u6709\u4eba\u4e82\u4e1f\u5783\u573e\uff0c\u8eab\u70ba\u885b\u751f \u80a1\u9577\u7684\u5c0f\u7389\u53ef\u4ee5\u600e\u9ebc\u505a\uff1f (1) \u9ed8\u9ed8\u7684\u8ddf\u5728\u4ed6\u5011\u5f8c\u9762\u64bf\u5783\u573e (2) \u52f8\u544a\u4e82\u4e1f\u5783\u573e\u7684\u540c\u5b78\uff0c\u4e26 \u8acb\u4ed6\u5011\u5c07\u5783\u573e\u64bf\u8d77\u4f86 (3) \u6c92\u95dc\u4fc2\uff0c\u7b49\u6253\u6383\u6642\u9593\u518d\u6383\u5c31\u597d\u4e86 (4) \u628a\u5783\u573e\u85cf\u5728\u770b\u4e0d\u898b\u7684\u5730\u65b9 (What can Xiaoyu (the Chief of Health) do when her classmate litters? (1) Pick up trash after them silently; (2) Advise the classmate who litters and ask him/her to pick up the litter; (3) It doesn't matter, just wait until the cleaning time; or (4) Hide litter out of sight)\". In such cases, annotators found no suitable supporting words in the lesson and thus skipped SE annotation. Afterward, sentences that contain marked supporting words were annotated as supporting evidence. Table 3 shows the final results of SE annotation. Figure 5 shows an example of multiple-choice question annotation. Annotators first read both the question (qtext) and the correct answer (answer) from the right-hand side windows, and then highlight supporting words (marked with purple boxes) in the lesson. To prevent annotators from highlighting supporting word regions across sentences, we use special symbols as separators (||| for paragraphs and || for sentences).", |
| "cite_spans": [ |
| { |
| "start": 16, |
| "end": 39, |
| "text": "(Nakayama et al., 2018)", |
| "ref_id": "BIBREF62" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1352, |
| "end": 1359, |
| "text": "Table 3", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 1402, |
| "end": 1410, |
| "text": "Figure 5", |
| "ref_id": "FIGREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Supporting Evidence (SE) Annotation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We conducted experiments on the above CSSMC dataset with the three proposed approaches. Table 4 shows the dataset statistics. For comparison, we used a typical BERT multiple-choice implementation (Xu et al., 2019) as our baseline.", |
| "cite_spans": [ |
| { |
| "start": 196, |
| "end": 213, |
| "text": "(Xu et al., 2019)", |
| "ref_id": "BIBREF70" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 88, |
| "end": 95, |
| "text": "Table 4", |
| "ref_id": "TABREF18" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4." |
| }, |
| { |
| "text": "For the baseline, we used the BERT-MC model, that is, BERT (Devlin et al., 2019) fine-tuned for the multiple-choice task as our baseline, as it is the most widely adopted state-of-the-art model (Xu et al., 2019) . It was built by exporting BERT's final hidden layer into a linear layer and then taking a softmax operation. For details on the BERT-MC model, please see Xu et al. (2019) . The BERT input sequence consists of \"[CLS] SE [SEP] Question [SEP] Option-#i [SEP]\", where Option-#i denotes the i-th option and [CLS] and [SEP] are special tokens representing the classification and the passage separators, respectively, as defined in Devlin et al. (2019) . Figure 6 shows the architecture of the BERT baseline model. ", |
| "cite_spans": [ |
| { |
| "start": 59, |
| "end": 80, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 194, |
| "end": 211, |
| "text": "(Xu et al., 2019)", |
| "ref_id": "BIBREF70" |
| }, |
| { |
| "start": 368, |
| "end": 384, |
| "text": "Xu et al. (2019)", |
| "ref_id": "BIBREF70" |
| }, |
| { |
| "start": 639, |
| "end": 659, |
| "text": "Devlin et al. (2019)", |
| "ref_id": "BIBREF47" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 662, |
| "end": 670, |
| "text": "Figure 6", |
| "ref_id": "FIGREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Baseline: BERT-MC", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "SE is the corresponding shortest passage based on which the system can answer the given question. Given the annotation results described in Section 3.2, we find many questions that involve common-sense reasoning, for which no corresponding SEs can be found in the retrieved lesson. We denote as SE1 that set of questions for which SEs can be found in the retrieved lesson (this is termed GSE1 if it is also associated with gold SEs); the set of remaining questions is SE2. Table 5 shows the statistics for GSE1. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 473, |
| "end": 480, |
| "text": "Table 5", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Retrieved Supporting Evidence (SE) Dataset", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We conducted two sets of experiments on the CSSMC dataset: (i) GSE1, based on SE1 with gold SEs, to compare the QA component performance of different models; and (ii) LSE, based on the whole dataset with all SEs directly retrieved from the Lucene search engine, to compare different approaches under a real-world situation. Each set covers six different models: (1) BERT-MC Only, (2) SJS, (3) SJCS, (4) BERT-MC+Neg, (5) BERT-MC+AllAbv&NonAbv, and (6) BERT-MC+Neg+AllAbv&NonAbv, where BERT-MC Only is the baseline model and Neg and AllAbv&NonAbv denote additional answer-selector and preprocessor modules for the negation and all-of-the-above/none-of-the-above question-types, respectively. We adopted the setting specified in Xu et al. (2019) for BERT training. All other models were trained using the following hyperparameters: (1) a maximum sequence length of 300; (2) a learning rate of 5e-5 with the AdamW optimizer (Loshchilov & Hutter, 2019) ; (3) 3 to 5 epochs. Table 6 compares the accuracy of various approaches; we report test set performance using the settings that corresponded to the best dev set performance.", |
| "cite_spans": [ |
| { |
| "start": 726, |
| "end": 742, |
| "text": "Xu et al. (2019)", |
| "ref_id": "BIBREF70" |
| }, |
| { |
| "start": 920, |
| "end": 947, |
| "text": "(Loshchilov & Hutter, 2019)", |
| "ref_id": "BIBREF61" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 969, |
| "end": 976, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In this scenario we sought to evaluate the QA component performance of six different models on the GSE1 subset (i.e., with gold SEs). The GSE1 column in Table 6 gives the test set accuracy rates of various approaches. As the SJS model has special handling for negation and \"\u4ee5\u4e0a\u7686\u662f (all-of-the-above)\" or \"\u4ee5\u4e0a\u7686\u975e (none-of-the-above)\" questions, it yields better performance than BERT-MC Only (0.862 vs. 0.849). The SJCS model further replaces the \"\u4ee5 \u4e0a \u7686 \u662f (all-of-the-above) \" and \" \u4ee5 \u4e0a \u7686 \u975e (none-of-the-above) \" options with the concatenation of the three other options. However, this degrades the baseline performance significantly, from 0.849 to 0.822. This is because the \"\u4ee5\u4e0a\u7686\u662f (all-of-the-above)\" and Table 7 . Error case of \"BERT-MC+Neg\" on \"GSE1-Neg\" subset.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 153, |
| "end": 160, |
| "text": "Table 6", |
| "ref_id": null |
| }, |
| { |
| "start": 701, |
| "end": 708, |
| "text": "Table 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Jointly Judge then Select (JJS)", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "Question: \u6e05\u671d\u7d71\u6cbb\u81fa\u7063\u6642\u671f\uff0c\u600e\u6a23\u7684\u4eba\u61c9\u8a72\u6bd4\u8f03\u6c92\u6709\u5171\u540c\u7684\u8840\u7de3\uff1f Options: (1)\u53c3\u52a0\u540c\u4e00\u500b\u5b97\u89aa\u6703 (2)\u53c3\u52a0\u540c\u4e00\u500b\u796d\u7940\u516c\u696d (3)\u53c3\u52a0\u540c\u4e00\u500b\u300c\u90ca\u300d (4) \u5728\u540c\u4e00\u5ea7\u5b97\u7960\u796d\u7940\u7956\u5148 Table 8 . Error case of \"BERT-MC+AllAbv&NonAbv\" on \"GSE1-AllAbv&NonAbv\" subset.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 97, |
| "end": 104, |
| "text": "Table 8", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "SEs: \u53e6\u5916\uff0c\u96a8\u8457\u5546\u696d\u8208\u76db\uff0c\u5728\u5e9c\u57ce\u3001\u9e7f\u6e2f\u3001\u824b\u823a\u7b49\u5927\u57ce\u5e02\uff0c\u4e5f\u51fa\u73fe\u7531\u5546\u4eba\u7d44\u6210\u7684\u300c\u90ca\u300d\u3002 \u300c\u90ca\u300d\u985e\u4f3c\u73fe\u4ee3\u540c\u696d\u516c\u6703\uff0c\u6210\u54e1\u9664\u4e86\u7d93\u71df\u8cbf\u6613\u5916\uff0c\u4e5f\u7a4d\u6975\u53c3\u8207\u5730\u65b9\u7684\u516c\u5171\u4e8b\u52d9\u3002", |
| "sec_num": null |
| }, |
| { |
| "text": "Question: \"\u5fd7\u5fe0\u5bb6\u9644\u8fd1\u6709\u4e00\u9593\u5de5\u5ee0\uff0c\u6642\u5e38\u5c07\u672a\u7d93\u8655\u7406\u7684\u6c59\u6c34\u6392\u5165\u6cb3\u5ddd\u4e2d\uff0c\u9019\u6a23\u53ef\u80fd\u6703\u9020 \u6210\u4ec0\u9ebc\u5f8c\u679c\uff1f\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SE:\u5de5\u696d\u751f\u7522\u5982\u679c\u6c92\u6709\u9069\u7576\u8655\u7406\uff0c\u5f88\u5bb9\u6613\u7834\u58de\u5468\u906d\u74b0\u5883\uff0c\u9020\u6210\u7a7a\u6c23\u6c59\u67d3\u3001\u566a\u97f3\u6c59\u67d3\u3001\u6c34 \u8cea\u6c59\u67d3\u3001\u571f\u5730\u6c59\u67d3\u7b49\u3002\u4f8b\u5982\uff1a\u5de5\u696d\u5ee2\u6c34\u6216\u662f\u5bb6\u5ead\u6c59\u6c34\u76f4\u63a5\u6392\u5165\u6cb3\u6d41\uff0c\u4e0d\u50c5\u5371\u5bb3\u6cb3 \u6d41\u751f\u614b\uff0c\u6709\u6bd2\u7269\u8cea\u5982\u679c\u6d41\u5165\u5927\u6d77\uff0c\u901a\u904e\u98df\u7269\u93c8\u9032\u5165\u4eba\u9ad4\uff0c\u66f4\u6703\u56b4\u91cd\u640d\u5bb3\u5065\u5eb7\u3002", |
| "sec_num": null |
| }, |
| { |
| "text": "Options: (1)\u7a7a\u6c23\u6c59\u67d3 (2)\u566a\u97f3 (3)\u6c34\u8cea\u6c59\u67d3 (4)\u4ee5\u4e0a\u7686\u662f that the preprocessor (GSE1-Neg column) and answer-selector (GSE1-AllAbv&NonAbv column) modules effectively enhance BERT-MC on these two subsets (from 20% to 40%, and from 64.3% to 83.9%, respectively). The above experiments sufficiently demonstrate the effectiveness of our proposed approaches (unnecessary combinations are marked \"NA\" in Table 6 ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 379, |
| "end": 386, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "SE:\u5de5\u696d\u751f\u7522\u5982\u679c\u6c92\u6709\u9069\u7576\u8655\u7406\uff0c\u5f88\u5bb9\u6613\u7834\u58de\u5468\u906d\u74b0\u5883\uff0c\u9020\u6210\u7a7a\u6c23\u6c59\u67d3\u3001\u566a\u97f3\u6c59\u67d3\u3001\u6c34 \u8cea\u6c59\u67d3\u3001\u571f\u5730\u6c59\u67d3\u7b49\u3002\u4f8b\u5982\uff1a\u5de5\u696d\u5ee2\u6c34\u6216\u662f\u5bb6\u5ead\u6c59\u6c34\u76f4\u63a5\u6392\u5165\u6cb3\u6d41\uff0c\u4e0d\u50c5\u5371\u5bb3\u6cb3 \u6d41\u751f\u614b\uff0c\u6709\u6bd2\u7269\u8cea\u5982\u679c\u6d41\u5165\u5927\u6d77\uff0c\u901a\u904e\u98df\u7269\u93c8\u9032\u5165\u4eba\u9ad4\uff0c\u66f4\u6703\u56b4\u91cd\u640d\u5bb3\u5065\u5eb7\u3002", |
| "sec_num": null |
| }, |
| { |
| "text": "The remaining errors in the GSE1-Neg and GSE1-AllAbv&NonAbv subsets are mainly due to that answering those questions requires further inference capability. Table 7 shows that we need to know that \"\u5546\u4eba (businessmen)\" are people without \"\u5171\u540c\u7684\u8840\u7de3 (blood relations)\". Similarly, Table 8 shows that we need to know that \"\u672a\u7d93\u8655\u7406\u7684\u6c59\u6c34\u6392\u5165\u6cb3\u5ddd (untreated sewage discharged into the river)\" causes \"\u6c34\u8cea\u6c59\u67d3 (water pollution)\".", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 156, |
| "end": 163, |
| "text": "Table 7", |
| "ref_id": null |
| }, |
| { |
| "start": 272, |
| "end": 279, |
| "text": "Table 8", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "SE:\u5de5\u696d\u751f\u7522\u5982\u679c\u6c92\u6709\u9069\u7576\u8655\u7406\uff0c\u5f88\u5bb9\u6613\u7834\u58de\u5468\u906d\u74b0\u5883\uff0c\u9020\u6210\u7a7a\u6c23\u6c59\u67d3\u3001\u566a\u97f3\u6c59\u67d3\u3001\u6c34 \u8cea\u6c59\u67d3\u3001\u571f\u5730\u6c59\u67d3\u7b49\u3002\u4f8b\u5982\uff1a\u5de5\u696d\u5ee2\u6c34\u6216\u662f\u5bb6\u5ead\u6c59\u6c34\u76f4\u63a5\u6392\u5165\u6cb3\u6d41\uff0c\u4e0d\u50c5\u5371\u5bb3\u6cb3 \u6d41\u751f\u614b\uff0c\u6709\u6bd2\u7269\u8cea\u5982\u679c\u6d41\u5165\u5927\u6d77\uff0c\u901a\u904e\u98df\u7269\u93c8\u9032\u5165\u4eba\u9ad4\uff0c\u66f4\u6703\u56b4\u91cd\u640d\u5bb3\u5065\u5eb7\u3002", |
| "sec_num": null |
| }, |
| { |
| "text": "Since the gold SE is not available for real-world applications, this scenario compares the system performance of different models in a real-world situation. That is, we evaluated various models with all the SEs retrieved from a search engine (i.e., Apache Lucene (https://lucene.apache.org/)). Furthermore, to support those questions for which no associated SEs from the lessons (i.e., the SE2 subset), we used Wikipedia as an external knowledge resource to provide SEs when possible. We first used Lucene to search the Taiwan elementary-school social studies textbook and Wikipedia separately to yield two different SEs, after which we constructed a fused SE by concatenating these two SEs with the format \"Textbook-SE [SEP] Wiki-SE\" where Textbook-SE and Wiki-SE denote the two SEs retrieved from the textbook and Wikipedia, respectively. Question: \"\u5c0f\u82b1\u5728\u8d85\u5e02\u8cb7\u5230\u904e\u671f\u7684\u9905\u4e7e\uff0c\u8acb\u554f\u8a72\u8d85\u5e02\u7684\u8ca9\u552e\u884c\u70ba\u9055 \u53cd\u4ec0\u9ebc\u6cd5\u5f8b\uff1f\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "LSE (SE1+SE2 with all SEs retrieved from Lucene)", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "Options: (1)\u5211\u6cd5 (2)\u61b2\u6cd5 (3)\u6559\u80b2\u57fa\u672c\u6cd5 (4)\u98df\u54c1\u5b89\u5168\u885b\u751f\u7ba1\u7406\u6cd5 Experimental results (the LSE column in Table 6 ) show that both the preprocessor and the answer selector effectively supplement BERT-MC; performance is improved further when they are jointly adopted (3.3% = 72.5% -69.2%). Furthermore, the accuracy of the BERT-MC only model on LSE is significantly lower than that on GSE1 (69.2% vs. 84.9%), which clearly illustrates that extracting good SEs is essential in QA tasks. Last, to show the influence of incorporating Wikipedia, we conducted an experiment in which we used only Lucene to search the textbook. The BERT-MC+Neg+AllAbv&NonAbv model now drops to 70.4% (not shown in Table 6 ) from 72.5%, which shows that Wikipedia provides the required common sense for some cases.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 83, |
| "end": 90, |
| "text": "Table 6", |
| "ref_id": null |
| }, |
| { |
| "start": 667, |
| "end": 674, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "LSE (SE1+SE2 with all SEs retrieved from Lucene)", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "We randomly selected 40 error cases from the test set of the BERT-MC+Neg+AllAbv&NonAbv model under the \"all SEs retrieved from Lucene\" scenario. We found that all errors come from two sources: (1) the correct support evidence was not retrieved (52%), and (2) the answer requires deep inference (48%). Table 9 shows an example for each category. For the first example, the retrieved SE is irrelevant to the question; our model thus fails to produce the correct answer. The second example illustrates that the model requires further inference capability to know that both \"\u725b\u5976\u7684\u4fdd\u5b58\u671f\u9650\u904e\u4e86\u6c92 (Has the milk expired?)\" and \"\u5728\u8d85\u5e02\u8cb7\u5230\u904e\u671f\u7684\u9905\u4e7e (I bought expired cookies in the supermarket)\" are similar events related to \"\u98df\u54c1\u5b89\u5168\u885b\u751f\u7ba1\u7406\u6cd5 (Act Governing Food Safety and Sanitation)\".", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 301, |
| "end": 308, |
| "text": "Table 9", |
| "ref_id": "TABREF20" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Error Analysis and Discussion", |
| "sec_num": "5." |
| }, |
| { |
| "text": "Before 2015, most work on entailment judgment adopted statistical approaches (Kouylekov & Magnini, 2005; Heilman & Smith, 2010) . In subsequent work, neural network models were widely adopted due to the availability of large datasets such as RACE (Lai et al., 2017) and SNLI (Bowman et al., 2015) . Parikh et al. (2017) propose the first alignment-and-attention mechanism, achieving state-of-the-art (SOTA) results on the SNLI dataset. Chen et al. (2017) further propose a sequential inference model based on chain LSTMs which outperforms previous models. In recent work, pre-trained language models such as BERT (Devlin et al., 2019) , XLNET (Yang et al., 2019) , RoBERTa and ALBERT (Lan et al., 2019) yield superior performance on MC RC tasks. However, these results are obtained mainly by utilizing surface features (Jiang & Marneffe, 2019) . Besides, Zhang et al. (2020) propose a dual co-matching network to model relationships among passages, questions, and answer candidates to achieve SOTA results for MC questions. Also, Jin et al. (2020) propose two-stage transfer learning for coarse-tuning on out-of-domain datasets and fine-tuning on larger in-domain datasets to further improve performance. In comparison with those previous approaches, instead of adopting a new inference NN, our proposed approaches supplement the original BERT with additional modules to address two specific problems that BERT handles poorly.", |
| "cite_spans": [ |
| { |
| "start": 77, |
| "end": 104, |
| "text": "(Kouylekov & Magnini, 2005;", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 105, |
| "end": 127, |
| "text": "Heilman & Smith, 2010)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 247, |
| "end": 265, |
| "text": "(Lai et al., 2017)", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 275, |
| "end": 296, |
| "text": "(Bowman et al., 2015)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 299, |
| "end": 319, |
| "text": "Parikh et al. (2017)", |
| "ref_id": null |
| }, |
| { |
| "start": 436, |
| "end": 454, |
| "text": "Chen et al. (2017)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 613, |
| "end": 634, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 643, |
| "end": 662, |
| "text": "(Yang et al., 2019)", |
| "ref_id": "BIBREF71" |
| }, |
| { |
| "start": 684, |
| "end": 702, |
| "text": "(Lan et al., 2019)", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 819, |
| "end": 843, |
| "text": "(Jiang & Marneffe, 2019)", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 855, |
| "end": 874, |
| "text": "Zhang et al. (2020)", |
| "ref_id": "BIBREF72" |
| }, |
| { |
| "start": 1030, |
| "end": 1047, |
| "text": "Jin et al. (2020)", |
| "ref_id": "BIBREF51" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6." |
| }, |
| { |
| "text": "We present several novel approaches to supplement BERT with additional modules to address problems with three specific types of questions that BERT-MC handles poorly (i.e., negation, all-of-the-above, and none-of-the-above). The proposed approach constitutes a new way to enhance a complicated DNN model with additional modules to pinpoint problems found in error analysis. Experimental results show the proposed approaches effectively improve performance, and thus demonstrate the feasibility of supplementing BERT with additional modules to fix specific problems. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7." |
| }, |
| { |
| "text": "This index covers all technical items---papers, correspondence, reviews, etc.---that appeared in this periodical during 2021", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Vol. 26", |
| "sec_num": null |
| }, |
| { |
| "text": "The Author Index contains the primary entry for each item, listed under the first author's name. The primary entry includes the coauthors' names, the title of paper or other item, and its location, specified by the publication volume, number, and inclusive pages. The Subject Index contains entries describing the item under all appropriate subject headings, plus the first author's name, the publication volume, number, and inclusive pages. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Vol. 26", |
| "sec_num": null |
| }, |
| { |
| "text": "Matlab toolbox for DNN based speech separation .Retrieved from http://web.cse.ohio-state.edu/pnl/DNN_toolbox/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "b GSE1-Neg: Only negation-type questions within GSE1.c GSE1-AllAbv&NonAbv: Only AllAbv&NonAbv-type questions within GSE1.d LSE: <SE1+SE2> with all SEs retrieved from the Lucene search engine.\"\u4ee5\u4e0a\u7686\u975e (none-of-the-above)\" options are closely related to the other three options. However, as it considers the concatenation option and the other three options independently, or separately, without using a complicated decision tree (specified in Figure 3 ), this approach is unable to take such correlation into account.The JJS model (i.e., the last row in Table 6 ) addresses this problem by considering all of the options together simultaneously. Table 6 shows that it considerably outperforms the SJCS model by 5.7% (87.9% -82.2%) on the test set, which shows that jointly processing all options together is essential after the concatenation step. The BERT-MC+Neg and BERT-MC+AllAbv&NonAbv models are also evaluated as an ablation analysis. Table 6 indicates they also outperform the BERT-MC only baseline by 2.1% (87.0% -84.9%) and 3.0% (87.9% -84.9%) on the test set, respectively, which shows the necessity of both the preprocessor and answer-selector modules.Last, to explore the effects of the proposed approaches on specific question types, we conducted two additional experiments on two GSE1 subsets: (1) the Neg-type only subset, which contains only negation questions, to compare the performance between the BERT-MC only and BERT-MC+Neg approaches to evaluate the effectiveness of the answer-selector module; (2) the AllAbv&NonAbv only subset, which contains only AllAbv or NonAbv questions, to compare the BERT-MC only and BERT-MC+AllAbv&NonAbv approaches to evaluate the effectiveness of the proposed preprocessor. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 438, |
| "end": 446, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 549, |
| "end": 556, |
| "text": "Table 6", |
| "ref_id": null |
| }, |
| { |
| "start": 641, |
| "end": 648, |
| "text": "Table 6", |
| "ref_id": null |
| }, |
| { |
| "start": 936, |
| "end": 943, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| }, |
| { |
| "text": "Please send application to:The Association for Computational Linguistics and Chinese Language Processing Institute of Information Science, Academia Sinica 1F., No. 34, Ln. 3, Sec. 1, Jiuzhuang St., Nankang Dist., Taipei City, 115022, Taiwan, R.O.C. payment\uff1a Credit cards(please fill in the order form), cheque, or money orders. NT$ = \u5408\u8a08", |
| "cite_spans": [ |
| { |
| "start": 139, |
| "end": 248, |
| "text": "Academia Sinica 1F., No. 34, Ln. 3, Sec. 1, Jiuzhuang St., Nankang Dist., Taipei City, 115022, Taiwan, R.O.C.", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "To Register\uff1a", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Improving the accuracy using pre-trained word embeddings on deep neural networks for Turkish text classification", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Aydo\u011fan", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Karci", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Physica A-statistical Mechanics and Its Applications", |
| "volume": "541", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aydo\u011fan, M., & Karci, A. (2020). Improving the accuracy using pre-trained word embeddings on deep neural networks for Turkish text classification. Physica A-statistical Mechanics and Its Applications, 541, 123288.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Amazon Review Classification and Sentiment Analysis", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Bhatt", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Patel", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Chheda", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Gawande", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Journal of Computer Science and Information Technologies", |
| "volume": "6", |
| "issue": "6", |
| "pages": "5107--5110", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bhatt, A., Patel, A., Chheda, H., & Gawande, K. (2015). Amazon Review Classification and Sentiment Analysis. International Journal of Computer Science and Information Technologies, 6(6), 5107-5110.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Sentiment Analysis of YouTube Video Comments Using Deep Neural Networks", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "A L" |
| ], |
| "last": "Cunha", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "C" |
| ], |
| "last": "Costa", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "A C" |
| ], |
| "last": "Pacheco", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 18th International Conference on Artificial Intelligence and Soft Computing (ICAISC)", |
| "volume": "", |
| "issue": "", |
| "pages": "561--570", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/978-3-030-20912-4_51" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cunha, A.A.L., Costa, M.C., & Pacheco, M.A.C. (2019). Sentiment Analysis of YouTube Video Comments Using Deep Neural Networks. In Proceedings of the 18th International Conference on Artificial Intelligence and Soft Computing (ICAISC), 561-570. https://doi.org/10.1007/978-3-030-20912-4_51", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Comparing BERT against traditional machine learning text classification", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Gonz\u00e1lez-Carvajal", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [ |
| "C" |
| ], |
| "last": "Garrido-Merch\u00e1n", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2005.13012" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gonz\u00e1lez-Carvajal, S., & Garrido-Merch\u00e1n, E.C. (2020). Comparing BERT against traditional machine learning text classification. arXiv preprint arXiv:2005.13012", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Deep learning approach for sentiment analysis of short texts", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Hassan", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mahmood", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 3rd international conference on control, automation and robotics", |
| "volume": "", |
| "issue": "", |
| "pages": "705--710", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICCAR.2017.7942788" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hassan, A., & Mahmood, A. (2017). Deep learning approach for sentiment analysis of short texts. In Proceedings of the 3rd international conference on control, automation and robotics (ICCAR 2017), 705-710. https://doi.org/10.1109/ICCAR.2017.7942788", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Cross-domain sentiment analysis: An empirical investigation", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Heredia", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "M" |
| ], |
| "last": "Khoshgoftaar", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Prusa", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Crawford", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the IEEE 17th International Conference on Information Reuse and Integration", |
| "volume": "", |
| "issue": "", |
| "pages": "160--165", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/IRI.2016.28" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heredia, B., Khoshgoftaar, T. M., Prusa, J., & Crawford, M. (2016). Cross-domain sentiment analysis: An empirical investigation. In Proceedings of the IEEE 17th International Conference on Information Reuse and Integration (IRI 2016), 160-165. https://doi.org/10.1109/IRI.2016.28", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "E" |
| ], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [ |
| "L" |
| ], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1802.05365" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peters, M. E., Neumann, M., Iyyer, M., Gardner, M., Clark, C., Lee, K., & Zettlemoyer. L. (2018). Deep contextualized word representations. arXiv preprint arXiv:1802.05365.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Improving the Accuracy of Pre-trained Word Embeddings for Sentiment Analysis", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "M" |
| ], |
| "last": "Rezaeinia", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ghodsi", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Rahmani", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1711.08609" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rezaeinia, S.M., Ghodsi, A., & Rahmani, R. (2017). Improving the Accuracy of Pre-trained Word Embeddings for Sentiment Analysis. arXiv preprint arXiv:1711.08609.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Perceptual evaluation of speech quality (PESQ) -a new method for speech quality assessment of telephone networks and codecs", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "W" |
| ], |
| "last": "Rix", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "G" |
| ], |
| "last": "Beerends", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "P" |
| ], |
| "last": "Hollier", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "P" |
| ], |
| "last": "Hekstra", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of of 26th IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "749--752", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rix, A. W., Beerends, J. G., Hollier, M. P., & Hekstra, A. P. (2001). Perceptual evaluation of speech quality (PESQ) -a new method for speech quality assessment of telephone networks and codecs. In Proceedings of of 26th IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2001), 749-752.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Binary and ratio time-frequency masks for robust speech recognition", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Srinivasan", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Roman", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Speech Communications", |
| "volume": "48", |
| "issue": "11", |
| "pages": "1486--1501", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.specom.2006.09.003" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Srinivasan, S., Roman, N., & Wang, D. (2006). Binary and ratio time-frequency masks for robust speech recognition. Speech Communications, 48(11), 1486-1501. https://doi.org/10.1016/j.specom.2006.09.003", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "An Algorithm for Intelligibility Prediction of Time-Frequency Weighted Noisy Speech", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "H" |
| ], |
| "last": "Taal", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "C" |
| ], |
| "last": "Hendriks", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Heusdens", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Jensen", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "IEEE Transactions on Audio, Speech, and Language Processing", |
| "volume": "19", |
| "issue": "7", |
| "pages": "2125--2136", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taal, C. H., Hendriks, R. C., Heusdens, R., & Jensen, J. (2011). An Algorithm for Intelligibility Prediction of Time-Frequency Weighted Noisy Speech. IEEE Transactions on Audio, Speech, and Language Processing, 19(7), 2125-2136.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "On ideal binary mask as the computational goal of auditory scene analysis", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Speech Separation by Humans and Machines", |
| "volume": "", |
| "issue": "", |
| "pages": "181--197", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/0-387-22794-6_12" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang, D. (2005). On ideal binary mask as the computational goal of auditory scene analysis. In: Divenyi P. (eds) Speech Separation by Humans and Machines, (pp. 181-197). Springer. https://doi.org/10.1007/0-387-22794-6_12", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Suppression by selecting wavelets for feature compression in distributed speech recognition", |
| "authors": [ |
| { |
| "first": "S.-S", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Tsao", |
| "suffix": "" |
| }, |
| { |
| "first": "J.-W", |
| "middle": [], |
| "last": "Hung", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "IEEE/ACM Trans. on Audio, Speech, and Language Processing", |
| "volume": "26", |
| "issue": "3", |
| "pages": "564--579", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang, S.-S., Lin, P., Tsao, Y., Hung, J.-W., & Su, B. (2018). Suppression by selecting wavelets for feature compression in distributed speech recognition. IEEE/ACM Trans. on Audio, Speech, and Language Processing, 26(3), 564-579.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "On training targets for supervised speech separation", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Narayanan", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Speech, and Language Processing", |
| "volume": "22", |
| "issue": "", |
| "pages": "1849--1858", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/TASLP.2014.2352935" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang, Y., Narayanan, A., & Wang, D. (2014). On training targets for supervised speech separation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 22(12), 1849-1858. https://doi.org/10.1109/TASLP.2014.2352935", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Complex ratio masking for monaural speech separation", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "S" |
| ], |
| "last": "Williamson", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "IEEE/ACM Transactions on Audio, Speech, and Language Processing", |
| "volume": "24", |
| "issue": "3", |
| "pages": "483--492", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/TASLP.2015.2512042" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Williamson, D.S., Wang, Y., & Wang, D. (2016). Complex ratio masking for monaural speech separation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 24(3), 483-492. https://doi.org/10.1109/TASLP.2015.2512042", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Net\uff0c\u6700\u5f8c\uff0c\u65bc\u89e3\u78bc\u5c64\u4e2d\u6dfb\u52a0 \u7b2c\u4e8c\u500b\u6ce8\u610f\u529b\u6a5f\u5236\u6709\u52a9\u65bc\u6a21\u578b\u5feb\u901f\u5f15\u767c\u6ce8\u610f\u529b\u5c0d\u9f4a\u3002\u672a\u4f86\u7684\u7814\u7a76\u65b9\u5411\u6703\u52a0\u5165\u66f4\u591a\u8cc7\u8a0a\u4f86 \u5e6b\u52a9\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u5728 TTS \u7684\u6548\u80fd\u4e0a\u6539\u9032\uff0c\u4f8b\u5982\uff1a\u97f3\u97fb(Prosody)\u8cc7\u8a0a\u3001\u767c\u97f3(Articulation) \u8cc7\u8a0a\u3002", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "\u738b\u8056\u582f\u8207\u9ec3\u5955\u6b3d", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "\u738b\u8056\u582f\u8207\u9ec3\u5955\u6b3d \u5347\u5408\u6210\u8a9e\u97f3\u7684\u8a9e\u8005\u76f8\u4f3c\u5ea6\u4ee5\u53ca\u8a9e\u97f3\u54c1\u8cea\u7686\u512a\u65bc\u539f\u59cb\u7684 Post-Net\uff0c\u6700\u5f8c\uff0c\u65bc\u89e3\u78bc\u5c64\u4e2d\u6dfb\u52a0 \u7b2c\u4e8c\u500b\u6ce8\u610f\u529b\u6a5f\u5236\u6709\u52a9\u65bc\u6a21\u578b\u5feb\u901f\u5f15\u767c\u6ce8\u610f\u529b\u5c0d\u9f4a\u3002\u672a\u4f86\u7684\u7814\u7a76\u65b9\u5411\u6703\u52a0\u5165\u66f4\u591a\u8cc7\u8a0a\u4f86 \u5e6b\u52a9\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u5728 TTS \u7684\u6548\u80fd\u4e0a\u6539\u9032\uff0c\u4f8b\u5982\uff1a\u97f3\u97fb(Prosody)\u8cc7\u8a0a\u3001\u767c\u97f3(Articulation) \u8cc7\u8a0a\u3002", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Location-relative attention mechanisms for robust long-form speech synthesis", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Battenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "J" |
| ], |
| "last": "Skerry-Ryan", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Mariooryad", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Stanton", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Kao", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Shannon", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Bagby", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "6194--6198", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Battenberg, E., Skerry-Ryan, R. J., Mariooryad, S., Stanton, D., Kao, D., Shannon, M., & Bagby, T. (2020). Location-relative attention mechanisms for robust long-form speech synthesis. In Proceedings of ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 6194-6198.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Investigating on incorporating pretrained and learnable speaker representations for multi-speaker multistyle text-to-speech", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "M" |
| ], |
| "last": "Chien", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "H" |
| ], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "C" |
| ], |
| "last": "Hsu", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "8588--8592", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chien, C. M., Lin, J. H., Huang, C. Y., Hsu, P. C., & Lee, H. Y. (2021). Investigating on incorporating pretrained and learnable speaker representations for multi-speaker multi- style text-to-speech. In Proceedings of ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 8588-8592.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Attention-based models for speech recognition", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Chorowski", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Serdyuk", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1506.07503" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chorowski, J., Bahdanau, D., Serdyuk, D., Cho, K., & Bengio, Y. (2015). Attention-based models for speech recognition. arXiv preprint arXiv:1506.07503.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "One-shot voice conversion by separating speaker and content representations with instance normalization", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "C" |
| ], |
| "last": "Chou", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "C" |
| ], |
| "last": "Yeh", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1904.05742" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chou, J. C., Yeh, C. C., & Lee, H. Y. (2019). One-shot voice conversion by separating speaker and content representations with instance normalization. arXiv preprint arXiv:1904.05742.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Zeroshot multi-speaker text-to-speech with state-of-the-art neural speaker embeddings", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Cooper", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "I" |
| ], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Yasuda", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Yamagishi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "6184--6188", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cooper, E., Lai, C. I., Yasuda, Y., Fang, F., Wang, X., Chen, N., & Yamagishi, J. (2020). Zero- shot multi-speaker text-to-speech with state-of-the-art neural speaker embeddings. In Proceedings of ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 6184-6188.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Denoising diffusion probabilistic models", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Ho", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Abbeel", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2006.11239" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ho, J., Jain, A., & Abbeel, P. (2020). Denoising diffusion probabilistic models. arXiv preprint arXiv:2006.11239.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Transfer learning from speaker verification to multispeaker text-to-speech synthesis", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "J" |
| ], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [ |
| "L" |
| ], |
| "last": "Moreno", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1806.04558" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jia, Y., Zhang, Y., Weiss, R. J., Wang, Q., Shen, J., Ren, F., Chen, Z., Nguyen, P., Pang, R., Moreno, I. L., & Wu, Y. (2018). Transfer learning from speaker verification to multispeaker text-to-speech synthesis. arXiv preprint arXiv:1806.04558.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Stargan-vc: Non-parallel many-tomany voice conversion using star generative adversarial networks", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Kameoka", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Kaneko", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Tanaka", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Hojo", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of 2018 IEEE Spoken Language Technology Workshop (SLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "266--273", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kameoka, H., Kaneko, T., Tanaka, K., & Hojo, N. (2018). Stargan-vc: Non-parallel many-to- many voice conversion using star generative adversarial networks. In Proceedings of 2018 IEEE Spoken Language Technology Workshop (SLT), 266-273.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Cyclegan-vc: Non-parallel voice conversion using cycleconsistent adversarial networks", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Kaneko", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Kameoka", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of 2018 26th European Signal Processing Conference (EUSIPCO)", |
| "volume": "", |
| "issue": "", |
| "pages": "2100--2104", |
| "other_ids": { |
| "DOI": [ |
| "10.23919/EUSIPCO.2018.8553236" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaneko, T., & Kameoka, H. (2018). Cyclegan-vc: Non-parallel voice conversion using cycle- consistent adversarial networks. In Proceedings of 2018 26th European Signal Processing Conference (EUSIPCO), 2100-2104. https://doi.org/10.23919/EUSIPCO.2018.8553236", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Hifi-gan: Generative adversarial networks for efficient and high fidelity speech synthesis", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kong", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Bae", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2010.05646.\u6574\u5408\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u8207\u5f8c\u7f6e\u6ffe\u6ce2\u5668\u65bc\u63d0\u5347\u500b\u4eba\u5316\u5408\u6210\u8a9e\u97f3\u4e4b\u8a9e\u8005\u76f8\u4f3c\u5ea665" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kong, J., Kim, J., & Bae, J. (2020). Hifi-gan: Generative adversarial networks for efficient and high fidelity speech synthesis. arXiv preprint arXiv:2010.05646. \u6574\u5408\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u8207\u5f8c\u7f6e\u6ffe\u6ce2\u5668\u65bc\u63d0\u5347\u500b\u4eba\u5316\u5408\u6210\u8a9e\u97f3\u4e4b\u8a9e\u8005\u76f8\u4f3c\u5ea6 65", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Diffwave: A versatile diffusion model for audio synthesis", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Kong", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Ping", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Catanzaro", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2009.09761" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kong, Z., Ping, W., Huang, J., Zhao, K., & Catanzaro, B. (2020). Diffwave: A versatile diffusion model for audio synthesis. arXiv preprint arXiv:2009.09761.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Neural speech synthesis with transformer network", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "33", |
| "issue": "", |
| "pages": "6706--6713", |
| "other_ids": { |
| "DOI": [ |
| "10.1609/aaai.v33i01.33016706" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li, N., Liu, S., Liu, Y., Zhao, S., & Liu, M. (2019, July). Neural speech synthesis with transformer network. In Proceedings of the AAAI Conference on Artificial Intelligence, 33(01), 6706-6713. https://doi.org/10.1609/aaai.v33i01.33016706", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Natural tts synthesis by conditioning wavenet on mel spectrogram predictions", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "J" |
| ], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Jaitly", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Skerrv-Ryan", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "A" |
| ], |
| "last": "Saurous", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Agiomvrgiannakis", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4779--4783", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICASSP.2018.8461368" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shen, J., Pang, R., Weiss, R. J., Schuster, M., Jaitly, N., Yang, Z., Chen, Z., Zhang, Y., Wang, Y., Skerrv-Ryan, R., Saurous, R. A., Agiomvrgiannakis, Y., & Wu, Y. (2018). Natural tts synthesis by conditioning wavenet on mel spectrogram predictions. In Proceedings of 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP),4779-4783. https://doi.org/10.1109/ICASSP.2018.8461368", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "X-vectors: Robust dnn embeddings for speaker recognition", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Snyder", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Garcia-Romero", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Sell", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "5329--5333", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICASSP.2018.8461375" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Snyder, D., Garcia-Romero, D., Sell, G., Povey, D., & Khudanpur, S. (2018). X-vectors: Robust dnn embeddings for speaker recognition. In Proceedings of 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 5329-5333. https://doi.org/10.1109/ICASSP.2018.8461375", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Autovc: Zero-shot voice style transfer with only autoencoder loss", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hasegawa-Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 36th International Conference on Machine Learning(PMLR)", |
| "volume": "", |
| "issue": "", |
| "pages": "5210--5219", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qian, K., Zhang, Y., Chang, S., Yang, X., & Hasegawa-Johnson, M. (2019). Autovc: Zero-shot voice style transfer with only autoencoder loss. In Proceedings of the 36th International Conference on Machine Learning(PMLR), 5210-5219.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Fastspeech 2: Fast and high-quality end-to-end text to speech", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2006.04558" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ren, Y., Hu, C., Tan, X., Qin, T., Zhao, S., Zhao, Z., & Liu, T. Y. (2020). Fastspeech 2: Fast and high-quality end-to-end text to speech. arXiv preprint arXiv:2006.04558.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 31st International Conference on Neural Information Processing Systems(NIPS'17", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., & Polosukhin, I. (2017). Attention is all you need. In Proceedings of the 31st International Conference on Neural Information Processing Systems(NIPS'17), 5998-6008.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Forward attention in sequence-to-sequence acoustic modeling for speech synthesis", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "X" |
| ], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [ |
| "H" |
| ], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [ |
| "R" |
| ], |
| "last": "Dai", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4789--4793", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICASSP.2018.8462020" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhang, J. X., Ling, Z. H., & Dai, L. R. (2018, April). Forward attention in sequence-to-sequence acoustic modeling for speech synthesis. In Proceedings of 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 4789-4793. https://doi.org/10.1109/ICASSP.2018.8462020", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "A large annotated corpus for learning natural language inference", |
| "authors": [ |
| { |
| "first": "Reference", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "R" |
| ], |
| "last": "Angeli", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "D" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "632--642", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reference Bowman, S. R., Angeli, G., Potts, C. & Manning, C. D. (2015). A large annotated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, 632-642.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Neural Reading Comprehension and Beyond. (Doctoral Dissertation)", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen, D. (2018). Neural Reading Comprehension and Beyond. (Doctoral Dissertation). Stanford Univ..", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Enhanced LSTM for Natural Language Inference", |
| "authors": [ |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Inkpen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Answering Chinese Elementary School Social Studies Multiple Choice Questions 83 Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1657--1668", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen, Q., Zhu, X., Ling, Z., Wei, S., Jiang, H. & Inkpen, D. (2017). Enhanced LSTM for Natural Language Inference. In Proceedings of the 55th Annual Meeting of the Answering Chinese Elementary School Social Studies Multiple Choice Questions 83 Association for Computational Linguistics, 1657-1668.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "The PASCAL Recognising Textual Entailment Challenge", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Dagan", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Glickman", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Magnini", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Machine Learning Challenges. Evaluating Predictive Uncertainty, Visual Object Classification, and Recognising Tectual Entailment", |
| "volume": "", |
| "issue": "", |
| "pages": "177--190", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/11736790_9" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dagan, I., Glickman, O., & Magnini, B. (2005) The PASCAL Recognising Textual Entailment Challenge. In Machine Learning Challenges. Evaluating Predictive Uncertainty, Visual Object Classification, and Recognising Tectual Entailment, Springer, 177-190. https://doi.org/10.1007/11736790_9", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "W" |
| ], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Devlin, J., Chang, M. W., Lee, K. & Toutanova, K. (2019). BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), 4171-4186. https://doi.org/10.18653/v1/N19-1423", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Tree edit models for recognizing textual entailments, paraphrases, and answers to questions", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Heilman", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1011--1019", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heilman, M. & Smith, N. A. (2010). Tree edit models for recognizing textual entailments, paraphrases, and answers to questions. In Proceedings of Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics, 1011-1019.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Evaluating BERT for natural language inference: A case study on the CommitmentBank", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "M.-C", |
| "middle": [ |
| "D" |
| ], |
| "last": "Marneffe", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "6086--6091", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiang, N. & Marneffe, M.-C. D. (2019). Evaluating BERT for natural language inference: A case study on the CommitmentBank. In Proceedings of the Conference on Empirical Methods in Natural Language Processing, 6086-6091.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "MMM: Multi-stage multi-task learning for multi-choice reading comprehension", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Kao", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Chung", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Hakkani-Tur", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "34", |
| "issue": "", |
| "pages": "8010--8017", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jin, D., Gao, S., Kao, J. Y., Chung, T., & Hakkani-tur, D. (2020). MMM: Multi-stage multi-task learning for multi-choice reading comprehension. In Proceedings of the AAAI Conference on Artificial Intelligence, 34(05), 8010-8017.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Recognizing textual entailment with tree edit distance algorithms", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Kouylekov", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Magnini", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the First Challenge Workshop Recognising Textual Entailment", |
| "volume": "", |
| "issue": "", |
| "pages": "17--20", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kouylekov, M. & Magnini, B. (2005). Recognizing textual entailment with tree edit distance algorithms. In Proceedings of the First Challenge Workshop Recognising Textual Entailment 2005, 17-20.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "RACE: Large-scale ReAding Comprehension Dataset From Examinations", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "785--794", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lai, G., Xie, Q., Liu, H., Yang, Y. & Hovy, E. (2017). RACE: Large-scale ReAding Comprehension Dataset From Examinations. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, 785-794.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Soricut", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.11942" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lan, Z., Chen, M., Goodman, S., Gimpel, K., Sharma, P. & Soricut, R. (2019). ALBERT: A Lite BERT for Self-supervised Learning of Language Representations. arXiv preprint arXiv:1909.11942.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "Answering Chinese Elementary School Social Study Multiple Choice Questions", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "C" |
| ], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Su", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 International Conference on Technologies and Applications of Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lee, D., Liang, C. C. & Su, K. Y. (2020). Answering Chinese Elementary School Social Study Multiple Choice Questions. In Proceedings of the 2020 International Conference on Technologies and Applications of Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "How Fast can BERT Learn Simple Natural Language Inference?", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [ |
| "C" |
| ], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Su", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "626--633", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2021.eacl-main.51" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lin, Y.C. & Su, K.Y. (2021). How Fast can BERT Learn Simple Natural Language Inference? In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics, 626-633. https://doi.org/10.18653/v1/2021.eacl-main.51", |
| "links": null |
| }, |
| "BIBREF59": { |
| "ref_id": "b59", |
| "title": "RoBERTa: A Robustly Optimized BERT Pretraining Approach", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.11692" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liu, Y., Ott, M., Goyal, N., Du, J., Joshi, M., Chen, D., Levy, O., Lewis, M., Zettlemoyer, L. & Stoyanov, V. (2019). RoBERTa: A Robustly Optimized BERT Pretraining Approach. arXiv preprint arXiv:1907.11692.", |
| "links": null |
| }, |
| "BIBREF60": { |
| "ref_id": "b60", |
| "title": "Neural Machine Reading Comprehension", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Methods and Trends. Applied Sciences", |
| "volume": "9", |
| "issue": "18", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.3390/app9183698" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liu, S., Zhang, X., Zhang, S., Wang, H., & Zhang, W. (2019). Neural Machine Reading Comprehension: Methods and Trends. Applied Sciences, 9(18), 3698. https://doi.org/10.3390/app9183698", |
| "links": null |
| }, |
| "BIBREF61": { |
| "ref_id": "b61", |
| "title": "Decoupled Weight Decay Regularization", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Loshchilov", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Hutter", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Loshchilov, I. & Hutter, F. (2019). Decoupled Weight Decay Regularization. In Proceedings of International Conference on Learning Representations 2019.", |
| "links": null |
| }, |
| "BIBREF62": { |
| "ref_id": "b62", |
| "title": "Doccano: Text annotation tool for human", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Nakayama", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Kubo", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kamura", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Taniguchi", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nakayama, H., Kubo, T., Kamura, J., Taniguchi, Y., & Liang, X. (2018). Doccano: Text annotation tool for human. Software available from https://github.com/doccano/doccano", |
| "links": null |
| }, |
| "BIBREF63": { |
| "ref_id": "b63", |
| "title": "A Decomposable Attention Model for Natural Language Inference", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "P" |
| ], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Tackstrom", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2249--2255", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Parikh, A. P., Tackstrom, O., Das, D. & Uszkoreit, J. (2016). A Decomposable Attention Model for Natural Language Inference. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, 2249-2255.", |
| "links": null |
| }, |
| "BIBREF65": { |
| "ref_id": "b65", |
| "title": "A Survey on Neural Machine Reading Comprehension", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.03824" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qiu, B., Chen, X., Xu, J., & Sun, Y. (2019). A Survey on Neural Machine Reading Comprehension. arXiv preprint arXiv:1906.03824.", |
| "links": null |
| }, |
| "BIBREF66": { |
| "ref_id": "b66", |
| "title": "SQuAD: 100,000+ Questions for Machine Comprehension of Text", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Rajpurkar", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Lopyrev", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2383--2392", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rajpurkar, P., Zhang, J., Lopyrev, K. & Liang, P. (2016). SQuAD: 100,000+ Questions for Machine Comprehension of Text. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, 2383-2392.", |
| "links": null |
| }, |
| "BIBREF68": { |
| "ref_id": "b68", |
| "title": "GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 EMNLP Workshop Blackbox NLP: Analyzing and Interpreting Neural Networks for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "353--355", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-5446" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang, A., Singh, A., Michael, J., Hill, F., Levy, O. & Bowman, S. (2018). GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding. In Proceedings of the 2018 EMNLP Workshop Blackbox NLP: Analyzing and Interpreting Neural Networks for NLP, 353-355. https://doi.org/10.18653/v1/W18-5446", |
| "links": null |
| }, |
| "BIBREF69": { |
| "ref_id": "b69", |
| "title": "Making Negation-word Entailment Judgment via Supplementing BERT with Aggregative Pattern", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "M" |
| ], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Su", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "International Conference on Technologies and Applications of Artificial Intelligence", |
| "volume": "2020", |
| "issue": "", |
| "pages": "17--22", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/TAAI51410.2020.00012" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wu, T. M. & Su, K. Y. (2020). Making Negation-word Entailment Judgment via Supplementing BERT with Aggregative Pattern. In International Conference on Technologies and Applications of Artificial Intelligence (TAAI 2020), 17-22. https://doi.org/10.1109/TAAI51410.2020.00012", |
| "links": null |
| }, |
| "BIBREF70": { |
| "ref_id": "b70", |
| "title": "A BERT based model for Multiple-Choice Reading Comprehension", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Tin", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xu, K., Tin, J., & Kim, J. (2019). A BERT based model for Multiple-Choice Reading Comprehension. Retrieved from http://cs229.stanford.edu/proj2019spr/report/72.pdf", |
| "links": null |
| }, |
| "BIBREF71": { |
| "ref_id": "b71", |
| "title": "XLNet: Generalized Autoregressive Pretraining for Language Understanding", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [ |
| "C" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of Advances in neural information processing systems 32 (NIPS 2019", |
| "volume": "", |
| "issue": "", |
| "pages": "5753--5763", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yang, Z., Dai, Z., Yang, Y., Carbonell, J., Salakhutdinov, R. & Le, Q. C. (2019). XLNet: Generalized Autoregressive Pretraining for Language Understanding. In Proceedings of Advances in neural information processing systems 32 (NIPS 2019), 5753-5763.", |
| "links": null |
| }, |
| "BIBREF72": { |
| "ref_id": "b72", |
| "title": "DCMN+: Dual co-matching network for multi-choice reading comprehension", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "34", |
| "issue": "", |
| "pages": "9563--9570", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhang, S., Zhao, H., Wu, Y., Zhang, Z., Zhou, X., & Zhou, X. (2020). DCMN+: Dual co-matching network for multi-choice reading comprehension. In Proceedings of the AAAI Conference on Artificial Intelligence, 34(05), 9563-9570.", |
| "links": null |
| }, |
| "BIBREF75": { |
| "ref_id": "b75", |
| "title": "The individuals listed below are reviewers of this journal during the year of 2021. The IJCLCLP Editorial Board extends its gratitude to these volunteers for their important contributions to this publication, to our association, and to the profession", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "The individuals listed below are reviewers of this journal during the year of 2021. The IJCLCLP Editorial Board extends its gratitude to these volunteers for their important contributions to this publication, to our association, and to the profession. Tao-Hsing Chang Win Ping Kuo Jia-Wei Chang Wen-Hsing Lai", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "\u91cf \u6a5f (Support Vector Machine, SVM) \u548c \u652f \u6301 \u5411 \u91cf \u56de \u6b78 (Support Vector Regression, SVR)\u7b49\u6a5f\u5668\u5b78\u7fd2\u65b9\u6cd5(Han et al., 2009)\u3002\u57fa\u65bc\u6b4c\u8a5e\u548c\u97f3\u8a0a\u7684\u6b4c\u66f2\u60c5\u7dd2\u6aa2\u6e2c\u65b9\u6cd5 \u4f86\u8a08\u7b97\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u9032\u884c\u97f3\u6a02\u4e4b\u60c5\u7dd2\u5206\u985e(Jamdar et al., 2015)\u3002\u7528\u5377\u7a4d\u795e \u7d93\u7db2\u8def\u9810\u8a13\u7df4\u6a21\u578b\u5c0d\u6bcf 30 \u79d2\u526a\u8f2f\u7684\u5370\u5ea6\u53e4\u5178\u97f3\u6a02\u9032\u884c\u97f3\u6a02\u60c5\u7dd2\u5206\u985e(Sarkar et al., 2021) \u3002 \u4e0a\u8ff0\u7814\u7a76\u5927\u591a\u90fd\u96c6\u4e2d\u5229\u7528\u8072\u5b78\u7279\u5fb5\u9032\u884c\u97f3\u6a02\u60c5\u7dd2\u8fa8\u8b58\u4e26\u7121\u8a0e\u8ad6\u6b4c\u8a5e\u5c0d\u65bc\u60c5\u7dd2\u7684\u5f71\u97ff\u3002\u6b4c \u8a5e\u88ab\u8ce6\u4e88\u60c5\u7dd2\uff0c\u5728\u5f15\u767c\u4eba\u985e\u7684\u60c5\u7dd2\u4ee5\u53ca\u9810\u6e2c\u97f3\u6a02\u60c5\u7dd2\u626e\u6f14\u8457\u91cd\u8981\u7684\u89d2\u8272(Hu & Downie, 2010)\u3002\u96d6\u7136\u65cb\u5f8b\u548c\u6b4c\u8a5e\u6703\u540c\u6642\u5c0d\u807d\u773e\u7522\u751f\u5f71\u97ff\uff0c\u4f46\u807d\u773e\u5c0d\u65bc\u6b4c\u8a5e\u5167\u5bb9\u7684\u504f\u597d\u80fd\u9032\u4e00\u6b65\u53cd \u6620\u807d\u773e\u7684\u7279\u5fb5\u548c\u50be\u5411(Qiu et al., 2019)\u3002Agrawal et al. (2021)\u63d0\u51fa\u6b4c\u8a5e\u53ef\u8996\u70ba\u4e00\u9023\u4e32\u5f7c\u6b64 \u76f8\u95dc\u7684\u53e5\u5b50\uff0c\u9700\u6355\u6349\u4e0a\u4e0b\u6587\u548c\u9577\u671f\u4f9d\u8cf4\u7684\u95dc\u4fc2\uff0c\u4e26\u5728\u7814\u7a76\u4f7f\u7528 Transformer-based \u7684\u6a21\u578b \u9032\u884c\u6b4c\u8a5e\u60c5\u7dd2\u8fa8\u8b58\uff0c\u5728\u591a\u500b\u82f1\u6587\u6b4c\u8a5e\u60c5\u7dd2\u8cc7\u6599\u96c6\u4e0a\u53d6\u5f97\u826f\u597d\u7684\u6210\u679c\uff0c\u4e0a\u8ff0\u7684\u82f1\u6587\u6b4c\u8a5e\u8cc7 \u6599\u96c6\u7686\u57fa\u65bc Russell (1980)\u7684 Valence-Arousal \u5fc3\u7406\u5b78\u74b0\u7e5e\u60c5\u611f\u6a21\u578b\u9032\u884c\u97f3\u6a02\u60c5\u7dd2\u7684\u6a19\u8a3b\u3002 \u672c\u7814\u7a76\u63d0\u51fa\u4e00\u4e2d\u6587\u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u65b9\u6cd5\u3002\u9996\u5148\uff0c\u904b\u7528\u57fa\u65bc Transformer \u8a9e\u8a00\u9810\u8a13\u7df4\u6a21\u578b \u5c0d\u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u5b57\u5178(CVAW)\u8207\u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u7247\u8a9e(CVAP)\u9032\u884c\u5efa\u6a21\uff0c\u5176\u6b21\u5c07\u6a21\u578b\u9077\u79fb\u81f3 \u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u8a9e\u6599et al., 2008)\u3002\u73fe\u6709\u7684\u7814\u7a76\u5927\u591a\u63a1\u7528 Russell (1980) \u6240\u63d0\u51fa\u7684\u5fc3\u7406\u5b78\u74b0\u7e5e\u6a21\u578b\u3002Laurier et al. (2009) \u7684\u7814\u7a76\u4e2d\u8868\u660e\uff0cRussell \u5fc3\u7406\u5b78\u60c5\u7dd2\u6a21\u578b\u53ef\u4ee5\u7528\u65bc\u60c5\u7dd2\u5206\u6790\u6216\u97f3\u6a02\u60c5\u7dd2\u8fa8\u8b58 \u4efb\u52d9\u3002\u8a72\u7dad\u5ea6\u6a21\u578b\u7684\u5169\u500b\u7dad\u5ea6\u7684\u9023\u7e8c\u6578\u503c\uff0c\u5206\u5225\u70ba\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u3002\u6548\u50f9 (Valence)\u4ee3\u8868\u6240\u6709\u60c5\u7dd2\u9ad4\u9a57\u6240\u56fa\u6709\u7684\u7a4d\u6975\u6216\u6d88\u6975\uff0c\u9ad8\u6548\u50f9(Valence)\u7684\u6b4c\u66f2\u807d\u8d77\u4f86\u66f4\u70ba\u7a4d \u6975\u3001\u5feb\u6a02\uff0c\u4f4e\u6548\u50f9(Valence)\u7684\u6b4c\u66f2\u807d\u8d77\u4f86\u8f03\u6cae\u55aa\u3001\u61a4\u6012\u3002\u559a\u9192(Arousal)\u4ee3\u8868\u60c5\u7dd2\u7684\u6fc0\u52d5\u7a0b \u5ea6\uff0c\u6b4c\u66f2\u7684\u80fd\u91cf(energy)\u80fd\u5c0d\u61c9\u65bc\u559a\u9192(Arousal)\u503c\uff0c\u4ee3\u8868\u6b4c\u66f2\u5f37\u5ea6\uff0c\u80fd\u91cf\u9ad8\u7684\u6b4c\u66f2\u901a\u5e38\u8d8a \u5feb\u901f\u3001\u97ff\u4eae\u548c\u5f37\u70c8(Kim et al., 2011)\u3002 \u5716 1. Russell \u63d0\u51fa\u4e4b\u5fc3\u7406\u5b78\u7dad\u5ea6\u60c5\u7dd2\u6a21\u578b [Figure 1. The circumplex model of affect (Russell, 1980)] \u5982\u5716 1 \u6240\u793a\uff0c\u60c5\u7dd2\u7531\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u5169\u500b\u7dad\u5ea6\u8868\u793a\uff0c\u60c5\u7dd2\u5e73\u9762\u88ab\u5206\u70ba \u56db\u500b\u8c61\u9650\uff0c\u5275\u5efa\u4e86\u56db\u500b\u60c5\u7dd2\u985e\u5225\u7a7a\u9593\u3002\u5728 \u00c7ano & Morisio (2017a)\u7684\u7814\u7a76\u4e2d\u57fa\u65bc Russell \u7dad \u5ea6\u60c5\u7dd2\u6a21\u578b\u7684\u56db\u500b\u8c61\u9650\u5c07\u60c5\u7dd2\u5206\u70ba\u56db\u985e\u5225(Q1\u3001Q2\u3001Q3\u3001Q4)\uff0c\u5206\u5225\u70ba\u5feb\u6a02\u3001\u61a4\u6012\u3001\u60b2\u50b7 \u548c\u8f15\u9b06\u3002\u56e0\u6b64\uff0c\u672c\u7814\u7a76\u5728\u6b4c\u8a5e\u9a57\u8b49\u7684\u90e8\u5206\u4e5f\u4f9d\u6b64\u65b9\u6cd5\u5c07\u6b4c\u8a5e\u60c5\u7dd2\u5206\u70ba\u56db\u500b\u8c61\u9650\u985e\u5225\u3002 4 \u5ed6\u5bb6\u8abc \u7b49" |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "The process of the proposed sentiment analysis in this paper" |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Structure of BERT model with YouTuber embedding." |
| }, |
| "FIGREF3": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Models' accuracy on whether comments is related to YouTubers. A Pretrained YouTuber Embeddings for 29 Improving Sentiment Classification of YouTube Comments" |
| }, |
| "FIGREF4": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Models' F1-score on whether comments is related to YouTubers." |
| }, |
| "FIGREF5": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Figure 5. and Figure 6 show the result of the audience's sentiment towards YouTubers. The data in this detection task is comment about YouTubers' affairs, so we expected that adding YouTuber embedding after each comments can increase overall accuracy and F1-score. Machine learning-based classifiers proved the same result with our exception. The models 'performances have at least increased 7% in overall accuracy and 8% in F1-score. However, BERT, the variance seen from M2 to M4 surprisingly decrease." |
| }, |
| "FIGREF6": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Models' accuracy on audience's sentiment towards YouTubers." |
| }, |
| "FIGREF7": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Models' F1-score on audience's sentiment towards YouTubers." |
| }, |
| "FIGREF8": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Figure 7 and Figure 8 are the result of predicting the target, whether comments are related to videos. We notice that overall accuracy in all models is upscale to nearly 90%. However, the" |
| }, |
| "FIGREF9": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Models' accuracy on whether comments is related to videos Figure 8. Models' F1-score on whether comments is related to videosFigure 9andFigure 10are the result of predicting the audience's sentiment towards videos." |
| }, |
| "FIGREF10": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Models' accuracy on audience's sentiment towards videos. A Pretrained YouTuber Embeddings for 31 Improving Sentiment Classification of YouTube Comments Figure 10. Models' F1-score on audience's sentiment towards videos." |
| }, |
| "FIGREF11": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Figure 11 and Figure 12 shows the result of predicting the audience's emotional ups and downs from their leaving comments. Compared with adding YouTuber embedding and without YouTuber embedding, the former method can improve model performance in machine learningbased methods. We deduce that the improvement may result from different types of YouTubers having different audiences. The more controversial YouTuber, the more excitement level may show in their audience's comments. For example, a YouTuber who prefers talking about political issues may vary their audience emotional variance than educational channels." |
| }, |
| "FIGREF12": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Models' accuracy on emotional ups and downs." |
| }, |
| "FIGREF13": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Models' F1-score on emotional ups and downs." |
| }, |
| "FIGREF14": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Tacotron 2 \u6a21\u578b\u67b6\u69cb [Figure 1. Tacotron 2 model architecture] 52 \u738b\u8056\u582f\u8207\u9ec3\u5955\u6b3d" |
| }, |
| "FIGREF15": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Figure 8. Speaker space for inside and outside female speakers.] Figure 9. Speaker space for inside and outside male speakers.]" |
| }, |
| "FIGREF16": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Architecture of proposed SJS approach." |
| }, |
| "FIGREF17": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Decision tree for SJS approach. Each \"act-xxx\" is a specific action to be taken." |
| }, |
| "FIGREF18": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Figure 3shows the examples under two different inference mechanisms: (1) for a negation-type question (left figure), and (2) a question with all of the above option (right figure)." |
| }, |
| "FIGREF19": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Two inference mechanisms under SJS framework." |
| }, |
| "FIGREF20": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "System architecture of proposed \"Jointly Judge then Select\" framework." |
| }, |
| "FIGREF21": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Multiple-choice question annotation." |
| }, |
| "FIGREF22": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "The architecture of the BERT-MC model(Xu et al., 2019)." |
| }, |
| "TABREF0": { |
| "html": null, |
| "type_str": "table", |
| "text": "\u9810\u8a13\u7df4\u6a21\u578b\u5728\u591a\u9805\u4efb\u52d9\u4e2d\u53d6\u5f97\u7a81\u7834\uff0c\u5305\u542b\u5be6\u9ad4 \u8faf\u8b58\u3001\u5e8f\u5217\u6216\u53e5\u5b50\u5c0d\u5206\u985e\u3001\u554f\u7b54\u7b49 11 \u7a2e\u4efb\u52d9\uff0c\u4f7f\u5f97 Transformer-based \u7684\u6a21\u578b\u67b6\u69cb\u5728\u81ea\u7136 \u8a9e\u8a00\u9818\u57df\u4e2d\u6210\u70ba\u4e3b\u6d41\u3002\u5728\u6b4c\u8a5e\u60c5\u7dd2\u8fa8\u8b58\u7684\u61c9\u7528\u4e0a\uff0cAgrawal et al. (2021)\u7684\u7814\u7a76\u4fbf\u662f\u4f7f\u7528\u57fa \u65bc Transformer \u7684\u8a9e\u8a00\u6a21\u578b\u4f5c\u70ba\u60c5\u7dd2\u5206\u985e\u7684\u57fa\u790e\u67b6\u69cb\uff0c\u5728\u591a\u500b\u82f1\u6587\u6b4c\u8a5e\u60c5\u7dd2\u8cc7\u6599\u96c6\u4e0a\u9054\u5230 \u826f\u597d\u7684\u6210\u679c\uff0c\u5c55\u793a Transformer-based \u65b9\u6cd5\u7684\u9ad8\u6548\u80fd\u3002 Hung et al., 2019)\u3001\u6642\u9593\u5e8f\u5217\u4efb\u52d9(Fawaz et al., 2018)\u30013D \u91ab\u5b78\u5f71\u50cf \u5206\u6790(Chen et al., 2019)\u3002\u5728\u81ea\u7136\u8a9e\u8a00\u8655\u7406\u9818\u57df\uff0c\u4e5f\u5e38\u904b\u7528\u9077\u79fb\u5b78\u7fd2\u7684\u6280\u5de7\u5c0d\u65bc\u9810\u8a13\u7df4\u6a21\u578b \u9032\u884c\u6a21\u578b\u5fae\u8abf\u6216\u7279\u5fb5\u8403\u53d6\uff0cTransformer-based \u7684\u9810\u8a13\u7df4\u6a21\u578b\uff0c\u8b49\u660e\u5fae\u8abf\u5728\u7121\u8a3b\u91cb\u8a9e\u6599\u4e0a \u9810\u8a13\u7df4\u5927\u898f\u6a21\u8a9e\u8a00\u6a21\u578b\u7684\u6709\u6548\u6027\u3002Hung & Chang (2021)\u5247\u63d0\u5230\u591a\u5c64\u9077\u79fb\u5b78\u7fd2\u7684\u6709\u6548\u6027\uff0c \u8868\u660e\u4e86\u4e0d\u7ba1\u5728\u96fb\u8166\u8996\u89ba\u4efb\u52d9\u6216\u81ea\u7136\u8a9e\u8a00\u8655\u7406\u4efb\u52d9\uff0c\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u7d50\u679c\u6703\u512a\u65bc\u672a\u7d93\u904e\u9077\u79fb \u7684\u7d50\u679c\uff0c\u56e0\u6b64\uff0c\u672c\u7bc7\u7814\u7a76\u63d0\u51fa\u7684\u6a21\u578b\u67b6\u69cb\u57fa\u65bc\u8a9e\u8a00\u9810\u8a13\u7df4\u6a21\u578b\u5c0d\u6587\u672c\u9032\u884c\u9077\u79fb\u5b78\u7fd2\u3002 \u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u8cc7\u6599\u96c6(Yu et al., 2016; Yu et al., 2017)\uff1a\u8cc7\u6599\u5982\u8868 1 \u6240\u793a\uff0c\u5305\u542b\u4e2d\u6587\u60c5 \u7dd2\u5b57\u5178(Chinese Valence-Arousal Words, CVAW)\u3001\u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u7247\u8a9e(Chinese Valence-Arousal Phrases, CVAP)\u4ee5\u53ca\u4e2d\u6587\u60c5\u7dd2\u8a9e\u6599\u5eab(Chinese Valence-Arousal Text, CVAT)\u4e09 \u500b\u3002CVAW \u5305\u542b 5,512 \u500b\u4e2d\u6587\u60c5\u7dd2\u8a5e\uff1bCVAP \u4e2d\u6bcf\u500b\u7247\u8a9e\u7d50\u5408\u4fee\u98fe\u7b26\u548c\u4f86\u81ea CVAW \u4e2d\u7684\u8a5e\uff0c\u5171 2,998 \u500b\u4e2d\u6587\u60c5\u7dd2\u7247\u8a9e\uff1bCVAT \u5247\u5f9e 720 \u7bc7\u4f86\u81ea 6 \u7a2e\u4e0d\u540c\u985e\u5225\u7684\u7db2\u8def\u6587 \u7ae0\u8490\u96c6\u800c\u4f86\uff0c\u5171 2,009 \u500b\u53e5\u5b50\u3002\u4e09\u500b\u8cc7\u6599\u96c6\u7684\u6bcf\u500b\u8a5e\u6216\u53e5\u5b50\u7686\u5305\u542b\u6548\u50f9(Valence)\u548c \u559a\u9192(Arousal)\u3002\u6548\u50f9(Valence)\u7684\u7bc4\u570d\u5f9e 1 \u5230 9 \u5176\u5206\u5225\u4ee3\u8868\u6975\u7aef\u8ca0\u9762\u548c\u6975\u7aef\u6b63\u9762\u7684\u60c5 \u7dd2\uff0c\u559a\u9192(Arousal)\u7684\u7bc4\u570d\u5f9e 1 \u5230 9 \u5176\u5206\u5225\u4ee3\u8868\u5e73\u975c\u548c\u6fc0\u52d5\uff0c\u6548\u50f9(Valence)\u548c\u559a\u9192 (Arousal)\u82e5\u70ba 5 \u5247\u4ee3\u8868\u6c92\u6709\u7279\u5b9a\u50be\u5411\u7684\u4e2d\u6027\u60c5\u7dd2\u3002 \u4ee5\u9077\u79fb\u5b78\u7fd2\u6539\u5584\u6df1\u5ea6\u795e\u7d93\u7db2\u8def\u6a21\u578b\u65bc\u4e2d\u6587\u6b4c\u8a5e\u60c5\u7dd2\u8fa8\u8b58 5 \uf0b7 \u6b4c\u8a5e\u8cc7\u6599\u96c6\uff1a\u70ba\u672c\u7814\u7a76\u81ea\u884c\u6536\u96c6\u4e26\u6a19\u7c64\u7684\u8cc7\u6599\u96c6\u3002\u6a19\u7c64\u5305\u542b\u8c61\u9650\u4e00(Q1) \u3001\u8c61\u9650\u4e8c(Q2)\u3001 \u8c61\u9650\u4e09(Q3)\u53ca\u8c61\u9650\u56db(Q4)\u3002Q1 \u4ee3\u8868\u6b63\u5411\u6fc0\u6602\u5171 43 \u9996\uff0cQ2 \u4ee3\u8868\u8ca0\u5411\u6fc0\u6602\u5171 45 \u9996\uff0c Q3 \u4ee3\u8868\u8ca0\u5411\u5e73\u975c\u5171 43 \u9996\uff0cQ4 \u4ee3\u8868\u6b63\u5411\u5e73\u975c\u5171 39 \u9996\u3002V \u548c A \u5206\u5225\u4ee3\u8868\u6548\u50f9(Valence) \u548c\u559a\u9192(Arousal)\uff0cV \u6a19\u8a18 1 \u4ee3\u8868\u6b63\u5411\u60c5\u7dd2\u30010 \u4ee3\u8868\u8ca0\u5411\u60c5\u7dd2\uff0cA \u6a19\u8a18\u70ba 1 \u4ee3\u8868\u6fc0\u6602 \u60c5\u7dd2\u30010 \u4ee3\u8868\u5e73\u975c\u60c5\u7dd2\u3002 \u8868 CVAW\u3001CVAP \u548c CVAT \u7686\u63a1\u7528\u8cc7\u6599\u96c6\u5167\u7684\u6587\u5b57\u3001\u6548\u50f9(Valence)\u5e73\u5747\u548c\u559a\u9192(Arousal)\u5e73 \u5747\u3002\u7531\u65bc CVAW\u3001CVAP \u7684\u6587\u5b57\u8f03\u77ed\u4e14\u985e\u4f3c\uff0c\u56e0\u6b64\u5c07\u5169\u500b\u8cc7\u6599\u96c6\u5408\u4f75\u6210\u4e00\u500b\u8cc7\u6599\u96c6\uff0c\u7a31 CVAW+CVAP\uff0c\u4ee5 8 \u6bd4 2 \u62c6\u5206\u70ba\u8a13\u7df4\u96c6\u8ddf\u6e2c\u8a66\u96c6\u3002BERT \u6a21\u578b\u6709\u5225\u65bc\u50b3\u7d71\u6587\u672c\u7684\u65b9\u6cd5\uff0c \u6703\u5c07\u6a19\u9ede\u7b26\u865f\u8996\u70ba\u4e00\u500b\u7279\u5fb5\u503c\u9032\u884c\u8a13\u7df4\uff0c\u56e0\u6b64 CVAT \u6587\u5b57\u4e0d\u9032\u884c\u522a\u9664\u6a19\u9ede\u7b26\u865f\u7684\u9810\u8655\u7406\u3002 \u6b4c\u8a5e\u7684\u8cc7\u6599\u96c6\u5171 170 \u9996\uff0c\u7531\u4e09\u4f4d\u6a19\u8a3b\u8005\u5c07\u6bcf\u9996\u6b4c\u66f2\u7684\u91dd\u5c0d\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal) \u5206\u5225\u6a19\u8a3b\u70ba\u6b63\u6216\u8ca0\uff0c\u4ee5\u4e2d\u6027\u60c5\u7dd2\u70ba\u539f\u9ede\uff0c\u4f9d\u7167\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u7684\u6b63\u8ddf\u8ca0\u5206 \u6a19\u8a18\u5230\u56db\u500b\u8c61\u9650\u3002BERT \u80fd\u5920\u8a13\u7df4\u7684\u6700\u5927\u6587\u672c\u9577\u5ea6\u70ba 512\uff0c\u8003\u616e\u5230 CVAP \u548c CVAW \u7684\u6587 \u5b57\u90fd\u5728 10 \u5b57\u4ee5\u5167\uff0c\u800c CVAT \u7684\u6587\u672c\u5206\u4f48\u5927\u591a\u96c6\u4e2d\u5728 100 \u5b57\u4ee5\u5167\uff0c\u70ba\u4e86\u907f\u514d\u7522\u751f\u904e\u65bc\u7a00 \u758f\u5411\u91cf\uff0c\u6700\u5927\u6587\u672c\u9577\u5ea6\u8a2d\u5b9a\u70ba 256 \u800c\u975e 512\u3002\u8f38\u5165 BERT \u6a21\u578b\u524d\u5fc5\u9808\u5728\u6bcf\u500b\u5e8f\u5217\u958b\u982d\u52a0 \u4e0a\u7279\u6b8a\u5b57\u5143\u7b26\u865f[CLS]\uff0c\u6b64\u7279\u6b8a\u5b57\u5143\u4ee3\u8868\u6574\u500b\u8f38\u5165\u5e8f\u5217\u7684\u5411\u91cf\u8868\u793a\uff0c\u5728\u5e8f\u5217\u5c3e\u5df4\u5247\u52a0\u4e0a\u7279 \u6b8a\u5b57\u5143\u7b26\u865f[SEP]\u4f5c\u70ba\u6587\u672c\u7684\u7d50\u675f\uff0c\u6bcf\u500b\u4e2d\u6587\u5b57\u6703\u5c0d\u61c9\u5230 BERT \u4e2d\u6587\u5b57\u5178\u7684\u4e00\u500b\u7d22\u5f15\u503c\u7a31 \u70ba Token id\uff0c\u70ba\u4e86\u8b93\u6bcf\u4e00\u5247\u8f38\u5165\u5e8f\u5217\u7684\u9577\u5ea6\u4fdd\u6301\u4e00\u81f4\uff0c\u82e5\u6587\u5b57\u9577\u5ea6\u4e0d\u8db3\u5247\u6703\u5728\u6587\u5b57\u5e8f\u5217 \u5f8c\u7aef\u586b\u5145\u7279\u6b8a\u5b57\u5143[PAD]\uff0c\u6700\u5f8c\u8f49\u70ba\u5411\u91cf\u7684\u5e8f\u5217\u548c\u76ee\u6a19\u503c\u8f49\u70ba\u5f35\u91cf(Tensor)\u81f3 BERT \u6a21\u578b \u9032\u884c\u8a13\u7df4\u3002 \u672c\u7814\u7a76\u4ee5\u77e5\u540d\u7684\u6df1\u5ea6\u795e\u7d93\u7db2\u8def\u6a21\u578b\u2500BERT(Devlin et al., 2018)\u70ba\u57fa\u790e\u67b6\u69cb\uff0c\u4e26\u9032\u4e00\u6b65\u63d0 \u51fa\u4e86\u591a\u8f38\u51fa(Multi-output)\u8207\u55ae\u8f38\u51fa(Single-output)\u5169\u7a2e\u6a21\u578b\u8a13\u7df4\u67b6\u69cb\u3002\u5982\u5716 2 \u6240\u793a\uff0c\u591a\u8f38 \u51fa(Multi-output)\u67b6\u69cb\u70ba\u4e00\u500b BERT \u6a21\u578b\u5171\u4eab\u6b0a\u91cd\uff0c\u4e00\u6b21\u8f38\u51fa\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal) \u5169\u500b\u9023\u7e8c\u503c\uff0c\u55ae\u8f38\u51fa(Single-output)\u70ba\u4e00\u500b BERT \u6a21\u578b\u8f38\u51fa\u55ae\u4e00\u500b\u503c(\u4f8b\u5982 Valence)\u3002\u7531\u65bc \u662f\u6548\u50f9(Valence)\u3001\u559a\u9192(Arousal)\u7684\u6578\u503c\u9810\u6e2c\uff0c\u56e0\u6b64\u6a21\u578b\u8a13\u7df4\u6642\u7684\u640d\u5931\u51fd\u6578\u9078\u64c7\u4f7f\u7528\u5747\u65b9 \u8aa4\u5dee(Mean square error, MSE)\u3002\u5169\u7a2e\u6a21\u578b\u67b6\u69cb\u90fd\u5be6\u9a57\u5169\u7a2e\u65b9\u6cd5\uff1a(a)\u4f7f\u7528\u5f9e CVAW+CVAP \u9077\u79fb\u81f3 CVAT \u8cc7\u6599\u96c6\u7684\u9077\u79fb\u5b78\u7fd2\u65b9\u6cd5\u3002(b)\u5f9e\u96f6\u8a13\u7df4 CVAT \u672a\u9077\u79fb\u7684\u65b9\u6cd5\u3002\u6700\u5f8c\u9032\u884c\u5169\u7a2e \u65b9\u6cd5\u7684\u6bd4\u8f03\u3002\u672c\u7814\u7a76\u57fa\u65bc\u5fae\u8abf\u65b9\u6cd5\u9032\u884c\u5be6\u9a57\uff0c\u5fae\u8abf\u65b9\u6cd5\u7684\u512a\u9ede\u5728\u65bc\u6a21\u578b\u7684\u8a31\u591a\u53c3\u6578\u4e0d\u9700 \u8981\u91cd\u65b0\u5b78\u7fd2\uff0c\u5373\u4f7f\u53ea\u6709\u5c11\u91cf\u8a13\u7df4\u6a23\u672c\u4e5f\u80fd\u9054\u5230\u826f\u597d\u7684\u6548\u679c\u3002\u5728\u6a21\u578b\u5fae\u8abf\u65b9\u9762\uff0c\u5728 BERT \u9810\u8a13\u7df4\u6a21\u578b\u52a0\u4e0a\u4e00\u5c64 Dropout \u548c\u4e00\u5c64\u7dda\u6027\u5206\u985e\u5c64\uff0c\u512a\u5316\u5668\u70ba Adam\uff0c\u5b78\u7fd2\u901f\u7387\u672c\u7814\u7a76\u5617\u8a66 \u591a\u7a2e\u5b78\u7fd2\u901f\u7387\u9032\u884c\u5be6\u9a57\uff0c\u5fae\u8abf\u6a21\u578b\u9069\u5408\u8f03\u5c0f\u7684\u5b78\u7fd2\u901f\u7387\u907f\u514d\u9810\u8a13\u7df4\u7684\u6b0a\u91cd\u88ab\u4fee\u6539\u7834\u58de\uff0c \u4e5f\u5728\u5be6\u9a57\u4e2d\u767c\u73fe\uff0c\u82e5\u5b78\u7fd2\u901f\u7387\u4e0d\u5920\u5c0f\u6703\u5c0e\u81f4\u640d\u5931(loss)\u7121\u6cd5\u964d\u4f4e\uff0c\u6700\u5f8c\u9078\u64c7\u4e86 1e-05\u30011e-06 \u548c 5e-05 \u4e09\u500b\u8d85\u53c3\u6578\u9032\u884c\u8fd1\u4e00\u6b65\u5be6\u9a57\u53ca\u6bd4\u8f03\uff0c\u6bcf\u6b21\u8a13\u7df4\u6700\u5927 Epoch \u8a2d\u5b9a\u70ba 100\uff0c\u52a0\u5165 Early Stopping \u7684\u6a5f\u5236\uff0c\u5c07\u8010\u5fc3(Patience)\u8a2d\u81f3\u70ba 10\u3002 \u5716 2. \u672c\u7814\u7a76\u63d0\u51fa\u4e4b\u6a21\u578b\u67b6\u69cb\uff1a\u5305\u542b\u5169\u7a2e\u67b6\u69cb\u5206\u5225\u70ba\u55ae\u8f38\u51fa\u8207\u591a\u8f38\u51fa \u8a13\u7df4\u6a21\u578b\u7684\u8cc7\u6599\u96c6\u5207\u5206\u7686\u4ee5 8 \u6bd4 2 \u9032\u884c\uff0cCVAP+CVAW \u7684\u8a13\u7df4\u96c6\u548c\u6e2c\u8a66\u96c6\u5206\u5225\u70ba 6808 \u7b46\u548c 1702 \u7b46\u3002\u5728\u591a\u8f38\u51fa(Multi-output)\u8207\u55ae\u8f38\u51fa(Single-output)\u67b6\u69cb\u7684\u8a13\u7df4\u7d50\u679c\uff0c\u5982\u8868 2 \u6240 \u793a\uff0c\u591a\u8f38\u51fa\u67b6\u69cb\u6a21\u578b\u7684\u5747\u65b9\u8aa4\u5dee\u70ba 0.59126\uff0c\u55ae\u8f38\u51fa\u6a21\u578b\u7684\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal) \u7684\u5747\u65b9\u8aa4\u5dee(MSE)\u5206\u5225\u70ba 0.3788 \u548c 0.77339\uff0c\u5169\u500b\u6a21\u578b\u67b6\u69cb\u6700\u4f73\u7684\u5b78\u7fd2\u901f\u7387\u7686\u70ba 1e-05\u3002 \u6240\u793a\u5728\u591a\u8f38\u51fa\u67b6\u69cb\u5e95\u4e0b\uff0c\u5f9e\u96f6\u8a13\u7df4 CVAT \u8cc7\u6599\u96c6(Training From Scratch)\u548c\u5f9e CVAP+CVAW \u8cc7\u6599\u96c6\u6a21\u578b\u9077\u79fb\u81f3 CVAT \u8cc7\u6599\u96c6(Transfer Learning)\u7684\u7d50\u679c\u4f86\u770b\uff0c\u5169\u8005\u540c \u6a23\u90fd\u5728\u5b78\u7fd2\u901f\u7387\u7686\u70ba 1e-05 \u7684\u8a13\u7df4\u6548\u679c\u6700\u4f73\uff0c\u7d93\u904e\u9077\u79fb\u7684\u5747\u65b9\u8aa4\u5dee\u70ba 0.65696 \u512a\u65bc\u672a\u7d93 \u9077\u79fb\u7684 0.72025\u3002\u7d93\u904e\u9077\u79fb\u7684\u6a21\u578b\u7d50\u679c\u6bd4\u672a\u7d93\u9077\u79fb\u6548\u679c\u597d\uff0c\u5728\u4e0d\u540c\u5b78\u7fd2\u901f\u7387\u4e0b\uff0c\u7d93\u9077\u79fb\u7684 CVAT \u5728\u4e0d\u540c\u7684\u5b78\u7fd2\u901f\u7387\u4e0b\u90fd\u512a\u65bc\u672a\u7d93\u9077\u79fb\u7684\u7d50\u679c\uff0c\u6709\u7d93\u904e\u9077\u79fb\u5b78\u7fd2\u7684 CVAT \u6536\u6582\u901f\u5ea6 \u4e5f\u6bd4\u672a\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u5feb\u3002\u8868 4 \u70ba\u5728\u55ae\u8f38\u51fa\u67b6\u69cb\u5e95\u4e0b\uff0c\u5f9e\u96f6\u8a13\u7df4 CVAT \u8cc7\u6599\u96c6(Training From Scratch)\u548c\u5f9e CVAP+CVAW \u8cc7\u6599\u96c6\u6a21\u578b\u9077\u79fb\u81f3 CVAT \u8cc7\u6599\u96c6(Transfer Learning)\u7684 \u7d50\u679c\uff0c\u55ae\u8f38\u51fa(Single-output)\u67b6\u69cb\u662f\u5c07\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u4f5c\u70ba\u7368\u7acb\u7684\u5169\u500b\u8f38\u51fa\uff0c \u9996\u5148\u6bd4\u8f03\u6548\u50f9(Valence)\u8f38\u51fa\u7684\u7d50\u679c\uff0c\u672a\u7d93\u9077\u79fb(Training From Scratch)\u7684\u5747\u65b9\u8aa4\u5dee(MSE)\u70ba 0.50338\uff0c\u800c\u7d93\u9077\u79fb\u5b78\u7fd2(Transfer Learning)\u7684\u5747\u65b9\u8aa4\u5dee(MSE)\u70ba 0.46624\uff0c\u986f\u793a\u7d93\u9077\u79fb\u5b78\u7fd2 \u7684 CVAT \u5176\u7d50\u679c\u512a\u65bc\u672a\u7d93\u9077\u79fb\u7684\u7d50\u679c\u3002\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u6700\u4f73\u5b78\u7fd2\u901f\u7387\u70ba 1e-06\uff0c\u672a\u7d93\u9077\u79fb \u7684\u6700\u4f73\u5b78\u7fd2\u901f\u7387\u70ba 1e-5\uff0c\u5c31\u7b97\u540c\u6a23\u90fd\u5728 1e-5 \u7684\u5b78\u7fd2\u901f\u7387\u5e95\u4e0b\uff0c\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u5747\u65b9\u8aa4\u5dee 0.47898 \u4f9d\u7136\u662f\u512a\u65bc\u672a\u7d93\u9077\u79fb\u7684\u5747\u65b9\u8aa4\u5dee 0.50338\u3002\u6bd4\u8f03\u8f38\u51fa\u70ba\u559a\u9192(Arousal)\u7684\u7d50\u679c\uff0c\u7d93 \u9077\u79fb\u5b78\u7fd2\u7684 CVAT \u5176\u5747\u65b9\u8aa4\u5dee\u70ba 0.84259 \u512a\u65bc\u672a\u7d93\u9077\u79fb\u7684 0.87107\uff0c\u5169\u8005\u540c\u6a23\u90fd\u5728\u5b78\u7fd2\u901f \u7387\u70ba 1e-05 \u7684\u6642\u5019\u5f97\u5230\u6700\u4f73\u7d50\u679c\u3002 \u8868 3. \u591a\u8f38\u51fa\u67b6\u69cb(Multi-output)\u7d93\u9077\u79fb\u5b78\u7fd2\u8207\u672a\u7d93\u9077\u79fb\u5b78\u7fd2\u5728 CVAP \u8cc7\u6599\u96c6\u4e4b\u7d50 \u679c \u55ae\u8f38\u51fa\u67b6\u69cb(Single-output)\u7d93\u9077\u79fb\u5b78\u7fd2\u8207\u672a\u7d93\u9077\u79fb\u5b78\u7fd2\u5728 CVAP \u8cc7\u6599\u96c6\u4e4b\u7d50 \u679c \u7d93\u9077\u79fb\u5b78\u7fd2 CVAT \u6a21\u578b\u8207\u672a\u7d93\u9077\u79fb\u5b78\u7fd2 CVAT \u6a21\u578b\u7684\u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u7684\u6df7\u6dc6\u77e9\u9663\u7684\u7d50\u679c\uff0c \u5982\u8868 5 \u6240\u793a\uff0c\u5728\u7d93\u9077\u79fb\u5b78\u7fd2\u7684 CVAT \u5206\u985e\uff0cQ1 \u6709 26%\u5bb9\u6613\u88ab\u932f\u5206\u6210 Q2\uff0c19%\u6703\u88ab\u932f\u5206 \u6210 Q4\uff0c\u50c5\u6709 2.3%\u6703\u88ab\u5206\u6210 Q3\uff0c\u4e5f\u5c31\u662f\u5728 Q1 \u7684\u60c5\u7dd2\u985e\u5225\u4e2d\uff0c\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal) \u90fd\u6709\u88ab\u5206\u985e\u932f\u7684\u53ef\u80fd\uff0c\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u540c\u6642\u88ab\u932f\u5206\u7684\u6a5f\u7387\u50c5 2.3%\u3002Q4 \u6709 56% \u88ab\u932f\u5206\u70ba Q3\uff0c\u88ab\u5206\u6210 Q2 \u7684\u53ef\u80fd\u70ba 5%\uff0c\u50c5\u6709 2.5%\u6703\u88ab\u5206\u6210 Q1\uff0c\u4e5f\u5c31\u662f\u5728 Q4 \u7684", |
| "content": "<table><tr><td colspan=\"4\">\u5728\u67d0\u4e9b\u9818\u57df\u4e2d\u6a19\u7c64\u7684\u6a19\u8a18\u6602\u8cb4\uff0c\u82e5\u539f\u59cb\u8cc7\u6599\u4e2d\u542b\u6709\u6a19\u7c64\u7684\u6578\u91cf\u592a\u5c11\uff0c\u5bb9\u6613\u9020\u6210\u6a21\u578b\u904e\u5ea6 \u64ec\u5408\u3002\u9077\u79fb\u5b78\u7fd2\u5e38\u7528\u7684\u5169\u500b\u6280\u5de7\uff1a\u7279\u5fb5\u8403\u53d6\u548c\u5fae\u8abf\u3002\u9077\u79fb\u5b78\u7fd2\u7684\u6709\u6548\u6027\u50ac\u751f\u4e86\u591a\u7a2e\u61c9\u7528\uff0c 5e-5 0.69301 17 \u4f8b\u5982\uff1a\u5b78\u7fd2\u60c5\u7dd2\u8faf\u8b58(\uf0b7 \u540d\u7a31 \u7e3d\u6578 \u7bc4\u4f8b\u6587\u5b57 Valence Arousal \u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u5b57\u5178 CVAW 5,512 \u4e0d\u723d 2.8 7.2 \u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u7247\u8a9e CVAP 2,998 \u975e\u5e38\u53ef\u611b 8 7.313 \u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u8a9e\u6599 \u5eab CVAT 2,009 \u9019\u7a2e\u8a18\u9304\u96e3\u514d\u7a7a\u6d1e\uff0c\u865b\u69cb\u4e5f\u986f\u5f97\u8584\u5f31\u3002 3 3.5 3.2 \u63d0\u51fa\u4e4b\u67b6\u69cb (Proposed Architecture) \u672c\u7814\u7a76\u63d0\u51fa\u7684\u6a21\u578b\u67b6\u69cb\u5982\u5716 2\uff0c\u900f\u904e BERT \u9810\u8a13\u7df4\u6a21\u578b\u5efa\u7acb CVAT \u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u6a21\u578b\uff0c \u5c07\u6b64\u6a21\u578b\u76f4\u63a5\u7528\u65bc\u6b4c\u8a5e\u60c5\u7dd2\u7684\u6a19\u8a18\uff0c\u9a57\u8b49\u5728\u672a\u5b78\u7fd2\u904e\u6b4c\u8a5e\u6587\u672c\u7684\u60c5\u6cc1\u4e0b\u6a21\u578b\u7684\u6210\u6548\u3002\u672c \u7ae0\u7e3d\u5171\u6709\u4e09\u500b\u5c0f\u7bc0\uff0c\u7b2c\u4e00\u5c0f\u7bc0\u8aaa\u660e\u8cc7\u6599\u9810\u8655\u7406\uff0c\u7b2c\u4e8c\u5c0f\u7bc0\u4ecb\u7d39\u6a21\u578b\u5be6\u4f5c\u7684\u7d30\u7bc0\u4ee5\u53ca\u5be6\u9a57 \u7684\u53c3\u6578\u8a2d\u5b9a\uff0c\u7b2c\u4e09\u5c0f\u7d50\u8a0e\u8ad6\u5c07\u6a21\u578b\u61c9\u7528\u65bc\u6b4c\u8a5e\u6587\u672c\u60c5\u7dd2\u9a57\u8b49\u7684\u65b9\u6cd5\u3002 \u5ed6\u5bb6\u8abc \u7b49 \u4ee5\u9077\u79fb\u5b78\u7fd2\u6539\u5584\u6df1\u5ea6\u795e\u7d93\u7db2\u8def\u6a21\u578b\u65bc\u4e2d\u6587\u6b4c\u8a5e\u60c5\u7dd2\u8fa8\u8b58 7 \u67b6\u69cb\u540d\u7a31 \u8f38\u51fa \u5b78\u7fd2\u901f\u7387 (Lr) \u640d\u5931 (loss) Epoch \u591a\u8f38\u51fa\u67b6\u69cb Multi-output -1e-5 0.59126 14 1e-6 0.65283 32 8 \u5ed6\u5bb6\u8abc \u7b49 \u5f9e 0 \u8a13\u7df4 CVAT From Scratch 1e-5 0.72025 10 1e-6 0.73979 58 5e-5 0.80925 5e-5 0.70594 \u8a5e\u3001\u6a21\u578b\u9810\u6e2c\u7684 Valence \u6578\u503c\u548c Arousal \u6578\u503c\u3001\u9810\u6e2c\u7684\u6a19\u7c64\u548c\u771f\u5be6\u6a19\u7c64\u3002 2 \u9762\u4e0a\u7684\u56db\u500b\u8c61\u9650\u985e\u5225(Q1\u3001Q2\u3001Q3\u3001Q4)\u3002\u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u7d50\u679c\u5982\u8868 8 \u6240\u793a\uff0c\u5305\u542b\u6b4c\u540d\u3001\u6b4c Transfer Learning 1e-6 0.67836 22 \u985e\u662f\u5c07\u6a21\u578b\u8f38\u51fa\u7684\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u57fa\u65bc\u4e2d\u6027\u503c 5 \u4f5c\u70ba\u95be\u503c\uff0c\u8f49\u63db\u70ba\u5ea7\u6a19\u5e73 \u7d93\u9077\u79fb\u5b78\u7fd2 1e-5 0.65696 \u5206\u985e\u7d50\u679c\uff0c\u7b2c\u4e8c\u5c0f\u7bc0\u66f4\u9032\u4e00\u6b65\u8aaa\u660e Valence-Arousal \u5e73\u9762\u7684\u5206\u985e\u7d50\u679c\u3002\u4e2d\u6587\u6b4c\u8a5e\u7684\u60c5\u7dd2\u5206 3 \u6b64\u6bb5\u843d\u8a0e\u8ad6\u524d\u8ff0\u7684\u4e2d\u6587\u60c5\u7dd2\u6a21\u578b\u61c9\u7528\u65bc\u4e2d\u6587\u6b4c\u8a5e\u5206\u985e\u4e4b\u7d50\u679c\uff0c\u7b2c\u4e00\u5c0f\u7bc0\u63cf\u8ff0\u4e2d\u6587\u6b4c\u8a5e\u7684 10 \u4ee5\u9077\u79fb\u5b78\u7fd2\u6539\u5584\u6df1\u5ea6\u795e\u7d93\u7db2\u8def\u6a21\u578b\u65bc\u4e2d\u6587\u6b4c\u8a5e\u60c5\u7dd2\u8fa8\u8b58 9 4.2 \u4e2d\u6587\u6b4c\u8a5e\u60c5\u7dd2\u6a21\u578b\u4e4b\u9a57\u8b49 (Verification of Chinese Lyrics Emotion Model) \u5ed6\u5bb6\u8abc \u7b49 3.26 3.2.2 \u5be6\u65bd\u7d30\u7bc0 (Implementation Details) 3.2.3 \u6b4c\u8a5e\u60c5\u7dd2\u4e4b\u5206\u985e (Lyrics Emotion Classification) \u6b64\u968e\u6bb5\u7684\u76ee\u7684\u5728\u65bc\u9a57\u8b49\u672c\u7814\u7a76\u63d0\u51fa\u7684\u65b9\u6cd5\u80fd\u5728\u672a\u5b78\u7fd2\u904e\u6b4c\u8a5e\u6587\u672c\u7684\u60c5\u6cc1\u4e0b\uff0c\u80fd\u5c0d\u65bc\u6b4c\u8a5e \u6587\u672c\u9032\u884c\u60c5\u7dd2\u7684\u6a19\u8a3b\u3002\u9996\u5148\uff0c\u5c07\u6b4c\u8a5e\u6587\u672c\u9032\u884c\u8207\u7b2c\u4e00\u5c0f\u7bc0\u540c\u6a23\u7684\u9810\u8655\u7406\u5f8c\u9001\u5165\u6a21\u578b\u9032\u884c \u9810\u6e2c\uff0c\u8f38\u51fa\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\uff0c\u5176\u7bc4\u570d\u70ba 0 \u5230 9\u3002\u672c\u7814\u7a76\u4f9d\u7167\u539f\u8cc7\u6599\u96c6\u7684\u6558\u8ff0 (Yu et al., 2016; Yu et al., 2017)\uff0c\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u90fd\u4ee5\u4e2d\u6027\u503c 5 \u70ba\u95be\u503c\uff0c\u56e0 \u6b64\uff0c\u82e5\u9810\u6e2c\u51fa\u7684\u6548\u50f9(Valence)\u6578\u503c\u5927\u65bc 5 \u8868\u793a\u6a21\u578b\u9810\u6e2c\u8a72\u6b4c\u8a5e\u70ba\u6b63\u5411\u60c5\u7dd2\u4e26\u6a19\u8a18\u70ba 1\u3001 \u6548\u50f9(Valence)\u6578\u503c\u5c0f\u65bc 5 \u5247\u8868\u793a\u6a21\u578b\u9810\u6e2c\u8a72\u6b4c\u8a5e\u70ba\u8ca0\u5411\u60c5\u7dd2\u4e26\u6a19\u8a18\u70ba 0\uff0c\u82e5\u9810\u6e2c\u559a\u9192 (Arousal)\u6578\u503c\u5927\u65bc 5 \u5247\u8868\u793a\u6a21\u578b\u9810\u6e2c\u8a72\u6b4c\u8a5e\u70ba\u6fc0\u52d5\u60c5\u7dd2\u4e26\u6a19\u8a18\u70ba 1\u3001\u559a\u9192(Arousal)\u503c\u5c0f\u65bc 5 \u8868\u793a\u6a21\u578b\u9810\u6e2c\u8a72\u6b4c\u8a5e\u70ba\u5e73\u975c\u60c5\u7dd2\u4e26\u6a19\u8a18\u70ba 0\u3002\u6211\u5011\u5c07\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u6a19\u8a18 \u4e4b\u5f8c\u7684\u7d50\u679c\u8f49\u70ba\u56db\u500b\u8c61\u9650 Q1\u3001Q2\u3001Q3 \u548c Q4 \u7684\u60c5\u7dd2\u5206\u985e\u4e4b\u7d50\u679c\uff0c\u6700\u5f8c\u9a57\u8b49\u5176\u5206\u985e\u6548\u679c\u3002 4. \u5be6\u9a57\u7d50\u679c (Experimental Result) \u672c\u7ae0\u7bc0\u5c07\u5be6\u9a57\u7d50\u679c\u5206\u70ba\u5169\u500b\u968e\u6bb5\uff0c\u7b2c\u4e00\u968e\u6bb5\u662f\u4e2d\u6587\u60c5\u7dd2\u7684\u6a21\u578b\u8a13\u7df4\u7d50\u679c\uff0c\u7b2c\u4e8c\u968e\u6bb5\u662f\u9a57 \u8b49\u6a21\u578b\u9810\u6e2c\u6b4c\u8a5e\u60c5\u7dd2\u7684\u6210\u6548\uff0c\u6bcf\u500b\u6bb5\u843d\u5305\u542b\u5728\u4e0d\u540c\u7684\u6a21\u578b\u67b6\u69cb\u548c\u4e0d\u540c\u8a13\u7df4\u65b9\u5f0f\u7684\u5be6\u9a57\u7d50 \u679c\u3002 \u5b78\u7fd2\u901f\u7387 (Lr) \u640d\u5931 (loss) Epoch \u5f9e 0 \u8a13\u7df4 CVAT From Scratch Valence 1e-5 0.50338 12 1e-6 0.51199 44 5e-5 0.55236 6 Arousal 1e-5 0.87107 5 1e-6 0.93317 28 5e-5 0.9303 10 \u7d93\u9077\u79fb\u5b78\u7fd2 Transfer Learning Valence 1e-5 0.47898 4 1e-6 0.46624 15 5e-5 0.53422 5 Arousal 1e-5 0.84259 1 1e-6 0.88142 7 5e-5 0.93479 11 \u5982\u8868 3 [\u65b9\u6cd5 \u672a\u7d93\u9077\u79fb\u7684 CVAT \u5206\u985e\u7d50\u679c\u4e2d\uff0cQ1 \u6709 14%\u88ab\u932f\u5206\u6210 Q2\uff0c25.6%\u88ab\u932f\u5206\u6210 Q3\uff0c35%\u88ab\u932f \u5206\u6210 Q4\uff0cQ2 \u53ea\u6709 62%\u5206\u985e\u6b63\u78ba\uff0c\u5176\u9918 37%\u7686\u88ab\u932f\u5206\u6210 Q3\uff0cQ3 \u6709 14%\u88ab\u932f\u5206\u6210 Q2\uff0c\u5176 \u8868 4. [Table 4. CVAP results on the single-output model with/without transfer learning] \u65b9\u6cd5 \u8f38\u51fa \u5b78\u7fd2\u901f\u7387 (Lr) \u640d\u5931 (loss) Epoch \u9918\u5206\u985e\u6b63\u78ba\uff0cQ4 \u6709 7.7%\u88ab\u932f\u5206\u6210 Q2\uff0c69%\u5bb9\u6613\u88ab\u932f\u5206\u6210 Q3 \uff0c\u53ea\u6709 29%\u5206\u985e\u6b63\u78ba\u3002</td></tr><tr><td/><td>1e-5</td><td>0.3788</td><td>24</td></tr><tr><td>Valence</td><td>1e-6</td><td>0.39498</td><td>35</td></tr><tr><td colspan=\"4\">5e-5 [Figure 2. Training architecture of multi-output and single-output models] 0.51918 4 \u55ae\u8f38\u51fa\u67b6\u69cb Single-output 1e-5 0.77339 12</td></tr><tr><td>Arousal</td><td>1e-6</td><td>0.92874</td><td>19</td></tr><tr><td/><td>5e-5</td><td>1.8867</td><td>12</td></tr></table>", |
| "num": null |
| }, |
| "TABREF1": { |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td/><td colspan=\"3\">Prediction by CVAT: Transfer Learning</td><td/><td/></tr><tr><td/><td/><td>Q1</td><td>Q2</td><td>Q3</td><td>Q4</td></tr><tr><td/><td>Q1</td><td>23</td><td>8</td><td>1</td><td>11</td></tr><tr><td>True</td><td>Q2</td><td>0</td><td>44</td><td>1</td><td>0</td></tr><tr><td/><td>Q3</td><td>0</td><td>7</td><td>36</td><td>0</td></tr><tr><td/><td>Q4</td><td>1</td><td>2</td><td>19</td><td>17</td></tr><tr><td/><td colspan=\"3\">Prediction by CVAT: Training from Scratch</td><td/><td/></tr><tr><td/><td/><td>Q1</td><td>Q2</td><td>Q3</td><td>Q4</td></tr><tr><td/><td>Q1</td><td>11</td><td>6</td><td>11</td><td>15</td></tr><tr><td>True</td><td>Q2</td><td>0</td><td>28</td><td>17</td><td>0</td></tr><tr><td/><td>Q3</td><td>0</td><td>6</td><td>37</td><td>0</td></tr><tr><td/><td>Q4</td><td>0</td><td>3</td><td>27</td><td>9</td></tr></table>", |
| "num": null |
| }, |
| "TABREF4": { |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td/><td>YouTuber preference</td><td>Video preference</td><td>Excitement level</td></tr><tr><td>Krippendorff's Alpha</td><td>0.5829</td><td>0.4545</td><td>0.3898</td></tr><tr><td>Fleiss's Kappa</td><td>0.5840</td><td>0.4594</td><td>0.3928</td></tr><tr><td>Cronbach's Alpha</td><td>0.8520</td><td>0.7264</td><td>0.900</td></tr></table>", |
| "num": null |
| }, |
| "TABREF5": { |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td>Task</td><td>Class</td><td>Number</td></tr><tr><td>T1</td><td>Non-Relative Non-Relative</td><td>8223 (75%) 2776 (25%)</td></tr><tr><td/><td>Unlike</td><td>287 (10%)</td></tr><tr><td>T2</td><td>Neutral</td><td>784 (28%)</td></tr><tr><td/><td>Like</td><td>1705 (61%)</td></tr><tr><td>T3</td><td>Non-Relative Relative</td><td>1036 (10%) 9775 (90%)</td></tr><tr><td/><td>Unlike</td><td>659 (7%)</td></tr><tr><td>T4</td><td>Neutral</td><td>5842 (60%)</td></tr><tr><td/><td>Like</td><td>3274 (33%)</td></tr><tr><td/><td>Barely excited</td><td>2788 (30%)</td></tr><tr><td/><td>Slightly excited</td><td>2478 (27%)</td></tr><tr><td>T5</td><td>Excited</td><td>2341 (25%)</td></tr><tr><td/><td>Fairly excited</td><td>1136 (12%)</td></tr><tr><td/><td>Hyper excited</td><td>471 (5%)</td></tr></table>", |
| "num": null |
| }, |
| "TABREF6": { |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td colspan=\"2\">Model Description</td></tr><tr><td>M1</td><td>BERT model using bert-base-multilingual-cased pre-trained model</td></tr><tr><td>M2</td><td>BERT model using distilbert-base-multilingual-cased pre-trained model</td></tr><tr><td>M3</td><td>BERT model using bert-base-multilingual-cased pre-trained model + YouTuber embedding</td></tr><tr><td>M4</td><td>BERT model using distilbert-base-multilingual-cased pre-trained model + YouTuber embedding</td></tr><tr><td>M5</td><td>RandomForest</td></tr><tr><td>M6</td><td>Xgboost</td></tr><tr><td>M7</td><td>SVM</td></tr><tr><td>M8</td><td>RandomForest + YouTuber embedding</td></tr><tr><td>M9</td><td>Xgboost + YouTuber embedding</td></tr><tr><td>M10</td><td>SVM + YouTuber embedding</td></tr></table>", |
| "num": null |
| }, |
| "TABREF7": { |
| "html": null, |
| "type_str": "table", |
| "text": ". How to Fine-Tune BERT for Text Classification? In M. Sun, X. Huang, H. Ji, Z. Liu & Y. Liu (eds.), Chinese Computational Linguistics (p./pp. 194-206), Cham: Springer International Publishing. Usherwood, P., & Smit, S. (2019). Low-Shot Classification: A Comparison of Classical and Deep Transfer Machine Learning Approaches. arXiv preprint arXiv:1907.07543. Zhu, Y., Yan, E. & Wang, F (2017). Semantic relatedness and similarity of biomedical terms: examining the effects of recency, size, and section of biomedical publications on the performance of word2vec. BMC Med Inform Decis Mak, 17, 95. The Association for Computational Linguistics and Chinese Language Processing", |
| "content": "<table><tr><td colspan=\"2\">Computational Linguistics and Chinese Language Processing</td></tr><tr><td colspan=\"2\">Vol. 26, No. 2, December 2021, pp. 35-48</td><td>35</td></tr><tr><td colspan=\"3\">\u55ae\u4f86\u8aaa\u5c0d\u6620\u5f0f\u6240\u6c42\u53d6\u7684\u51fd\u6578\uff0c\u5c0d\u65bc\u8f38\u5165\u8a0a\u865f\u7279\u5fb5\u7684\u904b\u7b97\u53ef\u4ee5\u662f\u4efb\u610f\u7531\u6240\u4f7f\u7528\u4e4b\u6df1\u5ea6\u5b78\u7fd2 \uf0d3 \u4f7f\u7528\u4f4e\u901a\u6642\u5e8f\u5217\u8a9e\u97f3\u7279\u5fb5 \u6a21\u578b\u5b9a\u7fa9\u7684\u975e\u7dda\u6027\u904b\u7b97\uff0c\u800c\u906e\u7f69\u5f0f\u6240\u6c42\u53d6\u7684\u51fd\u6578\u904b\u7b97\uff0c\u5247\u7c21\u5316\u6216\u9650\u5236\u70ba\u5c0d\u8f38\u5165\u8a0a\u865f\u7279\u5fb5</td></tr><tr><td>\u4f5c\u4e58\u6cd5</td><td>\u8a13\u7df4\u7406\u60f3\u6bd4\u7387\u906e\u7f69\u6cd5\u4e4b\u8a9e\u97f3\u5f37\u5316</td></tr><tr><td colspan=\"3\">https://doi.org/10.1186/s12911-017-0498-1 Zhang, X., & Zheng, X. (2016). Comparison of text sentiment analysis based on machine learning. In Proceedings of the 15th international symposium on parallel and distributed computing (ISPDC 2016), 230-233. https://doi.org/10.1109/ISPDC.2016.39 Employing Low-\u6458\u8981</td></tr><tr><td/><td colspan=\"2\">\u5728\u8af8\u591a\u57fa\u65bc\u6df1\u5ea6\u5b78\u7fd2\u4e4b\u8a9e\u97f3\u5f37\u5316\u6cd5\u4e2d\uff0c\u906e\u7f69\u5f0f(masking-based)\u5f37\u5316\u6cd5\u6c42\u53d6\u4e00\u500b</td></tr><tr><td/><td colspan=\"2\">\u906e\u7f69\u8207\u96dc\u8a0a\u8a9e\u97f3\u4e4b\u6642\u983b\u5716\u76f8\u4e58\u3001\u85c9\u6b64\u4f7f\u6240\u5f97\u4e58\u7a4d\u4e4b\u65b0\u6642\u983b\u5716\u6240\u542b\u96dc\u8a0a\u6210\u5206\u964d\u4f4e\u3001</td></tr><tr><td/><td colspan=\"2\">\u4ee5\u91cd\u5efa\u76f8\u5c0d\u4e7e\u6de8\u7684\u8a9e\u97f3\u8a0a\u865f\u3002\u5728\u7528\u4ee5\u8a13\u7df4\u906e\u7f69\u4e4b\u6df1\u5ea6\u6a21\u578b\u5176\u8f38\u5165\u7279\u5fb5\u7684\u9078\u53d6\u4e0a\uff0c</td></tr><tr><td colspan=\"3\">\u8a31\u591a\u9577\u671f\u4ee5\u4f86\u7528\u4ee5\u8a9e\u97f3\u8fa8\u8b58\u7684\u7279\u5fb5\u3001\u5982\u6885\u723e\u5012\u5012\u983b\u8b5c\u3001\u632f\u5e45\u8abf\u8b8a\u6642\u983b\u5716\u3001\u611f\u77e5 \u6df1\u5ea6\u985e\u795e\u7d93\u6a21\u578b\u8207\u76f8\u95dc\u4e4b\u5b78\u7fd2\u6f14\u7b97\u6cd5\u7684\u9ad8\u5ea6\u767c\u5c55\uff0c\u5f15\u767c\u8a31\u591a\u79d1\u6280\u7814\u7a76\u7684\u7a7a\u524d\u7a81\u7834\u8207\u5275\u65b0\uff0c \u7dda\u6027\u4f30\u6e2c\u4fc2\u6578\u7b49\u90fd\u662f\u9069\u5408\u7684\u9078\u64c7\u3001\u53ef\u4f7f\u8a13\u7df4\u6240\u5f97\u7684\u906e\u7f69\u9054\u5230\u6709\u6548\u7684\u8a9e\u97f3\u5f37\u5316\u6548 \u904e\u5f80\u7684\u8a31\u591a\u6280\u8853\u958b\u767c\uff0c\u5e38\u662f\u57fa\u65bc\u89e3\u91cb\u601d\u7dad\u3001\u5728\u591a\u6b21\u8a66\u932f\u4e4b\u5f8c\u627e\u5230\u4e00\u500b\u53ef\u884c\u65b9\u6848\uff0c\u518d\u5c0d\u6b64 \u679c\u3002\u53e6\u5916\uff0c\u50b3\u7d71\u4e0a\u82e5\u5c07\u8a9e\u97f3\u7279\u5fb5\u4e4b\u6642\u5e8f\u5217\u4f5c\u4f4e\u901a\u6ffe\u6ce2\u8655\u7406\uff0c\u53ef\u4ee5\u6291\u5236\u96dc\u8a0a\u6240\u5e36 \u53ef\u884c\u65b9\u6848\u8ce6\u4e88\u4eba\u5011\u5c08\u696d\u7684\u89e3\u91cb\uff0c\u7136\u800c\u6df1\u5ea6\u5b78\u7fd2\u5247\u666e\u904d\u57fa\u65bc\u7d71\u8a08\u601d\u7dad\u3001\u4e26\u4e0d\u8457\u91cd\u65bc\u65b9\u6cd5\u5728 \u4f86\u7684\u5931\u771f\uff0c\u56e0\u6b64\uff0c\u5728\u672c\u7814\u7a76\u4e2d\uff0c\u6211\u5011\u5617\u8a66\u5c07\u5404\u7a2e\u8a9e\u97f3\u7279\u5fb5\u6642\u5e8f\u5217\uff0c\u85c9\u7531\u96e2\u6563\u5c0f \u89e3\u91cb\u4e0a\u7684\u5408\u7406\u6027\uff0c\u800c\u662f\u5617\u8a66\u5c07\u5927\u91cf\u89c0\u5bdf(\u8f38\u5165)\u548c\u5c0d\u61c9\u7d50\u679c(\u8f38\u51fa)\u7684\u95dc\u806f\u6027\u85c9\u7531\u6df1\u5ea6 \u6ce2\u8f49\u63db\u7684\u65b9\u5f0f\u52a0\u4ee5\u4f4e\u901a\u6ffe\u6ce2\uff0c\u518d\u7528\u5b83\u5011\u4f86\u8a13\u7df4\u8a9e\u97f3\u906e\u7f69\u7684\u6df1\u5ea6\u6a21\u578b\uff0c\u63a2\u7a76\u5176\u662f \u5426\u80fd\u4f7f\u6240\u5b78\u7fd2\u4e4b\u906e\u7f69\u80fd\u5c0d\u65bc\u539f\u59cb\u96dc\u8a0a\u8a9e\u97f3\u4e4b\u6642\u983b\u5716\u6709\u66f4\u4f73\u7684\u8a9e\u97f3\u5f37\u5316\u6548\u679c\u3002\u5728 \u985e\u795e\u7d93\u7db2\u8def\u52a0\u4ee5\u8a6e\u91cb\uff0c\u4ee5\u671f\u5c0d\u65bc\u65b0\u7684\u89c0\u5bdf\u80fd\u7cbe\u6e96\u9810\u6e2c\u51fa\u5c0d\u61c9\u7684\u7d50\u679c\u3002</td></tr><tr><td colspan=\"3\">\u6211\u5011\u7684\u521d\u6b65\u5be6\u9a57\u88e1\uff0c\u5728\u4eba\u8072\u96dc\u8a0a\u74b0\u5883\u4e2d\uff0c\u6211\u5011\u767c\u73fe\u4e0a\u8ff0\u4e4b\u4f4e\u901a\u6ffe\u6ce2\u6240\u5f97\u4e4b\u7279\u5fb5 \u5728\u8a9e\u97f3\u8655\u7406\u7684\u9818\u57df\u4e2d\uff0c\u8fd1\u5e74\u4f86\u57fa\u65bc\u6df1\u5ea6\u5b78\u7fd2\u6240\u958b\u767c\u51fa\u7684\u6f14\u7b97\u6cd5\u4e5f\u7433\u746f\u6eff\u76ee\uff0c\u4e14\u56e0\u8a13</td></tr><tr><td colspan=\"3\">\u5e8f\u5217\u3001\u76f8\u8f03\u65bc\u539f\u59cb\u7279\u5fb5\u5e8f\u5217\u800c\u8a00\u6240\u5b78\u7fd2\u800c\u5f97\u7684\u6df1\u5ea6\u6a21\u578b\uff0c\u80fd\u66f4\u6709\u6548\u5730\u63d0\u5347\u6e2c\u8a66 \u7df4\u8cc7\u6599\u7684\u53ef\u53d6\u5f97\u6027\u8d8a\u4f86\u8d8a\u9ad8\uff0c\u9019\u4e9b\u6f14\u7b97\u6cd5\u5728\u5b78\u7fd2\u8207\u9810\u6e2c\u7d50\u679c\u7684\u80fd\u529b\u4e5f\u96a8\u4e4b\u589e\u5f37\u3002\u4ee5\u672c\u7814</td></tr><tr><td colspan=\"3\">\u8a9e\u97f3\u4e4b\u54c1\u8cea\u8207\u53ef\u8b80\u6027\u3002 \u7a76\u8457\u91cd\u7684\u8a9e\u97f3\u5f37\u5316\u6cd5\u70ba\u4f8b\uff0c\u57fa\u65bc\u6df1\u5ea6\u985e\u795e\u7d93\u6a21\u578b\u4e4b\u5404\u5f0f\u8a9e\u97f3\u5f37\u5316\u67b6\u69cb\u5176\u8868\u73fe\u5e38\u8d85\u8d8a\u7d93\u5178</td></tr><tr><td colspan=\"3\">\u4e14\u5bcc\u6709\u9ad8\u5ea6\u7406\u8ad6\u6839\u64da\u7684\u6f14\u7b97\u6cd5\uff0c\u6216\u662f\u4ee5\u5f8c\u8005\u7684\u6f14\u7b97\u6cd5\u7684\u539f\u578b (prototype) \u51fa\u767c\uff0c\u4f46\u914d\u5408\u6df1</td></tr><tr><td colspan=\"2\">\u5ea6\u985e\u795e\u7d93\u7db2\u8def\u4f86\u6709\u6548\u5b78\u7fd2\u8a72\u6f14\u7b97\u6cd5\u7684\u5404\u9805\u53c3\u6578\uff0c\u4f7f\u5176\u8a9e\u97f3\u5f37\u5316\u6548\u679c\u66f4\u4f73\u3002</td></tr><tr><td colspan=\"3\">\u6839\u64da\u6587\u737b(Wang et al., 2014)\uff0c\u8a31\u591a\u57fa\u65bc\u6df1\u5ea6\u5b78\u7fd2\u4e4b\u8a9e\u97f3\u5f37\u5316\u6cd5\u6839\u64da\u5176\u8a13\u7df4\u76ee\u6a19\u5927\u81f4</td></tr><tr><td colspan=\"3\">\u53ef\u4ee5\u5206\u70ba\u5169\u5927\u7bc4\u7587\uff1a\u5c0d\u6620\u5f0f (mapping) \u8207\u906e\u7f69\u5f0f (masking)\uff0c\u524d\u8005\u76f4\u63a5\u6c42\u53d6\u4e00\u500b\u5c0d\u6620\u51fd</td></tr><tr><td colspan=\"3\">\u6578\uff0c\u4f7f\u6b64\u5c0d\u6620\u51fd\u6578\u4e4b\u7406\u60f3\u8f38\u51fa\u70ba\u4e7e\u6de8\u8a9e\u97f3\u7684\u5448\u73fe\u5f0f(\u7279\u5fb5)\uff0c\u5982\u6642\u57df\u8a0a\u865f\u6ce2\u5f62\u3001\u6642\u983b\u5716</td></tr><tr><td colspan=\"3\">(spectrogram) \u6216\u8033\u8778\u6642\u983b\u8b5c\u5716 (cochleagram)\uff0c\u5f8c\u8005\u662f\u6c42\u53d6\u4e00\u500b\u906e\u7f69 (mask)\uff0c\u7528\u4ee5\u8207\u539f\u59cb</td></tr><tr><td colspan=\"3\">\u8f38\u5165\u8a0a\u865f\u6216\u7279\u5fb5\u5448\u73fe\u4f5c\u9ede\u5c0d\u9ede\u7684\u76f8\u4e58\uff0c\u4f7f\u76f8\u4e58\u5f8c\u7684\u8a0a\u865f\u5448\u73fe\u5f0f\u80fd\u8da8\u8fd1\u4e7e\u6de8\u6642\u7684\u72c0\u614b\u3002\u7c21</td></tr></table>", |
| "num": null |
| }, |
| "TABREF8": { |
| "html": null, |
| "type_str": "table", |
| "text": "\u3001\u983b \u8b5c\u5f37\u5ea6\u906e\u7f69 (spectral magnitude mask, SMM) (Wang et al., 2014)\u3001\u8907\u6578\u7406\u60f3\u6bd4\u4f8b\u906e\u7f69 (complex ideal ratio mask, cIRM) (Williamson et al., 2016)\u3001\u76f8\u4f4d\u654f\u611f\u578b\u906e\u7f69 (phase-sensitive mask, PSM) (Erdogan et al., 2015) \u7b49\u3002", |
| "content": "<table><tr><td>\u5728\u672c\u7814\u7a76\u4e2d\uff0c\u4e3b\u8981\u662f\u91dd\u5c0d\u4e0a\u8ff0\u4e4b\u906e\u7f69\u5f0f\u8a9e\u97f3\u5f37\u5316\u6cd5\u52a0\u4ee5\u6539\u9032\uff0c\u6211\u5011\u63d0\u51fa\u5c0d\u65bc\u8a13\u7df4\u906e</td></tr><tr><td>\u7f69\u6a21\u578b\u7684\u8f38\u5165\u96dc\u8a0a\u8a9e\u97f3\u7684\u7279\u5fb5\u6642\u5e8f\u5217\u4f5c\u7c21\u55ae\u7684\u9810\u8655\u7406 (pre-processing)\uff0c\u4f7f\u5176\u5305\u542b\u7684\u96dc\u8a0a</td></tr><tr><td>\u5931\u771f\u8f03\u4f4e\uff0c\u4ee5\u671f\u5728\u4e4b\u5f8c\u7684\u8a13\u7df4\u906e\u7f69\u6b65\u9a5f\u80fd\u66f4\u52a0\u7cbe\u78ba\u3002\u800c\u4f7f\u7528\u7684\u9810\u8655\u7406\u65b9\u6cd5\uff0c\u662f\u900f\u904e\u7c21\u6613</td></tr><tr><td>\u7684\u4e00\u968e\u96e2\u6563\u5c0f\u6ce2\u8f49\u63db (discrete wavelet transform, DWT) (Mallat, 1999)]\uff0c\u5c07\u7279\u5fb5\u6642\u5e8f\u5217\u5206</td></tr><tr><td>\u70ba\u9ad8\u4f4e\u5169\u8abf\u8b8a\u983b\u5e36 (modulation frequency bands)\uff0c\u7136\u5f8c\u85c9\u7531\u4e00\u6b0a\u91cd\u7684\u76f8\u4e58\u4f86\u964d\u4f4e\u9ad8\u8abf\u8b8a</td></tr><tr><td>\u983b\u5e36\u4e4b\u5e8f\u5217\u7684\u632f\u5e45\uff0c\u518d\u5c07\u5176\u8207\u539f\u59cb\u4f4e\u8abf\u8b8a\u983b\u5e36\u5e8f\u5217\u642d\u914d\u3001\u900f\u904e\u4e00\u968e\u53cd\u96e2\u6563\u5c0f\u6ce2\u8f49\u63db</td></tr><tr><td>(inverse discrete wavelet transform, IDWT) \u91cd\u5efa\u7279\u5fb5\u5e8f\u5217\uff0c\u518d\u4f7f\u7528\u6b64\u76f8\u7576\u65bc\u900f\u904e\u4f4e\u901a\u6ffe\u6ce2</td></tr><tr><td>\u8655\u7406\u5f8c\u7684\u7279\u5fb5\u5e8f\u5217\u4f86\u8a13\u7df4\u906e\u7f69\u6a21\u578b\u3002</td></tr><tr><td>\u4e0a\u8ff0\u4f4e\u901a\u6ffe\u6ce2\u4e4b\u8655\u7406\uff0c\u4e3b\u8981\u662f\u57fa\u65bc\u5148\u524d\u8af8\u591a\u5b78\u8005\u6240\u63d0\u51fa\u7684\u89c0\u5bdf(Kanedera et al., 1997;</td></tr><tr><td>Chen & Bilmes, 2007)\uff1a\u4e7e\u6de8\u8a9e\u97f3\u7279\u5fb5\u6642\u5e8f\u5217\u4e3b\u8981\u5206\u5e03\u983b\u7387\u5728 1 Hz \u81f3 16 Hz \u4e4b\u9593\uff0c\u4ee5\u4e00\u822c</td></tr><tr><td>\u7684\u97f3\u6846\u53d6\u6a23\u7387 100 Hz \u800c\u8a00\uff0c\u7279\u5fb5\u5e8f\u5217\u53ef\u5305\u542b\u7684(\u8abf\u8b8a)\u983b\u5e36\u70ba[0,50 Hz]\uff0c\u56e0\u6b64\u5f8c\u534a\u983b\u5e36</td></tr><tr><td>\u9bae\u5c11\u5305\u542b\u8a9e\u97f3\u6210\u5206\uff0c\u6291\u5236\u6b64\u983b\u5e36\u4e0d\u6703\u5c0d\u8a9e\u97f3\u9020\u6210\u660e\u986f\u5931\u771f\uff0c\u4f46\u53ef\u6709\u6548\u6291\u5236\u96dc\u8a0a\u7684\u5e72\u64fe\u3002</td></tr><tr><td>\u53e6\u5916\uff0c\u57fa\u65bc\u6587\u737b(Wang et al., 2018)\u6240\u8ff0\uff0c\u4f7f\u7528\u5c0f\u6ce2\u8f49\u63db\u5206\u89e3\u8a9e\u97f3\u7279\u5fb5\u6642\u5e8f\u5217\u3001\u6d88\u9664</td></tr><tr><td>\u5176\u7d30\u7bc0\u4fc2\u6578 (detail coefficients\uff0c\u76f8\u7576\u65bc\u8abf\u8b8a\u9ad8\u983b\u6210\u5206) \u5f8c\u91cd\u5efa\u4e4b\u8a9e\u97f3\u7279\u5fb5\uff0c\u5728\u96dc\u8a0a\u74b0\u5883</td></tr><tr><td>\u4e0b\u6709\u660e\u986f\u9032\u6b65\u7684\u8a9e\u97f3\u8fa8\u8b58\u7387\uff0c\u6211\u5011\u53c3\u7167\u9019\u6a23\u7684\u505a\u6cd5\u4f86\u5be6\u73fe\u524d\u8ff0\u4e4b\u8a9e\u97f3\u7279\u5fb5\u5e8f\u5217\u7684\u4f4e\u901a\u6ffe</td></tr><tr><td>\u6ce2\u8655\u7406\uff0c\u671f\u8a31\u5b83\u5c0d\u61c9\u7684\u906e\u7f69\u6df1\u5ea6\u6a21\u578b\u80fd\u5f97\u5230\u66f4\u4f73\u7684\u8a9e\u97f3\u5f37\u5316\u6548\u679c\u3002</td></tr></table>", |
| "num": null |
| }, |
| "TABREF10": { |
| "html": null, |
| "type_str": "table", |
| "text": "\u7684\u5ba2 \u89c0\u6307\u6a19\u3001STOI \u5206\u6578(Taal et al., 2011)\u4f5c\u70ba\u8a9e\u97f3\u53ef\u8b80\u6027(intelligibility) \u7684\u5ba2\u89c0\u6307\u6a19\uff0cPESQ \u5206\u6578\u4ecb\u65bc-0.5 \u8207 4.5 \u4e4b\u9593\uff0c STOI \u5206\u6578\u4ecb\u65bc 0 \u8207 1 \u4e4b\u9593\uff0c\u5206\u6578\u8d8a\u9ad8\u4ee3\u8868\u8a9e\u97f3\u7684\u54c1\u8cea/\u53ef\u8b80 \u6027\u8d8a\u4f73\u3002 \u5217\u51fa\u4e86\u5728\u984d\u5916\u4f7f\u7528\u5dee\u91cf\u7279\u5fb5\u6642\uff0c\u7d66\u5b9a\u8f38\u5165\u7279\u5fb5\u4e4b\u6642\u5e8f\u5217\u4e4b\u9ad8\u983b\u4fc2\u6578\u4e0d\u540c \u7684\u6b0a\u91cd \uff0c\u7d93\u8a13\u7df4\u4e4b IRM \u6240\u5c0d\u61c9\u7684 STOI \u8207 PESQ \u5206\u6578\uff0c\u5f9e\u6b64\u8868\u4e2d\uff0c\u6211\u5011\u6709\u4ee5\u4e0b\u7684\u767c\u73fe\uff1a 1. \u76f8\u8f03\u65bc\u539f\u59cb IRM \u800c\u8a00\uff0c\u4f7f\u7528\u8f03\u5927\u6b0a\u91cd (0.75) \u5728 STOI \u8207 PESQ \u4e0a\u90fd\u6709\u8f03\u660e\u986f\u7684\u6539 \u628a\u8868 6\u30017 \u8207\u8868 4\u30015 \u7684\u6578\u64da\u76f8\u6bd4\u8f03\uff0c\u6211\u5011\u53ef\u4ee5\u770b\u5230\u589e\u52a0\u8a13\u7df4\u8cc7\u6599\u91cf\u53ef\u4ee5\u540c\u6642\u4f7f\u6e2c\u8a66\u8cc7 \u6599\u7684 PESQ \u8207 STOI \u7684\u5206\u6578\u90fd\u660e\u986f\u9032\u6b65\uff0c\u9032\u800c\u9a57\u8b49\u8a13\u7df4\u8cc7\u6599\u7684\u589e\u52a0\u53ef\u4ee5\u4f7f IRM \u6a21\u578b\u5728 \u8a9e\u97f3\u5f37\u5316\u7684\u6548\u679c\u66f4\u597d\u3002 2. \u7576\u6c92\u6709\u4f7f\u7528\u5dee\u91cf\u7279\u5fb5\u6642\uff0c\u82e5\u589e\u52a0\u8a13\u7df4\u8a9e\u6599\uff0c\u5728 STOI \u5206\u6578\u4e0a\uff0c\u539f\u59cb\u7684 IRM \u6bd4\u4f7f\u7528\u4f4e\u901a \u6ffe\u6ce2\u6cd5\u5c0d\u61c9\u7684 IRM \u6548\u679c\u8f03\u4f73\uff0c\u4ee3\u8868\u6b64\u6642\u4f4e\u901a\u7387\u6ce2\u8655\u7406\u4e26\u672a\u5e36\u4f86 STOI \u5206\u6578\u7684\u9032\u6b65\uff0c\u7136 \u800c\u5728 PESQ \u5206\u6578\u4e0a\uff0c\u7576\u914d\u5408\u4f4e\u901a\u6ffe\u6ce2\u6642\uff0c\u53ef\u4ee5\u6bd4\u539f\u59cb IRM \u9054\u5230\u66f4\u4f73\u7684\u7d50\u679c\uff0c\u4f8b\u5982\u7576 \u4f7f\u7528 0.75\u7684\u6b0a\u91cd\u6642\uff0cMFCC \u5c0d\u61c9\u4e4b PESQ \u503c\u53ef\u4ee5\u9032\u4e00\u6b65\u63d0\u5347\u81f3 1.8192\u3002\u7136\u800c\uff0c\u7372 The Association for Computational Linguistics and Chinese Language Processing \u6574\u5408\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u8207\u5f8c\u7f6e\u6ffe\u6ce2\u5668\u65bc \u63d0\u5347\u500b\u4eba\u5316\u5408\u6210\u8a9e\u97f3\u4e4b\u8a9e\u8005\u76f8\u4f3c\u5ea6", |
| "content": "<table><tr><td>\u4f7f\u7528\u4f4e\u901a\u6642\u5e8f\u5217\u8a9e\u97f3\u7279\u5fb5\u8a13\u7df4\u7406\u60f3\u6bd4\u7387\u906e\u7f69\u6cd5\u4e4b\u8a9e\u97f3\u5f37\u5316 \u9673\u5f65\u540c\u8207\u6d2a\u5fd7\u5049 39 \u9673\u5f65\u540c\u8207\u6d2a\u5fd7\u5049 \u9673\u5f65\u540c\u8207\u6d2a\u5fd7\u5049 \u4f7f\u7528\u4f4e\u901a\u6642\u5e8f\u5217\u8a9e\u97f3\u7279\u5fb5\u8a13\u7df4\u7406\u60f3\u6bd4\u7387\u906e\u7f69\u6cd5\u4e4b\u8a9e\u97f3\u5f37\u5316 45 \u9673\u5f65\u540c\u8207\u6d2a\u5fd7\u5049</td></tr><tr><td>\u6b65\u9a5f\u56db\uff1a\u53c3\u7167\u4e00\u822c IRM \u6df1\u5ea6\u6a21\u578b\u7684\u8a13\u7df4\u6cd5\uff0c\u6211\u5011\u6539\u4ee5\u65b0\u7684\u7279\u5fb5\u5e8f\u5217 X \u70ba\u8f38\u5165\uff0c\u4ee5\u7406\u60f3 IRM \u906e\u7f69\u503c\u70ba\u76ee\u6a19\u8f38\u51fa\uff0c\u8a13\u7df4 IRM \u6df1\u5ea6\u6a21\u578b\u3002\u503c\u5f97\u6ce8\u610f\u7684\u662f\uff0c\u82e5\u5f0f(4) , 1 \u4f5c \u4e2d\u7684\u6b0a\u91cd 1\uff0c\u5247\u6240\u8a13\u7df4\u7684 IRM \u6a21\u578b\u8207\u539f\u59cb(\u5373\u4f7f\u7528\u539f\u59cb\u7279\u5fb5\u8a13\u7df4)IRM \u6a21\u578b\u5b8c\u5168\u4e00 \u81f4\u3002 \u6e2c\u8a66\u968e\u6bb5\uff1a \u5c07\u6e2c\u8a66\u4e4b\u8a9e\u53e5\u5982\u540c\u8a13\u7df4\u8a9e\u53e5\u4e4b\u8655\u7406\u7684\u524d\u4e09\u500b\u6b65\u9a5f\u3001\u6c42\u53d6\u4f4e\u901a\u6ffe\u6ce2\u4e4b\u7279\u5fb5\u6642\u5e8f\u5217\uff0c\u5c07\u5176\u901a \u904e\u8a13\u7df4\u5b8c\u6210\u7684 IRM \u6a21\u578b\u6c42\u53d6\u906e\u7f69\u503c\uff0c\u5c07\u906e\u7f69\u503c\u8207\u539f\u8a2d\u5b9a\u4e4b\u5c0d\u61c9\u7684\u6642\u983b\u5716\u4f5c\u9ede\u4e58\u7a4d (dot product)\uff0c\u5373\u53ef\u5f97\u5f37\u5316\u5f8c\u7684\u6642\u983b\u5716\uff0c\u7d93\u7531\u9069\u7576\u7684\u53cd\u8f49\u63db\u91cd\u5efa\u6210\u5f37\u5316\u7248\u7684\u6642\u57df\u8a0a\u865f\u3002 3. \u5be6\u9a57\u8a2d\u7f6e (Experimental Setup) \u5f37\u5316\u7684\u8a9e\u97f3\u8a0a\u865f\u7684\u5dee\u7570\u3002 4.1 \u4f7f\u7528\u6240\u6709\u7a2e\u985e\u4e4b\u8f38\u5165\u7279\u5fb5\u6240\u5f97\u7684IRM\u6548\u80fd\u5206\u6790 \u9996\u5148\uff0c\u8868 1 \u5217\u51fa\u4e86\u6e2c\u8a66\u96dc\u8a0a\u8a9e\u53e5\u5728\u8655\u7406\u524d\u3001\u7d93\u7531\u7406\u60f3 IRM(\u906e\u7f69\u76f4\u63a5\u7531\u4e7e\u6de8\u8a9e\u97f3\u8207\u647b\u96dc \u4e4b\u96dc\u8a0a\u6c42\u5f97)\u53ca\u539f\u59cb IRM(\u4f7f\u7528\u539f\u59cb\u8f38\u5165\u7279\u5fb5\u8a13\u7df4\uff0c\u4e26\u53ef\u80fd\u984d\u5916\u52a0\u5165\u5dee\u91cf\u7279\u5fb5)\u8655\u7406\u5f8c \u6240\u5c0d\u61c9\u7684 PESQ \u8207 STOI \u7684\u5e73\u5747\u503c\u3002\u5f9e\u6b64\u8868\u4e2d\uff0c\u6211\u5011\u53ef\u4ee5\u770b\u5230\uff1a \u8868 2. \u672a\u8655\u7406\u8a9e\u97f3\u8207\u7d93\u904e\u7406\u60f3 IRM\u3001\u539f\u59cb IRM 1 (\u4f7f\u7528\u539f\u7279\u5fb5\u6c42\u53d6) \u3001\u4e0d\u540c\u6b0a\u91cd\u03b1 \u6291\u5236\u8abf\u8b8a\u9ad8\u983b\u4e4b IRM(\u672a\u642d\u914d\u5dee\u91cf\u7279\u5fb5)\u8655\u7406\u5f8c\u5c0d\u61c9\u7684 STOI \u8207 PESQ \u5e73\u5747\u5206 \u6578\u3002\u539f\u7279\u5fb5\u7531\u56db\u7a2e\u7279\u5fb5 (AMS, RASTA-PLP, MFCC, GF) \u6392\u5217\u800c\u5f97 [Table 2. The PESQ and STOI results for the baseline, oracle IRM, original IRM 1 (using the original combo static features) and the lowpass-filtered IRM 1 (using the lowpass filtered combo static features with different assignments of parameter \u03b1)] \u539f\u59cb IRM 1 \u4e0d\u540c\u6b0a\u91cd \u6291\u5236\u8abf\u8b8a\u9ad8\u983b\u4e4b IRM 1 0 0.25 0.50 0.75 STOI 0.6763 0.6767 0.6728 0.6799 \u5217\u3001\u9032\u800c\u6bd4\u8f03\u6ffe\u6ce2\u524d\u8207\u6ffe\u6ce2\u5f8c\u5c0d\u65bc IRM \u6548\u80fd\u7684\u5f71\u97ff\uff0c\u8868 4 \u8207\u8868 5 \u5206\u5225\u5217\u51fa\u5404\u7a2e\u4e0d\u540c\u7279\u5fb5 \u642d\u914d\u4f4e\u901a\u6ffe\u6ce2\u5c0d\u61c9\u4e4b IRM \u6240\u5f97\u4e4b\u6e2c\u8a66\u8a9e\u53e5\u7684 STOI \u8207 PESQ \u5206\u6578\uff0c\u70ba\u4e86\u4f7f\u6574\u9ad4\u6548\u80fd\u512a\u5316 \u8d77\u898b\uff0c\u9019\u88e1\u6211\u5011\u628a\u5dee\u91cf\u7279\u5fb5\u4e00\u4f75\u52a0\u5165\uff0c\u540c\u6642\uff0c\u6211\u5011\u5c07\u524d\u4e00\u7bc0\u56db\u985e\u7279\u5fb5\u7684\u7d44\u5408(\u4ee5\"combo\" \u8868\u793a)\u4e4b\u7d50\u679c\u5217\u5728\u8868\u7684\u6700\u4e0b\u4e00\u5217\uff0c\u4ee5\u4f9b\u6bd4\u8f03\u3002\u5f9e\u9019\u5169\u500b\u8868\u4e4b\u6578\u64da\uff0c\u6211\u5011\u6709\u4ee5\u4e0b\u5e7e\u9ede\u7684\u89c0 \u5bdf\u8207\u8a0e\u8ad6\uff1a 1. \u5c0d\u65bc\u8a9e\u97f3\u53ef\u8b80\u5ea6\u6307\u6a19 STOI \u800c\u8a00\uff0c\u4e0d\u4f7f\u7528\u4f4e\u901a\u6ffe\u6ce2\u4e4b\u56db\u985e\u7279\u5fb5\u4e2d\uff0c\u4ee5 MFCC \u8868\u73fe\u6700\u4f73 (0.6740) \uff0c\u751a\u81f3\u8d85\u8d8a\u4e86\u7d44\u5408\u7279\u5fb5\u7684\u7d50\u679c(0.6658) \uff0c\u7136\u800c\uff0c\u7576\u914d\u5408\u4f4e\u901a\u6ffe\u6ce2\u6642\uff0cMFCC \u53ef\u4ee5\u9054\u5230\u66f4\u4f73\u7684 STOI \u503c\uff0c\u4f8b\u5982\u7576\u4f7f\u7528 0.25\u7684\u6b0a\u91cd\u6642\uff0cMFCC \u5c0d\u61c9\u4e4b STOI \u503c\u53ef \u8868 5. \u55ae\u4e00\u7a2e\u985e\u7279\u5fb5\u7684 PESQ \u5206\u6578\u6bd4\u8f03\uff0c\u672a\u8655\u7406\u8a9e\u97f3\u8207\u7d93\u904e\u539f\u59cb IRM 2 (\u4f7f\u7528\u539f\u7279 \u539f\u59cb\u7684 IRM \u6bd4\u4f7f\u7528\u4f4e\u901a\u6ffe\u6ce2\u6cd5\u5c0d\u61c9\u7684 IRM \u6548\u679c\u8f03\u4f73\uff0c\u800c\u5728 STOI \u5206\u6578\u4e0a\uff0c\u7576\u914d\u5408\u4f4e \u7522\u751f\u986f\u8457\u7684\u5931\u771f\uff0c\u63a5\u8457\uff0c\u6bd4\u8f03\u5716 1(b)\u8207\u5716 1(c)\u53ef\u770b\u51fa\uff0c\u7406\u60f3\u7684 IRM \u53ef\u5e36\u4f86\u986f\u8457\u7684\u8a9e\u97f3\u5f37 \u5fb5\u8207\u5176\u5dee\u91cf\u7279\u5fb5\u6c42\u53d6) \u3001\u4e0d\u540c\u6b0a\u91cd\u03b1\u6291\u5236\u8abf\u8b8a\u9ad8\u983b\u4e4b IRM(\u6709\u642d\u914d\u5dee\u91cf\u7279\u5fb5)\u8655 \u901a\u6ffe\u6ce2\u6642\uff0c\u53ef\u4ee5\u6bd4\u539f\u59cb IRM \u9054\u5230\u66f4\u4f73\u7684\u7d50\u679c\uff0c\u4f8b\u5982\u7576\u4f7f\u7528 0.5\u7684\u6b0a\u91cd\u6642\uff0cMFCC \u5316\u6548\u679c\uff0c\u6700\u5f8c\uff0c\u89c0\u5bdf\u539f\u59cb IRM \u8207\u4f4e\u901a\u6ffe\u6ce2\u7279\u5fb5\u4e4b IRM \u6240\u5c0d\u61c9\u7684\u5716 1(d) \u8207 \u5716 2(e) \uff0c\u76f8 \u7406\u5f8c\u5c0d\u61c9\u7684 PESQ \u5e73\u5747\u5206\u6578\uff0c\u5176\u4e2d\"combo\"\u8868\u793a\u56db\u985e\u7279\u5fb5\u4e4b\u7d44\u5408 \u5c0d\u61c9\u4e4b STOI \u503c\u53ef\u4ee5\u9032\u4e00\u6b65\u63d0\u5347\u81f3 0.6880\u3002\u7136\u800c\uff0c\u7372\u5f97 PESQ \u6700\u4f73\u6b0a\u91cd\u662f 0\u4e4b\u4f4e\u901a \u5c0d\u65bc\u5716 1(b)\uff0c\u96dc\u8a0a\u6240\u9020\u6210\u7684\u5931\u771f\u660e\u986f\u964d\u4f4e\uff0c\u4f46\u6548\u679c\u4e26\u4e0d\u5982\u7406\u60f3 IRM \u6240\u5c0d\u61c9\u7684\u5716 1(c)\uff0c\u4f8b [Table 5. The averaged PESQ results for the original IRM 2 (using the original static \u6ffe\u6ce2\u6cd5\uff0c\u53ef\u9054\u5230 1.8214\u3002 \u5982\u5728\u6642\u9593 0.1-0.3 \u79d2\u4e4b\u9593\u7684\u983b\u8b5c\u5f37\u5ea6\u4e26\u672a\u6709\u6548\u91cd\u5efa(\u5728\u7d05\u8272\u6846\u6240\u6a19\u793a\u5340\u57df)\uff0c\u7136\u800c\u5716 1(e) and delta features of single type) and the lowpass filtered IRM 2 (using the lowpass filtered static and delta features of single type with different assignments of 4. \u7576\u6bd4\u8f03\u8868 6 \u8207\u8868 7 \u7684\u6578\u64da\uff0c\u6211\u5011\u53ef\u4ee5\u6e05\u695a\u770b\u5230\uff0c\u984d\u5916\u4f7f\u7528\u5dee\u91cf\u7279\u5fb5\u53cd\u800c\u540c\u6642\u4f7f PESQ \u7684\u5728\u6b64\u5340\u57df\u7684\u983b\u8b5c\u91cd\u5efa\u7a0b\u5ea6\u7a0d\u512a\u65bc\u5716 1(d)\uff0c\u6839\u64da\u6b64\u6bd4\u8f03\u7d50\u679c\uff0c\u6211\u5011\u4f3c\u4e4e\u53ef\u770b\u51fa\uff0c\u4f4e\u901a\u6ffe parameter \u03b1)] \u6ce2\u7279\u5fb5\u4e4b IRM \u5728\u6b64\u8a9e\u53e5\u7684\u8655\u7406\u4e0a\u7565\u512a\u65bc\u539f\u59cb IRM\u3002 \u8207 STOI \u7684\u5206\u6578\u90fd\u964d\u4f4e\uff0c\u9019\u7d50\u679c\u4f3c\u4e4e\u8868\u660e\uff0c\u5728\u8a13\u7df4\u8cc7\u6599\u589e\u52a0\u6642\uff0c\u5dee\u91cf\u7279\u5fb5\u7684\u53c3\u8207\u4e26\u672a \u4e0d\u540c\u6b0a\u91cd \u6291\u5236\u8abf\u8b8a\u9ad8\u983b\u4e4b IRM 2 \u5c0d\u65bc IRM \u6a21\u578b\u4e4b\u8a13\u7df4\u6709\u6b63\u9762\u7684\u5f71\u97ff\uff0c\u9019\u80cc\u5f8c\u539f\u56e0\u53ef\u80fd\u662f\u6b64\u6642 IRM \u6a21\u578b\u4e4b\u8907\u96dc\u5ea6\u61c9\u8a72 PESQ \u5206\u6578 \u539f\u59cb IRM 2 0 0.25 0.50 0.75 \u9032\u4e00\u6b65\u63d0\u9ad8\u3001\u4ee5\u56e0\u61c9\u984d\u5916\u7684\u5dee\u91cf\u7279\u5fb5\u5e36\u4f86\u7684\u8cc7\u6599\u591a\u6a23\u6027\u3002\u5982\u679c\u5728\u539f\u59cb IRM \u6a21\u578b\u67b6\u69cb 0.6789 PESQ 1.7755 1.7844 1.7612 1.7717 \u4ee5\u9032\u4e00\u6b65\u63d0\u5347\u81f3 0.6772\u3002\u6b64\u5916\uff0c\u4f4e\u901a\u6ffe\u6ce2\u8655\u7406\u4e26\u975e\u5c0d\u6bcf\u4e00\u7a2e\u7279\u5fb5\u90fd\u80fd\u5e36\u4f86\u6539\u9032\uff0c\u4f8b\u5982 AMS 1.6721 1.6705 1.6712 1.6731 1.6758 \u7684\u8a2d\u5b9a\u4e0b\uff0c\u4e0d\u4f7f\u7528\u5dee\u91cf\u7279\u5fb5\u53ef\u80fd\u662f\u8f03\u4f73\u7684\u9078\u64c7\uff0c\u540c\u6642\u914d\u5408\u4f4e\u901a\u6ffe\u6ce2\u8655\u7406\uff0c\u53ef\u4f7f PESQ 1.7760 \u5c0d\u65bc AMS \u7279\u5fb5\u800c\u8a00\uff0c\u4e0d\u4f7f\u7528\u4f4e\u901a\u6ffe\u6ce2\u6240\u5c0d\u61c9\u7684\u539f\u59cb IRM \u8868\u73fe\u6700\u597d\u3002 0.75\u7684\u6b0a\u91cd\u6642\uff0cMFCC \u5c0d\u61c9\u4e4b PESQ \u503c\u53ef\u4ee5\u9032\u4e00\u6b65\u63d0\u5347\u81f3 1.7977\u3002\u7136\u800c\uff0c\u7372\u5f97 RASTA-PLP 1.7463 1.7634 1.7634 1.7630 \u5206\u6578\u9032\u4e00\u6b65\u63d0\u5347\u3002 1.7426 combo 1.7748 1.7819 1.7916 1.7589 1.7996 \u5176\u6b21\uff0c\u8868 3 \u9032\uff0c\u5176\u4ed6\u8f03\u5c0f\u503c\u7684 \u8a2d\u5b9a\u503c\u5247\u4e26\u672a\u4e00\u5236\u6027\u5730\u5f97\u5230\u660e\u986f\u9032\u6b65\u7684\u6548\u679c\uff0c\u9019\u53ef\u80fd\u539f\u56e0\u662f\uff0c\u7576 2. \u5c0d\u65bc\u8a9e\u97f3\u54c1\u8cea\u6307\u6a19 PESQ \u800c\u8a00\uff0c\u5728\u4e0d\u4f7f\u7528\u4f4e\u901a\u6ffe\u6ce2\u4e4b\u56db\u985e\u7279\u5fb5\u4e2d\uff0cMFCC \u4ecd\u8868\u73fe\u6700\u4f73 \u503c\u3002\u7136\u800c\uff0c\u7576\u914d\u5408\u4f4e\u901a\u6ffe\u6ce2\u6642\uff0c\u5404\u7a2e\u985e\u7279\u5fb5\u7686\u53ef\u4ee5\u9054\u5230\u66f4\u4f73\u7684 PESQ \u503c\uff0c\u4f8b\u5982\u7576\u4f7f\u7528 GF 1.7641 1.7791 1.7669 1.7635 1.7633 (1.7966) \uff0c\u8d85\u8d8a\u4e86\u7d44\u5408\u7279\u5fb5 (1.7748) \uff0c\u800c AMS \u7279\u5fb5\u8868\u73fe\u8f03\u4e0d\u597d\uff0c\u53ea\u6709 1.6721 \u4e4b PESQ MFCC 1.7966 1.7870 1.7916 1.7946 1.7977 (e) \u96dc\u8a0a\u8a9e\u97f3\u7d93\u7531\u4f4e\u901a\u6ffe\u6ce2 IRM \u8655\u7406\u5f8c\u4e4b\u8a9e\u97f3</td></tr><tr><td>\u4f7f\u7528\u5dee\u91cf\u7279\u5fb5\u6642\uff0c\u5dee\u91cf\u7279\u5fb5\u672c\u8eab\u5c31\u5df2\u7d93\u6291\u5236\u539f\u59cb\u7279\u5fb5\u7684\u8abf\u8b8a\u9ad8\u983b\u6210\u5206\uff0c\u56e0\u6b64\u6b64\u6642\u7528\u8f03 PESQ \u6700\u4f73\u4e4b\u7279\u5fb5\u662f\u7d44\u5408\u7279\u5fb5\u914d\u5408 0.75\u4e4b\u4f4e\u901a\u6ffe\u6ce2\u6cd5\uff0c\u53ef\u9054\u5230 1.7996\u3002</td></tr><tr><td>\u4ee5X \u6211\u5011\u5c07\u4efb\u4e00\u7dad\u7279\u5fb5\u6642\u5e8f\u5217X \u4ee3\u8868\u4e4b\uff0c\u5176\u7a31\u4f5cX\u7684\u7b2c \u7dad\u7279\u5fb5\u6642\u5e8f\u5217\uff0c\u5c3a\u5bf8\u70ba1 \u5728\u6211\u5011\u6240\u63d0\u7684\u65b0 IRM \u8a13\u7df4\u6cd5\u4e0a\uff0c\u5c0d\u65bc\u8f38\u5165\u7279\u5fb5\u4e4b\u6642\u5e8f\u5217\u4e4b\u7d30\u7bc0\u4fc2\u6578(\u9ad8\u983b\u4fc2\u6578)\u6240 \uff0c\u5176\u4e2d1 \u3002 channel)\u3002 \u7d66\u4e88\u7684\u6b0a\u91cd \uff0c\u5206\u5225\u8a2d\u5b9a\u70ba 0, 0.25, 0.50, 0.75\uff0c\u85c9\u6b64\u89c0\u5bdf\u7d30\u7bc0\u4fc2\u6578\u4e4b\u58d3\u6291\u7a0b\u5ea6\u5c0d\u65bc IRM \u6548 \u679c\u4e4b\u5f71\u97ff(\u539f\u59cb IRM \u6240\u5c0d\u61c9\u4e4b\u6b0a\u91cd \u672a\u8655\u7406\u8a9e\u97f3 \u7406\u60f3 IRM \u539f\u59cb IRM 1 \u539f\u59cb IRM 2 STOI 0.6130 0.9004 0.6763 0.6658 PESQ 1.6081 2.6408 1.7755 \u5927\u7684 \u503c\u518d\u5c0d\u539f\u59cb\u7279\u5fb5\u7684\u8abf\u8b8a\u9ad8\u983b\u6210\u5206\u5c0f\u5e45\u6291\u5236\uff0c\u5373\u53ef\u9054\u5230\u9810\u671f\u4e4b\u9032\u6b65\u6548\u679c\u3002 2. \u82e5\u6211\u5011\u5c07\u8868 2 \u8207\u8868 3 \u7684\u6578\u64da\u540c\u6642\u6bd4\u8f03\uff0c\u767c\u73fe\u9054\u5230\u6700\u4f73 STOI \u503c (0.6799) \u7684\u662f\u300c\u4e0d\u4f7f\u7528 \u5dee\u91cf\u7279\u5fb5\u3001\u4f7f\u7528 0.50 \u4e4b\u6291\u5236\u8abf\u8b8a\u9ad8\u983b\u300d\u7684 IRM \u6cd5\uff0c\u800c\u9054\u5230\u6700\u4f73 PESQ \u503c (1.7996) \u7684\u5247\u662f\u300c\u4f7f\u7528\u5dee\u91cf\u7279\u5fb5\u3001\u4f7f\u7528 0.75 \u4e4b\u6291\u5236\u8abf\u8b8a\u9ad8\u983b\u300d\u7684 IRM \u6cd5\u3002 \u6839\u64da\u4ee5\u4e0a\u89c0\u5bdf\uff0c\u56db\u985e\u7279\u5fb5\u7684\u7d44\u5408\u672a\u5fc5\u5728 STOI \u8868\u73fe\u4e0a\u512a\u65bc\u55ae\u985e\u7279\u5fb5\uff0c\u800c\u5728 PESQ \u8868\u73fe\u4e0a \u53ea\u80fd\u4e9b\u8a31\u8d85\u8d8a\u500b\u5225\u55ae\u985e\u7279\u5fb5\uff0c\u9019\u53ef\u80fd\u539f\u56e0\u5728\u65bc\u67d0\u985e\u7279\u5fb5(\u5982 AMS)\u5728\u8868\u73fe\u4e0a\u8207\u5176\u4ed6\u7279\u5fb5 \u5dee\u7570\u8f03\u5927\uff0c\u5373\u4f7f\u5f8c\u7aef\u7684\u6df1\u5ea6\u6a21\u578b\u5728\u5b78\u7fd2\u4e2d\u7406\u61c9\u80fd\u6de1\u5316\u9019\u985e\u7279\u5fb5\u7684\u8ca0\u9762\u5f71\u97ff\uff0c\u4f46\u662f\u5f9e\u6e2c\u8a66 \u7d50\u679c\u4e0a\uff0c\u591a\u985e\u7279\u5fb5\u7684\u7d44\u5408\u4e26\u672a\u767c\u63ee\u986f\u8457\u7684\u52a0\u6210\u6027\u3002 4.3 \u589e\u52a0\u8a13\u7df4\u53ca\u6e2c\u8a66\u8cc7\u6599\u4e14\u4f7f\u7528\u55ae\u4e00\u7a2e\u985e\u4e4b\u8f38\u5165\u7279\u5fb5\u6240\u5f97\u7684IRM\u6548\u80fd\u5206 \u5728\u524d\u4e00\u7bc0\u4e2d\uff0c\u6211\u5011\u53ef\u89c0\u5bdf\u51fa\u5728\u5404\u500b\u985e\u5225\u7684\u7279\u5fb5\u4e2d\uff0c\u55ae\u7368\u4f7f\u7528 MFCC \u7279\u5fb5\u7684 IRM \u6548\u80fd\u660e \u986f\u512a\u65bc\u5176\u4ed6\u7279\u5fb5\uff0c\u5176\u540c\u6642\u4f7f\u7528\u4f4e\u901a\u6ffe\u6ce2\u8207\u5dee\u91cf\u7279\u5fb5\u8655\u7406\u5176\u5e8f\u5217\u53ef\u5f97\u5230\u8f03\u4f73\u7684 STOI ( 0.25) \u8207 PESQ ( 0.75) \u5206\u6578\u3002\u5728\u672c\u7bc0\u88e1\uff0c\u6211\u5011\u60f3\u9032\u4e00\u6b65\u89c0\u5bdf\u6b64\u8868\u73fe\u826f\u597d\u7684\u7684 MFCC \u7279 MFCC \u7279\u5fb5 \u4e0d\u540c\u6b0a\u91cd \u6291\u5236\u8abf\u8b8a\u9ad8\u983b\u4e4b IRM 1 \u539f\u59cb IRM 1 0 0.25 0.50 0.75 STOI 0.6947 0.6900 0.6926 0.6918 (a) \u539f\u59cb\u4e7e\u6de8\u8a9e\u97f3 [a. the original clean utterance] (b) \u647b\u5165-2 dB SNR \u4e4b babble \u96dc\u8a0a\u4e4b\u8a9e\u97f3 [b. the -2 dB SNR utterance with babble \u5728\u672c\u7814\u7a76\u4e2d\uff0c\u6211\u5011\u63d0\u51fa\u4e26\u521d\u6b65\u9a57\u8b49\u4e86\u7576\u7406\u60f3\u6bd4\u4f8b\u906e\u7f69(IRM) \u4e4b\u6df1\u5ea6\u6a21\u578b\u4f7f\u7528\u4f4e\u901a\u6ffe\u6ce2\u4e4b\u8a9e \u97f3\u7279\u5fb5\u6642\u5e8f\u5217\u4f86\u8a13\u7df4\u6642\uff0c\u76f8\u8f03\u65bc\u4f7f\u7528\u539f\u7279\u5fb5\u6642\u5e8f\u5217\u8a13\u7df4\uff0c\u53ef\u4ee5\u5f97\u5230\u66f4\u4f73\u7684\u8a9e\u97f3\u5f37\u5316\u6548\u679c\u3002 \u6458\u8981 0.6928 PESQ 1.8182 1.8214 1.7996 1.8056 noise] \u6211\u5011\u4f7f\u7528\u5c0f\u6ce2\u8f49\u63db\u4f86\u5be6\u73fe\u4f4e\u901a\u6ffe\u6ce2\u7684\u8655\u7406\uff0c\u5176\u57f7\u884c\u7c21\u6613\u4f46\u6548\u679c\u660e\u986f\uff0c\u5728\u672a\u4f86\u5de5\u4f5c\u4e0a\uff0c\u6211\u5011 \u8fd1\u5e74\u4f86\u5728\u8a9e\u97f3\u5408\u6210\u7684\u7814\u7a76\u4e4b\u4e2d\uff0c\u55ae\u4e00\u8a9e\u8005\u7684\u5408\u6210\u7cfb\u7d71\u5df2\u7d93\u6709\u8457\u9ad8\u54c1\u8cea\u7684\u8868\u73fe\uff0c 1.8192 \u521d\u6b65\u898f\u5283\u5c07\u6b64\u4f4e\u901a\u6ffe\u6ce2\u7684\u6642\u5e8f\u5217\u8655\u7406\u7528\u5728\u8a13\u7df4\u5176\u4ed6\u7a2e\u985e\u7684\u8a9e\u97f3\u5f37\u5316\u6df1\u5ea6\u6a21\u578b\u4e4b\u7279\u5fb5\u4e0a\uff0c\u6aa2 \u4f46\u5c0d\u65bc\u591a\u8a9e\u8005\u7cfb\u7d71\u4f86\u8aaa\uff0c\u5408\u6210\u8a9e\u97f3\u7684\u54c1\u8cea\u8207\u8a9e\u8005\u76f8\u4f3c\u5ea6\u4ecd\u662f\u4e00\u5927\u6311\u6230\uff0c\u672c\u7814\u7a76 1.7748 \u8996\u5176\u662f\u5426\u4e5f\u80fd\u66f4\u6709\u6548\u6539\u9032\u8a72\u6a21\u578b\u7684\u6548\u80fd\u3001\u63d0\u5347\u8a9e\u97f3\u4e4b\u54c1\u8cea\u8207\u53ef\u8b80\u6027\u3002 \u91dd\u5c0d\u5408\u6210\u8a9e\u97f3\u7684\u54c1\u8cea\u8207\u8a9e\u8005\u76f8\u4f3c\u5ea6\u5169\u500b\u8b70\u984c\u4f86\u5efa\u7acb\u51fa\u4e00\u5957\u53ef\u5408\u6210\u591a\u8a9e\u8005\u4e4b\u6587 \u5fb5\uff0c\u82e5\u518d\u589e\u52a0 1 \u500d\u7684\u8cc7\u6599\u6578\u91cf (\u5176\u4e2d\uff0c\u8a13\u7df4\u96c6\u5305\u542b\u4e86 10 \u4f4d\u8a9e\u8005\u3001\u6bcf\u4eba 10 \u53e5\u5171 100 \u500b\u8a9e 1)\u3002 \u5728\u4f7f\u7528\u7684\u96e2\u6563\u5c0f\u6ce2\u8f49\u63db\u8207\u53cd\u8f49\u63db\u4e2d\uff0c\u6211\u5011\u4f7f\u7528 db2 \u5c0f\u6ce2\u51fd\u6578\u3002 \u63a5\u4e0b\u4f86\uff0c\u6211\u5011\u958b\u59cb\u8a55\u4f30\u6240\u63d0\u4e4b\u65b0 IRM \u8a13\u7df4\u6cd5\uff0c\u8868 2 \u5217\u51fa\u4e86\u5728\u4e0d\u4f7f\u7528\u5dee\u91cf\u7279\u5fb5\u6642\uff0c\u7d66 \u5b9a\u8f38\u5165\u7279\u5fb5\u4e4b\u6642\u5e8f\u5217\u4e4b\u9ad8\u983b\u4fc2\u6578\u4e0d\u540c\u7684\u6b0a\u91cd \uff0c\u7d93\u8a13\u7df4\u4e4b IRM \u6240\u5c0d\u61c9\u7684 STOI \u8207 PESQ \u5b57\u8f49\u8a9e\u97f3\u7cfb\u7d71\uff0c\u9996\u5148\u91dd\u5c0d\u591a\u8a9e\u8005\u7684\u8b70\u984c\u4e2d\uff0c\u76ee\u6a19\u70ba\u900f\u904e\u5c11\u91cf\u6a23\u672c(Zero-Shot)\u4f86 \u53e5\uff0c\u800c\u6e2c\u8a66\u96c6\u5247\u5305\u542b\u4e86\u8207\u8a13\u7df4\u96c6\u4e0d\u540c\u7684 6 \u4f4d\u8a9e\u8005\u3001\u6bcf\u4eba 10 \u53e5\u5171 60 \u500b\u8a9e\u53e5) \u7684\u60c5\u6cc1\u4e0b\uff0c \u5176 IRM \u7684\u6548\u80fd\uff0c\u540c\u6642\u89c0\u5bdf\u5728\u4f7f\u7528\u6211\u5011\u6240\u63d0\u51fa\u7684\u4f4e\u901a\u6ffe\u6ce2\u6cd5\u5c0d\u65bc MFCC \u7279\u5fb5\u5728\u6b64\u72c0\u614b\u4e0b \u81f4\u8b1d (Acknowledgement) \u9054\u6210\u8a9e\u8005\u8f49\u63db\uff0c\u6211\u5011\u900f\u904e\u8a9e\u8005\u5d4c\u5165\u5411\u91cf(Speaker Embedding)\u7684\u5f15\u5165\u4f86\u5be6\u4f5c\u591a\u8a9e \u4ee5\u4e00\u968e\u96e2\u6563\u5c0f\u6ce2\u8f49\u63db\u52a0\u4ee5\u5206\u89e3\u5982\u4e0b\uff1a cA , cD X \u5206\u6578\uff0c\u5f9e\u6b64\u8868\u4e2d\uff0c\u6211\u5011\u6709\u4ee5\u4e0b\u7684\u767c\u73fe\uff1a \u4e4b IRM \u6548\u80fd\u7684\u5f71\u97ff\uff0c\u9019\u4e00\u7cfb\u5217\u5be6\u9a57\u7d50\u679c\u5206\u5225\u5217\u5728\u8868 6(\u7121\u5dee\u91cf\u7279\u5fb5)\u8207\u8868 7(\u6709\u5dee\u91cf\u7279 \u8005\u8a9e\u97f3\u5408\u6210\u7cfb\u7d71\uff0c\u4e26\u6bd4\u8f03\u91dd\u5c0d\u4e0d\u540c\u4efb\u52d9\u6240\u5efa\u7acb\u7684\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u7684\u6548\u679c\u5dee\u7570\u3002\u5728 \u672c\u8ad6\u6587\u5176\u90e8\u5206\u521d\u968e\u5be6\u9a57\u7531\u672c\u6821\u7562\u696d\u540c\u5b78\u6797\u5b50\u5f37 (Mr. Zi-Qiang Lin) \u52a0\u4ee5\u57f7\u884c\uff0c\u7279\u6b64\u81f4\u8b1d\u3002 (4) \u5176\u4e2d (.) \u4ee3\u8868\u96e2\u6563\u5c0f\u6ce2\u8f49\u63db (discrete wavelet transform)\u3001 cA \u8207 c \u5206\u5225\u70ba \u8f49\u63db\u5206\u89e3\u800c\u5f97\u7684\u8fd1\u4f3c\u4fc2\u6578(approximation coefficients)\u8207\u7d30\u7bc0\u4fc2\u6578(detail coefficients)\uff0c\u5176\u53ef \u8996\u70ba\u539f\u59cb\u5e8f\u5217X \u4e4b\u4f4e\u901a\u6210\u5206\u8207\u9ad8\u901a\u6210\u5206\uff0c\u4e8c\u8005\u983b\u5bec\u5747\u7d04\u7b49\u65bc\u539f\u59cb\u5e8f\u5217\u983b\u5bec\u7684\u4e00\u534a\uff0c\u4e14 \u9ede\u6578\u6e1b\u534a\u3002 \u6b65\u9a5f\u4e09\uff1a\u6211\u5011\u5c07\u4e0a\u4e00\u6b65\u9a5f\u6240\u5f97\u7684\u7d30\u7bc0\u4fc2\u6578c ]\u4e58\u4e0a\u4e00\u500b\u5c0f\u65bc 1 \u7684\u6b0a\u91cd \uff0c\u518d\u8207\u539f\u8fd1\u4f3c \u4fc2\u6578\u76f8\u7d44\u5408\u3001\u7d93\u904e\u53cd\u96e2\u6563\u5c0f\u6ce2\u8f49\u63db\u91cd\u5efa\u7b2c \u7dad\u7279\u5fb5\u6642\u5e8f\u5217\uff0c\u8868\u793a\u5982\u4e0b\uff1a X cA , cD (5) \u5176\u4e2dX \u70ba\u66f4\u65b0\u7684\u7279\u5fb5\u6642\u5e8f\u5217\uff0c\u76f8\u8f03\u65bc\u539f\u59cb\u7279\u5fb5\u6642\u5e8f\u5217X \uff0cX \u5305\u542b\u8f03\u4f4e\u7684\u9ad8\u901a \u6210\u5206\uff0c\u56e0\u6b64\u61c9\u7576\u5305\u542b\u8f03\u5c11\u96dc\u8a0a\u9020\u6210\u7684\u5931\u771f\u3002 \u5728\u6211\u5011\u7684\u8a55\u4f30\u5be6\u9a57\u4e0a\uff0c\u6211\u5011\u5c07\u5206\u70ba\u4e09\u90e8\u5206\u4f86\u5448\u73fe\u4e26\u8a0e\u8ad6\uff0c\u7b2c\u4e00\u90e8\u5206\u662f\u5c0d\u61c9\u65bc\u4f7f\u7528\u6240\u6709\u7a2e \u985e\u4e4b\u8f38\u5165\u7279\u5fb5\u7d44\u5408\u6240\u8a13\u7df4\u53ca\u6e2c\u8a66\u4e4b IRM \u6a21\u578b\uff0c\u7b2c\u4e8c\u90e8\u5206\u662f\u5c0d\u61c9\u65bc\u4f7f\u7528\u55ae\u4e00\u7a2e\u985e\u4e4b\u8f38\u5165\u7279 \u5fb5\u6240\u8a13\u7df4\u53ca\u6e2c\u8a66\u4e4b IRM \u6a21\u578b\uff0c\u6211\u5011\u5c07\u5728\u9019\u5169\u90e8\u5206\u4e2d\uff0c\u63a2\u7a76\u6240\u63d0\u65b0\u65b9\u6cd5\u4e4b\u4f4e\u901a\u6ffe\u6ce2\u7279\u5fb5\u6642 \u5e8f\u5217\u5c0d\u65bc IRM \u6548\u80fd\u7684\u6539\u8b8a\uff0c\u7b2c\u4e09\u90e8\u5206\u5247\u662f\u85c9\u7531\u6642\u983b\u5716\u7684\u5c55\u793a\uff0c\u89c0\u5bdf\u539f\u59cb\u8207\u66f4\u65b0\u4e4b IRM \u6240 \u5fb5)\u3002 \u6b64 \u6211 \u5011 \u6bd4 \u8f03 \u4e86 \u7528 \u65bc \u8a9e \u8005 \u8fa8 \u8b58 (Speaker Verification) \u4ee5 \u53ca \u55ae \u7d14 \u7528 \u65bc \u8a9e \u97f3 \u8f49 \u63db 1. \u7576\u4f7f\u7528\u6211\u5011\u63d0\u51fa\u4e4b\u6291\u5236\u8abf\u8b8a\u9ad8\u983b\u7684\u7279\u5fb5\u6cd5\u6642\uff0c\u591a\u6578 \u6b0a\u91cd\u8a2d\u5b9a\u90fd\u5f97\u5230\u4e86\u66f4\u4f73\u7684 STOI \u8207 PESQ \u503c( 0.25\u5728 STOI \u5206\u6578\u9664\u5916\uff0c 0.25, 0.50 \u5728 PESQ \u5206\u6578\u9664\u5916) \uff0c\u6b64\u521d\u6b65 \u9a57\u8b49\u4e86\u6b64\u65b9\u6cd5\u5c0d\u65bc\u8a13\u7df4\u66f4\u4f73 IRM \u6a21\u578b\u3001\u4ee5\u6291\u5236\u96dc\u8a0a\u5e72\u64fe\u6709\u66f4\u597d\u7684\u6548\u679c\u3002 2. \u5168\u7136\u79fb\u9664 (\u8a2d\u5b9a 0) \u6216\u5c11\u91cf\u79fb\u9664 (\u8a2d\u5b9a 0.75) \u8abf\u8b8a\u9ad8\u983b\u6210\u5206\u4f3c\u4e4e\u662f\u8f03\u4f73\u9078\u9805\uff0c \u4e8c\u8005\u81f3\u5c11\u7686\u53ef\u4f7f PESQ \u8207 STOI \u503c\u63d0\u5347\uff0c 0\u5f97\u5230\u6700\u4f73\u7684 PESQ \u503c\uff0c\u800c 0.75\u5247\u4f7f STOI \u9032\u6b65\u6700\u5927\u3002 \u539f\u59cb IRM 2 \u4e0d\u540c\u6b0a\u91cd \u6291\u5236\u8abf\u8b8a\u9ad8\u983b\u4e4b IRM 2 0 0.25 0.50 0.75 STOI 0.6658 0.6639 0.6671 0.6615 0.6682 PESQ 1.7748 1.7819 1.7916 1.7589 1.7996 4.2 \u5728\u524d\u4e00\u7bc0\u4e2d\uff0c\u6211\u5011\u5df2\u7d93\u5448\u73fe\u7d9c\u5408\u56db\u985e\u7279\u5fb5\u6240\u5f97\u4e4b IRM \u7684\u6548\u679c\uff0c\u4e26\u521d\u6b65\u9a57\u8b49\u5c07\u7279\u5fb5\u6642\u5e8f\u5217 RASTA-PLP, MFCC, GF)\u5c0d\u65bc IRM \u6548\u80fd\u4e4b\u5f71\u97ff\uff0c\u540c\u6642\u6211\u5011\u4e5f\u4f7f\u7528\u4f4e\u901a\u6ffe\u6ce2\u4f86\u8655\u7406\u5176\u5e8f 3. \u7576\u4f7f\u7528\u5dee\u91cf\u7279\u5fb5\u6642\uff0c\u4e0a\u4e00\u9ede\u7684\u7d50\u679c\u5247\u525b\u597d\u5c0d\u8abf\uff1a\u5373\u82e5\u589e\u52a0\u8a13\u7df4\u8a9e\u6599\uff0c\u5728 PESQ \u5206\u6578\u4e0a\uff0c \u6240\u5c0d\u61c9\u7684\u5f37\u5ea6\u6642\u983b\u5716\uff0c\u9996\u5148\uff0c\u6211\u5011\u6bd4\u8f03\u5716 1(a)\u8207\u5716 1(b)\uff0c\u767c\u73fe\u96dc\u8a0a\u5c0d\u65bc\u8a9e\u97f3\u5728\u6642\u983b\u5716\u4e0a \u4f4e\u901a\u6ffe\u6ce2\u53ef\u4ee5\u9032\u4e00\u6b65\u5f37\u5316 IRM\u3002\u5728\u672c\u7bc0\u88e1\uff0c\u6211\u5011\u60f3\u9032\u4e00\u6b65\u89c0\u5bdf\u5404\u500b\u985e\u5225\u7684\u7279\u5fb5 (\u5305\u542b AMS, STOI \u5206\u6578 \u539f\u59cb IRM 2 \u4e0d\u540c\u6b0a\u91cd \u6291\u5236\u8abf\u8b8a\u9ad8\u983b\u4e4b IRM 2 0 0.25 0.50 0.75 AMS 0.6472 0.6430 0.6435 0.6458 0.6466 RASTAPLP 0.6559 0.6600 0.6607 0.6611 0.6556 MFCC 0.6740 0.6771 0.6772 0.6761 0.6770 GF 0.6695 0.6698 0.6667 0.6672 0.6692 combo 0.6658 0.6639 0.6671 0.6615 0.6682 \u5f9e\u8868 6 \u8207\u8868 7 \u6211\u5011\u53ef\u4ee5\u89c0\u5bdf\u51fa\u4ee5\u4e0b\u5e7e\u9ede\uff1a 0\u4e4b\u4f4e\u901a\u6ffe\u6ce2\u6cd5\uff0c\u53ef\u9054\u5230 1.8214\u3002 \u6700\u5f8c\u5728\u9019\u4e00\u5c0f\u7bc0\uff0c\u6211\u5011\u4f7f\u7528\u8a9e\u97f3\u8a0a\u865f\u7684\u5f37\u5ea6\u6642\u983b\u5716(magnitude spectrogram)\uff0c\u4f86\u6aa2\u8996\u539f\u59cb IRM \u8207\u6211\u5011\u63d0\u51fa\u4e4b\u4f4e\u901a\u6ffe\u6ce2\u7279\u5fb5\u4e4b IRM \u7684\u5f37\u5316\u6548\u80fd\uff0c\u5716 1(a)-(f) \u70ba\u4e00\u8a9e\u53e5\u5728\u5404\u7a2e\u72c0\u614b\u4e0b 1. \u5f97 PESQ \u6700\u4f73\u6b0a\u91cd\u662f MFCC \u7279\u5fb5 \u539f\u59cb IRM 2 \u4e0d\u540c\u6b0a\u91cd \u6291\u5236\u8abf\u8b8a\u9ad8\u983b\u4e4b IRM 2 0 0.25 0.50 0.75 STOI 0.6863 0.6841 0.6840 0.6880 (Voice Conversion)\u7684\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u3002\u63a5\u8457\uff0c\u70ba\u4e86\u63d0\u5347\u5408\u6210\u7684\u8a9e\u8005\u76f8\u4f3c\u5ea6\u4ee5\u53ca\u8a9e \u53c3\u8003\u6587\u737b (References) \u97f3\u54c1\u8cea\uff0c\u6211\u5011\u5617\u8a66\u7f6e\u63db\u985e\u795e\u7d93\u7db2\u8def\u67b6\u69cb\u4e2d\uff0c\u4f5c\u70ba\u63d0\u5347\u983b\u8b5c\u7684 Post-Net \u7684\u90e8\u5206\uff0c Chen, C., & Bilmes, J. (2007). MVA processing of speech features. IEEE Trans. on Audio, Speech, and Language Processing, 15(1), \u5728\u6b64\u8655\u6211\u5011\u4f7f\u7528\u4e86\u4e00\u500b\u5f8c\u7f6e\u6ffe\u6ce2\u5668(Post-Filter)\u7684\u7db2\u8def\u4f86\u53d6\u4ee3\uff0c\u4e14\u6bd4\u8f03\u548c Post-257-270. https://doi.org/10.1109/TASL.2006.876717 Net \u6240\u7522\u751f\u7684\u983b\u8b5c\u5dee\u7570\u4ee5\u53ca\u63a2\u8a0e\u5176\u6a21\u578b\u53c3\u6578\u91cf\u4e4b\u5dee\u7570\u6027\u3002\u5be6\u9a57\u7d50\u679c\u8868\u660e\uff0c\u900f\u904e 0.6837 PESQ 1.8003 1.7966 1.7966 1.7853 1.7972 (c) \u96dc\u8a0a\u8a9e\u97f3\u7d93\u7531\u7406\u60f3 IRM \u8655\u7406\u4e4b\u8a9e\u97f3 [c. the oracle-IRM enhanced utterance] \u758a\u52a0\u6027\u6ce8\u610f\u529b\u6a5f\u5236\u4f86\u6574\u5408\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u9032\u5165\u5230\u985e\u795e\u7d93\u7db2\u8def\u67b6\u69cb\u7684\u8a9e\u97f3\u5408\u6210\u7cfb Erdogan, H., Hershey, J. R., Watanabe, S., & Le Roux, J. (2015). Phase-sensitive and (d) \u96dc\u8a0a\u8a9e\u97f3\u7d93\u7531\u539f\u59cb IRM \u8655\u7406\u4e4b\u8a9e\u97f3 recognition-boosted speech separation using deep recurrent neural networks. In \u7d71\u7684\u78ba\u80fd\u5920\u6709\u6548\u5730\u7522\u751f\u5177\u6709\u76ee\u6a19\u8a9e\u8005\u7684\u5408\u6210\u8a9e\u97f3\uff0c\u4e26\u4e14\u5728\u52a0\u5165\u5f8c\u7f6e\u6ffe\u6ce2\u5668\u7db2\u8def [d. the original-IRM enhanced utterance] Proceedings of 40th IEEE International Conference on Acoustics, Speech and Signal \u5f8c\u80fd\u5920\u6bd4\u50b3\u7d71\u900f\u904e Post-Net \u7684\u65b9\u5f0f\u4f86\u5f37\u5316\u5408\u6210\u8a9e\u97f3\u7684\u8a9e\u8005\u7279\u6027\u4ee5\u53ca\u8a9e\u97f3\u54c1\u8cea\uff0c 4.4 \u4f7f\u7528\u6642\u983b\u5716\u6f14\u793a\u7d50\u679c (Spectrogram Demonstration for Each Method) Processing (ICASSP 2015), 780-712. https://doi.org/10.1109/ICASSP.2015.7178061 \u4e14\u5408\u6210\u4e00\u822c\u9577\u5ea6\u8a9e\u97f3\u53e5\u7684\u6642\u9593\u7d04\u70ba 2 \u79d2\u9418\uff0c\u5df2\u63a5\u8fd1\u5373\u6642\u5408\u6210\u500b\u4eba\u5316\u8a9e\u97f3\u4e4b\u6210</td></tr></table>", |
| "num": null |
| }, |
| "TABREF11": { |
| "html": null, |
| "type_str": "table", |
| "text": "\u6a21\u578b\u81ea\u9069\u61c9\uff1a \u4e3b\u8981\u662f\u5728 TTS \u7cfb\u7d71\u4e2d\u52a0\u5165 Speaker ID Table \u4f86\u4f7f\u6a21\u578b\u80fd\u5920\u4f9d\u7167 Speaker ID \u751f\u6210\u5c0d\u61c9\u8a9e\u8005\u7684\u8072\u97f3\uff0c\u5b83\u65e2\u80fd\u66f4\u63db\u5167\u5bb9\u4e5f\u80fd\u66f4\u63db\u8a9e\u8005\uff0c\u4f46\u662f\u9700\u8981\u5927\u91cf\u4e0d\u540c\u8a9e\u8005\u7684\u8a9e\u97f3\u6578 \u64da\u4ee5\u53ca\u8f03\u591a\u7684\u8a13\u7df4\u6642\u9593\u4f86\u9054\u6210\u76ee\u6a19\uff0c\u4e14\u7121\u6cd5\u64f4\u5c55\u5230\u6c92\u770b\u904e\u7684\u8a9e\u8005\u3002 \u57fa\u65bc\u8a9e\u97f3\u8f49\u63db\u548c\u6a21\u578b\u81ea\u9069\u61c9\u5728\u591a\u8a9e\u8005 TTS \u4e0a\u7684\u4e0d\u8db3\uff0c\u65bc\u662f\u6709\u8457 (Jia et al., 2018) \u548c (Chien et al., 2021) \u7b49\u7814\u7a76\uff0c\u5c07\u8a9e\u97f3\u8f49\u63db\u6216\u8a9e\u8005\u8fa8\u8b58\u9019\u5169\u7a2e\u65b9\u6cd5\u53d6\u4ee3\u6a21\u578b\u81ea\u9069\u61c9\u4e2d\u7684 Speaker ID", |
| "content": "<table><tr><td>\u6574\u5408\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u8207\u5f8c\u7f6e\u6ffe\u6ce2\u5668\u65bc\u63d0\u5347\u500b\u4eba\u5316\u5408\u6210\u8a9e\u97f3\u4e4b\u8a9e\u8005\u76f8\u4f3c\u5ea6</td><td>51</td></tr><tr><td>\uf0b7</td><td/></tr><tr><td>\u95dc\u9375\u8a5e\uff1a\u591a\u8a9e\u8005\u8a9e\u97f3\u5408\u6210\u3001\u8a9e\u97f3\u8f49\u63db\u3001\u8a9e\u8005\u8b58\u5225\u3001\u5c11\u91cf\u6a23\u672c\u3001\u5f8c\u7f6e\u6ffe\u6ce2\u5668</td><td/></tr><tr><td colspan=\"2\">Keywords: Multi-speaker Text-to-Speech, Voice Conversion, Speaker Verification,</td></tr><tr><td>Zero-Shot, Post-Filter</td><td/></tr><tr><td>1. \u7dd2\u8ad6 (Introduction)</td><td/></tr><tr><td colspan=\"2\">\u5c31\u55ae\u4e00\u8a9e\u8005\u7684\u8a9e\u97f3\u5408\u6210\u6280\u8853\u4f86\u770b\uff0c\u5176\u5408\u6210\u6280\u8853\u5df2\u7d93\u80fd\u5920\u5408\u6210\u51fa\u903c\u771f\u4e14\u81ea\u7136\u7684\u8a9e\u97f3\uff0c\u4e26\u4e14</td></tr><tr><td colspan=\"2\">\u4e0d\u9700\u8981\u592a\u591a\u7684\u8a9e\u97f3\u6578\u64da\u53ca\u8a13\u7df4\u6642\u9593\uff0c\u800c\u70ba\u4e86\u64f4\u5c55\u5230\u5176\u4ed6\u8a9e\u8005\uff0c\u5e38\u898b\u7684\u65b9\u6cd5\u6709\u8a9e\u97f3\u8f49\u63db\u548c</td></tr><tr><td>\u6a21\u578b\u81ea\u9069\u61c9\u5169\u7a2e\u65b9\u6cd5\uff1a</td><td/></tr><tr><td>\u7684\u6548\u679c\uff0c\u552f\u4e00\u7684\u4fb7\u9650\u5c31\u662f\u50c5\u80fd\u66f4\u63db\u8a9e\u8005\u4e0d\u80fd\u66f4\u6539\u5167\u5bb9\u3002</td><td/></tr></table>", |
| "num": null |
| }, |
| "TABREF12": { |
| "html": null, |
| "type_str": "table", |
| "text": "Learnable Dictionary Encoding(Cooper et al., 2020) \u7c21\u7a31 LDE\uff0c\u4f5c\u70ba\u672c\u6b21\u7814\u7a76\u7684 \u8a9e\u8005\u8fa8\u8b58\u6a21\u578b\uff0c\u5b83\u662f\u57fa\u65bc X-Vector(Snyder et al., 2018) \u6240\u505a\u7684\u6539\u9032\uff0c\u4e26\u4e14\u5728\u8a9e\u8005\u8fa8\u8b58\u7684 \u4efb\u52d9\u4e0a\u4ee5\u53ca\u591a\u8a9e\u8005 TTS \u7cfb\u7d71\u4e0a\u7686\u662f\u512a\u65bc X-Vector \u7684\u3002 Post-Net \u76ee\u7684\u662f\u70ba\u4e86\u6539\u5584\u983b\u8b5c\u91cd\u69cb\u7684\u54c1\u8cea\uff0c\u5728 Tacotron 2 \u7684\u8ad6\u6587\u88e1\u63d0\u5230\uff0c\u6709 Post-Net \u7684 MOS \u8a55\u5206\u662f\u6bd4\u8f03\u9ad8\u7684\u3002 \u5728\u672c\u6b21\u7814\u7a76\u4e2d\uff0c\u6211\u5011\u984d\u5916\u5f15\u5165\u4e86\u53e6\u4e00\u500b\u67b6\u69cb Diffwav(Kong, Z. et al., 2020) \u4f5c\u70ba Post-Filter \u4f86\u8207 Post-Net \u6bd4\u8f03\u3002Diffwave \u662f Nvidia \u65bc 2020 \u5e74\u63a8\u51fa\u7684 Vocoder\uff0c\u80fd\u5920\u5c07\u983b\u8b5c\u8f49 \u63db\u6210\u6ce2\u5f62\u8a0a\u865f\uff0c\u5b83\u7684\u57fa\u790e\u7406\u8ad6\u662f Denoising Diffusion Probabilistic Model(Ho et al., 2020)\uff0c \u7c21\u7a31 DDPM\u3002DDPM \u662f\u4e00\u500b\u99ac\u53ef\u592b\u934a (Markov Chain) \u6a21\u578b\uff0c\u900f\u904e\u6307\u5b9a\u6b65\u6578\u70ba\u76ee\u6a19\u6dfb\u52a0 \u9ad8\u65af\u566a\u97f3\u76f4\u81f3\u76ee\u6a19\u8b8a\u6210\u9ad8\u65af\u4e82\u6578\uff0c\u518d\u900f\u904e\u6717\u4e4b\u842c\u52d5\u529b\u5b78 (Langevin Dynamics) \u53cd\u5411\u9084\u539f \u81f3\u76ee\u6a19\u3002", |
| "content": "<table><tr><td>\u3002 \u56e0\u6b64\uff0c\u6211\u5011\u5c07\u904b\u7528\u8fd1\u671f\u7684\u795e\u7d93\u7db2\u8def\u6280\u8853\u4f86\u66f4\u52d5 Tacotron 2 \u6a21\u578b\uff0c\u671f\u671b\u6a21\u578b\u8a13\u7df4\u901f\u5ea6\u52a0\u5feb\u3001 \u5408\u6210\u8a9e\u97f3\u54c1\u8cea\u7684\u63d0\u5347\u4ee5\u53ca\u52a0\u5f37\u5408\u6210\u591a\u8a9e\u8005\u8a9e\u97f3\u7684\u8a9e\u8005\u76f8\u4f3c\u5ea6\u3002 \u6211\u5011\u5c07\u5728\u7b2c\u4e8c\u7ae0\u7bc0\u95e1\u8ff0\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u6240\u7528\u5230\u7684\u8a9e\u97f3\u8f49\u63db\u53ca\u8a9e\u8005\u8fa8\u8b58\u6a21\u578b\uff0c\u5728\u7b2c\u4e09\u7ae0 \u7bc0\u95e1\u8ff0\u672c\u6b21\u7814\u7a76\u6539\u52d5 Tacotron 2 \u7684\u65b9\u6cd5\uff0c\u7b2c\u56db\u7ae0\u7bc0\u95e1\u8ff0\u5be6\u9a57\u7d50\u679c\uff0c\u6700\u5f8c\uff0c\u5728\u7b2c\u4e94\u7ae0\u7bc0\u95e1 \u8ff0\u672c\u6b21\u7814\u7a76\u7684\u7d50\u8ad6\u3002 2. \u8a9e\u8005\u5d4c\u5165\u5411\u91cf \u5728\u672c\u6b21\u7814\u7a76\u4e2d\uff0c\u6211\u5011\u4f7f\u7528 AdaIN-VC \u4f5c\u70ba\u672c\u6b21\u7814\u7a76\u7684\u8a9e\u97f3\u8f49\u63db\u6a21\u578b\uff0c\u96d6\u7136\u5982\u4e4b\u524d\u6240\u8ff0\uff0c \u8a9e\u97f3\u8f49\u63db\u7684\u6a21\u578b\u6709\u5f88\u591a\u7a2e\uff0c\u4f46\u4e26\u4e0d\u662f\u90fd\u80fd\u63d0\u53d6\u51fa\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\uff0c\u5982 StarGan \u53ca CycleGAN \u7b49 GAN \u6a21\u578b\u96d6\u7136\u4e5f\u662f\u8a9e\u97f3\u8f49\u63db\uff0c\u4f46\u5b83\u5011\u662f\u900f\u904e\u5728\u8a13\u7df4\u671f\u9593\u5224\u5225\u5668 (Discriminator) \u7684\u7d04 \u675f\uff0c\u4f7f\u5f97\u751f\u6210\u7684\u8a9e\u97f3\u63a5\u8fd1\u5167\u90e8\u8a9e\u8005\uff0c\u9019\u7121\u6cd5\u63d0\u53d6\u51fa\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\uff1b\u5c6c\u65bc AutoEncoder \u6a21 \u578b\u7684 AutoVC \u4e5f\u7121\u6cd5\u63d0\u53d6\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\uff0c\u56e0\u5b83\u662f\u5229\u7528\u7de8\u78bc\u5c64\u5c07\u8a9e\u8005\u8a0a\u606f\u53bb\u9664\uff0c\u4e26\u5728\u89e3\u78bc \u5c64\u52a0\u5165 Speaker ID Table \u4f86\u9032\u884c\u8f49\u63db\u7684\uff0c\u800c AdaIN-VC (Adaptive Instance Normalization-Voice Conversion) \u662f\u4e00\u7a2e\u5c07\u5716\u7247\u98a8\u683c\u8f49\u63db\u7684\u6280\u8853\u5957\u7528\u5230\u8a9e\u97f3\u8f49\u63db\u4e0a\u7684 VAE \u6a21\u578b\uff0c\u5b83\u900f \u904e\u5169\u500b\u7de8\u78bc\u5c64\u5c07\u8a9e\u97f3\u7de8\u78bc\u6210\u8a9e\u8005\u6f5b\u5728\u8868\u793a\u53ca\u5167\u5bb9\u6f5b\u5728\u8868\u793a\uff0c\u4e26\u900f\u904e\u89e3\u78bc\u5c64\u7d44\u5408\u5169\u8005\u5f8c\u751f \u6210\u8f49\u63db\u5f8c\u7684\u8a9e\u97f3\uff0c\u6211\u5011\u53ef\u4ee5\u85c9\u7531\u66f4\u63db\u8a9e\u8005\u6f5b\u5728\u8868\u793a\u4f86\u9054\u5230\u8a9e\u97f3\u8f49\u63db\u7684\u6548\u679c\uff0c\u5176\u6a21\u578b\u67b6\u69cb \u5982\u5716 2 \u6240\u793a\uff1a \u4e9b\u8cc7\u8a0a\u5c07\u5728\u89e3\u78bc\u5c64\u5e6b\u52a9\u6ce8\u610f\u529b\u6a5f\u5236\u66f4\u5feb\u7684\u5c0d\u9f4a\uff0c\u6211\u5011\u5c07\u539f LSTM \u8f38\u51fa\u7a31\u70ba\u5167\u5bb9\u8cc7\u8a0a (Content Information)\uff0c\u53e6\u4e00\u500b\u901a\u904e Self-Attention \u7684\u8f38\u51fa\u7a31\u70ba\u9577\u8ddd\u96e2\u5167\u5bb9\u8cc7\u8a0a (Long-distance Content Information)\uff0c\u540c\u6642\uff0c\u70ba\u4e86\u4f7f\u6a21\u578b\u80fd\u5920\u5408\u6210\u591a\u8a9e\u8005\u7684\u8a9e\u97f3\uff0c\u6211\u5011\u5728\u9019\u5169\u500b \u6b64\u5916\uff0c\u70ba\u4e86\u52a0\u5f37\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u5c0d\u65bc\u6a21\u578b\u7684\u4f5c\u7528\uff0c\u6211\u5011\u5728 Pre-Net \u5c64\u52a0\u5165\u4e86\u8a9e\u8005\u5d4c\u5165 \u5411\u91cf\uff0c\u900f\u904e\u795e\u7d93\u7db2\u8def\u7684\u5b78\u7fd2\uff0c\u80fd\u5920\u4f7f\u6a21\u578b\u66f4\u770b\u91cd\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u3002 \u6700\u5f8c\uff0c\u5728\u901a\u904e LSTM \u89e3\u78bc\u5f8c\uff0c\u6211\u5011\u53c8\u518d\u4e00\u6b21\u5f15\u5165 Self-Attention \u5c07\u983b\u8b5c\u6f5b\u5728\u8868\u793a\u7684\u8cc7 \u5716 2. \u6211\u5011\u4f7f\u7528 X-Vector \u7684\u904b\u4f5c\u65b9\u5f0f\u662f\u5c07\u6574\u500b\u8a9e\u97f3\u5206\u6210\u6578\u500b\u7247\u6bb5\u4e26\u900f\u904e\u6578\u5c64\u5377\u7a4d\u8a08\u7b97\u5176\u8f38\u51fa\u7279\u5fb5\uff0c \u518d\u5c07\u6240\u6709\u7279\u5fb5\u53d6\u5e73\u5747\u8207\u6a19\u6e96\u5dee\u901a\u904e\u7dda\u6027\u8f49\u63db\u4f86\u8a08\u7b97\u8a72\u8a9e\u8005\u7684\u5d4c\u5165\u5411\u91cf\u3002LDE \u8207 X-Vector \u4e0d\u540c\u7684\u5730\u65b9\u662f LDE \u5f15\u5165\u4e86\u6578\u500b Dictionary Clusters\uff0c\u9019\u4e9b Clusters \u662f\u9700\u8981\u900f\u904e\u795e\u7d93\u7db2\u8def\u53bb \u5b78\u7fd2\u7684\uff0c\u5b83\u5011\u4ee3\u8868\u67d0\u4e9b\u8aaa\u8a71\u4eba\u7684\u7279\u5fb5\uff0cLDE \u4f7f X-Vector \u5f97\u5230\u7684\u8f38\u51fa\u7279\u5fb5\u8207\u6240\u6709 Clusters \u8a08\u7b97\u5f7c\u6b64\u5dee\u8ddd\u7684\u5e73\u5747\u503c\u8207\u6a19\u6e96\u5dee\u4f86\u5224\u65b7\u8a72\u8a9e\u97f3\u63a5\u8fd1\u54ea\u4e00\u500b Clusters\uff0c\u7136\u5f8c\u518d\u9032\u4e00\u6b65\u8b93\u795e \u7d93\u7db2\u8def\u5224\u65b7\u8a72\u8a9e\u8005\u7684\u5d4c\u5165\u5411\u91cf\uff0c\u5176\u6a21\u578b\u67b6\u69cb\u5982\u5716 3 \u6240\u793a\uff1a \u7576\u4f5c\u53e6\u4e00\u500b\u8f38\u51fa\uff0cSelf-Attention \u6703\u5c07 LSTM \u8f38\u51fa\u7684\u6f5b\u5728\u8868\u793a\u9032\u884c\u5168\u57df\u76f8\u95dc\u6027\u7684\u9023\u63a5\uff0c\u9019 Forward Attention \u7af6\u722d\u6587\u5b57\u8207\u983b\u8b5c\u9593\u7684\u5c0d\u9f4a\uff0c\u5728\u5be6\u9a57\u7d50\u679c\u6703\u6709\u66f4\u8a73\u7d30\u5730\u8aaa\u660e\u3002 \u5716 3. \u57fa\u65bc\u539f\u672c\u7684 Tacotron 2 \u67b6\u69cb\uff0c\u6211\u5011\u5c07 LSTM \u7684\u8f38\u51fa\u964d\u7dad\u964d\u81f3 128 \u7dad\u4e26\u901a\u904e Self-Attention 54 \u738b\u8056\u582f\u8207\u9ec3\u5955\u6b3d \u5716 4\u89e3\u78bc\u5668\u90e8\u4efd\u6211\u5011\u505a\u4e86\u8f03\u591a\u7684\u6539\u52d5\uff0c\u9996\u5148\uff0c\u7531\u65bc\u7de8\u78bc\u5c64\u6709\u5169\u500b\u8f38\u51fa\uff0c\u56e0\u6b64\u6211\u5011\u5206\u5225\u5f15\u5165\u4e86 \u5169\u500b\u4e0d\u540c\u7684\u6ce8\u610f\u529b\u6a5f\u5236\uff0c\u6211\u5011\u70ba\u5167\u5bb9\u8cc7\u8a0a\u5f15\u5165\u4e86 Forward Attention \u53d6\u4ee3 Tacotron 2 \u820a\u6709 \u7684\u6ce8\u610f\u529b\u6a5f\u5236\uff0c\u5b83\u53ef\u4ee5\u66f4\u5feb\u5730\u5f15\u767c\u5c0d\u9f4a\uff0c\u4e26\u4e14\u80fd\u6539\u5584\u56e0\u9577\u53e5\u6240\u5f15\u767c\u7684\u91cd\u8907\u767c\u97f3\u6216\u6f0f\u5b57\u7684 \u6574\u5408\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u8207\u5f8c\u7f6e\u6ffe\u6ce2\u5668\u65bc\u63d0\u5347\u500b\u4eba\u5316\u5408\u6210\u8a9e\u97f3\u4e4b\u8a9e\u8005\u76f8\u4f3c\u5ea6 55 \u738b\u8056\u582f\u8207\u9ec3\u5955\u6b3d \u6211\u5011\u5229\u7528\u4e0a\u8ff0\u7684\u539f\u7406\uff0c\u5c07 Diffwave \u4fee\u6539\u6210\u983b\u8b5c\u9593\u7684\u8f49\u63db\u7684 Post-Filter\uff0c\u671f\u671b\u900f\u904e\u6dfb \u52a0\u566a\u97f3\u80fd\u4f7f\u751f\u6210\u7684\u983b\u8b5c\u6709\u8457\u66f4\u591a\u7684\u7d30\u7bc0\uff0c\u5176\u904b\u4f5c\u6d41\u7a0b\u5982\u5716 6\uff1a \u5716 6. Diffwave \u6d41\u7a0b [Figure 6. Diffwave process] \u5982\u5716 6 \u6240\u793a\uff0cDiffwave \u900f\u904e\u6a21\u578b\u53cd\u8986\u904b\u4f5c\u4e26\u4ee5\u566a\u97f3\u8868 (Noise Schedule\uff0c\u5f37\u5ea6\u7531\u5c0f\u5230 \u5927\u7684\u566a\u97f3) \u8207\u6885\u723e\u983b\u8b5c\u4f5c\u70ba\u8f38\u5165\u689d\u4ef6\u4f7f\u6a21\u578b\u5728\u8a13\u7df4\u671f\u9593\u5b78\u7fd2\u5230\u5982\u4f55\u900f\u904e\u8f38\u5165\u689d\u4ef6\u4f86\u6dfb\u52a0 \u566a\u97f3\u5206\u4f48\u7834\u58de\u8f38\u5165\u76ee\u6a19\uff1b\u7531\u65bc\u6a21\u578b\u5df2\u7d93\u5b78\u5f97\u5982\u4f55\u4f9d\u7167\u8f38\u5165\u689d\u4ef6\u6dfb\u52a0\u566a\u97f3\u5206\u4f48\uff0c\u5728\u63a8\u8ad6\u671f \u9593\uff0c\u904b\u7528\u53cd\u51fd\u5f0f\u7684\u4f5c\u6cd5\uff0c\u5c07\u6dfb\u52a0\u7684\u566a\u97f3\u5206\u4f48\u9664\u53bb\uff0c\u4f7f\u8f38\u5165\u7684\u9ad8\u65af\u566a\u97f3\u9010\u6f38\u9084\u539f\u6210\u76ee\u6a19\uff0c \u5176\u6a21\u578b\u67b6\u69cb\u5982\u5716 7 \u6240\u793a\uff1a \u6574\u5408\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u8207\u5f8c\u7f6e\u6ffe\u6ce2\u5668\u65bc\u63d0\u5347\u500b\u4eba\u5316\u5408\u6210\u8a9e\u97f3\u4e4b\u8a9e\u8005\u76f8\u4f3c\u5ea6 57 \u5716 7\u6211\u5011\u4f7f\u7528 AISHELL-3 \u9ad8\u4fdd\u771f\u4e2d\u6587\u8a9e\u97f3\u6578\u64da\u5eab\u4f5c\u70ba\u672c\u6b21\u5be6\u9a57\u7684\u8cc7\u6599\u96c6\uff0c\u5171\u6709 88035 \u500b\u97f3 \u5716 5\u539f\u672c 56 \u6a94\uff0c218 \u4f4d\u8a9e\u8005\uff0c\u63a1\u6a23\u7387\u70ba 44.1kHz\uff0c16bit\u3002\u6211\u5011\u5c07\u6240\u6709\u97f3\u6a94\u4e0b\u63a1\u6a23\u81f3 22050Hz\uff0c\u4e26\u5f9e\u4e2d \u554f\u984c\uff1b\u9577\u8ddd\u96e2\u5167\u5bb9\u8cc7\u8a0a\u5247\u5f15\u5165\u4e86 Bahdanau Attention (Bahdanau et al., 2014)\uff0c\u5b83\u662f\u4e00\u500b\u50b3 \u63d0\u53d6\u51fa 173 \u4f4d\u8a9e\u8005(\u7d04\u4f54\u6574\u9ad4\u8a9e\u8005 80%)\uff0c\u6bcf\u4f4d\u8a9e\u8005\u96a8\u6a5f\u53d6 100 \u53e5\u97f3\u6a94\u4f5c\u70ba\u8a13\u7df4\u96c6\uff0c\u5171 \u7d71\u7684 Additive Attention\uff0c\u56e0\u5176\u67b6\u69cb\u8f03\u70ba\u7c21\u55ae\uff0c\u53ef\u4ee5\u5feb\u901f\u5730\u5f97\u5230\u67d0\u4e9b\u983b\u8b5c\u8207\u6587\u5b57\u7684\u95dc\u4fc2\uff0c \u9019\u5c07\u80fd\u5920\u5e6b\u52a9 Forward Attention \u66f4\u5feb\u5730\u5f15\u767c\u5c0d\u9f4a\uff0c\u4e26\u4e14\u56e0\u70ba\u4f4e\u7dad\u5ea6\u7684\u95dc\u4fc2\uff0c\u5b83\u4e0d\u6703\u8207 17300 \u500b\u97f3\u6a94\uff0c\u5176\u9918 45 \u70ba\u8a9e\u8005\u7576\u6210\u672a\u770b\u904e\u8a9e\u8005\u6e2c\u9a57\u6a21\u578b\u5408\u6210\u5916\u90e8\u8a9e\u8005\u7684\u6027\u80fd\u3002</td></tr><tr><td>\u6f5b\u5728\u8868\u793a\u5f8c\u65b9\u4e32\u63a5\u4e86\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\uff0c\u8a73\u7d30\u67b6\u69cb\u5982\u5716 4 \u6240\u793a\uff1a \u8a0a\u9032\u884c\u5168\u57df\u76f8\u95dc\u6027\u7684\u9023\u63a5\uff0c\u4ee5\u5e6b\u52a9\u5f8c\u7e8c\u7dda\u6027\u8f49\u63db\u66f4\u5feb\u5730\u512a\u5316\uff0c\u5176\u8a73\u7d30\u67b6\u69cb\u5982\u5716 5 \u6240\u793a\uff1a</td></tr></table>", |
| "num": null |
| }, |
| "TABREF13": { |
| "html": null, |
| "type_str": "table", |
| "text": "\u4f5c\u70ba\u672c\u6b21\u5be6\u9a57\u7684 Vocoder\uff0c\u6c92\u6709\u91cd\u65b0\u8a13\u7df4 \u4e5f\u6c92\u6709\u9032\u884c\u53c3\u6578\u7684\u5fae\u8abf\uff0c\u50c5\u4f7f\u7528\u539f\u4f5c\u8005\u5be6\u73fe\u7684 Github \u4e2d\u6240\u63d0\u4f9b\u7684\u9810\u8a13\u7df4\u6a21\u578b\u3002\u63a5\u8457\uff0c\u6211 \u5011\u5229\u7528\u8cc7\u6599\u96c6\u7684\u97f3\u6a94\u5206\u5225\u5c0d\u65bc\u8a9e\u97f3\u8f49\u63db\u7684 AdaIN-VC \u548c\u8a9e\u8005\u8fa8\u8b58\u7684 LDE \u6a21\u578b\u8a13\u7df4\uff0c\u4f7f\u5176 \u738b\u8056\u582f\u8207\u9ec3\u5955\u6b3d \u751f\u6210 128 \u7dad\u5ea6\u7684\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u3002\u5728\u6211\u5011\u63d0\u51fa\u6539\u52d5\u7684 Tacotron 2 \u6a21\u578b\u67b6\u69cb\u4e4b\u4e2d\uff0c\u7de8\u78bc\u5c64\u7684 \u8f38\u51fa Content Information \u8f38\u51fa\u7dad\u5ea6\u4ecd\u7dad\u6301 512 \u7dad\uff0c\u4e32\u63a5\u4e0a\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u5f8c\u70ba 640 \u7dad\uff1bLongdistance Content Information \u8f38\u51fa\u7dad\u5ea6\u70ba 128 \u7dad\uff0c\u4e32\u63a5\u4e0a\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u5f8c\u70ba 256 \u7dad \u3002 \u5728 \u89e3\u78bc\u5c64\u4e2d\uff0c\u6211\u5011\u628a\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u5347\u7dad\u81f3 256 \u7dad\u4e26\u4ee5 Softsign \u6fc0\u6d3b\u51fd\u6578\u6fc0\u6d3b\uff0c\u65bc Pre-Net \u5c64 \u4e2d\u8207\u983b\u8b5c\u76f8\u52a0\uff0c\u5176\u9918\u8a2d\u7f6e\u7686\u6309\u7167\u539f Tacotron 2\u3002 \u6211\u5011\u63d0\u51fa\u7684 TTS \u6a21\u578b\u662f\u5728 Pytorch \u795e\u7d93\u7db2\u8def\u6846\u67b6\u4e0a\u904b\u884c\uff0c\u4e26\u4ee5 Nvidia GeForce RTX 2070 GPU \u8a13\u7df4\uff0c\u6279\u91cf\u5927\u5c0f (Batch Size) \u8a2d\u70ba 8\uff0c\u5171\u8a13\u7df4 208,000 \u500b Steps\uff0c\u7d04\u70ba 96 \u500b", |
| "content": "<table><tr><td colspan=\"5\">\u6574\u5408\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u8207\u5f8c\u7f6e\u6ffe\u6ce2\u5668\u65bc\u63d0\u5347\u500b\u4eba\u5316\u5408\u6210\u8a9e\u97f3\u4e4b\u8a9e\u8005\u76f8\u4f3c\u5ea6</td><td>59</td></tr><tr><td colspan=\"5\">\u8868 3. \u8a08\u7b97 Post-Filter \u8207 Post-Net \u7684 MCD\uff0c\u503c\u8d8a\u5c0f\u8d8a\u597d\u3002 [Table 3. Calculate the MCD of Post-Filter and Post-Net, the smaller the value, the better.]</td></tr><tr><td/><td>Inside</td><td/><td>Outside</td><td/></tr><tr><td/><td>Men</td><td>Women</td><td>Men</td><td>Women</td></tr><tr><td>Post-Filter</td><td>6.99</td><td>7.30</td><td>8.15</td><td>8.65</td></tr><tr><td>Post-Net</td><td>7.31</td><td>7.98</td><td>9.20</td><td>9.11</td></tr><tr><td>Epochs\u3002</td><td/><td/><td/><td/></tr><tr><td colspan=\"2\">4.3 \u5be6\u9a57\u7d50\u679c (Results)</td><td/><td/><td/></tr><tr><td colspan=\"2\">4.3.1 \u8a9e\u97f3\u54c1\u8cea (Speech quality)</td><td/><td/><td/></tr><tr><td colspan=\"6\">\u9996\u5148\uff0c\u6211\u5011\u4f7f\u7528\u5ba2\u89c0\u8a55\u6e2c (MOS) \u4f86\u8b49\u5be6\u5be6\u9a57\u7d50\u679c\uff0c\u5206\u5225\u5408\u6210\u8a9e\u97f3\u8f49\u63db\u548c\u8a9e\u8005\u8fa8\u8b58\u6240\u8a13</td></tr><tr><td colspan=\"6\">\u7df4\u7684 TTS \u7cfb\u7d71\u5404 10 \u500b\u5167\u90e8\u8a9e\u8005\u7684\u97f3\u6a94\u4f86\u6bd4\u8f03\u54c1\u8cea\uff0c\u53e6\u5916\u518d\u5408\u6210\u5404 10 \u500b\u5167\u90e8\u8a9e\u8005\u7684\u97f3\u6a94</td></tr><tr><td colspan=\"2\">\u6bd4\u8f03\u8a9e\u8005\u76f8\u4f3c\u5ea6\uff0c\u5176\u7d50\u679c\u5982\u8868 1\uff1a</td><td/><td/><td/></tr><tr><td colspan=\"3\">\u8868 1Quality</td><td colspan=\"2\">Similarity</td></tr><tr><td>Tacotron 2 with VC</td><td colspan=\"2\">2.67 \u00b1 0.35</td><td colspan=\"2\">2.70 \u00b1 0.41</td></tr><tr><td>Tacotron 2 with SV</td><td colspan=\"2\">2.54 \u00b1 0.37</td><td colspan=\"2\">2.31 \u00b1 0.18</td></tr><tr><td colspan=\"6\">\u6839\u64da\u8868 1 \u53ef\u4ee5\u767c\u73fe\u8a9e\u97f3\u8f49\u63db\u63d0\u53d6\u51fa\u4f86\u7684\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u5c0d\u65bc\u6211\u5011\u7684 TTS \u7cfb\u7d71\u6548\u679c\u8f03\u597d\uff0c\u56e0</td></tr><tr><td colspan=\"6\">\u6b64\u9032\u4e00\u6b65\u4f7f\u7528\u8a9e\u97f3\u8f49\u63db\u7684\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u4f86\u6bd4\u8f03 Post-Filter \u8207\u539f\u59cb Post-Net \u7684\u6548\u679c\uff0c\u7d50\u679c</td></tr><tr><td>\u5982\u4e0b\u8868\uff1a</td><td/><td/><td/><td/></tr><tr><td colspan=\"3\">\u8868 2Quality</td><td colspan=\"2\">Similarity</td></tr><tr><td>Post-Filter</td><td colspan=\"2\">3.75 \u00b1 0.35</td><td colspan=\"2\">3.75 \u00b1 0.71</td></tr><tr><td>Post-Net</td><td colspan=\"2\">2.67 \u00b1 0.71</td><td colspan=\"2\">2.50 \u00b1 0.30</td></tr><tr><td colspan=\"6\">\u63a5\u8457\u6211\u5011\u4f7f\u7528 Mel Cepstral Distortions (MCD) \u4f5c\u70ba\u5ba2\u89c0\u8a55\u6e2c\u7684\u65b9\u6cd5\uff0c\u96a8\u6a5f\u5f9e\u5167\u90e8\u8a9e\u8005\u8207</td></tr><tr><td colspan=\"6\">\u5916\u90e8\u8a9e\u8005\u5404\u6311\u9078 5 \u500b\u7537\u6027\u8207\u5973\u6027\u8a9e\u8005\uff0c\u6bcf\u500b\u8a9e\u8005\u5408\u6210 10 \u500b\u97f3\u6a94\u4f86\u8a08\u7b97 MCD \u503c\uff0c\u7d50\u679c\u5982</td></tr><tr><td>\u4e0b\u8868\uff1a</td><td/><td/><td/><td/></tr></table>", |
| "num": null |
| }, |
| "TABREF14": { |
| "html": null, |
| "type_str": "table", |
| "text": "\u5f9e\u5716 12 \u5f97\u77e5\uff0c\u6a21\u578b\u8a13\u7df4\u5230 16000 \u500b Steps \u6642\uff0c\u5118\u7ba1\u96d9\u65b9\u90fd\u7121\u6cd5\u5efa\u7acb\u826f\u597d\u7684\u5c0d\u9f4a\uff0c\u4f46\u6709 Bahdanau Attention \u7684\u5c0d\u9f4a\u662f\u512a\u65bc\u6c92\u6709 Bahdanau Attention \u7684\uff0c\u5728 19000 \u500b Steps \u6642\uff0c\u6709 Bahdanau Attention \u5df2\u7d93\u80fd\u5efa\u7acb\u5c0d\u9f4a\u4e86\uff0c\u53e6\u4e00\u500b\u5247\u96b1\u7d04\u6709\u5c0d\u9f4a\u7dda\u800c\u5df2\uff0c\u56e0\u6b64\u53ef\u5f97\u77e5\uff0c Bahdanau Attention \u52a0\u4e0a Forward Attention \u7684\u67b6\u69cb\u662f\u80fd\u5920\u5e6b\u52a9\u6a21\u578b\u5feb\u901f\u5730\u5efa\u7acb\u5c0d\u9f4a\u3002\u53ef\u4ee5 \u65bc\u6211\u5011\u7684\u7db2\u7ad9\u4e0a\u8046\u807d\u6a23\u672c\uff1ahttps://babaili.github.io/rocling2021_demo/", |
| "content": "<table><tr><td>\u6574\u5408\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u8207\u5f8c\u7f6e\u6ffe\u6ce2\u5668\u65bc\u63d0\u5347\u500b\u4eba\u5316\u5408\u6210\u8a9e\u97f3\u4e4b\u8a9e\u8005\u76f8\u4f3c\u5ea6</td><td>63</td></tr><tr><td colspan=\"2\">\u65e2\u7136 Bahdanau Attention \u593e\u5e36\u6bcf\u6bb5\u8a9e\u97f3\u5927\u6982\u7684\u97f3\u6846\u7bc4\u570d\u8cc7\u8a0a\uff0c\u90a3\u9019\u4e9b\u8cc7\u8a0a\u662f\u5426\u80fd\u5e6b</td></tr><tr><td>\u52a9\u6a21\u578b\u5feb\u901f\u5efa\u7acb\u5c0d\u9f4a\u5462?\u4e0b\u5716\u5c07\u986f\u793a\u6709\u7121 Bahdanau Attention \u7684\u5dee\u7570\uff1a</td><td/></tr><tr><td>\u5716</td><td/></tr></table>", |
| "num": null |
| }, |
| "TABREF15": { |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td>Passage</td><td>\u4e09\u4ee3\u540c\u5802\u5bb6\u5ead\u662f\u5b50\u5973\u548c\u7236\u6bcd\u3001\u7956\u7236\u6bcd\u6216\u5916</td></tr><tr><td/><td>\u7956\u7236\u6bcd\u540c\u4f4f\u3002</td></tr><tr><td>Question</td><td>\u300c\u6211\u548c\u7238\u7238\u3001\u5abd\u5abd\u3001\u723a\u723a\u3001\u5976\u5976\u4f4f\u5728\u4e00\u8d77\u3002\u300d</td></tr><tr><td/><td>\u662f\u5c6c\u65bc\u54ea\u4e00\u7a2e\u985e\u578b\u7684\u5bb6\u5ead\uff1f</td></tr><tr><td>Options</td><td>(1) \u4e09\u4ee3\u540c\u5802\u5bb6\u5ead</td></tr><tr><td/><td>(2) \u55ae\u89aa\u5bb6\u5ead</td></tr></table>", |
| "num": null |
| }, |
| "TABREF16": { |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td>Problem type</td><td>Questions</td></tr><tr><td>Negation</td><td>Question: \u6d69\u6d69\u8ddf\u5bb6\u4eba\u5230\u81fa\u6771\u7e23\u95dc\u5c71\u93ae\u904a\u73a9\uff0c\u4ed6\u4e0d\u53ef\u80fd\u5728\u7576\u5730\u770b</td></tr><tr><td/><td>\u5230\u4ec0\u9ebc\uff1f</td></tr><tr><td/><td>Options: (1)\u963f\u7f8e\u65cf\u8c50\u5e74\u796d (2)\u74b0\u93ae\u81ea\u884c\u8eca\u9053 (3)\u6cb9\u6850\u82b1\u5a5a\u79ae</td></tr><tr><td/><td>(4)\u89aa\u6c34\u516c\u5712</td></tr><tr><td>All of the</td><td>Question: \u5728\u9ad8\u9f61\u5316\u7684\u793e\u6703\u88e1\uff0c\u6211\u5011\u61c9\u8a72\u5982\u4f55\u56e0\u61c9\u9ad8\u9f61\u5316\u793e\u6703\u7684</td></tr><tr><td>above</td><td>\u5230\u4f86\uff1f</td></tr><tr><td/><td>Options: (1)\u5236\u5b9a\u8001\u4eba\u798f\u5229\u653f\u7b56 (2)\u63d0\u4f9b\u826f\u597d\u7684\u5b89\u990a\u7167\u9867 (3)</td></tr><tr><td/><td>\u5efa\u7acb\u5065\u5168\u7684\u91ab\u7642\u9ad4\u7cfb (4)\u4ee5\u4e0a\u7686\u662f</td></tr><tr><td>None of the</td><td>Question: \u90fd\u5e02\u6709\u516c\u5171\u8a2d\u65bd\u5b8c\u5584\u3001\u5de5\u4f5c\u6a5f\u6703\u591a\u7b49\u512a\u9ede\uff0c\u5e38\u5438\u5f15\u9109</td></tr><tr><td>above</td><td>\u6751\u5730\u5340\u54ea\u4e00\u7a2e\u5e74\u9f61\u5c64\u7684\u5c45\u6c11\u524d\u5f80\uff1f</td></tr><tr><td/><td>Options:</td></tr></table>", |
| "num": null |
| }, |
| "TABREF17": { |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td>Subset</td><td>Training</td><td>Dev</td><td>Test</td></tr><tr><td>Questions</td><td>3,879</td><td>780</td><td>778</td></tr><tr><td>Questions w/ SE</td><td>3,135</td><td>604</td><td>563</td></tr><tr><td>Questions w/o SE</td><td>744</td><td>176</td><td>215</td></tr><tr><td>Averaged SPs</td><td>1.09</td><td>1.16</td><td>1.14</td></tr><tr><td>Averaged SSs</td><td>3.17</td><td>2.94</td><td>2.73</td></tr><tr><td/><td colspan=\"3\">*Questions w/o SE: the number of questions without supporting evidence</td></tr><tr><td/><td colspan=\"3\">Averaged SPs: the average number of Supporting Paragraphs</td></tr><tr><td/><td colspan=\"3\">Averaged SSs: the average number of Supporting Sentences</td></tr></table>", |
| "num": null |
| }, |
| "TABREF18": { |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td/><td>Training</td><td>Dev</td><td>Test</td></tr><tr><td>Lessons</td><td>202</td><td>27</td><td>26</td></tr><tr><td>Questions</td><td>3,879</td><td>780</td><td>778</td></tr><tr><td>Averaged paragraphs/lesson</td><td>11.28</td><td>13.93</td><td>10.93</td></tr><tr><td>#Averaged entences/lesson</td><td>46.40</td><td>52.67</td><td>46.33</td></tr></table>", |
| "num": null |
| }, |
| "TABREF19": { |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td/><td>Training</td><td>Dev</td><td>Test</td></tr><tr><td>Lessons</td><td>196</td><td>27</td><td>26</td></tr><tr><td>Questions</td><td>3,135</td><td>604</td><td>563</td></tr><tr><td>( NEG a )</td><td>(53)</td><td>(14)</td><td>(15)</td></tr><tr><td>( AllAbv&NonAbv b )</td><td>(332)</td><td>(69)</td><td>(56)</td></tr><tr><td>Averaged paragraphs/lesson</td><td>11.35</td><td>13.93</td><td>10.85</td></tr><tr><td>Averaged sentences/ Lesson</td><td>46.72</td><td>52.67</td><td>46.15</td></tr></table>", |
| "num": null |
| }, |
| "TABREF20": { |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td>Error Type</td><td>Questions</td></tr><tr><td>Incorrect</td><td>Wrong SE: \u6e05\u671d\u7d71\u6cbb\u81fa\u7063\u521d\u671f\uff0c\u6f22\u4eba\u6e21\u6d77\u4f86\u81fa\u5f8c\uff0c\u5f80\u5f80\u540c\u9109\u4eba\u805a</td></tr><tr><td>supporting</td><td>\u5c45\u5728\u4e00\u8d77\uff0c\u4e26\u4e14\u5efa\u7bc9\u5edf\u5b87\u4f9b\u5949\u5171\u540c\u4fe1\u4ef0\u7684\u795e\u660e\u3002</td></tr><tr><td>evidence</td><td>Question: \u81fa\u7063\u6709\u8a31\u591a\u5f9e\u4e2d\u570b\u79fb\u6c11\u4f86\u7684\u6f22\u4eba\uff0c\u4f86\u81fa\u8981\u6e21\u904e\u5371\u96aa\u7684\u81fa</td></tr><tr><td>(52%)</td><td>\u7063\u6d77\u5cfd\uff0c\u6240\u4ee5\u4ec0\u9ebc\u795e\u660e\u5c31\u88ab\u6240\u6709\u79fb\u6c11\u6240\u5171\u540c\u4fe1\u4ef0\uff1f</td></tr><tr><td/><td>Options: (1)\u95dc\u516c (2)\u571f\u5730\u516c (3)\u5abd\u7956 (4)\u4e09\u5c71\u570b\u738b</td></tr><tr><td>Requires</td><td>SE: \u5211\u6cd5\u5c0d\u50b7\u5bb3\u4ed6\u4eba\u7684\u884c\u70ba\u52a0\u4ee5\u8655\u7f70\uff1b\u6c11\u6cd5\u5247\u4ee5\u640d\u5bb3\u8ce0\u511f\u7684\u65b9\u5f0f\uff0c</td></tr><tr><td>advanced inference capability</td><td>\u8acb\u554f\u725b\u5976\u7684\u4fdd\u5b58\u671f\u9650\u904e\u4e86\u6c92\uff1f(\u76f8\u95dc\u6cd5\u5f8b\uff1a\u6c11\u6cd5\u3001\u6d88\u8cbb\u8005\u4fdd\u8b77\u6cd5\u3001 \u98df\u54c1\u5b89\u5168\u885b\u751f\u7ba1\u7406\u6cd5)</td></tr><tr><td>(48%)</td><td/></tr></table>", |
| "num": null |
| }, |
| "TABREF21": { |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td>IJCLCLP 2021 Index-1</td></tr></table>", |
| "num": null |
| } |
| } |
| } |
| } |