| { |
| "paper_id": "2019", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:27:21.259100Z" |
| }, |
| "title": "A Feature-granularity Training Strategy for Chinese Spoken Question Answering", |
| "authors": [ |
| { |
| "first": "Shang-Bao", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Taiwan University of Science and Technology", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Kuan-Yu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Taiwan University of Science and Technology", |
| "location": {} |
| }, |
| "email": "kychen@mail.ntust.edu.tw" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In spoken question answering, a segment of audio is usually converted into a textual representation through an automatic speech recognition (ASR) system, and then input to a text-based question answering model to generate the answer. However, based on the ASR transcriptions, which usually contain lots of recognition errors, text-based question answering system may produce imperfect results. In order to mitigate the performance gap, in this study, a featured-granularity training strategy is proposed. Accordingly, we evaluate the proposed training strategy on spoken Chinese machine reading comprehension task,", |
| "pdf_parse": { |
| "paper_id": "2019", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In spoken question answering, a segment of audio is usually converted into a textual representation through an automatic speech recognition (ASR) system, and then input to a text-based question answering model to generate the answer. However, based on the ASR transcriptions, which usually contain lots of recognition errors, text-based question answering system may produce imperfect results. In order to mitigate the performance gap, in this study, a featured-granularity training strategy is proposed. Accordingly, we evaluate the proposed training strategy on spoken Chinese machine reading comprehension task,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "\u6a5f\u5668\u95b1\u8b80\u7406\u89e3\u662f\u4e00\u500b\u81ea\u7136\u8a9e\u8a00\u8655\u7406(Natural Language Processing, NLP)\u9818\u57df\u4e2d\u76f8\u7576\u91cd\u8981\u7684 \u4efb\u52d9\uff0c\u5176\u76ee\u6a19\u662f\u5e0c\u671b\u8b93\u6a5f\u5668\u50cf\u4eba\u985e\u4e00\u6a23\u9032\u884c\u6587\u672c\u95b1\u8b80\uff0c\u4e26\u6839\u64da\u5c0d\u8a72\u6587\u672c\u4e4b\u7406\u89e3\uff0c\u9032\u800c\u56de \u7b54\u76f8\u95dc\u4e4b\u554f\u984c\u3002\u8b93\u96fb\u8166\u5e6b\u52a9\u4eba\u985e\u5728\u5927\u91cf\u6587\u672c\u4e2d\u627e\u5230\u60f3\u8981\u7684\u7b54\u6848\uff0c\u53ef\u4ee5\u6e1b\u8f15\u8cc7\u8a0a\u7372\u53d6\u7684\u6210 \u672c\u3001\u52a0\u901f\u8cc7\u8a0a\u8655\u7406\u7684\u901f\u5ea6\u4ee5\u53ca\u63d0\u5347\u8cc7\u8a0a\u7684\u5229\u7528\u7387\u3002\u9032\u4e00\u6b65\u5730\uff0c\u5982\u679c\u96fb\u8166\u80fd\u5177\u5099\u76f8\u7576\u9ad8\u6c34 \u6e96\u7684\u95b1\u8b80\u7406\u89e3\u80fd\u529b\uff0c\u8a31\u591a\u61c9\u7528\u5c07\u6703\u6709\u66f4\u9032\u4e00\u6b65\u7684\u767c\u5c55\uff0c\u4f8b\u5982\u554f\u7b54\u7cfb\u7d71(Question Answering, QA)\u3001\u5c0d\u8a71\u7cfb\u7d71(Dialogue System)\u4ee5\u53ca\u641c\u5c0b\u5f15\u64ce(Search Engine)\u7b49\u3002\u56e0\u6b64\u6a5f\u5668\u95b1\u8b80\u7406\u89e3\u4e0d \u8ad6\u5728\u5b78\u8853\u754c\u6216\u7522\u696d\u754c\u90fd\u6709\u8457\u6975\u9ad8\u7684\u7814\u7a76\u50f9\u503c\u3002 \u8fd1\u5e74\u4f86\u554f\u7b54\u7cfb\u7d71\u5df2\u6709\u5927\u91cf\u7814\u7a76\u8207\u767c\u5c55\uff0c\u554f\u7b54\u7cfb\u7d71\u4e3b\u8981\u53c8\u5206\u70ba\u591a\u7a2e\u5f62\u5f0f\uff1a\u57fa\u65bc\u5716\u50cf\u7684 \u554f\u7b54\u7cfb\u7d71\u3001\u57fa\u65bc\u6587\u5b57\u7684\u554f\u7b54\u7cfb\u7d71\u4ee5\u53ca\u53e3\u8a9e\u554f\u7b54\u7cfb\u7d71\u7b49\u7b49\u3002\u76ee\u524d\u57fa\u65bc\u5716\u50cf\u7684\u554f\u7b54\u7cfb\u7d71\u4e3b\u8981 \u7684\u5f62\u5f0f\u70ba\u5834\u666f\u7406\u89e3(Scene Understanding)\uff0c\u5176\u76ee\u6a19\u662f\u7d66\u5b9a\u4e00\u5f35\u5716\u50cf\uff0c\u8b93\u7cfb\u7d71\u9032\u884c\u7269\u4ef6\u6aa2\u7d22 (Scene Object Retrieval)\u6216\u5834\u666f\u5206\u5272(Scene Segmentation)\u7b49\u4efb\u52d9\uff0c\u5df2\u6709\u8a31\u591a\u7d93\u5178\u7684\u6a21\u578b (Kingma & Dhariwal, 2018; Karras, Laine & Aila, 2019; \u88ab\u63d0\u51fa\u4e26\u9a57\u8b49\u65bc LSUN \u6578\u64da\u96c6(Construction of a Large-scale Image Dataset using Deep Learning with Humans in the Loop) (Yu et al., 2015) \u3002\u57fa\u65bc\u6587\u5b57\u7684\u554f\u7b54\u7cfb\u7d71\uff0c\u9664\u4e86\u50b3 \u7d71\u5b8c\u5f62\u586b\u7a7a(Cloze Style)\u8207\u6587\u672c\u6bb5(Text Span)\u9810\u6e2c\u5916\uff0c\u8a31\u591a\u7814\u7a76\u7d1b\u7d1b\u63d0\u51fa\u5404\u5f0f\u9078\u64c7\u984c (Multiple Choices)\u548c\u7c21\u7b54\u984c(Short Answer Questions)\u7684\u554f\u7b54\u6a21\u578b\u3002\u5b8c\u578b\u586b\u7a7a\u662f\u53bb\u6389\u6587\u672c\u4e2d \u7684\u67d0\u500b\u8a5e\u8a9e\uff0c\u8b93\u7cfb\u7d71\u9032\u884c\u586b\u7a7a\uff0c\u4f46\u7b54\u6848\u5f80\u5f80\u662f\u55ae\u4e00\u7684\u5b57\u8a5e\uff0c\u4e26\u4e0d\u9700\u8981\u5c0d\u65bc\u6574\u6bb5\u6587\u672c\u9032\u884c \u7406\u89e3\uff0c\u56e0\u6b64\u9019\u985e\u578b\u7684\u56de\u7b54\u5f62\u5f0f\u8f03\u96e3\u4ee5\u61c9\u7528\u65bc\u5be6\u969b\u751f\u6d3b\u4e2d\u3002\u70ba\u4e86\u5f4c\u88dc\u5b8c\u578b\u586b\u7a7a\u7684\u4e0d\u8db3\uff0c2016 \u5e74\u6642\u5927\u898f\u6a21\u7684\u6587\u672c\u6bb5\u985e\u578b\u6578\u64da\u96c6 SQuAD (The Stanford Question Answering Dataset) (Rajpurkar, Zhang, Lopyrev & Liang, 2016 
)\u61c9\u904b\u800c\u751f\uff0c\u6b64\u4e00\u6578\u64da\u96c6\u5305\u542b\u5341\u842c\u591a\u500b\u554f\u984c\u7b54\u6848 \u7d44\uff0c\u6587\u672c\u7686\u70ba\u7dad\u57fa\u767e\u79d1\u7684\u6587\u7ae0\u3002\u70ba\u4e86\u7c21\u5316\u554f\u984c\uff0c\u73fe\u4eca\u7684\u6587\u672c\u6bb5\u9810\u6e2c\u591a\u534a\u662f\u5728\u7d66\u5b9a\u6587\u672c\u8207 \u554f\u984c\u5f8c\uff0c\u6a5f\u5668\u9700\u7531\u6587\u672c\u4e2d\u627e\u51fa\u4e00\u500b\u9023\u7e8c\u7247\u6bb5\u4f5c\u70ba\u7b54\u6848\u8f38\u51fa\uff0c\u4e5f\u5c31\u662f\u554f\u984c\u7684\u7b54\u6848\u6307\u5b9a\u70ba\u6587 \u672c\u4e2d\u7684\u4e00\u500b\u7247\u6bb5\u3002\u9664\u6b64\u4e4b\u5916\uff0c\u6b64\u985e\u554f\u984c\u6f38\u6f38\u5730\u5f9e\u55ae\u8f2a(Single-turn)\u554f\u7b54\u7684\u5f62\u5f0f\u81f3\u591a\u8f2a (Multi-turn) \u554f \u7b54 \u7684 \u65b9 \u5411 \u767c \u5c55 \uff0c \u5c31 \u5f62 \u6210 \u4e86 \u5c0d \u8a71 \u5f0f \u7684 \u554f \u7b54 \u7cfb \u7d71 (Conversational Question Answering) \uff0c \u5176 \u4e2d \u6700 \u5177 \u4ee3 \u8868 \u6027 \u7684 \u5c31 \u662f CoQA (A Conversational Question Answering Challenge) (Reddy, Chen & Manning, 2019) \u8207 QuAC (Question Answering in Context) (Choi et al., 2018) \u6578\u64da\u96c6\u3002\u53e6\u5916\uff0c\u9078\u64c7\u984c\u578b\u5f0f\u7684\u554f\u7b54\u7cfb\u7d71\u5247\u662f\u7d66\u4e88\u6a5f\u5668\u6587\u7ae0\u3001\u554f\u984c\u4ee5\u53ca\u591a\u500b\u9078 \u9805 \uff0c \u6a5f \u5668 \u9808 \u5f9e \u9019 \u4e9b \u9078 \u9805 \u4e2d \u9078 \u64c7 \u4e00 \u500b \u505a \u70ba \u7b54 \u6848 \u8f38 \u51fa \uff0c RACE (Large-scale ReAding Comprehension Dataset From Examinations) (Lai, Xie, Liu, Tang & Hovy, 2017 )\u662f\u9078\u64c7\u984c\u5f0f \u554f\u7b54\u7cfb\u7d71\u6975\u5177\u4ee3\u8868\u6027\u7684\u6578\u64da\u96c6\uff0c\u5b83\u662f\u5f9e\u570b\u4e2d\u8207\u9ad8\u4e2d\u7684\u8003\u984c\u4e0a\u9032\u884c\u8490\u96c6\u800c\u6210\u7684\u5927\u578b\u6578\u64da\u96c6\u3002 \u57fa\u65bc\u7279\u5fb5\u7c92\u5ea6\u4e4b\u8a13\u7df4\u7b56\u7565\u65bc\u4e2d\u6587\u53e3\u8a9e\u554f\u7b54\u7cfb\u7d71\u4e4b\u61c9\u7528 3 \u7d9c\u89c0\u554f\u7b54\u7cfb\u7d71\u7684\u767c\u5c55\uff0c\u57fa\u65bc\u6587\u672c\u7684\u554f\u7b54\u6a21\u578b\uff0c\u5728\u5404\u7a2e\u984c\u578b\u4e0a\uff0c\u90fd\u6709\u6108\u4f86\u6108\u5f37\u5065\u7684\u6a21\u578b\u9678 \u7e8c\u5728\u8fd1\u5e74\u88ab\u63d0\u51fa (Tang, Cai & Zhuo, 2019; Wang, Yu, Jiang & Chang, 2018; Zhang et al., 2019; Ran, Li, Hu & Zhou, 2019) ", |
| "cite_spans": [ |
| { |
| "start": 495, |
| "end": 520, |
| "text": "(Kingma & Dhariwal, 2018;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 521, |
| "end": 548, |
| "text": "Karras, Laine & Aila, 2019;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 655, |
| "end": 672, |
| "text": "(Yu et al., 2015)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 946, |
| "end": 986, |
| "text": "(Rajpurkar, Zhang, Lopyrev & Liang, 2016", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1296, |
| "end": 1325, |
| "text": "(Reddy, Chen & Manning, 2019)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1365, |
| "end": 1384, |
| "text": "(Choi et al., 2018)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1530, |
| "end": 1563, |
| "text": "(Lai, Xie, Liu, Tang & Hovy, 2017", |
| "ref_id": null |
| }, |
| { |
| "start": 1684, |
| "end": 1708, |
| "text": "(Tang, Cai & Zhuo, 2019;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 1709, |
| "end": 1739, |
| "text": "Wang, Yu, Jiang & Chang, 2018;", |
| "ref_id": null |
| }, |
| { |
| "start": 1740, |
| "end": 1759, |
| "text": "Zhang et al., 2019;", |
| "ref_id": null |
| }, |
| { |
| "start": 1760, |
| "end": 1785, |
| "text": "Ran, Li, Hu & Zhou, 2019)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u7dd2\u8ad6 (Introduction)", |
| "sec_num": "1." |
| }, |
| { |
| "text": "\u50b3\u7d71\u57fa\u65bc\u6587\u5b57\u7684\u554f\u7b54\u7cfb\u7d71\u88e1\uff0cQACNN (Query-based Attention CNN) (Liu, Wu, Lee, 2017) \u8207 Co-Matching (Zhang et al., 2019)\u70ba\u9078\u64c7\u984c\u5f62\u5f0f\u7684\u7d93\u5178\u6a21\u578b\uff0cQANet (Yu et al., 2018) \u5247 \u70ba\u6587\u672c\u6bb5\u9810\u6e2c\u7684\u7d93\u5178\u6a21\u578b\u3002QACNN \u662f\u61c9\u7528\u65bc\u82f1\u6587\u9078\u64c7\u984c\u7684\u554f\u7b54\u6a21\u578b\uff0c\u6b64\u6a21\u578b\u5171\u5305\u542b\u4e09 \u500b\u4e3b\u8981\u90e8\u5206\uff1a\u76f8\u4f3c\u6620\u5c04\u5c64(Similarity Mapping Layer)\u3001QACNN \u5c64\u8207\u9810\u6e2c\u5c64\u3002\u5728\u76f8\u4f3c\u6620\u5c04 \u5c64\u4e2d\uff0c\u5c07\u8f38\u5165\u6587\u5b57\u900f\u904e\u8a5e\u5411\u91cf (Mikolov, Chen, Corrado & Dean, 2013; Pennington, Socher & Manning, 2014; Bengio, Ducharme, Vincent & Jauvin, 2003 (Vaswani et al., 2017) Ba, J. L., Kiros, J. R., & Hinton, G. E. (2016) . Layer Normalization. In arXiv preprint arXiv:1607.06450", |
| "cite_spans": [ |
| { |
| "start": 47, |
| "end": 67, |
| "text": "(Liu, Wu, Lee, 2017)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 120, |
| "end": 137, |
| "text": "(Yu et al., 2018)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 251, |
| "end": 288, |
| "text": "(Mikolov, Chen, Corrado & Dean, 2013;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 289, |
| "end": 324, |
| "text": "Pennington, Socher & Manning, 2014;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 325, |
| "end": 365, |
| "text": "Bengio, Ducharme, Vincent & Jauvin, 2003", |
| "ref_id": null |
| }, |
| { |
| "start": 366, |
| "end": 388, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 400, |
| "end": 436, |
| "text": "Kiros, J. R., & Hinton, G. E. (2016)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u76f8\u95dc\u65b9\u6cd5 (Related Methods)", |
| "sec_num": "2." |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "; Bojanowski, Grave, Joulin & Mikilov, 2017)\u8868\u793a\u5f8c\uff0c\u900f\u904e\u9918\u5f26\u76f8\u4f3c\u5ea6\u8a08\u7b97\u6bcf\u500b\u6587\u7ae0\u8207\u554f\u984c\u6216\u9078\u9805\u4e4b\u9593\u7684\u76f8\u4f3c\u5ea6\u53d6\u5f97\u76f8 \u5c0d\u61c9\u7684\u77e9\u9663 \u8207 \u4f86\u8868\u793a\u4f4d\u7f6e\u95dc\u806f(Location Relationship)\u7684\u8cc7\u8a0a\uff1a , , ,", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "\u76f8\u95dc\u65b9\u6cd5 (Related Methods)", |
| "sec_num": "2." |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u0305 \u2296 \u0305 \u2297 (4) \u57fa\u65bc\u7279\u5fb5\u7c92\u5ea6\u4e4b\u8a13\u7df4\u7b56\u7565\u65bc\u4e2d\u6587\u53e3\u8a9e\u554f\u7b54\u7cfb\u7d71\u4e4b\u61c9\u7528 5 (5) \u63a5\u8457\u900f\u904e\u96d9\u5411\u5faa\u74b0\u795e\u7d93\u7db2\u8def(Recurrent Neural Network, RNN)\u4f86\u5f59\u6574\u6bcf\u500b\u5b57\u7d1a\u7279\u5fb5 \u4f86 \u7522\u751f\u53e5\u7d1a\u7279\u5fb5 \uff1a (6) \u5c07\u6587\u672c\u6bcf\u500b\u53e5\u5b50\u7684\u53e5\u7d1a\u7279\u5fb5 \u4e32\u63a5\u8d77\u4f86\u5f8c\uff0c\u91cd\u65b0\u8868\u9054\u70ba \uff1a ; ; \u2026 ; (7) \u518d \u7d93 \u7531 \u968e \u7d1a \u5f0f \u5f59 \u96c6 (Hierarchical Aggregation) \u5c0d \u65bc \u6bcf \u500b \u53e5 \u7d1a \u7684 \u7d50 \u69cb \u6574 \u5408 \u6210 \u6587 \u6a94 \u7d1a (Document-level)\u7684\u7279\u5fb5 \uff0c\u4e26\u7528\u6b64\u7279\u5fb5\u4f86\u9810\u6e2c\u6700\u6709\u53ef\u80fd\u662f\u7b54\u6848\u7684\u9078\u9805\uff1a (8) \u5716 3. QANet \u6a21\u578b\u793a\u610f\u5716 [Fiqure 3. Illustration of QANet Model.] \u76f8\u8f03\u65bc QACNN \u8207 Co-Matching\uff0cQANET \u662f\u9810\u6e2c\u6587\u672c\u6bb5\u7684\u554f\u7b54\u6a21\u578b\uff0c\u6b64\u6a21\u578b\u5305\u542b\u4e94 \u500b\u4e3b\u8981\u90e8\u5206\uff1a\u5d4c\u5165\u5c64(Embedding Layer)\u3001\u5d4c\u5165\u7de8\u78bc\u5c64(Embedding Encoder Layer)\u3001\u8a9e\u5883\u67e5 \u8a62\u6ce8\u610f\u529b\u5c64(Context-query Attention Layer)\u3001\u6a21\u578b\u7de8\u78bc\u5c64(Model Encoder Layer)\u8207\u8f38\u51fa\u5c64 (Output Layer) \u3002 \u76ee \u524d \u5927 \u591a \u6578 \u7684 \u6a5f \u5668 \u95b1 \u8b80 \u7406 \u89e3 \u6a21 \u578b \u4e3b \u8981 \u7686 \u4ee5 \u6ce8 \u610f \u529b \u6a5f \u5236 (Attention Mechanism)\u8207\u5faa\u74b0\u795e\u7d93\u7db2\u8def\u70ba\u4e3b\u8981\u67b6\u69cb\uff0c\u4f46 QANET \u6574\u9ad4\u7684\u7db2\u8def\u8a2d\u8a08\u7686\u6368\u68c4\u5faa\u74b0\u795e\u7d93\u7db2 \u8def \uff0c \u50c5 \u4f7f \u7528 \u5377 \u7a4d \u795e \u7d93 \u7db2 \u8def (Convolutional Neural Networks, CNN) \u548c \u81ea \u6211 \u6ce8 \u610f \u529b \u6a5f \u5236 (Self-attention Mechanism)", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "\u76f8\u95dc\u65b9\u6cd5 (Related Methods)", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Bengio, Y., Ducharme, R., Vincent, P., & Jauvin, C. (2003) . A Neural Probabilistic Language Model. JMLR, 3, 1137-1155.", |
| "cite_spans": [ |
| { |
| "start": 26, |
| "end": 58, |
| "text": "Vincent, P., & Jauvin, C. (2003)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u76f8\u95dc\u65b9\u6cd5 (Related Methods)", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Bojanowski, P., Grave, E., Joulin, A., & Mikolov, T. (2017) . Enriching Word Vectors with Subword Information. In Proceedings of TACL, 5, 135-146. doi: 10.1162/tacl_a_00051 ", |
| "cite_spans": [ |
| { |
| "start": 39, |
| "end": 59, |
| "text": "& Mikolov, T. (2017)", |
| "ref_id": null |
| }, |
| { |
| "start": 129, |
| "end": 172, |
| "text": "TACL, 5, 135-146. doi: 10.1162/tacl_a_00051", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u76f8\u95dc\u65b9\u6cd5 (Related Methods)", |
| "sec_num": "2." |
| }, |
| { |
| "text": "CNA: http://www.cna.com.tw/ 2 Formosa Grand Challenge -Talk to AI: https://fgc.stpi.narl.org.tw/activity/techai2018", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "QuAC : Question Answering in Context. In arXiv preprint", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Yatskar", |
| "suffix": "" |
| }, |
| { |
| "first": "W.-T", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "\u2026zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1808.07036" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Choi, E., He, H., Iyyer, M., Yatskar, M., Yih, W.-t., Choi, Y., \u2026Zettlemoyer, L. (2018). QuAC : Question Answering in Context. In arXiv preprint arXiv:1808.07036", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "M.-W", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXivpreprintarXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Devlin, J., Chang, M.-W., Lee, K., & Toutanova, K. (2018). BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In arXiv preprint arXiv:1810.04805", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Deep Residual Learning for Image Recognition", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of CVPR 2016", |
| "volume": "", |
| "issue": "", |
| "pages": "770--778", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/CVPR.2016.90" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep Residual Learning for Image Recognition. In Proceedings of CVPR 2016,770-778. doi: 10.1109/CVPR.2016.90", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Constructing Sub-word Units for Spoken Term Detection", |
| "authors": [], |
| "year": null, |
| "venue": "Proceedings of ICASSP 2017", |
| "volume": "", |
| "issue": "", |
| "pages": "5780--5784", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICASSP.2017.7953264" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Constructing Sub-word Units for Spoken Term Detection. In Proceedings of ICASSP 2017, 5780-5784. doi: 10.1109/ICASSP.2017.7953264", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "A Style-based Generator Architecture for Generative Adversarial Networks", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Karras", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Laine", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Aila", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of CVPR 2019", |
| "volume": "", |
| "issue": "", |
| "pages": "4401--4410", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/CVPR.2019.00453" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karras, T., Laine, S., & Aila, T. (2019). A Style-based Generator Architecture for Generative Adversarial Networks. In Proceedings of CVPR 2019, 4401-4410. doi: 10.1109/CVPR.2019.00453", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Adam: A Method for Stochastic Optimization", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXivpreprintarXiv:1412.6980" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kingma, D. P. & Ba, J. (2014). Adam: A Method for Stochastic Optimization. In arXiv preprint arXiv:1412.6980", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Glow: Generative Flow with Invertible 1x1 Convolutions", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Dhariwal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of NIPS 2018", |
| "volume": "", |
| "issue": "", |
| "pages": "10215--10224", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kingma, D. P. & Dhariwal, P. (2018). Glow: Generative Flow with Invertible 1x1 Convolutions. In Proceedings of NIPS 2018, 10215-10224.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "RACE: Large-scale ReAding Comprehension Dataset From Examinations", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXivpreprintarXiv:1704.04683" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lai, G., Xie, Q., Liu, H., Yang, Y., & Hovy, E. (2017). RACE: Large-scale ReAding Comprehension Dataset From Examinations. In arXiv preprint arXiv:1704.04683", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Mitigating the Impact of Speech Recognition Errors on Spoken Question Answering by Adversarial Domain Adaptation", |
| "authors": [ |
| { |
| "first": "C.-H", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Y.-N", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of ICASSP 2019", |
| "volume": "", |
| "issue": "", |
| "pages": "7300--7304", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICASSP.2019.8683377" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lee, C.-H., Chen, Y.-N., & Lee, H.-y. (2019). Mitigating the Impact of Speech Recognition Errors on Spoken Question Answering by Adversarial Domain Adaptation. In Proceedings of ICASSP 2019, 7300-7304. doi: 10.1109/ICASSP.2019.8683377", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Query-based Attention CNN for Text Similarity Map", |
| "authors": [ |
| { |
| "first": "T.-C", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Y.-H", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXivpreprintarXiv:1709.05036" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liu, T.-C., Wu, Y.-H., & Lee, H.-y. (2017). Query-based Attention CNN for Text Similarity Map. In arXiv preprint arXiv:1709.05036", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Efficient Estimation of Word Representations in Vector Space", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXivpreprinarXiv:1301.3781" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikolov, T., Chen, K., Corrado, G., & Dean, J. (2013). Efficient Estimation of Word Representations in Vector Space. In arXiv preprin arXiv:1301.3781", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "PyTorch: An Imperative Style, High-Performance Deep Learning Library", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Chintala", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "32", |
| "issue": "", |
| "pages": "8024--8035", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chintala, S. (2019). PyTorch: An Imperative Style, High-Performance Deep Learning Library. Advances in Neural Information Processing Systems 32, 8024-8035", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "GloVe: Global Vectors for Word Representation", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP 2014", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/D14-1162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pennington, J., Socher, R., & Manning, C. D. (2014). GloVe: Global Vectors for Word Representation. In Proceedings of EMNLP 2014, 1532-1543. doi: 10.3115/v1/D14-1162", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Semi-orthogonal Low-rank Matrix Factorization for Deep Neural Networks", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Yarmohamadi", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "\u2026khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of INTERSPEECH 2018", |
| "volume": "", |
| "issue": "", |
| "pages": "3743--3747", |
| "other_ids": { |
| "DOI": [ |
| "10.21437/Interspeech.2018-1417" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Povey, D., Cheng, G., Wang, Y., Li, K., Xu, H., Yarmohamadi, M., \u2026Khudanpur, S. (2018). Semi-orthogonal Low-rank Matrix Factorization for Deep Neural Networks. In Proceedings of INTERSPEECH 2018, 3743-3747. doi: 10.21437/Interspeech.2018-1417", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "The Kaldi Speech Recognition Toolkit", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ghoshal", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Boulianne", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Glembek", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Goel", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "\u2026vesel\u00fd", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of ASRU", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Povey, D., Ghoshal, A., Boulianne, G., Burget, L., Glembek, O., Goel, N., \u2026Vesel\u00fd, K. (2011). The Kaldi Speech Recognition Toolkit. In Proceedings of ASRU 2011.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Purely Sequence-trained Neural Networks for ASR Based on Lattice-free MMI", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Peddinti", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Galvez", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Ghahremani", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Manohar", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Na", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "\u2026khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of INTERSPEECH 2016", |
| "volume": "", |
| "issue": "", |
| "pages": "2751--2755", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Povey, D., Peddinti, V., Galvez, D., Ghahremani, P., Manohar, V., Na, X., Wang, Y., \u2026Khudanpur, S. (2016). Purely Sequence-trained Neural Networks for ASR Based on Lattice-free MMI. In Proceedings of INTERSPEECH 2016, 2751-2755. \u7f85\u4e0a\u5821\u8207\u9673\u51a0\u5b87", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Option Comparison Network for Multiple-choice Reading Comprehension", |
| "authors": [ |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Ran", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXivpreprintarXiv:1903.03033" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ran, Q., Li, P., Hu, W., & Zhou, J. (2019). Option Comparison Network for Multiple-choice Reading Comprehension. In arXiv preprint arXiv:1903.03033", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "SQuAD: 100,000+ Questions for Machine Comprehension of Text", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Rajpurkar", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Lopyrev", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of EMNLP 2016", |
| "volume": "", |
| "issue": "", |
| "pages": "2383--2302", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1264" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rajpurkar, P., Zhang, J., Lopyrev, K., & Liang, P. (2016). SQuAD: 100,000+ Questions for Machine Comprehension of Text. In Proceedings of EMNLP 2016, 2383-2302. doi: 10.18653/v1/D16-1264", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Coqa: A Conversational Question Answering Challenge", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Reddy", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of TACL", |
| "volume": "7", |
| "issue": "", |
| "pages": "249--266", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00266" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reddy, S., Chen, D., & Manning, C. D. (2019). Coqa: A Conversational Question Answering Challenge. In Proceedings of TACL, 7, 249-266. doi: 10.1162/tacl_a_00266", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "DRCD: a Chinese Machine Reading Comprehension Dataset", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "C" |
| ], |
| "last": "Shao", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Tseng", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Tsai", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXivpreprintarXiv:1806.00920" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shao, C. C., Liu, T., Lai, Y., Tseng, Y., & Tsai, S. (2018). DRCD: a Chinese Machine Reading Comprehension Dataset. In arXiv preprint arXiv:1806.00920", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Spoken Question Answering Using Tree-structured Conditional Random Fields and Two-layer Random Walk", |
| "authors": [ |
| { |
| "first": "S.-R", |
| "middle": [], |
| "last": "Shiang", |
| "suffix": "" |
| }, |
| { |
| "first": "H.-Y", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ISCA 2014", |
| "volume": "", |
| "issue": "", |
| "pages": "263--267", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shiang, S.-R., Lee, H.-y., & Lee, L.-s. (2014). Spoken Question Answering Using Tree-structured Conditional Random Fields and Two-layer Random Walk. In Proceedings of ISCA 2014, 263-267.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Hybrid word-subword spoken term detection. (Doctoral thesis", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Sz\u0151ke", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "SZ\u0150KE, I. (2010). Hybrid word-subword spoken term detection. (Doctoral thesis, Brno University of Technology, Brno, Czech Republic). Retrieved from: https://www.fit.vut.cz/study/phd-thesis/150/", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Multi-Matching Network for Multiple Choice Reading Comprehension", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "H" |
| ], |
| "last": "Zhuo", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of AAAI 2019", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1609/aaai.v33i01.33017088" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tang, M., Cai, J., & Zhuo, H. H. (2019). Multi-Matching Network for Multiple Choice Reading Comprehension. In Proceedings of AAAI 2019. doi:10.1609/aaai.v33i01.33017088", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Attention is All You Need", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of NIPS 2017", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., \u2026 Polosukhin, I. (2017). Attention is All You Need. In Proceedings of NIPS 2017, 5998-6008.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Sequence-discriminative Training of Deep Neural Networks", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Vesel\u00fd", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ghoshal", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of INTERSPEECH 2013", |
| "volume": "", |
| "issue": "", |
| "pages": "2345--2349", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vesel\u00fd, K., Ghoshal, A., Burget, L., & Povey, D. (2013). Sequence-discriminative Training of Deep Neural Networks. In Proceedings of INTERSPEECH 2013, 2345-2349.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "MATBN: A Mandarin Chinese Broadcast News Corpus", |
| "authors": [ |
| { |
| "first": "H.-M", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "J.-W", |
| "middle": [], |
| "last": "Kuo", |
| "suffix": "" |
| }, |
| { |
| "first": "S.-S", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "IJCLCLP", |
| "volume": "10", |
| "issue": "2", |
| "pages": "219--236", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang, H.-M., Chen, B., Kuo, J.-W., & Cheng, S.-S. (2005). MATBN: A Mandarin Chinese Broadcast News Corpus. IJCLCLP, 10(2), 219-236.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Revisiting Video Saliency: A Large-Scale Benchmark and a New Model", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "M.-M", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Borji", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of CVPR 2018", |
| "volume": "", |
| "issue": "", |
| "pages": "4894--4903", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/CVPR.2018.00514" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang, W., Shen, J., Guo, F., Cheng, M.-M., & Borji, A. (2018). Revisiting Video Saliency: A Large-Scale Benchmark and a New Model. In Proceedings of CVPR 2018, 4894-4903. doi: 10.1109/CVPR.2018.00514", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Revisiting Video Saliency: A Large-Scale Benchmark and a New Model", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "M.-M", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Borji", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of CVPR 2018", |
| "volume": "", |
| "issue": "", |
| "pages": "4894--4903", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/CVPR.2018.00514" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang, W., Shen, J., Guo, F., Cheng, M.-M., & Borji, A. (2018). Revisiting Video Saliency: A Large-Scale Benchmark and a New Model. In Proceedings of CVPR 2018, 4894-4903. doi: 10.1109/CVPR.2018.00514", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "XLNet: Generalized Autoregressive Pretraining for Language Understanding", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXivpreprintarXiv:1906.08237" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yang, Z., Dai, Z., Yang, Y., Carbonell, J., Salakhutdinov, R., & Le, Q. V. (2019). XLNet: Generalized Autoregressive Pretraining for Language Understanding. In arXiv preprint arXiv:1906.08237.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "QANet: Combining Local Convolution with Global Self-Attention for Reading Comprehension", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "W" |
| ], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Dohan", |
| "suffix": "" |
| }, |
| { |
| "first": "M.-T", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Norouzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [ |
| "V" |
| ], |
| "last": "\u2026le", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXivpreprintarXiv:1804.09541" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu, A. W., Dohan, D., Luong, M.-T., Zhao, R., Chen, K., Norouzi, M., \u2026Le, Q. V. (2018). QANet: Combining Local Convolution with Global Self-Attention for Reading Comprehension. In arXiv preprint arXiv:1804.09541", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "LSUN: Construction of a Large-scale Image Dataset Using Deep Learning with Humans in the Loop. In arXiv preprint", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Seff", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Funkhouser", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1506.03365" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu, F., Seff, A., Zhang, Y., Song, S., Funkhouser, T., & Xiao, J. (2015). LSUN: Construction of a Large-scale Image Dataset Using Deep Learning with Humans in the Loop. In arXiv preprint arXiv:1506.03365.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "content": "<table><tr><td>4</td><td>\u7f85\u4e0a\u5821\u8207\u9673\u51a0\u5b87</td></tr><tr><td>\u6240\u793a\u3002</td><td/></tr><tr><td colspan=\"2\">\u5716 1. \u9664\u4e86 QACNN \u5916\uff0cCo-Matching \u540c\u6a23\u70ba\u82f1\u6587\u9078\u64c7\u984c\u7684\u7d93\u5178\u6a21\u578b\uff0c\u5176\u6a21\u578b\u793a\u610f\u5716\u5982\u5716 2</td></tr><tr><td colspan=\"2\">\u6240\u793a\u3002\u6b64\u6a21\u578b\u5229\u7528\u5339\u914d\u7279\u5fb5(Match Feature)\u4f86\u7522\u751f\u6587\u7ae0\u3001\u554f\u984c\u8207\u9078\u9805\u7684\u5b57\u7d1a\u7279\u5fb5\u3002\u5339\u914d\u7279</td></tr><tr><td colspan=\"2\">\u5fb5\u901a\u5e38\u6703\u6709\u5169\u7a2e\u8f38\u5165\uff0c\u5206\u5225\u4ee5 \u8207 \u4f86\u8868\u793a\uff0c\u4e26\u4e14\u900f\u904e\u5404\u81ea\u6240\u5b9a\u7fa9\u7684\u6ce8\u610f\u529b\u6a5f\u5236\u7b97\u6cd5\u4f86\u7522</td></tr><tr><td>\u751f \u0305 \u4e4b\u5f8c\u4f86\u9032\u884c\u5f8c\u7e8c\u7684\u904b\u7b97\u9700\u6c42\uff0c\u5982\u5f0f(3)\u6240\u793a\u3002</td><td/></tr><tr><td colspan=\"2\">\u5176\u4e2d \u70ba\u6587\u7ae0\u5171\u6709\u5e7e\u500b\u53e5\u5b50\u3001 \u8207 \u5247\u662f\u5206\u5225\u4ee3\u8868\u6587\u7ae0\u8207\u554f\u984c\u6bcf\u53e5\u7684\u9577\u5ea6\u3002\u63a5\u8457\uff0c\u7d93\u7531</td></tr><tr><td colspan=\"2\">QACNN \u5c64\u900f\u904e\u5169\u968e\u6bb5\u7684\u6ce8\u610f\u529b\u6a5f\u5236(Attention Mechanism)\u5c07\u4f4d\u7f6e\u76f8\u95dc\u8cc7\u8a0a\u8996\u70ba\u4e00\u7a2e\u5716\u5f62 (Pattern)\u53bb\u7522\u751f\u5b57\u7d1a(Word-level)\u5230\u53e5\u7d1a(Sentence-level)\u7684\u7279\u5fb5 \u4f86\u8868\u793a\u7b2c m \u500b\u9078\u9805\u5c0d\u65bc \u6587\u7ae0\u8207\u554f\u984c\u7684\u8cc7\u8a0a\uff1a \u5716 2. \u900f\u904e\u5339\u914d\u7279\u5fb5\u63d0\u53d6\u76f8\u95dc(Correlation)\u8cc7\u8a0a \u91cd\u65b0\u8868\u9054 \u8207 \u7684\u7279\u5fb5\u95dc\u4fc2\uff0c\u5982\u5f0f(4)</td></tr><tr><td>, \u6240\u793a\u3002Co-Matching \u5c07\u6587\u7ae0\u4e2d\u7684\u53e5\u5b50 \u3001\u554f\u984c \u8207\u9078\u9805 \uff0c\u4f86\u7522\u751f</td><td>(2) \u7279\u5fb5\u4ee5\u4f9b\u5f8c\u7e8c\u6a21\u578b</td></tr><tr><td colspan=\"2\">\u9700\u6c42\u4f7f\u7528\uff0c\u5982\u5f0f(5)\u6240\u793a\uff0c\u4e26\u4e14\u5176\u4e2d \u8207 \u5206\u5225\u70ba\u6587\u7ae0\u7b2c\u5e7e\u500b\u53e5\u5b50\u4ee5\u53ca\u7b2c\u5e7e\u500b\u9078\u9805\u3002 \u6700\u5f8c\u900f\u904e\u9810\u6e2c\u5c64\u4f86\u8490\u96c6\u6bcf\u7d44\u9078\u9805\u7684\u8cc7\u8a0a \u4f86\u9810\u6e2c\u6700\u6709\u53ef\u80fd\u662f\u7b54\u6848\u7684\u9078\u9805\u3002QACNN \u7684\u7279 \u8272\u662f\u5c07\u6587\u7ae0\u3001\u554f\u984c\u8207\u9078\u9805\u900f\u904e\u6587\u5b57\u76f8\u4f3c\u5ea6\u7684\u65b9\u5f0f\uff0c\u5c07\u6b64\u4efb\u52d9\u8b8a\u6210\u4e00\u7a2e\u985e\u4f3c\u65bc\u5716\u5f62\u5b78\u7fd2 \u0305 ,</td></tr><tr><td colspan=\"2\">(Pattern Learning)\u7684\u65b9\u5f0f\u4f86\u5448\u73fe\u3002\u56e0\u6b64\uff0cQACNN \u5c64\u4e2d\uff0c\u5229\u7528\u591a\u500b\u6838\u5927\u5c0f(Kernel Size)\u4f86\u53d6</td></tr><tr><td colspan=\"2\">\u5f97\u4e0d\u540c\u5c3a\u5ea6\u7684\u7279\u5fb5\uff0c\u6700\u5f8c\u5229\u7528\u9019\u4e9b\u7279\u5fb5\u4f86\u9032\u884c\u7b54\u6848\u7684\u9810\u6e2c\u3002QACNN \u6a21\u578b\u793a\u610f\u5716\u5982\u5716 1</td></tr></table>", |
| "html": null, |
| "text": "", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td>6 8 10</td><td>\u57fa\u65bc\u7279\u5fb5\u7c92\u5ea6\u4e4b\u8a13\u7df4\u7b56\u7565\u65bc\u4e2d\u6587\u53e3\u8a9e\u554f\u7b54\u7cfb\u7d71\u4e4b\u61c9\u7528 \u57fa\u65bc\u7279\u5fb5\u7c92\u5ea6\u4e4b\u8a13\u7df4\u7b56\u7565\u65bc\u4e2d\u6587\u53e3\u8a9e\u554f\u7b54\u7cfb\u7d71\u4e4b\u61c9\u7528 \u57fa\u65bc\u7279\u5fb5\u7c92\u5ea6\u4e4b\u8a13\u7df4\u7b56\u7565\u65bc\u4e2d\u6587\u53e3\u8a9e\u554f\u7b54\u7cfb\u7d71\u4e4b\u61c9\u7528 \u57fa\u65bc\u7279\u5fb5\u7c92\u5ea6\u4e4b\u8a13\u7df4\u7b56\u7565\u65bc\u4e2d\u6587\u53e3\u8a9e\u554f\u7b54\u7cfb\u7d71\u4e4b\u61c9\u7528</td><td>\u7f85\u4e0a\u5821\u8207\u9673\u51a0\u5b87 \u7f85\u4e0a\u5821\u8207\u9673\u51a0\u5b87 \u7f85\u4e0a\u5821\u8207\u9673\u51a0\u5b87 7 9 11 \u7f85\u4e0a\u5821\u8207\u9673\u51a0\u5b87 13 \u7f85\u4e0a\u5821\u8207\u9673\u51a0\u5b87</td></tr><tr><td colspan=\"3\">\u7fd2\u5168\u6587\u4e2d\u55ae\u8a5e\u8207\u55ae\u8a5e\u4e4b\u9593\u7684\u95dc\u4fc2\uff0c\u7531\u65bc\u6c92\u6709\u6642\u9593\u4e0a\u7684\u905e\u8ff4\u95dc\u4fc2\uff0c\u53ef\u4ee5\u628a\u6a21\u578b\u5efa\u7684\u66f4\u6df1\uff0c \u4e5f\u8b93\u8a13\u7df4\u901f\u5ea6\u63d0\u5347 3-13 \u500d\u3001\u63a8\u8ad6\u901f\u5ea6\u63d0\u5347 4-9 \u500d\u3002QANET \u6240\u4f7f\u7528\u7684\u7de8\u78bc\u5668\u90fd\u662f\u57fa\u65bc\u4e00 \u5b9a\u898f\u683c\u4f86\u5efa\u69cb\uff0c\u5982\u5716 3 \u6240\u793a\uff0c\u4e26\u5c0d\u6bcf\u4e00\u5c64\u5377\u7a4d\u5c64\u7684\u6578\u91cf\u9032\u884c\u4fee\u6539\uff0c\u800c\u4e14\u6bcf\u4e00\u5c64\u4e4b\u9593\u7686\u4f7f \u7528\u5c64\u6b63\u898f\u5316(Layer Normalization) (Ba, Kiros & Hinton, 2016)\u8207\u6b98\u5dee\u7db2\u8def(Residual Network) (He, Zhang, Ren & Sun, 2016)\u4f86\u7a69\u5b9a\u8a13\u7df4\u904e\u7a0b\u3002\u9664\u6b64\u4e4b\u5916\uff0cQANET \u5171\u4eab\u6587\u7ae0\u3001\u554f\u984c\u8207\u6a21 \u578b\u7de8\u78bc\u5668\u4e4b\u9593\u7684\u90e8\u5206\u6b0a\u91cd\uff0c\u4ee5\u9054\u5230\u66f4\u52a0\u6cdb\u5316\u4e4b\u6548\u679c\u3002 \u8fd1\u671f BERT(Bidirectional Encoder Representation from Transformers) (Devling, Chang, Lee & Toutanova, 2018)\u7684\u554f\u4e16\u4ee5\u53ca\u5f8c\u7e8c XLNet (Yang et al., 2019)\u5c0d\u65bc BERT \u7684\u6539\u9032\uff0c\u8b93 \u932f\u8aa4\u6240\u9020\u6210\u7684\u6548\u80fd\u6e1b\u640d\uff0c\u8b93\u539f\u672c\u554f\u7b54\u6a21\u578b\u9054\u5230\u66f4\u5177\u5f37\u5065\u6027\u7684\u6548\u679c\u3002 \u7684\u9577\u5ea6\u9650\u5236\u5206\u5225\u70ba 600\u300140 \u8207 40 \u500b\u5b57\u5143\u3002\u5be6\u9a57\u7686\u4ee5 python 3.7.2 \u8207 PyTorch 0.4.1(Paszke et al., 2017)\u5957\u4ef6\u5be6\u73fe\u3002 \u8868 1. 2018 \u79d1\u6280\u5927\u64c2\u53f0\u6578\u64da\u96c6 [Table 1. 2018 Formosa Grand Challenge -Talk to AI Dataset.] 
\u8a13\u7df4\u96c6 Training Set \u767c\u5c55\u96c6 Development Set \u6e2c\u8a66\u96c6 Test Set 4.2 \u5be6\u9a57\u7d50\u679c (Experimental Results) 4.2.1 \u53e3\u8a9e\u9078\u64c7\u984c\u7684\u5be6\u9a57\u7d50\u679c (Experimental Results of Spoken MCQA) \u6b64\u5be6\u9a57\u7d50\u679c\u4e2d\uff0c\u6211\u5011\u5148\u5448\u73fe QACNN \u4ee5\u53ca Co-Matching \u5728\u65bc\u4e2d\u6587\u53e3\u8a9e\u9078\u64c7\u984c\u7684\u554f\u7b54\u4efb\u52d9 \u4e4b\u7d50\u679c\uff0c\u6b64\u7d44\u5be6\u9a57\u4e5f\u662f\u672c\u5be6\u9a57\u7684\u57fa\u790e\u7cfb\u7d71\uff0c\u5be6\u9a57\u7d50\u679c\u5982\u8868 3 \u8207\u8868 4 \u6240\u793a\u3002\u7531\u65bc\u6211\u5011\u8a13\u7df4 \u63a5\u4e0b\u4f86\uff0c\u6211\u5011\u5448\u73fe\u672c\u8ad6\u6587\u63d0\u4f9b\u4e4b\u8a13\u7df4\u7b56\u7565\u65bc\u5404\u5f0f\u6a21\u578b\u7684\u6548\u679c\uff0c\u5982\u8868 5 \u81f3 7 \u6240\u793a\u3002\u6709 \u5225\u65bc\u8868 3 \u8207\u8868 4 \u7684\u8868\u793a\u6cd5\uff0cW \u81f3 W-C-S \u662f\u6307\u5404\u7a2e\u4e0d\u540c\u7684\u8a13\u7df4\u7b56\u7565\u6642\uff0c\u6240\u4f7f\u7528\u7684\u8a5e\u5411\u91cf\u9806 \u5e8f\uff1b\u5728\u65bc\u8868\u683c\u4e2d QACNN[W/C/S]\uff0c\u5247\u662f QACNN \u5728\u9019\u8a13\u7df4\u7b56\u7565\u4e0b\uff0c\u6211\u5011\u4f7f\u7528[W/C/S]\u7684[\u8a5e /\u5b57\u7b26/\u97f3\u7bc0]\u5411\u91cf\u4f5c\u70ba\u8f38\u5165\u6642\uff0c\u5176\u6a21\u578b\u9810\u6e2c\u7684\u6e96\u78ba\u5ea6\u3002\u5176\u4e2d\u7c97\u9ad4\u4ee3\u8868\u7684\u662f\u5c0d\u61c9\u65bc\u8a13\u7df4\u6c7a\u7b56 \u6642\uff0c\u6709\u6240\u4f7f\u7528\u7684\u5411\u91cf\u8868\u793a\uff1b\u7c97\u9ad4\u52a0\u5e95\u7dda\u7684\u6578\u64da\u5247\u662f\u7531\u65bc\u8a13\u7df4\u7b56\u7565\u6709\u5229\u7528\u591a\u500b\u5411\u91cf\u8868\u793a\u4f86 \u8868 6. \u5404\u6a21\u578b\u65bc\u4e2d\u6587\u53e3\u8a9e\u9078\u64c7\u984c\u7684\u554f\u7b54\u4efb\u52d9\u6e2c\u8a66\u96c6 1 \u4e4b\u81ea\u52d5\u8a9e\u97f3\u8fa8\u8b58\u5be6\u9a57\u7d50\u679c [Table 6. Experimental Results of the Baseline System in Chinese Spoken Question Answering Test 1 Task with ASR Transcription.] Test 1 W C S W-C W-S C-S W-C-S \u9a57\u65bc\u9019\u7d44\u6e2c\u8a66\u4e0a\uff0c\u662f\u70ba\u4e86\u8b49\u660e\u672c\u8a13\u7df4\u7b56\u7565\u5728\u65bc\u975e\u81ea\u52d5\u8a9e\u97f3\u8fa8\u8b58\u932f\u8aa4\u7684\u6587\u672c\u4e0b\uff0c\u4e5f\u53ef\u4ee5\u8b93 \u6a21\u578b\u5f97\u5230\u4e00\u5b9a\u7a0b\u5ea6\u4e4b\u6539\u5584\u3002 \u8868 8. QANet \u65bc DRCD \u554f\u7b54\u4efb\u52d9\u6e2c\u8a66\u96c6\u4e4b F1 \u5be6\u9a57\u7d50\u679c [Table 8. F1 Experimental Results of the QANet Model in DRCD Test Task.] DRCD 4. \u5be6\u9a57\u8a2d\u5b9a\u8207\u7d50\u679c \u672c\u8ad6\u6587\u9032\u884c\u53e3\u8a9e\u554f\u7b54\u6a21\u578b\u5be6\u9a57\u6642\uff0c\u5c07\u9996\u5148\u900f\u904e\u8a5e\u5411\u91cf\u6280\u8853\u4f86\u8868\u9054\u8fa8\u8b58\u51fa\u4f86\u7684\u6bcf\u500b\u8a5e\u3001\u5b57 \u7b26\u8207\u97f3\u7bc0\uff0c\u4e26\u4e14\u7686\u63a1\u7528\u6279\u8e22\u8e22\u5be6\u696d\u574a(PTT)\u4ee5\u53ca\u4e2d\u592e\u65b0\u805e\u793e(CNA 1 )\u6587\u672c\u4f5c\u70ba\u8a13\u7df4\u8a9e\u6599\uff0c\u7dad \u5ea6\u5927\u5c0f\u7686\u70ba 300 \u7dad\uff0c\u4e26\u5c07\u8a5e\u983b\u5c0f\u65bc 5 \u7684\u8a5e\u5f59\u6368\u53bb\u3002\u5728\u8a13\u7df4\u8a5e\u8207\u5b57\u7b26\u5411\u91cf\u6642\uff0c\u662f\u5229\u7528\u5feb\u6587 (Fasttext)\u5411\u91cf\u6a21\u578b\u4f86\u9032\u884c\u8a13\u7df4\uff0c\u7531\u65bc\u5feb\u6587\u5411\u91cf\u672c\u8eab\u6f14\u7b97\u6cd5\u7684\u512a\u52e2\uff0c\u53ef\u4ee5\u5728\u8a13\u7df4\u8a5e\u5411\u91cf\u6642\uff0c 7,050 1,000 1,500 1,000 \u8868 2. 
\u53f0\u9054\u96fb\u95b1\u8b80\u7406\u89e3\u8cc7\u6599\u96c6 [Table 2. Delta Reading Comprehension Dataset.] \u6bcf\u500b\u8a5e\u3001\u5b57\u7b26\u4ee5\u53ca\u97f3\u7bc0\u5411\u91cf\u6642\uff0c\u5176\u7dad\u5ea6\u7686\u76f8\u540c\uff0c\u6545\u6b64\u554f\u7b54\u6a21\u578b\u53ef\u4ee5\u4e1f\u5165\u4e0d\u540c\u7684\u8a5e\u5411\u91cf\u4f86 \u9032\u884c\u9810\u6e2c\u3002\u6211\u5011\u9019\u908a\u662f\u4ee5\u6210\u5c0d\u7684\u65b9\u5f0f\u4f86\u9032\u884c\u6578\u64da\u5448\u73fe\uff1b\u4f8b\u5982 C \u5c31\u662f\u8a13\u7df4\u8207\u9810\u6e2c\u6a21\u578b\u6642\u7686 \u53ea\u4f7f\u7528\u5b57\u7b26\u5411\u91cf\u4f86\u9032\u884c\u8a55\u4f30\u3002\u6709\u9451\u65bc\u5728\u6bcf\u500b\u4e0d\u540c\u7684\u53c3\u6578\u8a2d\u5b9a\u4e26\u6c92\u6709\u5f88\u660e\u986f\u7684\u6a21\u578b\u6548\u80fd\u5dee \u7570\uff0c\u6545\u63a1\u7528\u6bcf\u500b\u7cfb\u7d71\u5e73\u5747\u7cbe\u78ba\u5ea6\u7684\u65b9\u5f0f\u4f86\u5448\u73fe\u5be6\u9a57\u7d50\u679c\u3002\u53ef\u4ee5\u767c\u73fe\u5728\u65bc\u5169\u7a2e\u6a21\u578b\u5728\u9a57\u8b49 \u9032\u884c\u8a13\u7df4\uff0c\u4e26\u900f\u904e\u9019\u7a2e\u6a19\u8a18\u65b9\u5f0f\u4f86\u5448\u73fe\uff0c\u9019\u4e9b\u90fd\u5c6c\u65bc\u76f8\u5c0d\u6709\u6240\u4f7f\u7528\u7684\u5411\u91cf\u3002\u6211\u5011\u767c\u73fe\u85c9 QACNN [W] 68.68 68.28 67.57 69.33 68.57 68.16 69.07 W C S W-C W-S C-S W-C-S \u7531\u9019\u7a2e\u8a13\u7df4\u7b56\u7565\uff0c\u76f8\u8f03\u65bc\u55ae\u7528\u55ae\u4e00\u5411\u91cf\u8a13\u7df4 W\u3001C \u8207 S \u7684\u72c0\u6cc1\uff0c\u7576\u5229\u7528\u591a\u500b\u5411\u91cf\u9032\u884c\u8a13 QACNN [C] 68.58 70.73 71.16 71.22 71.30 71.39 71.30 QANet [W] 78.04 24.31 12.76 79.02 75.27 30.11 76.37 \u7df4\u6642\uff0c\u662f\u6709\u52a9\u65bc\u6a21\u578b\u9032\u884c\u9810\u6e2c\u6642\u7684\u6548\u679c\u3002\u5728\u9a57\u8b49\u96c6\u3001\u6e2c\u8a66\u96c6 1 \u8207 2 \u4e4b\u4e2d\uff0c\u5206\u5225\u9054\u5230 1.28%~2.07%\u30010.52%~2.35%\u8207 0.38%~1.84%\u4e4b\u6548\u80fd\u6539\u9032\u3002\u4e0a\u8ff0\u7684\u5be6\u9a57\u7d50\u679c\u4e2d\uff0c\u53ef\u4ee5\u767c\u73fe QACNN [S] 72.19 73.36 74.02 74.05 74.33 74.09 74.54 QANet [C] 57.99 81.61 10.63 83.40 65.33 80.77 82.23 \u5404\u9805\u81ea\u7136\u8a9e\u8a00\u8655\u7406\u4efb\u52d9\u5275\u4e0b\u6700\u65b0\u7d00\u9304\u3002BERT \u900f\u904e\u591a\u5c64\u96d9\u5411\u8f49\u63db\u7de8\u78bc\u5668\u4f86\u8a13\u7df4\u5169\u500b\u7121\u76e3 \u7763\u7684\u9810\u6e2c\u4efb\u52d9\uff0c\u5206\u5225\u70ba\u906e\u63a9\u5f0f\u8a9e\u8a00\u6a21\u578b(Masked Language Model)\u4ee5\u53ca\u4e0b\u4e00\u53e5\u9810\u6e2c(Next \u4e5f\u53ef\u53d6\u5f97\u5b57\u7b26\u5411\u91cf\u8868\u9054\u3002\u97f3\u7bc0\u5411\u91cf\u8a13\u7df4\u5247\u662f\u63a1\u7528\u5168\u5c40\u5411\u91cf\u6a21\u578b(GloVe)\u4f86\u9032\u884c\u8a13\u7df4\u3002 \u8a13\u7df4\u96c6 Training Set \u6e2c\u8a66\u96c6 Test Set \u8207\u6e2c\u8a66\u96c6 1 \u4e0a\uff0c\u90fd\u5448\u73fe S > C > W \u7684\u72c0\u6cc1\u3002\u6211\u5011\u8a8d\u70ba\u9019\u662f\u7576\u6709\u81ea\u52d5\u8a9e\u97f3\u8fa8\u8b58\u932f\u8aa4\u7522\u751f\u7684 QACNN \u8207 Co-Matching \u5169\u500b\u6a21\u578b\uff0c\u5728\u65bc\u4e0d\u540c\u7684\u5411\u91cf\u8f38\u5165\u4e0a\uff0c\u4e26\u6c92\u6709\u5f88\u56b4\u91cd\u7684\u4e0d\u5339\u914d\u60c5 Co-Matching [W] 58.56 49.97 42.97 62.02 60.48 50.15 62.38 QANet [S] 11.56 22.81 76.52 25.58 69.20 74.78 74.00</td></tr><tr><td colspan=\"3\">\u4f86\u5efa\u69cb\uff0c\u7531\u65bc QANET \u4e26\u672a\u4f7f\u7528\u5faa\u74b0\u795e\u7d93 
To reduce the influence of ASR errors on spoken question answering, this study proposes a training strategy that exploits features of different granularities in the training data. More concretely, we take the text X output by the automatic speech recognition system and convert it into three representations: word units w_1, w_2, ..., w_N, character units c_1, c_2, ..., c_M, and syllable units s_1, s_2, ..., s_K, where N, M, and K denote the lengths of X when it is represented as words, characters, and syllables, respectively, and usually N <= M <= K, as illustrated in Figure 5.

[Figure 5. Illustration of data granularity.]

In multiple-choice spoken question answering, the text X consists of a passage P, a question Q, and options O, so we can further represent the passage in words, characters, and syllables as P^w, P^c, and P^s; likewise, the question and the options can be represented as Q^w, Q^c, Q^s and O^w, O^c, O^s. The training strategy proposed in this paper then rotates through the data of the different granularities to update and train the parameters of one and the same question answering model, as illustrated in Figure 6.

[Figure 6. Illustration of the word-character-syllable (W-C-S) training strategy for the multiple-choice question answering model.]
More concretely, taking Figure 6 as an example, suppose we want to train a multiple-choice spoken question answering system. We first convert all training data (passages, questions, and options) into the word-unit, character-unit, and syllable-unit representations. The question answering model is then trained first on the word-unit data (i.e., P^w, Q^w, O^w), next on the character-unit data (i.e., P^c, Q^c, O^c), and finally, following the same procedure, on the syllable-unit representation (i.e., P^s, Q^s, O^s). In other words, we propose to train the parameters of the same question answering model cyclically in the order word, then character, then syllable (i.e., W-C-S) until the model converges. It must be noted that when training on data of a given granularity, the model is updated over the entire dataset before switching to the data of the next granularity; we do not present each training example in its three representations one after another before moving on to the next example. Consequently, when this training strategy is used, the dimensionalities of the word, character, and syllable vector representations should be set equal, so that the model can share its parameters during learning.

As a side note, the proposed training strategy improves the model by presenting the training data at different granularities; it is not concerned with feeding several feature types into the network simultaneously. At prediction time, the model can therefore be fed vectors of any single granularity, and we present and discuss the corresponding results in the experiments.

In summary, the proposed training strategy expects the model to learn from representations of different granularities, for example the word-level representation with clear semantic information, or the character and syllable granularities that offer more matched units but weak (or even no) semantic information, so as to mimic a spoken question answering system affected by speech recognition errors. In this way it reduces the performance loss caused by ASR errors and makes the spoken question answering model more robust.
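The sketch below spells out this schedule in code, assuming PyTorch; `QAModel`, the per-granularity data loaders, and every size other than the shared 300-dimensional embeddings are hypothetical placeholders. The point is the rotation: one full pass over the dataset per granularity, cycling W, C, S until convergence.

```python
# Minimal sketch of the cyclic W-C-S training schedule (assumes PyTorch).
import torch
from torch import nn

EMB_DIM = 300  # word, character and syllable vectors share this size

class QAModel(nn.Module):
    """Placeholder model; any encoder over EMB_DIM-dim inputs fits here."""
    def __init__(self):
        super().__init__()
        self.encoder = nn.GRU(EMB_DIM, 128, batch_first=True)
        self.scorer = nn.Linear(128, 1)

    def forward(self, x):            # x: (batch, seq_len, EMB_DIM)
        _, h = self.encoder(x)
        return self.scorer(h[-1]).squeeze(-1)

def train_wcs(model, loaders, cycles=10, lr=1e-3):
    """loaders: dict mapping 'word'/'char'/'syllable' to a DataLoader
    yielding already-embedded inputs and binary option labels."""
    optim = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.BCEWithLogitsLoss()
    for _ in range(cycles):                               # until convergence
        for granularity in ("word", "char", "syllable"):  # the W-C-S order
            for x, y in loaders[granularity]:   # full pass, then switch
                optim.zero_grad()
                loss_fn(model(x), y).backward()
                optim.step()
    return model
```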
4. Experimental Setup and Results

For the spoken question answering experiments in this paper, every recognized word, character, and syllable is first expressed through embedding techniques. All embeddings are trained on text from the PTT forum and the Central News Agency (CNA), with a dimensionality of 300, and vocabulary items with frequency lower than 5 are discarded. The word and character vectors are trained with the fastText model; thanks to the fastText algorithm itself, character vectors can be obtained while training the word vectors. The syllable vectors are trained with the GloVe model. Since the word, character, and syllable vectors all share the same dimensionality, the question answering model can be fed different embeddings for prediction.
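A minimal sketch of this embedding step, assuming gensim's FastText implementation as a stand-in for whichever fastText build the authors used; the two-sentence corpus is a toy placeholder for the tokenized PTT/CNA text, and the syllable vectors would be trained separately with a GloVe tool.

```python
# Train 300-dimensional fastText vectors (sketch; gensim assumed).
from gensim.models import FastText

corpus = [["今天", "天氣", "很", "好"],
          ["機器", "閱讀", "理解", "是", "重要", "任務"]]  # toy stand-in

model = FastText(
    sentences=corpus,
    vector_size=300,  # dimensionality used in the paper
    window=5,
    min_count=1,      # the paper drops words with frequency < 5;
                      # 1 keeps this toy corpus from being emptied
    epochs=10,
)
vec = model.wv["天氣"]  # 300-d vector; subword n-grams also cover characters
```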
4.1 Experimental Setup

When experimenting with the training strategies, the baseline systems in this paper are denoted by the training codes W (word), C (character), and S (syllable), which indicate that the model is trained with word, character, or syllable vectors as input, respectively. Beyond the baseline settings, four training strategies are used to validate the proposed method, denoted by the training codes W-C, W-S, C-S, and W-C-S; each code means that training follows the specified order of granularities. For example, C-S denotes a strategy in which the model is first trained on character vectors and then given syllable vectors to learn from.

4.1.1 Spoken Multiple-choice Question Answering Dataset

We conduct our experiments with data from a competition dedicated to Chinese spoken multiple-choice question answering, in which every item contains audio recordings of a passage, a question, and several options, and the answer is which option is correct. The domains covered by the competition data are very broad, including science, news, classical Chinese, and more; we select the data of 8 competition rounds for our experiments. The statistics of the training set, the development set, and the two test sets are shown in Table 1. The development set contains audio corrupted by noise. There are two kinds of test sets: the first contains audio with classical Chinese content, while the second consists of the final-round items, which are mainly at the level of an advanced Chinese proficiency test. Regarding each audio file, the passage, question, and option recordings average 22,538, 2,230, and 5,904 frames, respectively; in the manual transcriptions, each item averages 235.3, 16.7, and 4.1 characters, respectively, with corresponding length limits applied to the passage, the question, and each option. Accuracy is adopted as the evaluation metric for this task.

Table 1. Statistics of the Chinese spoken multiple-choice question answering dataset.

| Training Set | Development Set | Test Set 1 | Test Set 2 |
|---|---|---|---|
| 7,050 | 1,000 | 1,500 | 1,000 |

4.1.2 Automatic Speech Recognition System

Our automatic speech recognition system is based on the Kaldi toolkit (Povey et al., 2011). The acoustic model is a neural-network-based TDNN-F system (Povey et al., 2018), trained with the Lattice-free MMI (Povey et al., 2016) and sMBR (Vesely, Ghoshal, Burget & Povey, 2013) techniques; the acoustic training corpora are NER-Trs-Vol1~3 and MATBN (Wang, Chen, Kuo & Cheng, 2005). The language model is a trigram language model with Kneser-Ney smoothing to handle data sparseness. The resulting system achieves a character error rate (CER) of 7.79%.
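Since CER is the headline number here, a generic sketch of how it is computed (Levenshtein distance over characters divided by reference length) follows; this illustrates the metric only and is independent of the Kaldi recipe itself.

```python
# Character error rate: edit distance over characters / reference length.
def cer(reference: str, hypothesis: str) -> float:
    ref, hyp = list(reference), list(hypothesis)
    # dp[i][j] = edit distance between ref[:i] and hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,         # deletion
                           dp[i][j - 1] + 1,         # insertion
                           dp[i - 1][j - 1] + cost)  # substitution
    return dp[-1][-1] / max(len(ref), 1)

print(cer("剛才去工廠", "鋼材去工廠"))  # 0.4: two of five characters differ
```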
4.1.3 Multiple-choice Question Answering Models

For the multiple-choice task we adopt QACNN and Co-Matching as question answering models. Co-Matching first encodes the input text and then extracts matching features for the subsequent prediction pipeline. In QACNN, the hidden-layer sizes are set to 32, 64, and 128, and the convolutional kernel sizes are varied over [1,2,3], [1,3,5], [1,3,7], and [1,4,7] in the experiments. For Co-Matching, the hidden-layer sizes are set to 64 and 128. Both models use a dropout ratio of 0.2 and are optimized with Adam (Kingma & Ba, 2014) at a learning rate of 0.001; their batch sizes are 32 and 4, and their total numbers of training epochs are 13 and 80, respectively.
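QACNN's core idea is convolving over a passage-option similarity map, which the kernel sizes above parameterize. The sketch below is a loose, simplified illustration of that idea under assumed shapes, not the authors' implementation; the exact architecture follows the original QACNN paper.

```python
# Loose sketch of a similarity-map + multi-kernel CNN scorer (QACNN-like).
import torch
from torch import nn
import torch.nn.functional as F

class SimilarityCNN(nn.Module):
    def __init__(self, hidden=64, kernels=(1, 3, 5)):
        super().__init__()
        self.convs = nn.ModuleList(
            nn.Conv1d(1, hidden, k, padding=k // 2) for k in kernels)
        self.out = nn.Linear(hidden * len(kernels), 1)

    def forward(self, passage, option):
        # passage: (B, Lp, D); option: (B, Lo, D) -- pre-embedded tokens.
        sim = F.normalize(passage, dim=-1) @ \
              F.normalize(option, dim=-1).transpose(1, 2)   # (B, Lp, Lo)
        feats = sim.max(dim=2).values.unsqueeze(1)          # (B, 1, Lp)
        pooled = [conv(feats).max(dim=2).values for conv in self.convs]
        return self.out(torch.cat(pooled, dim=1)).squeeze(-1)  # option score

scores = SimilarityCNN()(torch.randn(2, 30, 300), torch.randn(2, 5, 300))
```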
4.1.4 Answer-span Prediction Question Answering Model

Besides the multiple-choice question answering systems, this paper also experiments with an answer-span style question answering model, to further verify that the proposed method is effective across different forms of question answering. We use the Delta Reading Comprehension Dataset (DRCD) (Shao, Liu, Lai, Tseng & Tsai, 2018) for the Chinese answer-span experiments; the statistics of its training and test sets are shown in Table 2. Every item consists of a passage and a question, and the answer is a short span of the passage; we adopt the F1 and EM scores as evaluation metrics. The model is QANet, which is built from convolutions and self-attention. Because QANet uses no recurrent neural networks, it can be trained in a parallelized fashion, making both training and inference faster; more precisely, QANet uses convolutional neural networks to capture the local structure of the text, while the self-attention mechanism learns the global interactions. In QANet, the hidden-layer size is set to 96, the batch size and total number of training epochs are 10 and 20, and everything else follows the training procedure of the original paper. The length limits for the passage and the question are 800 and 50 characters, respectively.

Table 2. Delta Reading Comprehension Dataset.

| | Passages | Questions |
|---|---|---|
| Training Set | 5,014 | 26,936 |
| Test Set | 1,000 | 3,524 |
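For reference, a generic sketch of the two metrics follows (SQuAD-style exact match and token-overlap F1, computed here over characters as is common for Chinese); this is an illustration, not the official DRCD evaluation script.

```python
# EM and character-overlap F1 for answer-span evaluation (sketch).
from collections import Counter

def exact_match(prediction: str, gold: str) -> float:
    return float(prediction == gold)

def f1_score(prediction: str, gold: str) -> float:
    pred, ref = list(prediction), list(gold)
    overlap = sum((Counter(pred) & Counter(ref)).values())
    if overlap == 0:
        return 0.0
    precision, recall = overlap / len(pred), overlap / len(ref)
    return 2 * precision * recall / (precision + recall)

print(exact_match("台北市", "台北"), round(f1_score("台北市", "台北"), 2))
# -> 0.0 0.8
```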
4.2 Experimental Results

4.2.1 Experimental Results of Spoken Multiple-choice Question Answering

We first present the results of QACNN and Co-Matching on the Chinese spoken multiple-choice question answering task; this group of experiments also serves as the baseline systems of this study, and the results are shown in Tables 3 and 4. The data are presented in a paired fashion: for example, C means that character vectors alone are used both when training the model and when evaluating its predictions. Since no obvious performance differences were observed across the different hyperparameter settings, we report the average accuracy of each system.

Table 3. Experimental results of the baseline systems on the Chinese spoken multiple-choice question answering task with ASR transcriptions.

| | Val. W | Val. C | Val. S | Test 1 W | Test 1 C | Test 1 S | Test 2 W | Test 2 C | Test 2 S |
|---|---|---|---|---|---|---|---|---|---|
| QACNN | 56.67 | 63.59 | 64.78 | 68.68 | 70.73 | 74.02 | 39.07 | 39.45 | 39.53 |
| Co-Matching | 54.00 | 64.02 | 65.05 | 58.56 | 72.32 | 74.86 | 38.14 | 39.62 | 39.20 |

Table 4. Experimental results of the baseline systems on the Chinese spoken multiple-choice question answering task with manual transcriptions.

| | Val. W | Val. C | Val. S | Test 1 W | Test 1 C | Test 1 S | Test 2 W | Test 2 C | Test 2 S |
|---|---|---|---|---|---|---|---|---|---|
| QACNN | 64.76 | 70.42 | 70.54 | 80.14 | 82.58 | 82.26 | 41.67 | 42.25 | 42.53 |
| Co-Matching | 59.88 | 73.26 | 72.90 | 62.56 | 71.83 | 73.22 | 39.17 | 42.01 | 40.64 |

It can be observed that on the validation set and Test 1, both models show the pattern S > C > W. We believe this is because, when ASR errors occur, using word vectors alone leads to unclear semantics; when the model is trained with character vectors, the semantics are also unclear, but compared with word vectors the model has a chance to pick out the important characters instead of relying on words alone for its judgment. When trained on syllable vectors alone, the results are better than with either word or character vectors; a possible reason is that QACNN does not need to focus closely on the word-level representations themselves but rather learns from the degree of similarity between tokens, so expressing similarity through the one-to-many relation offered by syllables may be helpful. On the test set of advanced Chinese proficiency difficulty, answering requires clear semantics, so word vectors predict comparatively well relative to character and syllable vectors.
Next, we present the effect of the proposed training strategies on the various models, as shown in Tables 5 to 7. Unlike the notation of Tables 3 and 4, the column labels W through W-C-S denote the different training strategies, i.e., the order in which the vector granularities are used during training; a row label such as QACNN [W/C/S] gives the prediction accuracy of QACNN when, under the given training strategy, the [word/character/syllable] vectors are used as input. The cells where the prediction-time vector appears in the training strategy's code (e.g., QACNN [C] under W-C) correspond to vector representations the strategy actually used during training.

Table 5. Experimental results (ASR transcriptions) of each model on the validation set of the Chinese spoken multiple-choice question answering task.

| | W | C | S | W-C | W-S | C-S | W-C-S |
|---|---|---|---|---|---|---|---|
| QACNN [W] | 56.67 | 55.97 | 56.38 | 56.69 | 56.97 | 56.47 | 56.72 |
| QACNN [C] | 62.56 | 63.59 | 63.49 | 63.45 | 63.78 | 63.61 | 63.94 |
| QACNN [S] | 64.19 | 64.98 | 64.78 | 65.99 | 65.91 | 66.07 | 66.26 |
| Co-Matching [W] | 54.00 | 44.95 | 39.66 | 55.31 | 55.16 | 44.00 | 55.48 |
| Co-Matching [C] | 59.84 | 64.02 | 57.66 | 64.19 | 61.80 | 64.72 | 65.11 |
| Co-Matching [S] | 59.51 | 61.43 | 65.05 | 61.37 | 66.23 | 66.60 | 66.44 |

Table 6. Experimental results (ASR transcriptions) of each model on Test 1 of the Chinese spoken multiple-choice question answering task.

| | W | C | S | W-C | W-S | C-S | W-C-S |
|---|---|---|---|---|---|---|---|
| QACNN [W] | 68.68 | 68.28 | 67.57 | 69.33 | 68.57 | 68.16 | 69.07 |
| QACNN [C] | 68.58 | 70.73 | 71.16 | 71.22 | 71.30 | 71.39 | 71.30 |
| QACNN [S] | 72.19 | 73.36 | 74.02 | 74.05 | 74.33 | 74.09 | 74.54 |
| Co-Matching [W] | 58.56 | 49.97 | 42.97 | 62.02 | 60.48 | 50.15 | 62.38 |
| Co-Matching [C] | 63.35 | 72.32 | 61.24 | 73.38 | 65.31 | 73.36 | 73.21 |
| Co-Matching [S] | 66.43 | 68.65 | 74.86 | 66.92 | 74.88 | 75.26 | 74.98 |

Table 7. Experimental results (ASR transcriptions) of each model on Test 2 of the Chinese spoken multiple-choice question answering task.

| | W | C | S | W-C | W-S | C-S | W-C-S |
|---|---|---|---|---|---|---|---|
| QACNN [W] | 39.07 | 40.19 | 39.35 | 40.24 | 40.31 | 39.35 | 39.65 |
| QACNN [C] | 38.97 | 39.45 | 39.10 | 39.59 | 40.16 | 39.43 | 40.01 |
| QACNN [S] | 38.09 | 39.18 | 39.53 | 40.08 | 39.08 | 39.48 | 39.58 |
| Co-Matching [W] | 38.14 | 36.46 | 32.52 | 38.79 | 38.18 | 36.59 | 39.09 |
| Co-Matching [C] | 38.43 | 39.62 | 35.50 | 40.64 | 38.07 | 39.89 | 40.97 |
| Co-Matching [S] | 36.37 | 36.68 | 39.20 | 36.94 | 38.71 | 39.73 | 39.76 |

We find that, compared with training on a single vector type (W, C, or S), this strategy of training with several vectors does help the model at prediction time, yielding improvements of 1.28%~2.07%, 0.52%~2.35%, and 0.38%~1.84% on the validation set, Test 1, and Test 2, respectively. From the above results it can also be seen that QACNN and Co-Matching do not suffer from severe mismatch across the different vector inputs: even a question answering model trained only on word vectors retains some performance when character or syllable vectors are fed in for prediction. Observed from another angle, in most of the data and under whatever training strategy, using syllable vectors as input usually yields the best results; we believe this is because, on text with ASR errors, syllables are more favorable for prediction for QACNN and Co-Matching, whereas on the correct text, character or syllable vectors obtain most of the best results. For Co-Matching, the strategies bring no obvious benefit on the validation set and Test 2, but Test 1 suffers from noise interference, so giving the model syllable features to learn from produces better results there. Finally, on the correctly transcribed text the results improve quite significantly compared with the ASR transcriptions, and the gaps between the word vectors of different granularities actually shrink.
4.2.2 Experimental Results of Answer-span Question Answering

We present the performance of QANet on the DRCD dataset, covering the various training strategies and the different vector inputs, with both F1 and EM evaluation results, as shown in Tables 8 and 9. As before, the diagonal entries of the W, C, and S columns (where the training and input granularities coincide) are the baseline systems of the answer-span question answering model, and all other table settings are the same as described for Tables 5 to 7. It is worth noting that the DRCD experiments are carried out entirely on correct text, which in principle contains no ASR errors; this paper experiments on this benchmark precisely to show that the proposed training strategy can also bring a certain degree of improvement on text without ASR errors.

Table 8. F1 experimental results of the QANet model on the DRCD test set.

| | W | C | S | W-C | W-S | C-S | W-C-S |
|---|---|---|---|---|---|---|---|
| QANet [W] | 78.04 | 24.31 | 12.76 | 79.02 | 75.27 | 30.11 | 76.37 |
| QANet [C] | 57.99 | 81.61 | 10.63 | 83.40 | 65.33 | 80.77 | 82.23 |
| QANet [S] | 11.56 | 22.81 | 76.52 | 25.58 | 69.20 | 74.78 | 74.00 |

Table 9. EM experimental results of the QANet model on the DRCD test set.

| | W | C | S | W-C | W-S | C-S | W-C-S |
|---|---|---|---|---|---|---|---|
| QANet [W] | 64.21 | 10.12 | 2.31 | 64.89 | 60.60 | 13.16 | 60.13 |
| QANet [C] | 35.79 | 69.83 | 1.89 | 74.40 | 43.36 | 68.97 | 70.30 |
| QANet [S] | 1.06 | 3.82 | 59.18 | 5.80 | 48.80 | 57.79 | 55.43 |

It can be seen that, unlike the multiple-choice results, QANet suffers a severe mismatch problem on vector representations it has never learned. Under W-C and W-S, however, two different situations emerge: the W-C strategy follows the expected mismatch pattern, while the W-S strategy, although its overall performance is lower than that of all the other strategies, still retains a certain level of performance when character vectors are used as input. Therefore, among the training strategies proposed in this paper, the most suitable combination for QANet is to train with the W-C strategy and use character vectors as input, which yields the best results.

5. Conclusions

This paper proposes a simple training strategy that uses the concept of data granularity to effectively alleviate the performance problems caused by automatic speech recognition errors, and shows experimentally that the model can reach better results merely by being fed the data in different vector representations, without support from any additional model. Compared with the baseline systems, the proposed method obtains improvements of 2% to 4% with QACNN and Co-Matching in the spoken multiple-choice experiments; in the QANet experiments on the Chinese answer-span dataset, F1 and EM improve by 1.79% and 4.57%, respectively. In future work, we will extend the strategy by further refining the word vectors into bigram, trigram, and similar units, validate it on English datasets, and combine it with language-model-based neural systems such as BERT and XLNet.", |
| "html": null, |
| "text": "Ran et al., 2019)\u3002XLNet \u5247\u662f\u57fa\u65bc BERT \u7684\u7f3a\u9ede\u9032\u884c\u6539\u9032\uff0c\u63d0\u51fa\u4e00\u7a2e\u6cdb\u5316\u81ea \u56de\u6b78\u7684\u8a13\u7df4\u65b9\u6cd5(Generalized Autoregressive Pretraining Method)\uff0c\u91dd\u5c0d BERT \u5c0d\u65bc\u906e\u63a9\u4f4d \u7f6e\u8207\u5176\u4ed6\u7684\u4f9d\u8cf4\u95dc\u4fc2\u9032\u884c\u7a81\u7834\uff0c\u85c9\u6b64\u9054\u5230\u5237\u65b0 BERT \u7d00\u9304\u7684\u6a21\u578b\u3002 Co-Matching \u4e0a\uff0c\u5f97\u5230 2%\u81f3 4%\u7684\u9032\u6b65\uff1b\u5728 QANet \u65bc\u4e2d\u6587\u6578 \u64da\u96c6\u6587\u672c\u6bb5\u7684\u5be6\u9a57\u7d50\u679c\uff0cF1 \u8207 EM \u5206\u5225\u5f97\u5230 1.79%\u8207 4.57%\u7684\u9032\u6b65\u3002\u672a\u4f86\u6703\u900f\u904e\u5c07\u8a5e\u5411 \u91cf\u518d\u7d30\u5206\u70ba\u4e8c\u5143\u3001\u4e09\u5143\u7b49\u65b9\u5f0f\u4f86\u9032\u884c\u5ef6\u4f38\u64f4\u5145\u6216\u662f\u5c07\u6b64\u8a13\u7df4\u7b56\u7565\u61c9\u7528\u65bc\u82f1\u6587\u6578\u64da\u96c6\u4e0a\u4f86 \u9a57\u8b49\u6210\u6548\uff0c\u4e26\u4e14\u5c07\u6703\u8207 BERT \u8207 XLNET \u7b49\u57fa\u65bc\u8a9e\u8a00\u6a21\u578b\u7684\u795e\u7d93\u7db2\u8def\u7cfb\u7d71\u9032\u884c\u76f8\u7d50\u5408\u3002", |
| "num": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |