{
"paper_id": "O18-1007",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T08:09:54.349411Z"
},
"title": "Investigating acoustic model combination and semi-supervised discriminative training for meeting speech recognition",
"authors": [
{
"first": "Tien-Hong",
"middle": [],
"last": "\u7f85\u5929\u5b8f",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Taiwan Normal University",
"location": {}
},
"email": ""
},
{
"first": "\u9673\u67cf\u7433",
"middle": [],
"last": "Lo",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Taiwan Normal University",
"location": {}
},
"email": ""
},
{
"first": "Berlin",
"middle": [],
"last": "Chen",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Taiwan Normal University",
"location": {}
},
"email": "berlin@ntnu.edu.tw"
},
{
"first": "",
"middle": [],
"last": "\u570b\u7acb\u81fa\u7063\u5e2b\u7bc4\u5927\u5b78\u8cc7\u8a0a\u5de5\u7a0b\u5b78\u7cfb",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Taiwan Normal University",
"location": {}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "",
"pdf_parse": {
"paper_id": "O18-1007",
"_pdf_hash": "",
"abstract": [],
"body_text": [],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Purely sequence-trained neural networks for ASR Based on Lattice-Free MMI",
"authors": [
{
"first": "D",
"middle": [],
"last": "Povey",
"suffix": ""
}
],
"year": 2016,
"venue": "Proc. INTERSPEECH",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "D. Povey et al., \"Purely sequence-trained neural networks for ASR Based on Lattice-Free MMI,\" in Proc. INTERSPEECH, 2016.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Semi-supervised training of deep neural networks",
"authors": [
{
"first": "K",
"middle": [],
"last": "Vesely",
"suffix": ""
}
],
"year": 2013,
"venue": "ASRU",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "K. Vesely et al., \"Semi-supervised training of deep neural networks,\" in ASRU, 2013.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Semi-supervised bootstrapping approach for neural network feature extractor training",
"authors": [
{
"first": "F",
"middle": [],
"last": "Grezl",
"suffix": ""
}
],
"year": 2013,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "F. Grezl et al., \"Semi-supervised bootstrapping approach for neural network feature extractor training,\" in ASRU, 2013.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Semi-supervised dnn training in meeting recognition",
"authors": [
{
"first": "P",
"middle": [],
"last": "Zhang",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of. Sheffield",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "P. Zhang et al., \"Semi-supervised dnn training in meeting recognition,\" in Proceedings of. Sheffield, 2014.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Lightly supervised and unsupervised acoustic model training",
"authors": [
{
"first": "L",
"middle": [],
"last": "Lamel",
"suffix": ""
}
],
"year": 2002,
"venue": "Computer Speech & Language",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "L. Lamel et al., \"Lightly supervised and unsupervised acoustic model training,\" Computer Speech & Language , 2002.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Improving broadcast news transcription by lightly supervised discriminative training",
"authors": [
{
"first": "H",
"middle": [
"Y"
],
"last": "Chan",
"suffix": ""
}
],
"year": 2004,
"venue": "ICASSP",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "H. Y. Chan et al., \"Improving broadcast news transcription by lightly supervised discriminative training,\" in ICASSP, 2004.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Investigating data selection for minimum phone error training of acoustic models",
"authors": [
{
"first": "S.-H",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2007,
"venue": "Multimedia and Expo",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "S.-H. Liu et al., \"Investigating data selection for minimum phone error training of acoustic models,\"in Multimedia and Expo, 2007.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Semisupervised training of Deep Neural Networks",
"authors": [
{
"first": "K",
"middle": [],
"last": "Vesely",
"suffix": ""
}
],
"year": 2013,
"venue": "ASRU",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "K. Vesely et al., \"Semisupervised training of Deep Neural Networks,\" in ASRU, 2013.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Deep neural network features and semisupervised training for low resource speech recognition",
"authors": [
{
"first": "S",
"middle": [],
"last": "Thomas",
"suffix": ""
}
],
"year": 2013,
"venue": "Proc. ICASSP",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "S. Thomas et al., \"Deep neural network features and semisupervised training for low resource speech recognition,\" in Proc. ICASSP, 2013.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Semisupervised DNN training in meeting recognition",
"authors": [
{
"first": "P",
"middle": [],
"last": "Zhang",
"suffix": ""
}
],
"year": 2014,
"venue": "SLT",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "P. Zhang et al., \"Semisupervised DNN training in meeting recognition,\" in SLT, 2014.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Ensemble deep learning for speech recognition",
"authors": [
{
"first": "L",
"middle": [],
"last": "Deng",
"suffix": ""
}
],
"year": 2014,
"venue": "INTERSPEECH",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "L. Deng et al., \"Ensemble deep learning for speech recognition,\" in INTERSPEECH, 2014.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Minimum bayes risk decoding and system combination based on a recursion for edit distance",
"authors": [
{
"first": "H",
"middle": [],
"last": "Xu",
"suffix": ""
}
],
"year": 2011,
"venue": "Computer Speech and Language",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "H. Xu et al., \"Minimum bayes risk decoding and system combination based on a recursion for edit distance,\" Computer Speech and Language, 2011",
"links": null
}
},
"ref_entries": {
"TABREF0": {
"num": null,
"content": "<table><tr><td>\u672c\u8ad6\u6587\u7684\u5be6\u4f5c\u76ee\u7684\u4fbf\u662f\u5728\u8a9e\u6599\u7f3a\u4e4f\u7684\u534a\u76e3\u7763\u5f0f\u74b0\u5883\u4e0b\uff0c\u5229\u7528\u8ca0\u689d\u4ef6\u71b5\u8207\u8a5e\u5716\u8f14\u52a9</td></tr><tr><td>LF-MMI \u7684\u8a13\u7df4\uff0c\u4e26\u5229\u7528\u6a21\u578b\u5408\u4f75\u6280\u8853\uff0c\u9032\u4e00\u6b65\u63d0\u5347\u6a21\u578b\u7684\u8fa8\u8b58\u7d50\u679c\u3002\u6211\u5011\u5e0c\u671b\u5373\u4f7f\u5728</td></tr><tr><td>\u8a9e\u6599\u4e0d\u8db3\u7684\u60c5\u6cc1\u4e0b\uff0c\u4ecd\u80fd\u9054\u5230\u4e0d\u932f\u7684\u8fa8\u8b58\u6548\u679c\uff0c\u751a\u81f3\u5ab2\u7f8e\u539f\u5148\u6709\u6a19\u8a18\u8a9e\u6599\u7684\u8a13\u7df4\u7d50\u679c\u3002</td></tr><tr><td>\u5be6\u9a57\u7d50\u679c\u986f\u793a\uff0c\u52a0\u5165 NCE \u8207\u8a5e\u5716\u7686\u80fd\u964d\u4f4e\u8a5e\u932f\u8aa4\u7387(Word error rate, WER)\uff0c\u800c\u6a21\u578b\u5408\u4f75</td></tr><tr><td>(Model combination)\u5247\u80fd\u5728\u5404\u500b\u968e\u6bb5\u986f\u8457\u63d0\u5347\u6548\u80fd\uff0c\u4e14\u5169\u8005\u7d50\u5408\u53ef\u4f7f\u8a5e\u4fee\u5fa9\u7387(Word</td></tr><tr><td>recovery rate, WRR)\u9054\u5230 60.8%\u3002</td></tr><tr><td>\u95dc\u9375\u8a5e\uff1a\u81ea\u52d5\u8a9e\u97f3\u8fa8\u8b58\u3001\u9451\u5225\u5f0f\u8a13\u7df4\u3001\u534a\u76e3\u7763\u5f0f\u8a13\u7df4\u3001\u6a21\u578b\u5408\u4f75</td></tr><tr><td>\u53c3\u8003\u6587\u737b</td></tr><tr><td>\u5728\u76e3\u7763\u5f0f\u74b0\u5883\u4e0b\u65ac\u7372\u6700\u597d\u7684\u6210\u679c\uff0c\u7136\u800c\u5728\u534a\u76e3\u7763\u5f0f\u74b0\u5883\u7684\u8868\u73fe\u4ecd\u6709\u5f85\u7814\u7a76\u3002\u5728\u534a\u76e3\u7763\u5f0f</td></tr><tr><td>\u74b0\u5883\u6700\u5e38\u898b\u7684\u8a13\u7df4\u65b9\u6cd5\u662f\u81ea\u6211\u5b78\u7fd2(Self-training)[2][3][4]\u4e2d\uff0c\u7531\u65bc\u7a2e\u5b50\u6a21\u578b(Seed model)</td></tr><tr><td>\u5e38\u56e0\u8a9e\u6599\u6709\u9650\u800c\u6548\u679c\u4e0d\u4f73\u3002\u4e14 LF-MMI 
\u5c6c\u65bc\u9451\u5225\u5f0f\u8a13\u7df4\u4e4b\u6545\uff0c\u66f4\u6613\u53d7\u5230\u6a19\u8a18\u932f\u8aa4\u7684\u5f71</td></tr><tr><td>\u97ff\u3002\u70ba\u4e86\u6e1b\u7de9\u4e0a\u8ff0\u7684\u554f\u984c\uff0c\u904e\u5f80\u5e38\u52a0\u5165\u7f6e\u4fe1\u5ea6\u904e\u6ffe\u5668(Confidence-based filter)[4][5][6]\u5c0d</td></tr><tr><td>\u8a13\u7df4\u8a9e\u6599\u505a\u6311\u9078\u3002\u904e\u6ffe\u8a9e\u6599\u53ef\u5728\u4e0d\u540c\u5c64\u7d1a\u4e0a\u9032\u884c\uff0c\u5206\u70ba\u97f3\u6846\u5c64\u7d1a[7]\u3001\u8a5e\u5c64\u7d1a[8]\u3001\u53e5\u5b50</td></tr><tr><td>\u5c64\u7d1a[3][8][9]\u3002</td></tr><tr><td>\u672c\u8ad6\u6587\u5229\u7528\u5169\u7a2e\u601d\u8def\u65bc\u534a\u76e3\u7763\u5f0f\u8a13\u7df4\u3002\u5176\u4e00\uff0c\u5f15\u5165\u8ca0\u689d\u4ef6\u71b5(Negative conditional entropy,</td></tr><tr><td>NCE)\u6b0a\u91cd\u8207\u8a5e\u5716(Lattice)\uff0c\u524d\u8005\u662f\u6700\u5c0f\u5316\u8a5e\u5716\u8def\u5f91\u7684\u689d\u4ef6\u71b5(Conditional entropy)\uff0c\u7b49\u540c</td></tr><tr><td>\u5c0d MMI \u7684\u53c3\u8003\u8f49\u9304(Reference transcript)\u505a\u6b0a\u91cd\u5e73\u5747\uff0c\u6b0a\u91cd\u7684\u6539\u8b8a\u80fd\u81ea\u7136\u5730\u52a0\u5165 MMI</td></tr><tr><td>\u8a13\u7df4\u4e2d\uff0c\u4e26\u540c\u6642\u5c0d\u4e0d\u78ba\u5b9a\u6027\u5efa\u6a21\u3002\u5176\u76ee\u7684\u5e0c\u671b\u7121\u7f6e\u4fe1\u5ea6\u904e\u6ffe\u5668(Confidence-based filter)</td></tr><tr><td>\u4e5f\u53ef\u8a13\u7df4\u6a21\u578b\u3002\u5f8c\u8005\u52a0\u5165\u8a5e\u5716\uff0c\u6bd4\u8d77\u904e\u5f80\u7684 one-best\uff0c\u53ef\u4fdd\u7559\u66f4\u591a\u5047\u8aaa\u7a7a\u9593\uff0c\u63d0\u5347\u627e\u5230</td></tr><tr><td>\u53c3\u8003\u8f49\u9304(Reference transcript)\u7684\u53ef\u80fd\u6027\uff1b\u5176\u4e8c\uff0c\u6211\u5011\u501f\u9452\u6574\u9ad4\u5b78\u7fd2(Ensemble learning)</td></tr></table>",
"text": "\u7684\u7a81\u7834[1] \uff0c \u6709 \u5225 \u65bc \u50b3 \u7d71 \u4ea4 \u4e92 \u71b5 \u8a13 \u7df4 (Cross-Entropy training, CE) \u548c\u9451\u5225\u5f0f\u8a13\u7df4 (Discriminative training)\u7684\u4e8c\u968e\u6bb5\u8a13\u7df4\uff0cLF-MMI \u63d0\u4f9b\u66f4\u5feb\u7684\u8a13\u7df4\u8207\u89e3\u78bc\u3002\u5118\u7ba1 LF-MMI The 2018 Conference on Computational Linguistics and Speech Processing ROCLING 2018, pp. 78-80 \u00a9The Association for Computational Linguistics and Chinese Language Processing \u7684 \u6982\u5ff5[10] \uff0c\u4f7f\u7528\u5f31 \u5b78 \u7fd2 \u5668 (Weak learner) \u4fee \u6b63 \u5f7c \u6b64 \u7684 \u932f \u8aa4 \uff0c \u5206 \u70ba \u97f3 \u6846 \u5c64 \u7d1a \u5408\u4f75 (Frame-level combination)[11]\u548c\u5047\u8aaa\u5c64\u7d1a\u5408\u4f75(Hypothesis-level combination)[12]\u3002",
"type_str": "table",
"html": null
}
}
}
}