| { |
| "paper_id": "2019", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:27:06.872523Z" |
| }, |
| "title": "Real-Time Mandarin Speech Synthesis System", |
| "authors": [ |
| { |
| "first": "\u912d\u5b89\u5091", |
| "middle": [], |
| "last": "\u3001\u9673\u5609\u5e73", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Sun Yat-sen University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "An-Chieh", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Sun Yat-sen University", |
| "location": {} |
| }, |
| "email": "ajcheng@g-mail.nsysu.edu.tw" |
| }, |
| { |
| "first": "Chia-Ping", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Sun Yat-sen University", |
| "location": {} |
| }, |
| "email": "cpchen@mail.cse.nsysu.edu.tw" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This thesis studies and implements the real time Chinese speech synthesis system. This system uses a conversion model of the text sequence to the Mel spectrum sequence, and then concatenates a vocoder from the Mel spectrum to the synthesized speech. We use Tacotron2 to implement a sequence-to-sequence conversion model with several different vocoders, including Griffin-Lim, World-Vocoder, and WaveGlow. The WaveGlow neural network vocoder, which implements the reversible codec function, is the most prominent, and is impressive in terms of synthesis speed or speech quality. We use a single speaker with 12-hour corpus implementation system. In terms of voice quality, the MOS of the synthesized system voice using the WaveGlow vocoder is 4.08, which is slightly", |
| "pdf_parse": { |
| "paper_id": "2019", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This thesis studies and implements the real time Chinese speech synthesis system. This system uses a conversion model of the text sequence to the Mel spectrum sequence, and then concatenates a vocoder from the Mel spectrum to the synthesized speech. We use Tacotron2 to implement a sequence-to-sequence conversion model with several different vocoders, including Griffin-Lim, World-Vocoder, and WaveGlow. The WaveGlow neural network vocoder, which implements the reversible codec function, is the most prominent, and is impressive in terms of synthesis speed or speech quality. We use a single speaker with 12-hour corpus implementation system. In terms of voice quality, the MOS of the synthesized system voice using the WaveGlow vocoder is 4.08, which is slightly", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "\u96a8\u8457\u79d1\u6280\u7684\u767c\u5c55\uff0c\u4eba\u6a5f\u4e92\u52d5\u7684\u60c5\u6cc1\u4e5f\u8d8a\u4f86\u8d8a\u666e\u53ca\uff0c\u50cf\u662f", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u7dd2\u8ad6 (Introduction)", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Tacotron2 (Shen et al., 2017) (MORISE et al., 2016) \u3001Griffin-Lim (Griffin & Lim, 1984) \u3001WaveGlow (Prenger et al., 2018) ", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 29, |
| "text": "(Shen et al., 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 30, |
| "end": 51, |
| "text": "(MORISE et al., 2016)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 65, |
| "end": 86, |
| "text": "(Griffin & Lim, 1984)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 97, |
| "end": 119, |
| "text": "(Prenger et al., 2018)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u8072\u78bc\u5668 (Vocoder)", |
| "sec_num": "2.3" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Attention-Based Models for Speech Recognition", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Chorowski", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Serdyuk", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1506.07503v1" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chorowski, J., Bahdanau, D., Serdyuk, D., Cho, K., & Bengio, Y. (2015). Attention-Based Models for Speech Recognition. In arXiv preprint arXiv:1506.07503v1.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Signal estimation from modified short-time fourier transform", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Griffin", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Lim", |
| "suffix": "" |
| } |
| ], |
| "year": 1984, |
| "venue": "IEEE Transactions on Acoustics, Speech, and Signal Processing", |
| "volume": "32", |
| "issue": "2", |
| "pages": "236--243", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/TASSP.1984.1164317" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Griffin, D. & Lim, J. (1984). Signal estimation from modified short-time fourier transform. IEEE Transactions on Acoustics, Speech, and Signal Processing, 32(2), 236-243. doi: 10.1109/TASSP.1984.1164317", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "WORLD: A Vocoder-Based High-Quality Speech Synthesis System for Real-Time Applications", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Morise", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Yokomori", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Ozawa", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "IEICE Trans. on Information and Systems", |
| "volume": "99", |
| "issue": "7", |
| "pages": "1877--1884", |
| "other_ids": { |
| "DOI": [ |
| "10.1587/transinf.2015EDP7457" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "MORISE, M., YOKOMORI, F., & OZAWA, K. (2016). WORLD: A Vocoder-Based High-Quality Speech Synthesis System for Real-Time Applications. IEICE Trans. on Information and Systems, E99.D(7), 1877-1884. doi: 10.1587/transinf.2015EDP7457", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "WaveNet: A Generative Model for Raw Audio", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Van Den Oord", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Dieleman", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Zen", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Simonyan", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Graves", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Kavukcuoglu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1609.03499v2" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "van den Oord, A., Dieleman, S., Zen, H., Simonyan, K., Vinyals, O., Graves, A., \u2026Kavukcuoglu, K. (2016). WaveNet: A Generative Model for Raw Audio. In arXiv preprint arXiv:1609.03499v2.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "WaveGlow: A Flow-based Generative Network for Speech Synthesis", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Prenger", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Valle", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Catanzaro", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1811.00002v1" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Prenger, R., Valle, R., & Catanzaro, B. (2018). WaveGlow: A Flow-based Generative Network for Speech Synthesis. In arXiv preprint arXiv:1811.00002v1.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions. In arXiv preprint", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "J" |
| ], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Jaitly", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1712.05884" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shen, J., Pang, R., Weiss, R. J., Schuster, M., Jaitly, N., Yang, Z., \u2026Wu, Y. (2017). Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions. In arXiv preprint arXiv:1712.05884.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Multi-Scale Context Aggregation by Dilated Convolutions", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Koltun", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1511.07122v3" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu, F. & Koltun, V. (2015). Multi-Scale Context Aggregation by Dilated Convolutions. In arXiv preprint arXiv:1511.07122v3.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "content": "<table><tr><td/><td>\u5373\u6642\u4e2d\u6587\u8a9e\u97f3\u5408\u6210\u7cfb\u7d71</td><td>55 \u912d\u5b89\u5091\u8207\u9673\u5609\u5e73</td></tr><tr><td>\u8868 1\u6578\u64da\u898f\u683c</td><td/></tr><tr><td>\u6578\u64da\u5167\u5bb9</td><td>\u4e2d\u6587\u6a19\u6e96\u5973\u8072\u8a9e\u97f3\u6578\u64da\u5eab</td></tr><tr><td>\u9304\u97f3\u8a9e\u6599</td><td colspan=\"2\">\u7d9c\u5408\u8a9e\u6599\u6a23\u672c\u91cf:\u97f3\u7bc0\u97f3\u5b50\u7684\u6578\u91cf\u3001\u985e\u578b\u3001\u97f3\u8abf\u3001\u97f3 \u9023\u4ee5\u53ca\u97fb\u5f8b\u7b49\u9032\u884c\u8986\u84cb\u3002</td></tr><tr><td>\u6709\u6548\u6642\u9577</td><td>\u7d04 12 \u5c0f\u6642</td></tr><tr><td>\u5e73\u5747\u5b57\u6578</td><td>16 \u5b57</td></tr><tr><td colspan=\"3\">siri \u8a9e\u97f3\u52a9\u7406\u3001\u667a\u80fd\u5c0e\u822a\u3001\u6709\u8072 \u8b80\u7269\u7b49\u90fd\u5df2\u74b0\u7e5e\u5728\u6211\u5011\u751f\u6d3b\u88e1\u3002\u800c\u5176\u4e2d\uff0c\u8a9e\u97f3\u5408\u6210\u7684\u6280\u8853\u5c31\u626e\u6f14\u4e86\u4e00\u500b\u975e\u5e38\u91cd\u8981\u7684\u8173\u8272\u3002 \u8a9e\u8a00\u985e\u578b \u6a19\u6e96\u666e\u901a\u8a71</td></tr><tr><td colspan=\"3\">\u8a9e\u97f3\u5408\u6210\u662f\u900f\u904e\u6a5f\u68b0\u3001\u96fb\u5b50\u7684\u65b9\u5f0f\u7522\u751f\u4eba\u9020\u8a9e\u97f3\u7684\u6280\u8853\uff0c\u6587\u5b57\u8f49\u8a9e\u97f3\u6280\u8853\u4e5f\u96b8\u5c6c\u65bc\u8a9e\u97f3 \u767c\u97f3\u4eba \u5973 : 20-30 \u6b72</td></tr><tr><td colspan=\"3\">\u5408\u6210\u3002\u800c\u672c\u7814\u7a76\u5247\u662f\u81f4\u529b\u65bc\u958b\u767c\u51fa\u4e00\u500b\u53ef\u5408\u6210\u51fa\u66f4\u5feb\u4e14\u66f4\u70ba\u903c\u771f\u7684\u6587\u5b57\u8f49\u8a9e\u97f3\u5408\u6210\u7cfb\u7d71\u3002 \u8072\u97f3\u63a1\u96c6\u74b0\u5883\u70ba\u5c08\u696d\u9304\u97f3\u5ba4</td></tr><tr><td 
colspan=\"3\">\u5be6\u73fe\u8a9e\u97f3\u5408\u6210\u7684\u65b9\u6cd5\u6709\u591a\u7a2e\uff0c\u5176\u4e2d\u5305\u542b\u53c3\u6578\u5f0f\u5408\u6210\u4ee5\u53ca\u62fc\u63a5\u5f0f\u5408\u6210\u3002\u57fa\u65bc\u53c3\u6578\u5f0f\u7684\u8a9e\u97f3 \u5408\u6210\u7cfb\u7d71\u4e3b\u8981\u662f\u900f\u904e\u7d71\u8a08\u5b78\u6a21\u578b\uff0c\u5229\u7528\u5b78\u7fd2\u51fa\u4f86\u7684\u8a9e\u97f3\u5b78\u7279\u5fb5\u548c\u5176\u8072\u5b78\u7279\u5fb5\u7684\u5c0d\u61c9\u95dc\u4fc2 1. \u9304\u97f3\u5ba4\u7b26\u5408\u5c08\u696d\u97f3\u5eab\u9304\u88fd\u6a19\u6e96 \u9304\u97f3\u74b0\u5883 2. \u9304\u97f3\u74b0\u5883\u548c\u8a2d\u5099\u81ea\u59cb\u81f3\u7d42\u4fdd\u6301\u4e0d\u8b8a</td></tr><tr><td colspan=\"3\">\u5f8c\uff0c\u9810\u6e2c\u51fa\u76f8\u61c9\u7684\u53c3\u6578\uff0c\u63a5\u8457\u8072\u78bc\u5668\u518d\u900f\u904e\u9019\u4e9b\u53c3\u6578\u5408\u6210\u51fa\u6240\u671f\u671b\u7684\u97f3\u983b\u3002\u4e0d\u904e\u9019\u7a2e\u5408 \u6210\u65b9\u5f0f\u6700\u5927\u7f3a\u9ede\u4e43\u70ba\u7121\u6cd5\u5408\u51fa\u63a5\u8fd1\u4eba\u985e\u7684\u81ea\u7136\u8a9e\u97f3\uff0c\u5728\u6280\u8853\u4e0a\u5c1a\u672a\u6709\u660e\u986f\u7684\u7a81\u7834\u3002\u62fc\u63a5 3. \u9304\u97f3\u74b0\u5883\u7684\u4fe1\u9020\u6bd4\u4e0d\u4f4e\u65bc 35dB \u5716 1. 
\u9810\u6e2c\u7db2\u8def\u4e4b\u6a21\u578b\u67b6\u69cb</td></tr><tr><td colspan=\"3\">\u5f0f\u8a9e\u97f3\u5408\u6210\u7cfb\u7d71\u5247\u662f\u900f\u904e\u540c\u6a23\u7684\u65b9\u5f0f\u53bb\u9810\u6e2c\u51fa\u9019\u4e9b\u8072\u5b78\u7279\u5fb5\uff0c\u7136\u5f8c\u518d\u5230\u539f\u59cb\u8a9e\u97f3\u5eab\u4e2d\u627e \u9304\u88fd\u5de5\u5177 \u5c08\u696d\u9304\u97f3\u8a2d\u5099\u53ca\u9304\u97f3\u8edf\u9ad4</td></tr><tr><td colspan=\"3\">\u5c0b\u8fd1\u4f3c\u7684\u97f3\u7d20\uff0c\u6700\u5f8c\u62fc\u63a5\u800c\u6210\u3002\u4e0d\u904e\u9019\u7a2e\u5408\u6210\u65b9\u6cd5\u4e5f\u610f\u5473\u8457\u5408\u6210\u7684\u97f3\u8cea\u7a69\u5b9a\u6027\u8207\u8a9e\u97f3\u5eab \u63a1\u6a23\u683c\u5f0f \u7121\u58d3\u7e2e PCM WAV \u683c\u5f0f\uff0c\u63a1\u6a23\u7387\u70ba 48kHz\u300116bit\u3002</td></tr><tr><td colspan=\"3\">\u5927\u5c0f\u6210\u6b63\u6bd4\u3002\u82e5\u8981\u80fd\u5408\u6210\u51fa\u5b8c\u5584\u7684\u81ea\u7136\u8a9e\u53e5\uff0c\u5c31\u5fc5\u9808\u8981\u6709\u9f4a\u5168\u7684\u8cc7\u6599\u5eab\uff0c\u4e14\u540c\u6642\u70ba\u4e86\u4e0d</td></tr><tr><td colspan=\"3\">\u5ef6\u9072\u641c\u5c0b\u4e0a\u7684\u6548\u7387\uff0c\u66f4\u5fc5\u9808\u8981\u6709\u500b\u826f\u597d\u7684\u6f14\u7b97\u6cd5\u3002\u800c\u4e0a\u8ff0\u9019\u4e9b\u65b9\u6cd5\uff0c\u9664\u4e86\u7686\u6709\u8457\u660e\u986f\u7684 \u4eba\u5de5\u75d5\u8de1\u4e4b\u5916\uff0c\u5728\u5c08\u696d\u9818\u57df\u4e0a\u7684\u9580\u6abb\u4e5f\u90fd\u6975\u9ad8\u3002\u5e78\u904b\u5730\uff0c\u9084\u6709\u4e00\u7a2e\u795e\u7d93\u7db2\u8def\u5f0f\u7684\u5408\u6210\u6280 2.2 \u9810\u6e2c\u7db2\u8def (Predicted Model) \u539f\u59cb\u6587\u672c \u53ef\u60f3\u800c\u77e5\uff0c\u7515\u4e2d\u6349\u9c49\u986f\u7136\u6bd4\u4ea1\u7f8a\u88dc\u7262\u66f4\u53ef\u9760\u66f4\u6709\u6548\u3002 \u5728\u524d\u7aef\u9810\u6e2c\u7db2\u8def\u90e8\u5206\u6211\u5011\u91cd\u73fe\u4e86 Google \u7684 Tacotron2 (Shen et al., 2017)\uff0c\u4e26\u91dd\u5c0d\u4e2d\u6587\u8a9e\u97f3 
\u8853\uff0c\u53ef\u5229\u7528\u795e\u7d93\u7db2\u8def\u76f4\u63a5\u5b78\u7fd2\u5f9e\u6587\u672c\u7aef\u5230\u8072\u5b78\u7279\u5fb5\u7aef\u7684\u5c0d\u61c9\u95dc\u4fc2\u3002 \u5408\u6210\u7cfb\u7d71\u505a\u4e86\u5ba2\u88fd\u5316\u3002\u5728\u67b6\u69cb\u4e0a\u9762\u4f7f\u7528\u7684\u662f\u4e00\u500b\u7de8\u78bc\u5668-\u89e3\u78bc\u5668(Encoder-Decoder)\u7684\u8a2d\u7f6e\uff0c \u7d93\u904e jieba \u53ef\u60f3\u800c\u77e5|\uff0c|\u7515\u4e2d\u6349\u9c49|\u986f\u7136|\u6bd4|\u4ea1\u7f8a\u88dc\u7262|\u66f4|\u53ef\u9760|\u66f4|\u6709\u6548|\u3002</td></tr><tr><td colspan=\"3\">\u4e26\u52a0\u5165\u4e86\u4f4d\u7f6e\u654f\u611f\u7684\u6ce8\u610f\u529b\u6a5f\u5236(Location sensitive attention) (Chorowski, Bahdanau, \u7d93\u904e pypinyin ke3 xiang3 er2 zhi1| \uff0c| weng4 zhong1 zhuo1 bie1 | xian3 ran2 | bi3 |</td></tr><tr><td colspan=\"3\">Serdyuk, Cho & Bengio, 2015)\uff0c\u6574\u9ad4\u67b6\u69cb\u5982\u5716 1\u3002\u800c\u7531\u65bc\u6211\u5011\u4f7f\u7528\u7684\u662f\u4e2d\u6587\u8cc7\u6599\u96c6\uff0c\u6545\u5728 wang2 yang2 bu3 lao2 | geng4| ke3 kao4 | geng4| you3 xiao4 |.</td></tr><tr><td colspan=\"3\">\u6587\u672c\u7684\u5167\u5bb9\u4e0a\u5148\u9032\u884c\u4e86\u8cc7\u6599\u7684\u9810\u8655\u7406\uff0c\u76ee\u7684\u662f\u70ba\u4e86\u8b93\u795e\u7d93\u7db2\u8def\u53ef\u4ee5\u5b78\u7fd2\u5230\u6211\u5011\u4e2d\u6587\u4e0a\u7684</td></tr><tr><td colspan=\"3\">\u97fb\u5f8b\u4ee5\u53ca\u6291\u63da\u9813\u632b\u3002\u7531\u65bc\u6f22\u5b57\u672c\u8eab\u6709\u6578\u842c\u500b\u76f8\u7570\u5b57\uff0c\u540c\u97f3\u7570\u5b57\u7684\u60c5\u6cc1\u4e5f\u4e0d\u5728\u5c11\u6578\uff0c\u82e5\u4ee5 \u8a13\u7df4\u8cc7\u6599\u9078\u81ea\u6a19\u8c9d\u8cc7\u6599\u96c6\uff0c\u662f\u7531\u300c\u6a19\u8c9d\u79d1\u6280\u6709\u9650\u516c\u53f8\u300d\u65bc 2018 \u5e74\u6240\u958b\u653e\u3002\u7531\u4e00\u4f4d\u5973\u6027\u9304 
\u7aae\u8209\u7684\u65b9\u5f0f\u4f86\u5c0d\u795e\u7d93\u7db2\u8def\u505a\u8a13\u7df4\u986f\u7136\u4e0d\u5920\u660e\u667a\u3002\u6211\u5011\u8655\u7406\u7684\u65b9\u5f0f\u662f\u4f7f\u7528\u6f22\u8a9e\u62fc\u97f3\u4f5c\u70ba\u5b57 \u97f3\u8005\u9304\u88fd\u800c\u6210\uff0c\u5168\u9577\u7d04\u7565 12 \u5c0f\u6642\uff0c\u4f7f\u7528 48kHz 16bit \u63a1\u6a23\u983b\u7387\uff0c\u9304\u88fd\u74b0\u5883\u70ba\u5c08\u696d\u9304\u97f3\u5ba4 \u5143\u6a19\u8a3b\uff0c\u4e26\u63a1\u7528\u6578\u5b57\u4e00\u5230\u56db\u4f86\u8868\u793a\u6211\u5011\u7684\u8072\u8abf\u3002\u96d6\u7136\u904e\u7a0b\u4e2d\u90fd\u662f\u4ee5\u9019\u6a23\u7684\u5f62\u5f0f\u9032\u884c\u8a13\u7df4\uff0c \u53ca\u9304\u97f3\u8edf\u9ad4\uff0c\u8a9e\u6599\u6db5\u84cb\u5404\u985e\u65b0\u805e\u3001\u5c0f\u8aaa\u3001\u79d1\u6280\u3001\u5a1b\u6a02\u7b49\u9818\u57df\uff0c\u8a73\u7d30\u898f\u683c\u5982\u8868 1\u3002 \u4e0d\u904e\u5728\u5408\u6210\u968e\u6bb5\u6642\u6211\u5011\u53ef\u4ee5\u85c9\u7531\"pypinyin\"\u7684\u5957\u4ef6\u76f4\u63a5\u900f\u904e\u4e2d\u6587\u8f38\u5165\u5408\u6210\u51fa\u6240\u6307\u5b9a\u7684</td></tr><tr><td colspan=\"3\">\u53e5\u5b50\u3002\u53e6\u5916\uff0c\u70ba\u4e86\u63d0\u5347\u4e2d\u6587\u8a9e\u97f3\u7684\u6d41\u66a2\u5ea6\uff0c\u6211\u5011\u4e5f\u900f\u904e\"jieba\"\u65b7\u8a5e\u7cfb\u7d71\u91dd\u5c0d\u6587\u672c\u7684\u5167</td></tr><tr><td colspan=\"3\">\u5bb9\u5148\u9032\u884c\u65b7\u8a5e\uff0c\u6211\u5011\u5229\u7528 TrieTree \u7684\u7d50\u69cb\u53bb\u751f\u6210\u53e5\u5b50\u4e2d\u6240\u6709\u53ef\u80fd\u6210\u70ba\u8a5e\u7684\u60c5\u6cc1\uff0c\u4e26\u4f7f\u7528</td></tr><tr><td colspan=\"3\">\u52d5\u614b\u898f\u5283\u7684\u65b9\u5f0f\u627e\u51fa\u6700\u5927\u6a5f\u7387\u7684\u8def\u5f91\u3002\u6574\u500b\u524d\u8655\u7406\u7684\u904e\u7a0b\u53ef\u53c3\u8003\u8868 2\u3002</td></tr></table>", |
| "text": "", |
| "type_str": "table", |
| "html": null, |
| "num": null |
| }, |
| "TABREF1": { |
| "content": "<table><tr><td>58</td><td>\u5373\u6642\u4e2d\u6587\u8a9e\u97f3\u5408\u6210\u7cfb\u7d71 \u5373\u6642\u4e2d\u6587\u8a9e\u97f3\u5408\u6210\u7cfb\u7d71</td><td>57 59</td></tr><tr><td colspan=\"3\">\u4e00\u500b\u6a23\u672c\u7684\u751f\u6210\u65b9\u5f0f\u9664\u4e86\u9700\u8981\u9f90\u5927\u7684\u8a08\u7b97\u8cc7\u6e90\u4e4b\u5916\uff0c\u5728\u53ef\u5e73\u884c\u6027\u4e0a\u4e5f\u53d7\u5230\u4e86\u9650\u5236\u3002\u76f8\u5c0d</td></tr><tr><td colspan=\"3\">\u800c\u8a00\uff0c\u751f\u6210\u5c0d\u6297\u7db2\u8def\u5247\u514d\u9664\u4e86\u9019\u7a2e\u7169\u60f1\uff0c\u4e3b\u8981\u900f\u904e\u751f\u6210\u5668\u8207\u5224\u5225\u5668\u4e0d\u65b7\u76f8\u4e92\u5b78\u7fd2\u7684\u8fed\u4ee3</td></tr><tr><td colspan=\"3\">\u5f9e\u800c\u751f\u5f97\u8207\u771f\u5be6\u6a23\u672c\u63a5\u8fd1\u7684\u5206\u5e03\u3002\u4f46\u4e00\u76f4\u4ee5\u4f86\u751f\u6210\u5c0d\u6297\u7db2\u8def\u4e5f\u4e0d\u514d\u6703\u9047\u5230\u8a31\u591a\u554f\u984c\uff0c\u50cf</td></tr><tr><td colspan=\"3\">\u662f\u751f\u6210\u7684\u591a\u6a23\u6027\u4e0d\u8db3\u4ee5\u53ca\u8a13\u7df4\u904e\u7a0b\u4e0d\u7a69\u5b9a\u7b49\u7b49\u3002\u4e0d\u904e\u5e78\u904b\u5730\uff0c\u57fa\u65bc\u6d41\u7684\u751f\u6210\u6a21\u578b\u6709\u6548\u7684</td></tr><tr><td colspan=\"3\">\u89e3\u6c7a\u4e86\u9019\u4e9b\u554f\u984c\uff0c\u800c\u9019\u6a23\u7684\u751f\u6210\u65b9\u5f0f\uff0c\u4e5f\u88ab\u63a1\u7528\u5728\u8072\u78bc\u5668-WaveGlow (Prenger et al., 2018)</td></tr><tr><td>\u4e2d\u3002</td><td/><td/></tr><tr><td colspan=\"3\">WaveGlow (Prenger et al., 2018)\u900f\u904e\u5206\u5e03\u63a1\u6a23\u751f\u6210\u8a9e\u97f3\uff0c\u50c5\u9700\u4e00\u500b\u7db2\u8def\u53ca\u4e00\u500b\u6700\u5927\u5316</td></tr><tr><td colspan=\"3\">\u4f3c\u7136\u7684\u640d\u5931\u51fd\u6578\u5373\u53ef\u751f\u6210\u6642\u57df\u6ce2\u5f62\uff0c\u4e26\u4e14\u5728\u9ad8\u9084\u539f\u5ea6\u7684\u60c5\u6cc1\u4e0b\u4ea6\u80fd\u5373\u6642\u7684\u5408\u6210\u8a9e\u97f3\u3002\u5229</td></tr><tr><td 
colspan=\"3\">\u7528\u591a\u500b\u53ef\u9006\u7684\u8b8a\u63db\u51fd\u6578\u7d44\u6210\u5e8f\u5217\uff0c\u5c07\u4e00\u500b\u7c21\u55ae\u7684\u5206\u5e03\u900f\u904e\u4e00\u7cfb\u5217\u7684\u53ef\u9006\u51fd\u6578\u8f49\u63db\u5230\u4e00\u500b</td></tr><tr><td colspan=\"3\">\u8907\u96dc\u7684\u5206\u5e03\uff0c\u4e26\u85c9\u6b64\u4f86\u6a21\u64ec\u8a13\u7df4\u6578\u64da\u7684\u5206\u5e03\uff0c\u6700\u5f8c\u518d\u900f\u904e\u6700\u5927\u4f3c\u7136\u6e96\u5247\u4f86\u9032\u884c\u512a\u5316\u3002</td></tr><tr><td colspan=\"3\">\u5716 2. Dilated Causal Convolution \u6211\u5011\u4f7f\u7528\u7684\u7db2\u8def\u67b6\u69cb\u6bd4\u7167\u4e86(Prenger et al., 2018)\u4e2d\u7684\u914d\u7f6e\uff0c\u5305\u542b 12 \u5c64\u7684\u5c0d\u8026\u6620\u5c04\u5c64\u3001</td></tr><tr><td colspan=\"3\">[Figure 2. Dilated Causal Convolution] 12 \u500b 1*1 FFT_size\u3001hop_size\u3001window_size \u90fd\u8a2d\u5b9a\u4e86\u76f8\u7b26\u7684\u683c\u5f0f\u4ee5\u4fbf\u8a13\u7df4\uff0c\u5982\u8868 3 \u6240\u793a\u3002 2.3.1 Griffin-Lim</td></tr><tr><td colspan=\"3\">Griffin-Lim (Griffin & Lim, 1984) \u662f\u4e00\u7a2e\u8fed\u4ee3\u7684\u6f14\u7b97\u6cd5\uff0c\u97f3\u983b\u8cea\u91cf\u96d6\u7136\u4e0d\u5982 WaveNet (van den Oord et al., 2016)\uff0c\u4f46\u5728\u5373\u6642\u7cfb\u7d71\u4e2d\u4ecd\u4fdd\u6709\u4e86\u7af6\u722d\u529b\uff0c\u662f\u8a31\u591a\u5373\u6642\u8a9e\u97f3\u5408\u6210\u7cfb\u7d71\u6bd4\u8f03 \u7684\u5c0d\u8c61\u3002\u900f\u904e\u8fed\u4ee3\u7684\u6b21\u6578\u4f86\u63d0\u5347\u5408\u6210\u7684\u97f3\u8cea\uff0c\u6211\u5011\u7684\u5be6\u9a57\u4e2d\u63a1\u7528\u4e86\u516d\u5341\u6b21\u7684\u8fed\u4ee3\u4ee5\u78ba\u4fdd \u8868 3. 
\u8cc7\u6599\u96c6 : Biaobei</td></tr><tr><td colspan=\"3\">\u5176\u7a69\u5b9a\u6027\u3002\u6bd4\u8d77\u50b3\u7d71\u8072\u78bc\u5668\u9700\u8981\u57fa\u983b\u548c\u5012\u8b5c\u7b49\u53c3\u6578\u800c\u8a00\uff0cGriffin-Lim (Griffin & Lim, 1984)</td></tr><tr><td colspan=\"2\">\u53ef\u6839\u64da\u6587\u672c\u9810\u6e2c\u7684\u7dda\u6027\u983b\u8b5c\u5716\u76f4\u63a5\u91cd\u5efa\u6642\u57df\u6ce2\u5f62\u3002 sample_rate(Hz)</td><td>48k</td></tr><tr><td>num_mels</td><td/><td>160</td></tr><tr><td colspan=\"3\">FFT_size 2.3.2 World-Vocoder hop_size \u8a9e\u97f3\u662f\u8072\u97f3\u7684\u4e00\u7a2e\uff0c\u662f\u7531\u4eba\u7684\u767c\u8072\u5668\u5b98\u767c\u51fa\uff0c\u5177\u6709\u4e00\u5b9a\u8a9e\u6cd5\u548c\u610f\u7fa9\u7684\u8072\u97f3\u3002\u5927\u8166\u5c0d\u767c\u97f3 4096 600 \u5668\u5b98\u767c\u51fa\u904b\u52d5\u795e\u7d93\u6307\u4ee4\uff0c\u63a7\u5236\u767c\u97f3\u5668\u5b98\u5404\u7a2e\u808c\u8089\u904b\u52d5\u5f9e\u800c\u632f\u52d5\u7a7a\u6c23\u800c\u5f62\u6210\u3002\u6574\u9ad4\u767c\u8072\u904e window_size 2400</td></tr><tr><td colspan=\"3\">\u7a0b\u662f\u7a7a\u6c23\u7531\u80ba\u9032\u5165\u5589\u90e8\uff0c\u7d93\u904e\u8072\u5e36\u6fc0\u52f5\uff0c\u9032\u5165\u8072\u9053\uff0c\u6700\u5f8c\u901a\u904e\u5634\u5507\u8f3b\u5c04\u5f62\u6210\u8a9e\u97f3\u3002</td></tr><tr><td colspan=\"3\">World-Vocoder (MORISE, YOKOMORI & OZAWA, 2016) \u53c3\u7167\u4e86\u6b64\u767c\u8072\u539f\u7406\uff0c\u5206\u5225\u5c07\u4e09</td></tr><tr><td colspan=\"3\">\u7a2e\u8072\u5b78\u7279\u5fb5:\u57fa\u983b(Fundamental Frequency)\u3001\u983b\u8b5c\u5305\u7d61(Spectral envelope)\u4ee5\u53ca\u975e\u9031\u671f\u5e8f\u5217</td></tr><tr><td colspan=\"3\">(Aperiodic parameter)\u5c0d\u61c9\u5230\u4e86\u4eba\u767c\u8072\u6a5f\u7406\u7684\u7d93\u5178\u6e90-\u6ffe\u6ce2\u5668(source-filter)\u6a21\u578b\uff0c\u6700\u5f8c\u4e26\u5229 , 2016)\uff0c\u662f\u4f7f\u7528 
\u5728\u6a21\u578b\u8a13\u7df4\u5b8c\u7562\u4e4b\u5f8c\uff0c\u6211\u5011\u5f9e\u8cc7\u6599\u96c6\u88e1\u9762\u96a8\u6a5f\u9078\u53d6\u4e86\u4e94\u53e5\u8207\u8a13\u7df4\u8cc7\u6599\u4e0d\u91cd\u8907\u7684\u6587\u672c\u9032\u884c \u7528\u9019\u4e9b\u53c3\u6578\u91cd\u5efa\u8a9e\u97f3\uff0c\u5982\u5716 3\u3001\u5716 4\u3002\u800c\u5728\u7279\u5fb5\u63d0\u53d6\u7684\u904e\u7a0b\uff0c\u6211\u5011\u5148\u662f\u900f\u904e DIO \u6f14\u7b97\u6cd5 Auto-regression \u7684\u65b9\u5f0f\u751f\u6210\u97f3\u983b\uff0c\u5373\u6bcf\u5728\u9810\u6e2c\u7576\u524d\u6642\u523b\u7684\u503c\u6642\u90fd\u662f\u6839\u64da\u524d\u4e00\u6642\u523b\u7684\u8f38\u51fa \u8a55\u4f30\uff0c\u53d7\u6e2c\u4eba\u54e1\u4e00\u5171\u5341\u4f4d\u3002\u53d7\u6e2c\u6e96\u5247\u5982\u4e0b:\u6bcf\u4eba\u807d\u6e2c\u4e94\u7a2e\u4e0d\u540c\u53e5\u5b50\uff0c\u6bcf\u7a2e\u53e5\u5b50\u5404\u5305\u542b\u56db\u500b \u63d0\u53d6\u51fa\u57fa\u983b\uff0c\u7136\u5f8c\u5229\u7528\u57fa\u983b\u4e2d\u7684 CheapTrick \u63d0\u53d6\u5305\u7d61\uff0c\u6700\u5f8c\u900f\u904e D4C \u5c07\u5f97\u5230\u5f8c\u7684\u57fa\u983b \u7d50\u679c\u3002\u6a21\u578b\u67b6\u69cb\u4e3b\u8981\u662f\u7531\u56e0\u679c\u5377\u7a4d(Causal Convolution)\u7d44\u6210\uff0c\u800c\u70ba\u4e86\u80fd\u5728\u6642\u57df\u4e0a\u7372\u5f97\u66f4 & Catanzaro, 2018)\u3002\u5118\u7ba1\u81ea\u56de\u6b78\u6a21\u578b\u5728\u8a31\u591a\u5be6\u9a57\u4e0a\u5f97\u5230\u4e86\u5f88\u597d\u7684\u6548\u679c\uff0c\u4f46\u9019\u7a2e\u4e00\u6b21\u751f\u6210 2017)\u7684\u524d\u7aef\u9810\u6e2c\u7db2\u8def\uff0c\u4e26\u91dd\u5c0d\u591a\u7a2e\u4e0d\u540c\u7684\u8072\u78bc\u5668\u9032\u884c\u5be6\u6e2c\u3001\u63a2\u8a0e\u3002 et al., 2017)\u6a21\u578b\u9032\u800c\u5be6\u73fe\u795e\u7d93\u7db2\u8def\u4e4b\u8a13\u7df4\u3002 \u751f\u6210\u5c0d\u6297\u7db2\u8def(GAN)\u4ee5\u53ca\u57fa\u65bc\u6d41\u7684\u751f\u6210\u6a21\u578b(Flow-based generative model) (Prenger, Valle 
\u5c1a\u6709\u5927\u5e45\u5ea6\u7684\u8abf\u6574\u7a7a\u9593\u3002\u800c\u70ba\u4e86\u4f7f\u7cfb\u7d71\u80fd\u5920\u5373\u6642\u5408\u6210\uff0c\u6211\u5011\u6839\u64da\u4e86 Tacotron2 (Shen et al., \u964d\u81f3 5 \u7dad\uff0c\u57fa\u983b\u90e8\u5206\u5247\u4fdd\u6301\u4e0d\u8b8a\uff0c\u6700\u5f8c\u518d\u5c07\u4e0a\u8ff0\u7279\u5fb5\u7dad\u5ea6\u9023\u63a5\u8d77\u4f86\uff0c\u8f38\u5165 Tacotron2 (Shen \u96a8\u8457\u795e\u7d93\u7db2\u8def\u7684\u767c\u5c55\uff0c\u76ee\u524d\u5e38\u4f7f\u7528\u5230\u7684\u751f\u6210\u6a21\u578b\u53ef\u6709:\u81ea\u56de\u6b78\u6a21\u578b(Autoregressive model)\u3001 \u64da\u6211\u5011\u7684\u8a55\u4f30\uff0c\u5f97\u82b1\u8cbb\u6578\u5341\u79d2\u7684\u5408\u6210\u6642\u9593\u624d\u80fd\u751f\u6210\u4e00\u79d2\u9418\u7684\u97f3\u983b\uff0c\u82e5\u8981\u4f5c\u70ba\u5be6\u969b\u61c9\u7528\uff0c \u7684 MFCC \u964d\u81f3 60 \u7dad\uff0c\u63a5\u8457\u5c07\u975e\u9031\u671f\u5e8f\u5217\u8f49\u8b8a\u6210 Band \u975e\u9031\u671f\u4fe1\u865f\uff0c\u6b64\u6b65\u9a5f\u53ef\u6709\u6548\u5c07\u7dad\u5ea6 2.3.3 WaveGlow \u771f\u7684\u4eba\u985e\u8a9e\u97f3\uff0c\u4e5f\u5728\u8a9e\u97f3\u5408\u6210\u4e0a\u9054\u5230\u4e86\u5f88\u597d\u7684\u6548\u679c\uff0c\u4f46\u7f8e\u4e2d\u4e0d\u8db3\u7684\u537b\u662f\u5176\u751f\u6210\u901f\u7387\u3002\u6839 \u58d3\u529b\u3002\u800c\u900f\u904e Merlin \u5de5\u5177\u5305\u53ef\u5e6b\u52a9\u5be6\u73fe\u7dad\u5ea6\u7684\u8f49\u63db\u3002\u4ee5\u6211\u5011\u7684\u5be6\u9a57\u70ba\u4f8b\uff0c\u9996\u5148\u5c07\u63d0\u53d6\u5230 \u5982\u5716 2\uff0c\u7576\u5c64\u6578\u758a\u52a0\uff0c\u611f\u77e5\u80fd\u529b\u5c31\u4ee5\u6307\u6578\u6027\u6210\u9577\u3002\u96d6\u7136\u9019\u6a23\u7684\u6a21\u578b\u67b6\u69cb\u80fd\u5920\u91cd\u73fe\u6975\u70ba\u903c \u7dad\u5ea6\u7684\u983b\u8b5c\u5305\u7d61\u4ee5\u53ca\u975e\u9031\u671f\u4fe1\u865f\u6211\u5011\u5fc5\u9808\u5148\u5c07\u5176\u964d\u7dad\uff0c\u4ee5\u7de9\u89e3\u795e\u7d93\u7db2\u8def\u8a13\u7df4\u6642\u6240\u5e36\u4f86\u7684 
\u5ee3\u7684\u611f\u77e5\u80fd\u529b\uff0c\u6a21\u578b\u4e2d\u52a0\u5165\u4e86\u64f4\u5927\u5377\u7a4d(Dilated Causal Convolution) (Yu & Koltun, 2015)\uff0c \u8207\u5305\u7d61\u8a08\u7b97\u51fa\u975e\u9031\u671f\u4fe1\u865f\u3002\u4e0d\u904e\u70ba\u4e86\u8207 Tacotron2 (Shen et al., 2017)\u8a13\u7df4\u505a\u7d50\u5408\uff0c\u9019\u7a2e\u9ad8 \u97f3\u6a94\uff0c\u5206\u5225\u4f86\u81ea\u771f\u5be6\u6578\u64da\u4ee5\u53ca World-Vocoder</td></tr></table>", |
| "text": "\u7684\u53ef\u9006\u5377\u7a4d\u4ee5\u53ca WN \u4e2d\u8a2d\u6709 8 \u5c64\u7684 dilated convolutions\uff0c\u540c\u6642\u6839\u64da\u6211\u5011\u7684\u8cc7\u6599 \u96c6\u8abf\u6574\u4e86\u8d85\u53c3\u6578\u3002\u8a13\u7df4\u8cc7\u6599\u70ba 48kHz \u7684\u97f3\u983b\uff0c\u6211\u5011\u5c07 160 \u7dad\u7684\u6885\u723e\u983b\u8b5c\u4f5c\u70ba\u8f38\u5165\u4ee5\u53ca", |
| "type_str": "table", |
| "html": null, |
| "num": null |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td/><td/><td/><td>\u912d\u5b89\u5091\u8207\u9673\u5609\u5e73</td></tr><tr><td>\u8868 4. Model</td><td colspan=\"3\">Mean Opinion Score(MOS)</td></tr><tr><td>World-Vocoder</td><td/><td>2.71</td><td/></tr><tr><td>Griffin-Lim</td><td/><td>3.15</td><td/></tr><tr><td>WaveGlow</td><td/><td>4.08</td><td/></tr><tr><td>Ground Truth</td><td/><td>4.41</td><td/></tr><tr><td colspan=\"2\">3.2 \u63a8\u65b7\u901f\u5ea6 Speed of Inference</td><td/><td/></tr><tr><td colspan=\"4\">\u4e00\u500b\u9ad8\u8cea\u91cf\u7684\u97f3\u983b\u81f3\u5c11\u9700\u8981\u64c1\u6709 16kHz \u7684\u63a1\u6a23\u9ede\u3002\u800c\u6211\u5011\u7684\u5be6\u9a57\u5f9e\u524d\u7aef\u9810\u6e2c\u7db2\u8def\u5230\u5f8c\u7aef</td></tr><tr><td colspan=\"4\">\u751f\u6210\u8a9e\u97f3\u4e0d\u50c5\u90fd\u7b26\u5408\u4e86\u6a19\u6e96\uff0c\u751a\u81f3\u90fd\u5c55\u73fe\u4e86\u6bd4\u5be6\u6642\u5408\u6210\u9084\u8981\u5feb\u7684\u901f\u5ea6\u3002\u8a13\u7df4\u5b8c\u7684\u6a21\u578b\u6211</td></tr><tr><td colspan=\"4\">\u5011\u7d71\u4e00\u653e\u5230\u4e86 GeForce RTX 2080 TI GPU \u4e0a\u9032\u884c\u63a8\u6e2c\u3002\u4ee5\u5408\u6210\u4e00\u500b\u5341\u79d2\u4e14 48kHz \u7684\u97f3\u983b</td></tr><tr><td colspan=\"4\">\u4f86\u8aaa\uff0c\u6211\u5011\u5206\u5225\u5728 World-Vocoder (MORISE et al., 2016) : 6 \u79d2\uff0cGriffin-Lim (Griffin & Lim,</td></tr><tr><td colspan=\"4\">1984) : 1.2 \u79d2\uff0cWaveGlow (Prenger et al., 2018) : 1.4 \u79d2\u7684\u6642\u9593\u5167\u5b8c\u6210\u4e86\u63a8\u65b7\u3002\u53e6\u5916\uff0c\u6211\u5011</td></tr><tr><td colspan=\"4\">\u4e5f\u6574\u7406\u4e86 Tacotron2 (Shen et al., 2017)\u9810\u6e2c\u7db2\u8def\u642d\u914d\u4e0d\u540c\u8072\u78bc\u5668\u5728\u540c\u4e00\u53f0\u6a5f\u5668\u4e0a\u5206\u5225\u6240\u4f54</td></tr><tr><td colspan=\"4\">\u7528\u7684\u8cc7\u6e90\uff0c\u96d6\u7136 WaveGlow (Prenger et al., 2018) \u5728\u63a8\u6e2c\u6642\u9593\u4e0a\u5c55\u73fe\u4e86\u512a\u7570\u7684\u5408\u6210\u901f\u5ea6\uff0c</td></tr><tr><td 
colspan=\"4\">\u4f46\u7531\u8868 5 \u53ef\u770b\u51fa\u5176\u6240\u4f54\u64da\u7684\u8cc7\u6e90\u4e5f\u76f8\u7576\u9ad8\uff0c\u8aaa\u660e\u4e86\u6211\u5011\u7684\u6a21\u578b\u4ecd\u6709\u512a\u5316\u7684\u53ef\u80fd\u3002</td></tr><tr><td colspan=\"2\">\u8868 5. Tacotron2 \u7d50\u5408\u4e0d\u540c\u8072\u78bc\u5668\u6240\u4f54\u7528\u7684\u8a08\u7b97\u8cc7\u6e90</td><td/><td/></tr><tr><td colspan=\"4\">[Table 5. Tacotron2 combines computing resources used by different vocoders]</td></tr><tr><td>Intel(R) Xeon(R) Gold 5118 CPU @ 2.30GHz/64GB/RTX 2080TI</td><td>World-Vocoder</td><td>Griffin-Lim</td><td>WaveGlow</td></tr><tr><td>GPU \u4f7f\u7528\u91cf(GB)</td><td>0.93G</td><td>1.31G</td><td>2.5G</td></tr><tr><td>CPU \u4f7f\u7528\u7387(%)</td><td>6.75%</td><td>7.5%</td><td>14.3%</td></tr><tr><td>MEM \u4f7f\u7528\u7387(%)</td><td>2.45%</td><td>3.25%</td><td>5%</td></tr><tr><td>4. \u7d50\u8ad6 (Conclusions)</td><td/><td/><td/></tr><tr><td colspan=\"4\">Griffin-Lim \u90fd\u5df2\u958b\u767c\u4e00\u6bb5\u6642\u9593\uff0c\u4f46\u5728\u97f3\u983b\u7684\u9084\u539f\u5ea6\u4e0a\u4ecd\u9060\u4e0d\u53ca\u8fd1\u671f\u8208\u8d77\u7684\u795e\u7d93\u7db2\u8def\u5f0f\u5408</td></tr><tr><td colspan=\"4\">\u4e09\u7a2e\u4e0d\u540c\u8072\u78bc\u5668\u5408\u6210\u7684\u97f3\u6a94\u3002\u5728\u6bcf\u4e00\u53e5\u807d \u6210\u5668\uff0c\u4e14 WaveGlow (Prenger et al., 2018)\u4e0d\u50c5\u5728\u97f3\u8cea\u7684\u9084\u539f\u5ea6\u4ea6\u6216\u662f\u5408\u6210\u7684\u901f\u5ea6\u4e0a(\u5728 \u5b8c\u5f8c\uff0c\u90fd\u7d66\u4e88\u4e00\u5230\u4e94\u5206\u7684\u4e3b\u89c0\u5206\u6578\uff0c\u7e3d\u8a08\u5f8c\u518d\u5e73\u5747\u8a08\u7b97\u3002\u800c\u6574\u500b\u904e\u7a0b\u5171\u5305\u542b\u4e94\u7a2e\u53e5\u5b50\u4ee5 2080TI \u4e0a\uff0c\u4e00\u79d2\u7d04\u83ab\u53ef\u751f\u6210 350kHz \u4ee5\u4e0a\u7684\u63a1\u6a23\u9ede)\u90fd\u7d66\u4e88\u4e86\u6211\u5011\u4e0d\u932f\u7684\u5c55\u793a\u3002\u4f46\u5c31\u9577\u671f 
\u53ca\u4e8c\u5341\u500b\u4e0d\u91cd\u8907\u7684\u97f3\u6a94\uff0c\u6e2c\u8a66\u7d50\u679c\u5982\u8868 4\u3002 \u800c\u8a00\uff0c\u6211\u5011\u7684\u6a21\u578b\u9084\u6709\u591a\u7a2e\u53ef\u80fd\u7684\u512a\u5316\u65b9\u5f0f\uff0c\u50cf\u662f\u6211\u5011\u4f7f\u7528\u7684\u4e2d\u6587\u8cc7\u6599\u96c6\u898f\u6a21\u8f03\u5c0f\uff0c\u5118</td></tr><tr><td colspan=\"4\">\u7ba1\u7d93\u904e\u4e86\u65b7\u8a5e\u7cfb\u7d71\u7684\u8abf\u6574\uff0c\u4ecd\u6709\u90e8\u5206\u8a9e\u53e5\u7121\u6cd5\u826f\u597d\u7684\u5448\u73fe\u4eba\u985e\u7684\u81ea\u7136\u8a9e\u97f3\u3002\u65e5\u5f8c\u9664\u4e86\u6536</td></tr><tr><td colspan=\"4\">\u96c6\u66f4\u5b8c\u6574\u7684\u8a9e\u6599\u5eab\u4e4b\u5916\uff0c\u5728\u9810\u6e2c\u7db2\u8def\u90e8\u5206\u52a0\u5165\u60c5\u7dd2\u8fa8\u8b58\u4f5c\u70ba\u689d\u4ef6\uff0c\u4f7f\u5f97\u5408\u6210\u7684\u97f3\u983b\u66f4\u751f</td></tr><tr><td>\u52d5\u66f4\u6709\u6eab\u5ea6\u4e5f\u662f\u6211\u5011\u7684\u4efb\u52d9\u4e4b\u4e00\u3002</td><td/><td/><td/></tr></table>", |
| "text": "\u6211\u5011\u7684\u7814\u7a76\u76ee\u524d\u5728\u8072\u78bc\u5668\u4e0a\u5617\u8a66\u4e86\u591a\u7a2e\u53ef\u80fd\uff0c\u5305\u542b:World-Vocoder (MORISE et al., 2016) \u3001 Griffin-Lim(Griffin & Lim, 1984) \u4ee5\u53ca WaveGlow(Prenger et al., 2018) \uff0c\u4e26\u5c07\u9019\u4e9b\u5408\u6210 \u6280\u8853\u90fd\u5957\u7528\u5728\u6211\u5011\u7684\u9810\u6e2c\u7db2\u8def\u4e0a\u3002\u5f9e\u6211\u5011\u7684\u7814\u7a76\u4f86\u770b\uff0c\u6211\u5011\u767c\u73fe\u5118\u7ba1 World-Vocoder \u53ca", |
| "type_str": "table", |
| "html": null, |
| "num": null |
| } |
| } |
| } |
| } |