| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:27:35.464359Z" |
| }, |
| "title": "Incorporating Speaker Embedding and Post-Filter Network for Improving Speaker Similarity of Personalized Speech Synthesis System", |
| "authors": [ |
| { |
| "first": "\u738b\u8056\u582f", |
| "middle": [ |
| "\uf02a" |
| ], |
| "last": "\u3001\u9ec3\u5955\u6b3d", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Pingtung University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Sheng-Yao", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Pingtung University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Yi-Chin", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Pingtung University", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In recent years, speech synthesis system can generate speech with high speech quality. However, multi-speaker text-to-speech (TTS) system still require large amount of speech data for each target speaker. In this study, we would like to construct a multi-speaker TTS system by incorporating two sub modules into artificial neural network-based speech synthesis system to alleviate this problem. First module is to add the speaker embedding into encoding module of the end-toend TTS framework while using small amount of the speech data of the training speakers. For speaker embedding method, in our study, two speaker embedding methods, namely speaker verification embedding and voice conversion embedding, are compared for deciding which one is suitable for the personalized TTS system. Besides, we substituted the conventional post-net module, which is conventionally adopted to enhance the output spectrum sequence, to a post-filter network, which is further improving the speech quality of the generated speech utterance. Finally, experiment results showed that the speaker embedding is useful by adding it into encoding module and the resultant speech utterance indeed perceived as the target speaker. Also, the post-filter network not only improving the speech quality and also enhancing the speaker similarity of the generated speech utterances. The constructed TTS system can generate a speech utterance of the target speaker in fewer than 2 seconds. In the future, other feature such as prosody information will be incorporated to help the TTS framework to improve the performance.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In recent years, speech synthesis system can generate speech with high speech quality. However, multi-speaker text-to-speech (TTS) system still require large amount of speech data for each target speaker. In this study, we would like to construct a multi-speaker TTS system by incorporating two sub modules into artificial neural network-based speech synthesis system to alleviate this problem. First module is to add the speaker embedding into encoding module of the end-toend TTS framework while using small amount of the speech data of the training speakers. For speaker embedding method, in our study, two speaker embedding methods, namely speaker verification embedding and voice conversion embedding, are compared for deciding which one is suitable for the personalized TTS system. Besides, we substituted the conventional post-net module, which is conventionally adopted to enhance the output spectrum sequence, to a post-filter network, which is further improving the speech quality of the generated speech utterance. Finally, experiment results showed that the speaker embedding is useful by adding it into encoding module and the resultant speech utterance indeed perceived as the target speaker. Also, the post-filter network not only improving the speech quality and also enhancing the speaker similarity of the generated speech utterances. The constructed TTS system can generate a speech utterance of the target speaker in fewer than 2 seconds. In the future, other feature such as prosody information will be incorporated to help the TTS framework to improve the performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "\u5c31\u55ae\u4e00\u8a9e\u8005\u7684\u8a9e\u97f3\u5408\u6210\u6280\u8853\u4f86\u770b\uff0c\u5176\u5408\u6210\u6280\u8853\u5df2\u7d93\u80fd\u5920\u5408\u6210\u51fa\u903c\u771f\u4e14\u81ea\u7136\u7684\u8a9e\u97f3\uff0c\u4e26\u4e14 \u4e0d\u9700\u8981\u592a\u591a\u7684\u8a9e\u97f3\u6578\u64da\u53ca\u8a13\u7df4\u6642\u9593\uff0c\u800c\u70ba\u4e86\u64f4\u5c55\u5230\u5176\u4ed6\u8a9e\u8005\uff0c\u5e38\u898b\u7684\u65b9\u6cd5\u6709\u8a9e\u97f3\u8f49\u63db\u548c \u6a21\u578b\u81ea\u9069\u61c9\u5169\u7a2e\u65b9\u6cd5\uff1a \uf0b7 \u8a9e\u97f3\u8f49\u63db\uff1a \u900f\u904e\u66f4\u63db\u4e0d\u540c\u8a9e\u8005\u8a0a\u606f\u4f86\u9054\u6210\u76ee\u6a19\uff0c\u6709\u57fa\u65bc GAN \u7684 StarGAN-VC (Kameoka et al., 2018) \u548c CyCleGAN-VC (Kaneko et al., 2018) \u7b49\u65b9\u6cd5\uff0c\u4e5f\u6709\u57fa\u65bc AutoEncoder \u7684 AdaIN-VC (Chou et al., 2019 ) \u548c AutoVC (Qian et al., 2019 (Jia et al., 2018) \u548c (Chien et al., 2021) \u7b49\u7814\u7a76\uff0c\u5c07\u8a9e\u97f3\u8f49\u63db\u6216\u8a9e\u8005\u8fa8\u8b58\u9019\u5169\u7a2e\u65b9\u6cd5\u53d6\u4ee3\u6a21\u578b\u81ea\u9069\u61c9\u4e2d\u7684 Speaker ID Table \u4f86\u4f7f\u6a21\u578b\u64f4\u5c55\u5230\u6c92\u770b\u904e\u7684\u8a9e\u8005\u3002\u5728\u672c\u6b21\u7814\u7a76\u4e2d\uff0c\u6211\u5011\u5c07\u6bd4\u8f03\u5206\u5225\u4f7f\u7528\u8a9e \u97f3\u8f49\u63db\u548c\u8a9e\u8005\u8fa8\u8b58\u9019\u5169\u7a2e\u4efb\u52d9\u6240\u8a2d\u8a08\u7684\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u4f5c\u70ba\u6211\u5011 TTS \u7cfb\u7d71\u4e2d\u8a9e\u8005\u7684\u8868\u793a\u65b9 \u5f0f\uff0c\u4e26\u6bd4\u8f03\u4f55\u8005\u5c0d\u65bc\u6211\u5011\u63d0\u51fa\u7684\u67b6\u69cb\u66f4\u5408\u9069\u3002 \u6211\u5011\u7684 TTS \u67b6\u69cb\u662f\u57fa\u65bc Google \u6240\u63d0\u51fa\u7684\u81ea\u56de\u6b78\u6a21\u578b Tacotron 2 (Shen et al., 2018) \uff0c \u5b83\u7531\u4e09\u500b\u795e\u7d93\u7db2\u8def\u5340\u584a\u7d44\u6210\uff0c\u6bcf\u500b\u5340\u584a\u90fd\u6709\u660e\u78ba\u7684\u76ee\u7684\u4ee5\u4fbf\u6211\u5011\u9032\u884c\u6539\u52d5\uff1a", |
| "cite_spans": [ |
| { |
| "start": 134, |
| "end": 156, |
| "text": "(Kameoka et al., 2018)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 171, |
| "end": 192, |
| "text": "(Kaneko et al., 2018)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 225, |
| "end": 243, |
| "text": "(Chou et al., 2019", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 255, |
| "end": 273, |
| "text": "(Qian et al., 2019", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 274, |
| "end": 292, |
| "text": "(Jia et al., 2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 295, |
| "end": 315, |
| "text": "(Chien et al., 2021)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 499, |
| "end": 518, |
| "text": "(Shen et al., 2018)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 356, |
| "end": 393, |
| "text": "Table \u4f86\u4f7f\u6a21\u578b\u64f4\u5c55\u5230\u6c92\u770b\u904e\u7684\u8a9e\u8005\u3002\u5728\u672c\u6b21\u7814\u7a76\u4e2d\uff0c\u6211\u5011\u5c07\u6bd4\u8f03\u5206\u5225\u4f7f\u7528\u8a9e", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "\u7dd2\u8ad6 (Introduction)", |
| "sec_num": "1." |
| }, |
| { |
| "text": "\uf0b7 \u7de8\u78bc\u5668: \u5c07\u8f38\u5165\u7684\u6587\u5b57\u7de8\u78bc\u6210\u4e00\u7a2e\u6f5b\u5728\u8868\u793a\uff0c\u901a\u5e38\u70ba\u4e86\u4f7f\u6a21\u578b\u64f4\u5c55\u5230\u591a\u8a9e\u8005\uff0c\u6703\u5c07\u6587\u5b57 \u6f5b\u5728\u8868\u793a\u8207\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u4e32\u63a5\u3002 \uf0b7 \u89e3 \u78bc \u5668 : \u65bc \u8a13 \u7df4 \u671f \u9593 \uff0c \u5c07 \u6587 \u5b57 \u6f5b \u5728 \u8868 \u793a \u8207 \u76ee \u6a19 \u983b \u8b5c \u7684 \u6bcf \u500b \u97f3 \u6846 \u5efa \u7acb \u6ce8 \u610f \u529b \u5c0d \u9f4a (Chorowski et al., 2015)\uff0c\u65bc\u63a8\u8ad6\u671f\u9593\uff0c\u4f9d\u64da\u7576\u524d\u97f3\u6846\u8207\u6587\u5b57\u6f5b\u5728\u8868\u793a\u63a8\u6e2c\u51fa\u4e0b\u4e00\u500b\u97f3\u6846 \u7684\u503c\uff0c\u76f4\u81f3\u6ce8\u610f\u529b\u6a5f\u5236\u5c0d\u9f4a\u5230\u505c\u6b62\u7b26\u865f (\u4f8b\u5982\uff1a\u6587\u5b57\u4e2d\u7684\u53e5\u9ede)\u70ba\u6b62\u3002 \uf0b7 Post-Net: \u63d0\u5347\u6574\u9ad4\u983b\u8b5c\u7684\u54c1\u8cea\u3002 Tacotron 2 \u7684\u6a21\u578b\u67b6\u69cb\u5982\u5716 1 \u6240\u793a\uff1a \u5716 1. Tacotron 2 \u6a21\u578b\u67b6\u69cb", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u7dd2\u8ad6 (Introduction)", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Tacotron 2 \u6574\u9ad4\u67b6\u69cb\u5c0d\u65bc\u76ee\u524d\u795e\u7d93\u7db2\u8def\u7684\u6280\u8853\u4f86\u8aaa\u662f\u76f8\u5c0d\u820a\u7684\uff0c\u96a8\u8457 Self-Attention (Vaswani et al., 2017) (Li et al., 2019) \u548c Fastspeech 2 (Ren et al., 2020) \uff1b\u4e5f\u6709\u8457\u5404\u7a2e\u6ce8\u610f\u529b\u6a5f\u5236\u7684\u65b9\u6cd5\u88ab\u63d0\u51fa\uff0c\u4ee5\u6539\u5584 Tacotron 2 \u820a\u6709\u6ce8\u610f\u529b\u6a5f\u5236\u8a13\u7df4\u901f\u5ea6\u6162\u6216\u662f\u8f03\u9577\u7684\u53e5\u5b50\u6703\u767c\u751f\u6f0f\u5b57\u6216\u91cd\u8907\u767c\u97f3\u7684\u554f\u984c\uff0c\u5982 Forward Attention (Zhang et al., 2018) \u53ca Dynamic Convolution Attention (Battenberg et al., 2020) ", |
| "cite_spans": [ |
| { |
| "start": 52, |
| "end": 74, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 75, |
| "end": 92, |
| "text": "(Li et al., 2019)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 108, |
| "end": 126, |
| "text": "(Ren et al., 2020)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 213, |
| "end": 233, |
| "text": "(Zhang et al., 2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 266, |
| "end": 291, |
| "text": "(Battenberg et al., 2020)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "[Figure 1. Tacotron 2 model architecture]", |
| "sec_num": null |
| }, |
| { |
| "text": "\u5927\u91cf\u88ab\u904b\u7528\u65bc\u8a9e\u97f3\u5408\u6210\u7684\u4efb\u52d9\u4e0a\uff0c\u6539\u5584\u4e86\u5982 Tacotron 2 \u56e0\u4f7f\u7528 RNN \u795e \u7d93 \u7db2 \u8def \u9700 \u8981 \u4f9d \u7167 \u9806 \u5e8f \u50b3 \u64ad \u7684 \u5927 \u91cf \u8a08 \u7b97 \uff0c \u5982 Transformer TTS", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "[Figure 1. Tacotron 2 model architecture]", |
| "sec_num": null |
| }, |
| { |
| "text": "\u6211\u5011\u4f7f\u7528 Learnable Dictionary Encoding (Cooper et al., 2020 ) \u7c21\u7a31 LDE\uff0c\u4f5c\u70ba\u672c\u6b21\u7814\u7a76\u7684 \u8a9e\u8005\u8fa8\u8b58\u6a21\u578b\uff0c\u5b83\u662f\u57fa\u65bc X-Vector (Snyder et al., 2018 \u5f9e\u5716 11 \u7684\u7d05\u7dda\u5206\u6bb5\u8655\uff0c\u6211\u5011\u767c\u73fe Bahdanau Attention \u63d0\u4f9b\u4e86\u6bcf\u6bb5\u8a9e\u97f3\u5927\u6982\u7684\u97f3\u6846\u7bc4\u570d\uff0c \u5716\u4e2d\u865b\u7dda\u5de6\u53f3\u8655\u5206\u5225\u662f\"qi4\"\u8ddf\"hen2\"\u7684\u767c\u97f3\uff0c\u7531\u65bc\u5b83\u5011\u4e3b\u8981\u90fd\u662f\u6c23\u97f3\uff0c\u5c0e\u81f4\u5206\u6bb5\u6c92 \u6709\u5f88\u660e\u986f\uff0c\u800c\u6700\u5de6\u5074\u53ca\u6700\u53f3\u5074\u5c0d\u7a31\u6027\u7684\u689d\u7d0b\u53ef\u4ee5\u5224\u65b7\u70ba\u7a7a\u683c\u8cc7\u8a0a\uff0c\u5373\u8a72\u7247\u6bb5\u662f\u975c\u97f3\u7684\u3002 ", |
| "cite_spans": [ |
| { |
| "start": 35, |
| "end": 55, |
| "text": "(Cooper et al., 2020", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 94, |
| "end": 114, |
| "text": "(Snyder et al., 2018", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u8a9e\u8005\u8fa8\u8b58 (Voice Verification)\u4efb\u52d9", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": ") \u6240\u505a\u7684\u6539\u9032\uff0c\u4e26\u4e14\u5728\u8a9e\u8005\u8fa8\u8b58\u7684 \u4efb\u52d9\u4e0a\u4ee5\u53ca\u591a\u8a9e\u8005 TTS \u7cfb\u7d71\u4e0a\u7686\u662f\u512a\u65bc X-Vector \u7684\u3002 X-Vector \u7684\u904b\u4f5c\u65b9\u5f0f\u662f\u5c07\u6574\u500b\u8a9e\u97f3\u5206\u6210\u6578\u500b\u7247\u6bb5\u4e26\u900f\u904e\u6578\u5c64\u5377\u7a4d\u8a08\u7b97\u5176\u8f38\u51fa\u7279\u5fb5\uff0c \u518d\u5c07\u6240\u6709\u7279\u5fb5\u53d6\u5e73\u5747\u8207\u6a19\u6e96\u5dee\u901a\u904e\u7dda\u6027\u8f49\u63db\u4f86\u8a08\u7b97\u8a72\u8a9e\u8005\u7684\u5d4c\u5165\u5411\u91cf\u3002LDE \u8207 X-Vector \u4e0d\u540c\u7684\u5730\u65b9\u662f LDE \u5f15\u5165\u4e86\u6578\u500b Dictionary Clusters\uff0c\u9019\u4e9b Clusters \u662f\u9700\u8981\u900f\u904e\u795e\u7d93\u7db2\u8def\u53bb \u5b78\u7fd2\u7684\uff0c\u5b83\u5011\u4ee3\u8868\u67d0\u4e9b\u8aaa\u8a71\u4eba\u7684\u7279\u5fb5\uff0cLDE \u4f7f X-Vector \u5f97\u5230\u7684\u8f38\u51fa\u7279\u5fb5\u8207\u6240\u6709 Clusters \u8a08\u7b97\u5f7c\u6b64\u5dee\u8ddd\u7684\u5e73\u5747\u503c\u8207\u6a19\u6e96\u5dee\u4f86\u5224\u65b7\u8a72\u8a9e\u97f3\u63a5\u8fd1\u54ea\u4e00\u500b Clusters\uff0c\u7136\u5f8c\u518d\u9032\u4e00\u6b65\u8b93\u795e \u7d93\u7db2\u8def\u5224\u65b7\u8a72\u8a9e\u8005\u7684\u5d4c\u5165\u5411\u91cf\uff0c\u5176\u6a21\u578b\u67b6\u69cb\u5982\u5716 3 \u6240\u793a\uff1a \u5716 3. LDE \u6a21\u578b\u67b6\u69cb [Figure 3. LDE", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u8a9e\u8005\u8fa8\u8b58 (Voice Verification)\u4efb\u52d9", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u5728\u672c\u6b21\u7814\u7a76\u4e2d\uff0c\u6211\u5011\u6539\u9032\u4e86\u591a\u8a9e\u8005 Tacotron 2 \u7684\u67b6\u69cb\uff0c\u900f\u904e\u52a0\u5165\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u4fbf\u53ef\u5408\u6210 \u672a\u77e5\u8a9e\u8005\u7684\u8a9e\u97f3\uff0c\u4e26\u4e14\u6bd4\u8f03\u8a9e\u97f3\u8f49\u63db\u8207\u8a9e\u8005\u8fa8\u8b58\u9019\u5169\u500b\u4e0d\u540c\u4efb\u52d9\u7684\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u7528\u65bc TTS \u7684\u6210\u6548\uff0c\u7531\u5be6\u9a57\u7d50\u679c\u5f97\u77e5\u8a9e\u97f3\u8f49\u63db\u7684\u6548\u679c\u662f\u512a\u65bc\u8a9e\u8005\u8fa8\u8b58\u7684\uff0c\u4f7f\u7528 Post-Filter \u4f86\u63d0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u7d50\u8ad6 (Conclusion)", |
| "sec_num": "5." |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Location-relative attention mechanisms for robust long-form speech synthesis", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Battenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "J" |
| ], |
| "last": "Skerry-Ryan", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Mariooryad", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Stanton", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Kao", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Shannon", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Bagby", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "6194--6198", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Battenberg, E., Skerry-Ryan, R. J., Mariooryad, S., Stanton, D., Kao, D., Shannon, M., & Bagby, T. (2020). Location-relative attention mechanisms for robust long-form speech synthesis. In Proceedings of ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 6194-6198.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Investigating on incorporating pretrained and learnable speaker representations for multi-speaker multistyle text-to-speech", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "M" |
| ], |
| "last": "Chien", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "H" |
| ], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "C" |
| ], |
| "last": "Hsu", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "8588--8592", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chien, C. M., Lin, J. H., Huang, C. Y., Hsu, P. C., & Lee, H. Y. (2021). Investigating on incorporating pretrained and learnable speaker representations for multi-speaker multi- style text-to-speech. In Proceedings of ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 8588-8592.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Attention-based models for speech recognition", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Chorowski", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Serdyuk", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1506.07503" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chorowski, J., Bahdanau, D., Serdyuk, D., Cho, K., & Bengio, Y. (2015). Attention-based models for speech recognition. arXiv preprint arXiv:1506.07503.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "One-shot voice conversion by separating speaker and content representations with instance normalization", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "C" |
| ], |
| "last": "Chou", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "C" |
| ], |
| "last": "Yeh", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1904.05742" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chou, J. C., Yeh, C. C., & Lee, H. Y. (2019). One-shot voice conversion by separating speaker and content representations with instance normalization. arXiv preprint arXiv:1904.05742.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Zeroshot multi-speaker text-to-speech with state-of-the-art neural speaker embeddings", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Cooper", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "I" |
| ], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Yasuda", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Yamagishi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "6184--6188", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cooper, E., Lai, C. I., Yasuda, Y., Fang, F., Wang, X., Chen, N., & Yamagishi, J. (2020). Zero- shot multi-speaker text-to-speech with state-of-the-art neural speaker embeddings. In Proceedings of ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 6184-6188.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Denoising diffusion probabilistic models", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Ho", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Abbeel", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2006.11239" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ho, J., Jain, A., & Abbeel, P. (2020). Denoising diffusion probabilistic models. arXiv preprint arXiv:2006.11239.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Transfer learning from speaker verification to multispeaker text-to-speech synthesis", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "J" |
| ], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [ |
| "L" |
| ], |
| "last": "Moreno", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1806.04558" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jia, Y., Zhang, Y., Weiss, R. J., Wang, Q., Shen, J., Ren, F., Chen, Z., Nguyen, P., Pang, R., Moreno, I. L., & Wu, Y. (2018). Transfer learning from speaker verification to multispeaker text-to-speech synthesis. arXiv preprint arXiv:1806.04558.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Stargan-vc: Non-parallel many-tomany voice conversion using star generative adversarial networks", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Kameoka", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Kaneko", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Tanaka", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Hojo", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of 2018 IEEE Spoken Language Technology Workshop (SLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "266--273", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kameoka, H., Kaneko, T., Tanaka, K., & Hojo, N. (2018). Stargan-vc: Non-parallel many-to- many voice conversion using star generative adversarial networks. In Proceedings of 2018 IEEE Spoken Language Technology Workshop (SLT), 266-273.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Cyclegan-vc: Non-parallel voice conversion using cycleconsistent adversarial networks", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Kaneko", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Kameoka", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of 2018 26th European Signal Processing Conference (EUSIPCO)", |
| "volume": "", |
| "issue": "", |
| "pages": "2100--2104", |
| "other_ids": { |
| "DOI": [ |
| "10.23919/EUSIPCO.2018.8553236" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaneko, T., & Kameoka, H. (2018). Cyclegan-vc: Non-parallel voice conversion using cycle- consistent adversarial networks. In Proceedings of 2018 26th European Signal Processing Conference (EUSIPCO), 2100-2104. https://doi.org/10.23919/EUSIPCO.2018.8553236", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Hifi-gan: Generative adversarial networks for efficient and high fidelity speech synthesis", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kong", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Bae", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2010.05646" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kong, J., Kim, J., & Bae, J. (2020). Hifi-gan: Generative adversarial networks for efficient and high fidelity speech synthesis. arXiv preprint arXiv:2010.05646.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Diffwave: A versatile diffusion model for audio synthesis", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Kong", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Ping", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Catanzaro", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2009.09761" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kong, Z., Ping, W., Huang, J., Zhao, K., & Catanzaro, B. (2020). Diffwave: A versatile diffusion model for audio synthesis. arXiv preprint arXiv:2009.09761.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Neural speech synthesis with transformer network", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "33", |
| "issue": "", |
| "pages": "6706--6713", |
| "other_ids": { |
| "DOI": [ |
| "10.1609/aaai.v33i01.33016706" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li, N., Liu, S., Liu, Y., Zhao, S., & Liu, M. (2019, July). Neural speech synthesis with transformer network. In Proceedings of the AAAI Conference on Artificial Intelligence, 33(01), 6706-6713. https://doi.org/10.1609/aaai.v33i01.33016706", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Natural tts synthesis by conditioning wavenet on mel spectrogram predictions", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "J" |
| ], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Jaitly", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Skerrv-Ryan", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "A" |
| ], |
| "last": "Saurous", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Agiomvrgiannakis", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4779--4783", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICASSP.2018.8461368" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shen, J., Pang, R., Weiss, R. J., Schuster, M., Jaitly, N., Yang, Z., Chen, Z., Zhang, Y., Wang, Y., Skerrv-Ryan, R., Saurous, R. A., Agiomvrgiannakis, Y., & Wu, Y. (2018). Natural tts synthesis by conditioning wavenet on mel spectrogram predictions. In Proceedings of 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP),4779-4783. https://doi.org/10.1109/ICASSP.2018.8461368", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "X-vectors: Robust dnn embeddings for speaker recognition", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Snyder", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Garcia-Romero", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Sell", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "5329--5333", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICASSP.2018.8461375" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Snyder, D., Garcia-Romero, D., Sell, G., Povey, D., & Khudanpur, S. (2018). X-vectors: Robust dnn embeddings for speaker recognition. In Proceedings of 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 5329-5333. https://doi.org/10.1109/ICASSP.2018.8461375", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Autovc: Zero-shot voice style transfer with only autoencoder loss", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hasegawa-Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 36th International Conference on Machine Learning(PMLR)", |
| "volume": "", |
| "issue": "", |
| "pages": "5210--5219", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qian, K., Zhang, Y., Chang, S., Yang, X., & Hasegawa-Johnson, M. (2019). Autovc: Zero-shot voice style transfer with only autoencoder loss. In Proceedings of the 36th International Conference on Machine Learning(PMLR), 5210-5219.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Fastspeech 2: Fast and high-quality end-to-end text to speech", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2006.04558" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ren, Y., Hu, C., Tan, X., Qin, T., Zhao, S., Zhao, Z., & Liu, T. Y. (2020). Fastspeech 2: Fast and high-quality end-to-end text to speech. arXiv preprint arXiv:2006.04558.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 31st International Conference on Neural Information Processing Systems(NIPS'17", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., & Polosukhin, I. (2017). Attention is all you need. In Proceedings of the 31st International Conference on Neural Information Processing Systems(NIPS'17), 5998-6008.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Forward attention in sequence-to-sequence acoustic modeling for speech synthesis", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "X" |
| ], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [ |
| "H" |
| ], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [ |
| "R" |
| ], |
| "last": "Dai", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4789--4793", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICASSP.2018.8462020" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhang, J. X., Ling, Z. H., & Dai, L. R. (2018, April). Forward attention in sequence-to-sequence acoustic modeling for speech synthesis. In Proceedings of 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 4789-4793. https://doi.org/10.1109/ICASSP.2018.8462020", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "text": "8. \u5167\u5916\u90e8\u5973\u6027\u8a9e\u8005\u7684\u8a9e\u8005\u7a7a\u9593\u3002 [Figure 8. Speaker space for inside and outside female speakers.] Figure 9. Speaker space for inside and outside male speakers.]", |
| "uris": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>", |
| "text": "\u7b49\u65b9\u6cd5\uff0c\u5b83\u5011\u90fd\u6709\u76f8\u7576\u4e0d\u932f \u7684\u6548\u679c\uff0c\u552f\u4e00\u7684\u4fb7\u9650\u5c31\u662f\u50c5\u80fd\u66f4\u63db\u8a9e\u8005\u4e0d\u80fd\u66f4\u6539\u5167\u5bb9\u3002 \uf0b7 \u6a21\u578b\u81ea\u9069\u61c9\uff1a \u4e3b\u8981\u662f\u5728 TTS \u7cfb\u7d71\u4e2d\u52a0\u5165 Speaker ID Table \u4f86\u4f7f\u6a21\u578b\u80fd\u5920\u4f9d\u7167 Speaker ID \u751f\u6210\u5c0d\u61c9\u8a9e\u8005\u7684\u8072\u97f3\uff0c\u5b83\u65e2\u80fd\u66f4\u63db\u5167\u5bb9\u4e5f\u80fd\u66f4\u63db\u8a9e\u8005\uff0c\u4f46\u662f\u9700\u8981\u5927\u91cf\u4e0d\u540c\u8a9e\u8005\u7684\u8a9e\u97f3\u6578 \u64da\u4ee5\u53ca\u8f03\u591a\u7684\u8a13\u7df4\u6642\u9593\u4f86\u9054\u6210\u76ee\u6a19\uff0c\u4e14\u7121\u6cd5\u64f4\u5c55\u5230\u6c92\u770b\u904e\u7684\u8a9e\u8005\u3002 \u57fa\u65bc\u8a9e\u97f3\u8f49\u63db\u548c\u6a21\u578b\u81ea\u9069\u61c9\u5728\u591a\u8a9e\u8005 TTS \u4e0a\u7684\u4e0d\u8db3\uff0c\u65bc\u662f\u6709\u8457", |
| "html": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>\u3002</td></tr><tr><td>\u56e0\u6b64\uff0c\u6211\u5011\u5c07\u904b\u7528\u8fd1\u671f\u7684\u795e\u7d93\u7db2\u8def\u6280\u8853\u4f86\u66f4\u52d5 Tacotron 2 \u6a21\u578b\uff0c\u671f\u671b\u6a21\u578b\u8a13\u7df4\u901f\u5ea6\u52a0\u5feb\u3001</td></tr><tr><td>\u5408\u6210\u8a9e\u97f3\u54c1\u8cea\u7684\u63d0\u5347\u4ee5\u53ca\u52a0\u5f37\u5408\u6210\u591a\u8a9e\u8005\u8a9e\u97f3\u7684\u8a9e\u8005\u76f8\u4f3c\u5ea6\u3002</td></tr><tr><td>\u6211\u5011\u5c07\u5728\u7b2c\u4e8c\u7ae0\u7bc0\u95e1\u8ff0\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u6240\u7528\u5230\u7684\u8a9e\u97f3\u8f49\u63db\u53ca\u8a9e\u8005\u8fa8\u8b58\u6a21\u578b\uff0c\u5728\u7b2c\u4e09\u7ae0</td></tr><tr><td>\u7bc0\u95e1\u8ff0\u672c\u6b21\u7814\u7a76\u6539\u52d5 Tacotron 2 \u7684\u65b9\u6cd5\uff0c\u7b2c\u56db\u7ae0\u7bc0\u95e1\u8ff0\u5be6\u9a57\u7d50\u679c\uff0c\u6700\u5f8c\uff0c\u5728\u7b2c\u4e94\u7ae0\u7bc0\u95e1</td></tr><tr><td>\u8ff0\u672c\u6b21\u7814\u7a76\u7684\u7d50\u8ad6\u3002</td></tr><tr><td>\u5728\u672c\u6b21\u7814\u7a76\u4e2d\uff0c\u6211\u5011\u4f7f\u7528 AdaIN-VC \u4f5c\u70ba\u672c\u6b21\u7814\u7a76\u7684\u8a9e\u97f3\u8f49\u63db\u6a21\u578b\uff0c\u96d6\u7136\u5982\u4e4b\u524d\u6240\u8ff0\uff0c</td></tr><tr><td>\u8a9e\u97f3\u8f49\u63db\u7684\u6a21\u578b\u6709\u5f88\u591a\u7a2e\uff0c\u4f46\u4e26\u4e0d\u662f\u90fd\u80fd\u63d0\u53d6\u51fa\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\uff0c\u5982 StarGan \u53ca CycleGAN</td></tr><tr><td>\u7b49 GAN \u6a21\u578b\u96d6\u7136\u4e5f\u662f\u8a9e\u97f3\u8f49\u63db\uff0c\u4f46\u5b83\u5011\u662f\u900f\u904e\u5728\u8a13\u7df4\u671f\u9593\u5224\u5225\u5668 (Discriminator) \u7684\u7d04</td></tr><tr><td>\u675f\uff0c\u4f7f\u5f97\u751f\u6210\u7684\u8a9e\u97f3\u63a5\u8fd1\u5167\u90e8\u8a9e\u8005\uff0c\u9019\u7121\u6cd5\u63d0\u53d6\u51fa\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\uff1b\u5c6c\u65bc AutoEncoder \u6a21</td></tr><tr><td>\u578b\u7684 AutoVC \u4e5f\u7121\u6cd5\u63d0\u53d6\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\uff0c\u56e0\u5b83\u662f\u5229\u7528\u7de8\u78bc\u5c64\u5c07\u8a9e\u8005\u8a0a\u606f\u53bb\u9664\uff0c\u4e26\u5728\u89e3\u78bc</td></tr><tr><td>\u5c64\u52a0\u5165 Speaker ID Table \u4f86\u9032\u884c\u8f49\u63db\u7684\uff0c\u800c AdaIN-VC (Adaptive Instance Normalization-</td></tr><tr><td>Voice Conversion) \u662f\u4e00\u7a2e\u5c07\u5716\u7247\u98a8\u683c\u8f49\u63db\u7684\u6280\u8853\u5957\u7528\u5230\u8a9e\u97f3\u8f49\u63db\u4e0a\u7684 VAE \u6a21\u578b\uff0c\u5b83\u900f</td></tr><tr><td>\u904e\u5169\u500b\u7de8\u78bc\u5c64\u5c07\u8a9e\u97f3\u7de8\u78bc\u6210\u8a9e\u8005\u6f5b\u5728\u8868\u793a\u53ca\u5167\u5bb9\u6f5b\u5728\u8868\u793a\uff0c\u4e26\u900f\u904e\u89e3\u78bc\u5c64\u7d44\u5408\u5169\u8005\u5f8c\u751f</td></tr><tr><td>\u6210\u8f49\u63db\u5f8c\u7684\u8a9e\u97f3\uff0c\u6211\u5011\u53ef\u4ee5\u85c9\u7531\u66f4\u63db\u8a9e\u8005\u6f5b\u5728\u8868\u793a\u4f86\u9054\u5230\u8a9e\u97f3\u8f49\u63db\u7684\u6548\u679c\uff0c\u5176\u6a21\u578b\u67b6\u69cb</td></tr><tr><td>\u5982\u5716 2 \u6240\u793a\uff1a</td></tr></table>", |
| "text": "", |
| "html": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td/><td/><td/><td colspan=\"2\">\u738b\u8056\u582f\u8207\u9ec3\u5955\u6b3d</td></tr><tr><td colspan=\"5\">\u6211\u5011\u5229\u7528\u4e0a\u8ff0\u7684\u539f\u7406\uff0c\u5c07 Diffwave \u4fee\u6539\u6210\u983b\u8b5c\u9593\u7684\u8f49\u63db\u7684 Post-Filter\uff0c\u671f\u671b\u900f\u904e\u6dfb \u52a0\u566a\u97f3\u80fd\u4f7f\u751f\u6210\u7684\u983b\u8b5c\u6709\u8457\u66f4\u591a\u7684\u7d30\u7bc0\uff0c\u5176\u904b\u4f5c\u6d41\u7a0b\u5982\u5716 6\uff1a \u8868 3. \u8a08\u7b97 Post-Filter \u8207 Post-Net \u7684 MCD\uff0c\u503c\u8d8a\u5c0f\u8d8a\u597d\u3002 [Table 3. Calculate the MCD of Post-Filter and Post-Net, the smaller the value, the better.]</td></tr><tr><td/><td>Inside</td><td/><td>Outside</td><td/></tr><tr><td/><td>Men</td><td>Women</td><td>Men</td><td>Women</td></tr><tr><td>Post-Filter</td><td>6.99</td><td>7.30</td><td>8.15</td><td>8.65</td></tr><tr><td>Post-Net</td><td>7.31</td><td>7.98</td><td>9.20</td><td>9.11</td></tr><tr><td colspan=\"5\">Epochs\u3002 \u6211 \u5011 \u9084 \u4f7f \u7528 Resemblyzer \u5206 \u6790 \u5668 \u8a08 \u7b97 \u4e0d \u540c \u6027 \u5225 \u5728 \u8a9e \u97f3 \u8f49 \u63db \u4e0a \u7684 \u8a9e \u8005 \u7a7a \u9593 \uff0c</td></tr><tr><td colspan=\"5\">4.3 \u5be6\u9a57\u7d50\u679c (Results) Resemblyzer \u662f\u4e00\u500b\u900f\u904e\u795e\u7d93\u7db2\u8def\u4f86\u6bd4\u8f03\u6216\u5206\u6790\u8a9e\u97f3\u7684 Python \u5957\u4ef6\u3002\u7814\u7a76\u4e2d\uff0c \u7537\u6027\u8207\u5973</td></tr><tr><td colspan=\"5\">\u6027\u6bcf\u4f4d\u8a9e\u8005\u7686\u5408\u6210 10 \u53e5 Post-Filter \u548c Post-Net \u500b\u97f3\u6a94\u8207\u539f\u8a9e\u8005\u6bd4\u8f03\uff0c\u5176\u7d50\u679c\u5982\u5716 8 \u548c\u5716</td></tr><tr><td colspan=\"5\">4.3.1 \u8a9e\u97f3\u54c1\u8cea (Speech quality) 9\uff0c\u6211\u5011\u53ef\u4ee5\u5f9e\u9019\u5169\u5f35\u5716\u4e2d\u767c\u73fe\uff0c\u5728\u5167\u90e8\u8a9e\u8005\u4e2d\uff0c\u5408\u6210\u7684\u5973\u6027\u97f3\u6a94\u90fd\u5f88\u63a5\u8fd1\u539f\u97f3\u6a94\uff0c\u5728\u7537</td></tr><tr><td colspan=\"5\">\u6027\u97f3\u6a94\u4e2d\u5247\u53ef\u4ee5\u767c\u73fe Diffwave \u8f03 Post-Net \u63a5\u8fd1\u539f\u59cb\u97f3\u6a94\uff0c\u4e0d\u7ba1\u662f\u8a9e\u97f3\u5408\u6210\u6216\u8a9e\u8005\u8fa8\u8b58\u6548 \u9996\u5148\uff0c\u6211\u5011\u4f7f\u7528\u5ba2\u89c0\u8a55\u6e2c (MOS) \u4f86\u8b49\u5be6\u5be6\u9a57\u7d50\u679c\uff0c\u5206\u5225\u5408\u6210\u8a9e\u97f3\u8f49\u63db\u548c\u8a9e\u8005\u8fa8\u8b58\u6240\u8a13 \u679c\u7686\u76f8\u4f3c\uff1b\u5728\u5916\u90e8\u8a9e\u8005\u4e2d\uff0c\u53ef\u4ee5\u767c\u73fe\u8a9e\u97f3\u5408\u6210\u7684\u8a9e\u8005\u7a7a\u9593\u8f03\u70ba\u96c6\u4e2d\uff0c\u800c\u8a9e\u8005\u8fa8\u8b58\u7684\u8a9e\u8005 \u7df4\u7684 TTS \u7cfb\u7d71\u5404 10 \u500b\u5167\u90e8\u8a9e\u8005\u7684\u97f3\u6a94\u4f86\u6bd4\u8f03\u54c1\u8cea\uff0c\u53e6\u5916\u518d\u5408\u6210\u5404 10 \u500b\u5167\u90e8\u8a9e\u8005\u7684\u97f3\u6a94 \u7a7a\u9593\u8f03\u70ba\u767c\u6563\uff0c\u4e26\u4e14 Diffwave \u6bd4 Post-Net \u7a0d\u5fae\u63a5\u8fd1\u539f\u59cb\u97f3\u6a94\u3002\u6211\u5011\u53ef\u4ee5\u65b7\u5b9a\u8a9e\u97f3\u5408\u6210\u4efb \u6bd4\u8f03\u8a9e\u8005\u76f8\u4f3c\u5ea6\uff0c\u5176\u7d50\u679c\u5982\u8868 1\uff1a Similarity \u8868 1Quality \u52d9\u4ee5\u53ca\u91dd\u5c0d Post-</td></tr><tr><td>Tacotron 2 with VC Tacotron 2 with SV</td><td colspan=\"2\">\u5716 6. Diffwave \u6d41\u7a0b 2.67 \u00b1 0.35 [Figure 6. 
Diffwave process] 2.54 \u00b1 0.37</td><td colspan=\"2\">2.70 \u00b1 0.41 2.31 \u00b1 0.18</td></tr><tr><td colspan=\"5\">\u57fa\u65bc\u539f\u672c\u7684 Tacotron 2 \u67b6\u69cb\uff0c\u6211\u5011\u5c07 LSTM \u7684\u8f38\u51fa\u964d\u7dad\u964d\u81f3 128 \u7dad\u4e26\u901a\u904e Self-Attention \u7576\u4f5c\u53e6\u4e00\u500b\u8f38\u51fa\uff0cSelf-Attention \u6703\u5c07 LSTM \u8f38\u51fa\u7684\u6f5b\u5728\u8868\u793a\u9032\u884c\u5168\u57df\u76f8\u95dc\u6027\u7684\u9023\u63a5\uff0c\u9019 \u4e9b\u8cc7\u8a0a\u5c07\u5728\u89e3\u78bc\u5c64\u5e6b\u52a9\u6ce8\u610f\u529b\u6a5f\u5236\u66f4\u5feb\u7684\u5c0d\u9f4a\uff0c\u6211\u5011\u5c07\u539f LSTM \u8f38\u51fa\u7a31\u70ba\u5167\u5bb9\u8cc7\u8a0a (Content Information)\uff0c\u53e6\u4e00\u500b\u901a\u904e Self-Attention \u7684\u8f38\u51fa\u7a31\u70ba\u9577\u8ddd\u96e2\u5167\u5bb9\u8cc7\u8a0a (Long-\u5716 4\u89e3\u78bc\u5668\u90e8\u4efd\u6211\u5011\u505a\u4e86\u8f03\u591a\u7684\u6539\u52d5\uff0c\u9996\u5148\uff0c\u7531\u65bc\u7de8\u78bc\u5c64\u6709\u5169\u500b\u8f38\u51fa\uff0c\u56e0\u6b64\u6211\u5011\u5206\u5225\u5f15\u5165\u4e86 \u5169\u500b\u4e0d\u540c\u7684\u6ce8\u610f\u529b\u6a5f\u5236\uff0c\u6211\u5011\u70ba\u5167\u5bb9\u8cc7\u8a0a\u5f15\u5165\u4e86 Forward Attention \u53d6\u4ee3 Tacotron 2 \u820a\u6709 \u7684\u6ce8\u610f\u529b\u6a5f\u5236\uff0c\u5b83\u53ef\u4ee5\u66f4\u5feb\u5730\u5f15\u767c\u5c0d\u9f4a\uff0c\u4e26\u4e14\u80fd\u6539\u5584\u56e0\u9577\u53e5\u6240\u5f15\u767c\u7684\u91cd\u8907\u767c\u97f3\u6216\u6f0f\u5b57\u7684 \u554f\u984c\uff1b\u9577\u8ddd\u96e2\u5167\u5bb9\u8cc7\u8a0a\u5247\u5f15\u5165\u4e86 Bahdanau Attention (Bahdanau et al., 2014)\uff0c\u5b83\u662f\u4e00\u500b\u50b3 \u7d71\u7684 Additive Attention\uff0c\u56e0\u5176\u67b6\u69cb\u8f03\u70ba\u7c21\u55ae\uff0c\u53ef\u4ee5\u5feb\u901f\u5730\u5f97\u5230\u67d0\u4e9b\u983b\u8b5c\u8207\u6587\u5b57\u7684\u95dc\u4fc2\uff0c \u9019\u5c07\u80fd\u5920\u5e6b\u52a9 Forward Attention \u66f4\u5feb\u5730\u5f15\u767c\u5c0d\u9f4a\uff0c\u4e26\u4e14\u56e0\u70ba\u4f4e\u7dad\u5ea6\u7684\u95dc\u4fc2\uff0c\u5b83\u4e0d\u6703\u8207 Forward Attention \u7af6\u722d\u6587\u5b57\u8207\u983b\u8b5c\u9593\u7684\u5c0d\u9f4a\uff0c\u5728\u5be6\u9a57\u7d50\u679c\u6703\u6709\u66f4\u8a73\u7d30\u5730\u8aaa\u660e\u3002 \u6b64\u5916\uff0c\u70ba\u4e86\u52a0\u5f37\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u5c0d\u65bc\u6a21\u578b\u7684\u4f5c\u7528\uff0c\u6211\u5011\u5728 Pre-Net \u5c64\u52a0\u5165\u4e86\u8a9e\u8005\u5d4c\u5165 \u5411\u91cf\uff0c\u900f\u904e\u795e\u7d93\u7db2\u8def\u7684\u5b78\u7fd2\uff0c\u80fd\u5920\u4f7f\u6a21\u578b\u66f4\u770b\u91cd\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u3002 \u5982\u5716 6 \u6240\u793a\uff0cDiffwave \u900f\u904e\u6a21\u578b\u53cd\u8986\u904b\u4f5c\u4e26\u4ee5\u566a\u97f3\u8868 (Noise Schedule\uff0c\u5f37\u5ea6\u7531\u5c0f\u5230 \u6839\u64da\u8868 1 \u53ef\u4ee5\u767c\u73fe\u8a9e\u97f3\u8f49\u63db\u63d0\u53d6\u51fa\u4f86\u7684\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u5c0d\u65bc\u6211\u5011\u7684 TTS \u7cfb\u7d71\u6548\u679c\u8f03\u597d\uff0c\u56e0 \u5927\u7684\u566a\u97f3) \u8207\u6885\u723e\u983b\u8b5c\u4f5c\u70ba\u8f38\u5165\u689d\u4ef6\u4f7f\u6a21\u578b\u5728\u8a13\u7df4\u671f\u9593\u5b78\u7fd2\u5230\u5982\u4f55\u900f\u904e\u8f38\u5165\u689d\u4ef6\u4f86\u6dfb\u52a0 \u566a\u97f3\u5206\u4f48\u7834\u58de\u8f38\u5165\u76ee\u6a19\uff1b\u7531\u65bc\u6a21\u578b\u5df2\u7d93\u5b78\u5f97\u5982\u4f55\u4f9d\u7167\u8f38\u5165\u689d\u4ef6\u6dfb\u52a0\u566a\u97f3\u5206\u4f48\uff0c\u5728\u63a8\u8ad6\u671f 
\u9593\uff0c\u904b\u7528\u53cd\u51fd\u5f0f\u7684\u4f5c\u6cd5\uff0c\u5c07\u6dfb\u52a0\u7684\u566a\u97f3\u5206\u4f48\u9664\u53bb\uff0c\u4f7f\u8f38\u5165\u7684\u9ad8\u65af\u566a\u97f3\u9010\u6f38\u9084\u539f\u6210\u76ee\u6a19\uff0c \u5176\u6a21\u578b\u67b6\u69cb\u5982\u5716 7 \u6240\u793a\uff1a \u6b64\u9032\u4e00\u6b65\u4f7f\u7528\u8a9e\u97f3\u8f49\u63db\u7684\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u4f86\u6bd4\u8f03 Post-Filter \u8207\u539f\u59cb Post-Net \u7684\u6548\u679c\uff0c\u7d50\u679c \u5982\u4e0b\u8868\uff1a \u5716 \u6211\u5011\u4f7f\u7528 AISHELL-3 \u9ad8\u4fdd\u771f\u4e2d\u6587\u8a9e\u97f3\u6578\u64da\u5eab\u4f5c\u70ba\u672c\u6b21\u5be6\u9a57\u7684\u8cc7\u6599\u96c6\uff0c\u5171\u6709 88035 \u500b\u97f3 \u6a94\uff0c218 \u4f4d\u8a9e\u8005\uff0c\u63a1\u6a23\u7387\u70ba 44.1kHz\uff0c16bit\u3002\u6211\u5011\u5c07\u6240\u6709\u97f3\u6a94\u4e0b\u63a1\u6a23\u81f3 22050Hz\uff0c\u4e26\u5f9e\u4e2d \u63d0\u53d6\u51fa 173 \u4f4d\u8a9e\u8005(\u7d04\u4f54\u6574\u9ad4\u8a9e\u8005 80%)\uff0c\u6bcf\u4f4d\u8a9e\u8005\u96a8\u6a5f\u53d6 100 \u53e5\u97f3\u6a94\u4f5c\u70ba\u8a13\u7df4\u96c6\uff0c\u5171 17300 \u500b\u97f3\u6a94\uff0c\u5176\u9918 45 \u70ba\u8a9e\u8005\u7576\u6210\u672a\u770b\u904e\u8a9e\u8005\u6e2c\u9a57\u6a21\u578b\u5408\u6210\u5916\u90e8\u8a9e\u8005\u7684\u6027\u80fd\u3002 4.2 \u5be6\u9a57\u8a2d\u7f6e (Experimental Setups) \u8868 2Quality Similarity Post-Filter 3.75 \u00b1 0.35 3.75 \u00b1 0.71 Post-Net 2.67 \u00b1 0.71 2.50 \u00b1 0.30 \u63a5\u8457\u6211\u5011\u4f7f\u7528 Mel Cepstral Distortions (MCD) \u4f5c\u70ba\u5ba2\u89c0\u8a55\u6e2c\u7684\u65b9\u6cd5\uff0c\u96a8\u6a5f\u5f9e\u5167\u90e8\u8a9e\u8005\u8207 \u5916\u90e8\u8a9e\u8005\u5404\u6311\u9078 5 \u500b\u7537\u6027\u8207\u5973\u6027\u8a9e\u8005\uff0c\u6bcf\u500b\u8a9e\u8005\u5408\u6210 10 \u500b\u97f3\u6a94\u4f86\u8a08\u7b97 MCD \u503c\uff0c\u7d50\u679c\u5982 \u5716 (Langevin Dynamics) \u53cd\u5411\u9084\u539f \u4e0b\u8868\uff1a</td></tr><tr><td colspan=\"5\">distance Content Information)\uff0c\u540c\u6642\uff0c\u70ba\u4e86\u4f7f\u6a21\u578b\u80fd\u5920\u5408\u6210\u591a\u8a9e\u8005\u7684\u8a9e\u97f3\uff0c\u6211\u5011\u5728\u9019\u5169\u500b \u6700\u5f8c\uff0c\u5728\u901a\u904e LSTM \u89e3\u78bc\u5f8c\uff0c\u6211\u5011\u53c8\u518d\u4e00\u6b21\u5f15\u5165 Self-Attention \u5c07\u983b\u8b5c\u6f5b\u5728\u8868\u793a\u7684\u8cc7 \u81f3\u76ee\u6a19\u3002</td></tr><tr><td colspan=\"5\">\u6f5b\u5728\u8868\u793a\u5f8c\u65b9\u4e32\u63a5\u4e86\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\uff0c\u8a73\u7d30\u67b6\u69cb\u5982\u5716 4 \u6240\u793a\uff1a \u8a0a\u9032\u884c\u5168\u57df\u76f8\u95dc\u6027\u7684\u9023\u63a5\uff0c\u4ee5\u5e6b\u52a9\u5f8c\u7e8c\u7dda\u6027\u8f49\u63db\u66f4\u5feb\u5730\u512a\u5316\uff0c\u5176\u8a73\u7d30\u67b6\u69cb\u5982\u5716 5 \u6240\u793a\uff1a</td></tr></table>", |
| "text": "\u7684\u8ad6\u6587\u88e1\u63d0\u5230\uff0c\u6709 Post-Net \u7684 MOS \u8a55\u5206\u662f\u6bd4\u8f03\u9ad8\u7684\u3002 \u5728\u672c\u6b21\u7814\u7a76\u4e2d\uff0c\u6211\u5011\u984d\u5916\u5f15\u5165\u4e86\u53e6\u4e00\u500b\u67b6\u69cb Diffwav(Kong, Z. et al., 2020) \u4f5c\u70ba Post-Filter \u4f86\u8207 Post-Net \u6bd4\u8f03\u3002Diffwave \u662f Nvidia \u65bc 2020 \u5e74\u63a8\u51fa\u7684 Vocoder\uff0c\u80fd\u5920\u5c07\u983b\u8b5c\u8f49 \u63db\u6210\u6ce2\u5f62\u8a0a\u865f\uff0c\u5b83\u7684\u57fa\u790e\u7406\u8ad6\u662f Denoising Diffusion Probabilistic Model (Ho et al., 2020)\uff0c \u7c21\u7a31 DDPM\u3002DDPM \u662f\u4e00\u500b\u99ac\u53ef\u592b\u934a (Markov Chain) \u6a21\u578b\uff0c\u900f\u904e\u6307\u5b9a\u6b65\u6578\u70ba\u76ee\u6a19\u6dfb\u52a0 \u9ad8\u65af\u566a\u97f3\u76f4\u81f3\u76ee\u6a19\u8b8a\u6210\u9ad8\u65af\u4e82\u6578\uff0c\u518d\u900f\u904e\u6717\u4e4b\u842c\u52d5\u529b\u5b78 \u9996\u5148\uff0c\u6211\u5011\u4f7f\u7528 HiFiGAN (Kong, J. et al., 2020) \u4f5c\u70ba\u672c\u6b21\u5be6\u9a57\u7684 Vocoder\uff0c\u6c92\u6709\u91cd\u65b0\u8a13\u7df4 \u4e5f\u6c92\u6709\u9032\u884c\u53c3\u6578\u7684\u5fae\u8abf\uff0c\u50c5\u4f7f\u7528\u539f\u4f5c\u8005\u5be6\u73fe\u7684 Github \u4e2d\u6240\u63d0\u4f9b\u7684\u9810\u8a13\u7df4\u6a21\u578b\u3002\u63a5\u8457\uff0c\u6211 \u5011\u5229\u7528\u8cc7\u6599\u96c6\u7684\u97f3\u6a94\u5206\u5225\u5c0d\u65bc\u8a9e\u97f3\u8f49\u63db\u7684 AdaIN-VC \u548c\u8a9e\u8005\u8fa8\u8b58\u7684 LDE \u6a21\u578b\u8a13\u7df4\uff0c\u4f7f\u5176 \u751f\u6210 128 \u7dad\u5ea6\u7684\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u3002\u5728\u6211\u5011\u63d0\u51fa\u6539\u52d5\u7684 Tacotron 2 \u6a21\u578b\u67b6\u69cb\u4e4b\u4e2d\uff0c\u7de8\u78bc\u5c64\u7684 \u8f38\u51fa Content Information \u8f38\u51fa\u7dad\u5ea6\u4ecd\u7dad\u6301 512 \u7dad\uff0c\u4e32\u63a5\u4e0a\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u5f8c\u70ba 640 \u7dad\uff1bLongdistance Content Information \u8f38\u51fa\u7dad\u5ea6\u70ba 128 \u7dad\uff0c\u4e32\u63a5\u4e0a\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u5f8c\u70ba 256 \u7dad \u3002 \u5728 \u89e3\u78bc\u5c64\u4e2d\uff0c\u6211\u5011\u628a\u8a9e\u8005\u5d4c\u5165\u5411\u91cf\u5347\u7dad\u81f3 256 \u7dad\u4e26\u4ee5 Softsign \u6fc0\u6d3b\u51fd\u6578\u6fc0\u6d3b\uff0c\u65bc Pre-Net \u5c64 \u4e2d\u8207\u983b\u8b5c\u76f8\u52a0\uff0c\u5176\u9918\u8a2d\u7f6e\u7686\u6309\u7167\u539f Tacotron 2\u3002 \u6211\u5011\u63d0\u51fa\u7684 TTS \u6a21\u578b\u662f\u5728 Pytorch \u795e\u7d93\u7db2\u8def\u6846\u67b6\u4e0a\u904b\u884c\uff0c\u4e26\u4ee5 Nvidia GeForce RTX 2070 GPU \u8a13\u7df4\uff0c\u6279\u91cf\u5927\u5c0f (Batch Size) \u8a2d\u70ba 8\uff0c\u5171\u8a13\u7df4 208,000 \u500b Steps\uff0c\u7d04\u70ba 96 \u500b Net \u6240\u63d0\u51fa\u7684 Diffwave \u67b6\u69cb\u5c0d\u65bc\u6211\u5011\u7684\u591a\u8a9e\u8005 TTS \u7cfb\u7d71\u4f86\u8aaa\u662f\u66f4\u6709\u5e6b \u52a9\u7684\u3002", |
| "html": null |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>", |
| "text": "\u65e2\u7136 Bahdanau Attention \u593e\u5e36\u6bcf\u6bb5\u8a9e\u97f3\u5927\u6982\u7684\u97f3\u6846\u7bc4\u570d\u8cc7\u8a0a\uff0c\u90a3\u9019\u4e9b\u8cc7\u8a0a\u662f\u5426\u80fd\u5e6b \u52a9\u6a21\u578b\u5feb\u901f\u5efa\u7acb\u5c0d\u9f4a\u5462?\u4e0b\u5716\u5c07\u986f\u793a\u6709\u7121 Bahdanau Attention \u7684\u5dee\u7570\uff1a \u5716 \u5f9e\u5716 12 \u5f97\u77e5\uff0c\u6a21\u578b\u8a13\u7df4\u5230 16000 \u500b Steps \u6642\uff0c\u5118\u7ba1\u96d9\u65b9\u90fd\u7121\u6cd5\u5efa\u7acb\u826f\u597d\u7684\u5c0d\u9f4a\uff0c\u4f46\u6709 Bahdanau Attention \u7684\u5c0d\u9f4a\u662f\u512a\u65bc\u6c92\u6709 Bahdanau Attention \u7684\uff0c\u5728 19000 \u500b Steps \u6642\uff0c\u6709 Bahdanau Attention \u5df2\u7d93\u80fd\u5efa\u7acb\u5c0d\u9f4a\u4e86\uff0c\u53e6\u4e00\u500b\u5247\u96b1\u7d04\u6709\u5c0d\u9f4a\u7dda\u800c\u5df2\uff0c\u56e0\u6b64\u53ef\u5f97\u77e5\uff0c Bahdanau Attention \u52a0\u4e0a Forward Attention \u7684\u67b6\u69cb\u662f\u80fd\u5920\u5e6b\u52a9\u6a21\u578b\u5feb\u901f\u5730\u5efa\u7acb\u5c0d\u9f4a\u3002\u53ef\u4ee5 \u65bc\u6211\u5011\u7684\u7db2\u7ad9\u4e0a\u8046\u807d\u6a23\u672c\uff1ahttps://babaili.github.io/rocling2021_demo/", |
| "html": null |
| } |
| } |
| } |
| } |