ACL-OCL / Base_JSON /prefixR /json /rocling /2019.rocling-1.25.json
Benjamin Aw
Add updated pkl file v3
6fa4bc9
{
"paper_id": "2019",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T14:54:39.676228Z"
},
"title": "Real-Time Mandarin Speech Synthesis System",
"authors": [
{
"first": "An-Chieh",
"middle": [],
"last": "\u912d\u5b89\u5091",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Information Engineering National Sun Yat-sen University",
"location": {}
},
"email": ""
},
{
"first": "An-Chieh",
"middle": [],
"last": "Cheng",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Information Engineering National Sun Yat-sen University",
"location": {}
},
"email": "ajcheng@g-mail.nsysu.edu.tw"
},
{
"first": "Chia-Ping",
"middle": [],
"last": "\u9673\u5609\u5e73",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Chia-Ping",
"middle": [],
"last": "Chen",
"suffix": "",
"affiliation": {},
"email": "cpchen@mail.cse.nsysu.edu.tw"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "This thesis studies and implements the real time Chinese speech synthesis system. This system uses a conversion model of the text sequence to the Mel spectrum sequence, and then",
"pdf_parse": {
"paper_id": "2019",
"_pdf_hash": "",
"abstract": [
{
"text": "This thesis studies and implements the real time Chinese speech synthesis system. This system uses a conversion model of the text sequence to the Mel spectrum sequence, and then",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "concatenates a vocoder from the Mel spectrum to the synthesized speech. We use Tacotron2",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "to implement a sequence-to-sequence conversion model with several different vocoders, including Griffin-Lim, World-Vocoder, and WaveGlow. The WaveGlow neural network vocoder, which implements the reversible codec function, is the most prominent, and is impressive in terms of synthesis speed or speech quality. We use a single speaker with 12-hour corpus implementation system. In terms of voice quality, the MOS of the synthesized system voice using the WaveGlow vocoder is 4.08, which is slightly lower than the 4.41 of the real voice, and far better than the other two vocoders (average 2.93). In terms of processing speed, if the GeForce RTX 2080 TI GPU is used, the synthesis system using the WaveGlow vocoder produces a voice of 10 seconds and 48 kHz in 1.4 seconds, so it is a real time system. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF1": {
"ref_id": "b1",
"title": "Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions",
"authors": [
{
"first": "Zhifeng",
"middle": [],
"last": "Yang",
"suffix": ""
},
{
"first": "Yu",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Yuxuan",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Rif",
"middle": [
"A"
],
"last": "Skerry-Ryan",
"suffix": ""
},
{
"first": "Yannis",
"middle": [],
"last": "Saurous",
"suffix": ""
},
{
"first": "Yonghui",
"middle": [],
"last": "Agiomyrgiannakis",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Wu",
"suffix": ""
}
],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yang, Zhifeng Chen, Yu Zhang, Yuxuan Wang, RJ Skerry-Ryan, Rif A. Saurous, Yannis Agiomyrgiannakis, Yonghui Wu, \"Natural TTS Synthesis by Conditioning WaveNet on",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Attention-Based Models for Speech Recognition",
"authors": [
{
"first": "Jan",
"middle": [],
"last": "Chorowski",
"suffix": ""
},
{
"first": "Dzmitry",
"middle": [],
"last": "Bahdanau",
"suffix": ""
},
{
"first": "Dmitriy",
"middle": [],
"last": "Serdyuk",
"suffix": ""
},
{
"first": "Kyunghyun",
"middle": [],
"last": "Cho",
"suffix": ""
},
{
"first": "Yoshua",
"middle": [],
"last": "Bengio",
"suffix": ""
}
],
"year": 2015,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1506.07503v1"
]
},
"num": null,
"urls": [],
"raw_text": "Jan Chorowski, Dzmitry Bahdanau, Dmitriy Serdyuk, Kyunghyun Cho, Yoshua Bengio, \"Attention-Based Models for Speech Recognition,\" \u65bc arXiv:1506.07503v1 , 2015.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "WaveNet: A Generative Model for Raw Audio",
"authors": [
{
"first": "Aaron",
"middle": [],
"last": "Van Den Oord",
"suffix": ""
},
{
"first": "Sander",
"middle": [],
"last": "Dieleman",
"suffix": ""
},
{
"first": "Heiga",
"middle": [],
"last": "Zen",
"suffix": ""
},
{
"first": "Karen",
"middle": [],
"last": "Simonyan",
"suffix": ""
},
{
"first": "Oriol",
"middle": [],
"last": "Vinyals",
"suffix": ""
},
{
"first": "Alex",
"middle": [],
"last": "Graves",
"suffix": ""
},
{
"first": "Nal",
"middle": [],
"last": "Kalchbrenner",
"suffix": ""
},
{
"first": "Andrew",
"middle": [],
"last": "Senior",
"suffix": ""
},
{
"first": "Koray",
"middle": [],
"last": "Kavukcuoglu",
"suffix": ""
}
],
"year": 2016,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1609.03499v2"
]
},
"num": null,
"urls": [],
"raw_text": "Aaron van den Oord, Sander Dieleman, Heiga Zen, Karen Simonyan, Oriol Vinyals, Alex Graves, Nal Kalchbrenner, Andrew Senior, Koray Kavukcuoglu, \"WaveNet: A Generative Model for Raw Audio,\" \u65bc arXiv:1609.03499v2, 2016.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Multi-Scale Context Aggregation by Dilated Convolutions",
"authors": [
{
"first": "Fisher",
"middle": [],
"last": "Yu",
"suffix": ""
},
{
"first": "Vladlen",
"middle": [],
"last": "Koltun",
"suffix": ""
}
],
"year": 2015,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1511.07122v3"
]
},
"num": null,
"urls": [],
"raw_text": "Fisher Yu, Vladlen Koltun, \"Multi-Scale Context Aggregation by Dilated Convolutions,\" \u65bc arXiv:1511.07122v3, 2015.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Signal estimation from modified short-time fourier transform",
"authors": [
{
"first": "Daniel",
"middle": [],
"last": "Griffin",
"suffix": ""
},
{
"first": "Jae",
"middle": [],
"last": "Lim",
"suffix": ""
}
],
"year": 1984,
"venue": "IEEE Transactions on Acoustics, Speech, and Signal Processing",
"volume": "32",
"issue": "2",
"pages": "236--243",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Daniel Griffin and Jae Lim, \"Signal estimation from modified short-time fourier transform,\" \u65bc IEEE Transactions on Acoustics, Speech, and Signal Processing, vol. 32, no. 2, pp. 236-243, 1984.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "WORLD: A Vocoder-Based High-Quality Speech Synthesis System for Real-Time Applications",
"authors": [
{
"first": "Morise",
"middle": [],
"last": "Masanori",
"suffix": ""
},
{
"first": "Yokomori",
"middle": [],
"last": "Fumiya",
"suffix": ""
},
{
"first": "Ozawa",
"middle": [],
"last": "Kenji",
"suffix": ""
}
],
"year": 2016,
"venue": "IEICE Trans. on Information and Systems",
"volume": "99",
"issue": "7",
"pages": "1877--1884",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Masanori MORISE,Fumiya YOKOMORI,Kenji OZAWA, \"WORLD: A Vocoder-Based High-Quality Speech Synthesis System for Real-Time Applications,\" \u65bc IEICE Trans. on Information and Systems, vol. 99, no. 7, pp. 1877-1884, 2016.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "WaveGlow: A Flow-based Generative Network for Speech Synthesis",
"authors": [
{
"first": "Ryan",
"middle": [],
"last": "Prenger",
"suffix": ""
},
{
"first": "Rafael",
"middle": [],
"last": "Valle",
"suffix": ""
},
{
"first": "Bryan",
"middle": [],
"last": "Catanzaro",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1811.00002v1"
]
},
"num": null,
"urls": [],
"raw_text": "Ryan Prenger, Rafael Valle, Bryan Catanzaro, \"WaveGlow: A Flow-based Generative Network for Speech Synthesis,\" \u65bc arXiv:1811.00002v1, 2018.",
"links": null
}
},
"ref_entries": {}
}
}