| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:34:12.783871Z" |
| }, |
| "title": "Manual Speech Synthesis Data Acquisition - From Script Design to Recording Speech", |
| "authors": [ |
| { |
| "first": "Atli", |
| "middle": [ |
| "\u00de\u00f3r" |
| ], |
| "last": "Sigurgeirsson", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Reykjavik University Menntavegur 1 -Reykjavik Iceland", |
| "location": { |
| "addrLine": "Laugavegur 13 -Reykjavik Iceland, Menntavegur 1", |
| "country": "Iceland" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Gunnar", |
| "middle": [ |
| "Thor" |
| ], |
| "last": "\u00d6rn\u00f3lfsson", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Reykjavik University Menntavegur 1 -Reykjavik Iceland", |
| "location": { |
| "addrLine": "Laugavegur 13 -Reykjavik Iceland, Menntavegur 1", |
| "country": "Iceland" |
| } |
| }, |
| "email": "gunnarthor@hi.is" |
| }, |
| { |
| "first": "J\u00f3n", |
| "middle": [], |
| "last": "Gu\u00f0nason", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Reykjavik University Menntavegur 1 -Reykjavik Iceland", |
| "location": { |
| "addrLine": "Laugavegur 13 -Reykjavik Iceland, Menntavegur 1", |
| "country": "Iceland" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this paper we present the work of collecting a large amount of high quality speech synthesis data for Icelandic. 8 speakers will be recorded for 20 hours each. A script design strategy is proposed and three scripts have been generated to maximize diphone coverage, varying in length. The largest reading script contains 14,400 prompts and includes 87.3% of all Icelandic diphones at least once and 81% of all Icelandic diphones at least twenty times. A recording client was developed to facilitate recording sessions. The client supports easily importing scripts and maintaining multiple collections in parallel. The recorded data can be downloaded straight from the client. Recording sessions are carried out in a professional studio under supervision and started October of 2019. As of writing, 58.7 hours of high quality speech data has been collected. The scripts, the recording software and the speech data will later be released under a CC-BY 4.0 license.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this paper we present the work of collecting a large amount of high quality speech synthesis data for Icelandic. 8 speakers will be recorded for 20 hours each. A script design strategy is proposed and three scripts have been generated to maximize diphone coverage, varying in length. The largest reading script contains 14,400 prompts and includes 87.3% of all Icelandic diphones at least once and 81% of all Icelandic diphones at least twenty times. A recording client was developed to facilitate recording sessions. The client supports easily importing scripts and maintaining multiple collections in parallel. The recorded data can be downloaded straight from the client. Recording sessions are carried out in a professional studio under supervision and started October of 2019. As of writing, 58.7 hours of high quality speech data has been collected. The scripts, the recording software and the speech data will later be released under a CC-BY 4.0 license.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "High quality speech data is imperative to the development of good speech synthesis systems. This fact is often a hurdle for under-resourced languages, like Icelandic, since the acquisition of quality speech data is both labor-intensive and a costly process. Meanwhile, as the language technology (LT) community has grown, the development cost of Text-to-speech (TTS) and Automatic Speech recognition (ASR) systems has decreased. The availability of Icelandic ASR data has increased tremendously in recent years. The Althingi corpus (Helgad\u00f3ttir et al., 2017) contains over 500 hours of transcribed parliament speeches, the M\u00e1lr\u00f3mur corpus (Steingr\u00edmsson et al., 2017) includes 152 hours of recorded speech from 563 participants and the Almannar\u00f3mur corpus which was collected from 563 participants that provided 219 read sentences on average each (Gu\u00f0nason et al., 2012) . All of these datasets are recorded by multiple speakers under different recording environments which is a benefit when training ASR models while it is a hindrance when training naturalsounding TTS models. Speech synthesis datasets for Icelandic remain sparse.", |
| "cite_spans": [ |
| { |
| "start": 532, |
| "end": 558, |
| "text": "(Helgad\u00f3ttir et al., 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 639, |
| "end": 667, |
| "text": "(Steingr\u00edmsson et al., 2017)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 847, |
| "end": 870, |
| "text": "(Gu\u00f0nason et al., 2012)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Typically, speech synthesis datasets are recorded professionally by a single speaker in a controlled environment under supervision. The amount of data required for TTS depends on the chosen model. The CMU arctic corpus consists of 1150 phonetically balanced sentences and was designed for unit selection TTS (Kominek et al., 2003) . The Merlin toolkit was used to train a statistical parametric speech synthesis (SPSS) model based on neural networks with 2400 training utterances (Wu et al., 2016) . Deep voice (Arik et al., 2017) , an end-to-end TTS based entirely on neural networks was trained on approximately 20 hours of speech and reached a mean opinion score (MOS) of 3.94 \u00b1 0.26.", |
| "cite_spans": [ |
| { |
| "start": 308, |
| "end": 330, |
| "text": "(Kominek et al., 2003)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 480, |
| "end": 497, |
| "text": "(Wu et al., 2016)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 511, |
| "end": 530, |
| "text": "(Arik et al., 2017)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "1.1." |
| }, |
| { |
| "text": "The first development of an Icelandic TTS started around the turn of the century. At least three TTS systems for Icelandic exist today and are in use. Most recently, in 2010, the Icelandic association of the visually impaired hired the Polish software company Ivona to develop a unit selection TTS system. These three systems have all had mixed successes. An important downside to these developments is the fact that all three voices were carried out by foreign firms and no open and available TTS datasets for Icelandic exist today (Nikul\u00e1sd\u00f3ttir et al., 2020) . A voice recording client is necessary to facilitate the recording sessions. Common Voice 1 is a well known recording client for crowd sourcing ASR data. Google has used a tool referred to as Datahound for collecting and building transcribed speech corpora for many languages (Hughes et al., 2010) . A speech data acquisition system made in Iceland referred to as Eyra was developed in 2016 (Petursson et al., 2016) . Eyra was developed as a crowd sourcing tool and was later used to collect about 35 hours ASR data (Gu\u00f0nason et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 533, |
| "end": 561, |
| "text": "(Nikul\u00e1sd\u00f3ttir et al., 2020)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 839, |
| "end": 860, |
| "text": "(Hughes et al., 2010)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 954, |
| "end": 978, |
| "text": "(Petursson et al., 2016)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1079, |
| "end": 1102, |
| "text": "(Gu\u00f0nason et al., 2017)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "1.1." |
| }, |
| { |
| "text": "This paper presents an overview of the speech data acquisition process for a new Icelandic TTS system. The system is being developed as a part of the Icelandic language technology programme (Nikul\u00e1sd\u00f3ttir et al., 2020) . The programme spans 4 years and many different projects in LT. This paper presents two of the main goals of the TTS project:", |
| "cite_spans": [ |
| { |
| "start": 190, |
| "end": 218, |
| "text": "(Nikul\u00e1sd\u00f3ttir et al., 2020)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "1.2." |
| }, |
| { |
| "text": "\u2022 To generate 3 scripts that maximize a diphone coverage. They should be designed for 1 hour, 10 hour and 20 hour collections.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "1.2." |
| }, |
| { |
| "text": "\u2022 To record unit selection TTS data from 4 female speakers and 4 male speakers. 20 hours should be collected from each speaker.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "1.2." |
| }, |
| { |
| "text": "The 3 scripts should be suitable for TTS recipe development on a varying scale, from unit-selection models to endto-end neural speech synthesis models. This work started in autumn 2019 and as of writing, the scripts have been finalized. The twenty hour script contains 14400 unique sentences. The list contains at least one occurrence of 87.3% of all possible diphones and 81% of them appear at least 20 times. Speech recording is an ongoing process and we have collected approximately 59 hours of data as of date. Once all speakers have been recorded, the dataset will be published under a CC-BY 4.0 license.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview", |
| "sec_num": "1.2." |
| }, |
| { |
| "text": "Before designing the script, 500,000 sentences were extracted from Risam\u00e1lheild (Steingr\u00edmsson et al., 2018) , a large Icelandic text corpus containing more than one billion word tokens. All of these sentences had to pass a naive preprocessing step. To pass, the sentence must:", |
| "cite_spans": [ |
| { |
| "start": 80, |
| "end": 108, |
| "text": "(Steingr\u00edmsson et al., 2018)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "\u2022 be at least 10 letters", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "\u2022 be between 5 and 15 words", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "\u2022 only contain characters from the Icelandic alphabet or any of the Icelandic punctuation symbols", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "\u2022 start with a capital letter", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "\u2022 end with a punctuation symbol", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "\u2022 appear in the database of modern Icelandic inflection (Bjarnad\u00f3ttir, 2012) Since Icelandic is a highly inflected language, simply checking if all words in a sentence appear in a dictionary would greatly limit the number of sentences that would pass this preprocessing step. Checking if all words appear in the inflection list does not guarantee grammatical correctness however. The length constraints were enforced to minimize prosodic difference between recordings, which can be an issue for very short sentences (Kominek et al., 2003) , and to limit the number of mispronunciations while recording the data.", |
| "cite_spans": [ |
| { |
| "start": 56, |
| "end": 76, |
| "text": "(Bjarnad\u00f3ttir, 2012)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 516, |
| "end": 538, |
| "text": "(Kominek et al., 2003)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "The phones in a randomly sampled list of sentences will follow an uneven distribution where a small number of phones will appear very frequently. Such a list will therefore likely not contain more than one occurrence of a substantial amount of the possible phonetic combinations in the language. This poses a problem for gathering speech synthesis data since it is critical to train a TTS on most phonetic combinations more than once to generate naturalsounding results. TTS scripts are therefore most often designed to maximize some phonetic coverage. A lot of different metrics have been used for measuring such a coverage. It varies both in terms of the phonetic unit used, e.g. diphones (Kominek et al., 2003) or triphones (Ursin, 2002) , and also in terms of the context each unit appears in, where in the sentence the unit appears or where in a word it appears and so on. We decided to maximize diphone coverage while limiting sentence length. A Sequitur grapheme-to-phoneme (G2P) model (Bisani and Ney, 2008) was trained on the Icelandic Pronunciation Dictionary (IPD) (Nikul\u00e1sd\u00f3ttir et al., 2018) To acquire predicted phonetization of the source text. This is needed to analyze the phonetic content of the source text. Icelandic is spoken with six rather similar dialects and IPD contains variants for four of those dialects. A standard dialect in the IPD is used to phonetically transcribe the training data in this work. The training set consists of approximately 40,000 verified word and phonetization pairs. The complete list of Icelandic phones in SAMPA format is given below A,ay,ay:,au,au:, A:,c,c0,ey,ey:,f,h,i, i:,j,k,k0,l,l0,m,m0,n,n0,ou,ou:,p,p0,r, r0,s,t,t0,U,U:,v,x,C,D,N,N0,9,9y,9y:, 9: ,O,oy,O:,E,E:,G,I,I:,J,J0,Y,yy,Y:,T", |
| "cite_spans": [ |
| { |
| "start": 691, |
| "end": 713, |
| "text": "(Kominek et al., 2003)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 727, |
| "end": 740, |
| "text": "(Ursin, 2002)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 993, |
| "end": 1015, |
| "text": "(Bisani and Ney, 2008)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1076, |
| "end": 1104, |
| "text": "(Nikul\u00e1sd\u00f3ttir et al., 2018)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1606, |
| "end": 1708, |
| "text": "A:,c,c0,ey,ey:,f,h,i, i:,j,k,k0,l,l0,m,m0,n,n0,ou,ou:,p,p0,r, r0,s,t,t0,U,U:,v,x,C,D,N,N0,9,9y,9y:, 9:", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "The trained model achieves a phone error rate (PER) of 3.4%. Using this G2P model, the phonetization for each source sentence was predicted. A special symbol was additionally prepended and appended to each phonetization to denote the start and end of sentences. Using this, a list of diphones was generated for each sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "A greedy algorithm was used to order the list by a reward function, R, which was constructed to reward sentences that both improve the phonetic coverage and are short. The final script is initialized as the empty set. Given the large list of sentences, all the sentences are scored by R at every time step and sorted. The sentence with the highest reward is inserted into the final script. The reward function is given by", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "R(s) = 1 |s| n i=1 1 max(1, [d i \u2208 D]) s = s 1 , . . . , s m d(s) = d 1 , . . . , d n", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Here, s is the sentence, d(s) is the grapheme-to-diphone mapping of s and D is the set of all diphones in the script already. We define a complete coverage to include at least 20 occurrences of each possible diphone. After that, a diphone does not count towards the reward. The algorithm runs until complete coverage is achieved or 25,000 sentences have been added to the script. The coverage at every insertion step is shown In Figure 1 . The blue curve shows the actual coverage, that is the coverage with regards to all diphones. It is important to point out that not all diphones are valid diphones in Icelandic and never appear. The red curve shows the coverage with regards to the diphones that appear in the source that the algorithm runs on. After about 6000 insertions the algorithm reaches the maximum possible coverage. The resulting script contains at least one occurrence of 87.3% of all possible diphones and 81% of them appear at least 20 times. Figure 2 shows phone-to-phone heat maps of the list generated by the proposed method and the same number of randomly sampled sentences. The heat map for the proposed script demonstrates much greater coverage than that of the randomly sampled list. This underlines the issue of randomly sampling sentences. After sorting the list by the reward, a number of sentences were added to the script in different categories: \u2022 2000 sentences between 15-25 words that could be used for learning acoustic alignment for longer sentences.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 429, |
| "end": 437, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 961, |
| "end": 969, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "\u2022 100 sentences containing only digits in written form", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "\u2022 30 sentences that contain only one word each", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "To generate the 20 hour script we take the first k sentences from the final list ordered by the reward such that k = 20 \u00d7 3600s/5s = 14, 400 where we estimate that it takes on average 5 seconds to read a single sentence. The 1 hour and 10 hour scripts are generated in a similar manner.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Script Design", |
| "sec_num": "2." |
| }, |
| { |
| "text": "A recording client was developed to facilitate voice recording sessions and we call it Lobe. Previously, a speech data acquisition tool called Eyra (e. Ear) (Petursson et al., 2016) was developed at Reykjavik University. Eyra was used successfully for gathering ASR data (Gu\u00f0nason et al., 2017) . The ASR focused nature of Eyra did not fit the TTS data acquisition task which prompted the development of Lobe. Lobe is at the core a Python package with a Flask 2 web client. It is hosted on a Reykjavik University server and accessible in the browser. The data is stored in a PostgreSQL database with a weekly backup schedule. Lobe assigns roles to users, either an administration role or a basic user role. A basic user could be", |
| "cite_spans": [ |
| { |
| "start": 157, |
| "end": 181, |
| "text": "(Petursson et al., 2016)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 271, |
| "end": 294, |
| "text": "(Gu\u00f0nason et al., 2017)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recording Client", |
| "sec_num": "3." |
| }, |
| { |
| "text": "\u2022 A speaker whose voice will be recorded", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recording Client", |
| "sec_num": "3." |
| }, |
| { |
| "text": "\u2022 A person who controls the prompts while recording speech (prompt manager)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recording Client", |
| "sec_num": "3." |
| }, |
| { |
| "text": "\u2022 Anyone else that wants to access the data An administrator starts by creating a collection through Lobe. After selecting a collection name and perhaps assigning a speaker to the collection, the prompts are uploaded through Lobe. Lobe accepts multiple file uploads where each line in a file is treated as a single prompt. As Lobe was designed with the script design in mind it also accepts prompts that include the following in a tab-separated format:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recording Client", |
| "sec_num": "3." |
| }, |
| { |
| "text": "\u2022 The prompt itself.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recording Client", |
| "sec_num": "3." |
| }, |
| { |
| "text": "\u2022 The source of the prompt (e.g. a certain newspaper).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recording Client", |
| "sec_num": "3." |
| }, |
| { |
| "text": "\u2022 An order score. If a score is higher it appears earlier in the prompts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recording Client", |
| "sec_num": "3." |
| }, |
| { |
| "text": "\u2022 The phonetization of the prompt.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recording Client", |
| "sec_num": "3." |
| }, |
| { |
| "text": "In this way, we can start by recording phonetically rich sentences as determined by the reward function. Next, the administrator creates a new user for the speaker through Lobe. Lobe stores user information such as age, sex and dialect. After that, recording sessions can be carried out. Each recording session contains 50 prompts. The prompt manager presses a key to initialize audio capture and a visual sign prompts the speaker to start speaking. After the speaker reads the prompt the prompt manager presses another key to stop audio capture. At that point the prompt manager can go to the next prompt or download the current audio capture. Since the scripts are not guaranteed to be grammatically correct, the prompt manager also has the option to skip the current prompt and the sentence will be marked as faulty in the database. That sentence will not appear further as a prompt. Lobe has a simple quality con- Figure 3 : The prompt screen shown to both the speaker and the prompt manager trol check that runs after each recording. It will prompt the manager if the recording is either too quiet or too loud. For further inspection, the recording can be downloaded and analyzed in any available audio software. We use the Me-diaRecorder 3 interface to record the audio. It is sampled at 41KHz with a 24 bit depth. After a session is finished, the prompt manager can start a new recording session or log out of Lobe. At any time, the collection can be downloaded as a separate dataset through Lobe. The client creates an archive that includes all prompts and recordings as well as information about each recording session, the speaker and the collection itself. Since we are using Merlin (Wu et al., 2016) for generating SPSS voice recipes, we made sure that the lobe dataset exports could be easily imported into Merlin.", |
| "cite_spans": [ |
| { |
| "start": 1694, |
| "end": 1711, |
| "text": "(Wu et al., 2016)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 918, |
| "end": 926, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Recording Client", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Eight speakers will be recorded, 20 hours each. As of date we have started recording four of those. One of our goals is to attain diversity in age, dialect and overall speaking style. Four female speakers will be recorded and four male speakers. The first four speakers are in the age range of 49-71 years as shown in Table 1 . They also all speak in the same standard dialect. It is therefore important to select the next four speakers with this fact and the goal of attaining diversity in mind. A voice sample of five sentences is recorded and analyzed before a speaker is added to the dataset. We evaluate the speech rate, volume and the overall pleasantness of the voice. Once the speaker has Speaker ID Age Sex Amount recorded M1 70 Male 20 hours F1 59 Female 17.3 hours M2 49 Male 9.8 hours F2 71 Female 11.6 hours Table 1 : The recording progress for the first 4 speakers been approved, the speaker is assigned a recording schedule with a prompt manager. A voice recording test is carried out during the first session. This is done to determine the external sound card level that ensures that the recording stays between -18dB and -12dB in playback with 0dB as the distortion threshold. The sound card level is recorded for future reference.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 318, |
| "end": 325, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 712, |
| "end": 845, |
| "text": "Sex Amount recorded M1 70 Male 20 hours F1 59 Female 17.3 hours M2 49 Male 9.8 hours F2 71 Female 11.6 hours Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Recording Speech", |
| "sec_num": "4." |
| }, |
| { |
| "text": "Recording sessions are carried out in a studio at the national broadcaster of Iceland. The studio is separated into a recording space and a monitoring space. The recording space is sound proof and designed to limit resonance. Both prompt managers and the speakers monitor the distance from the pop filter attached to the microphone at the start of each recording session as the distance could affect the recorded results. The speakers are also told not to bring anything else into the recording space and limit movement. The prompt manager sits in the monitor space and communicates with the speaker using a talkback system in the studio. Before starting, the prompt manager starts a voice Each session is configured to go through 50 prompts. The speaker never records for more than two hours each day to reduce the risk of vocal strain. Typically eight to twelve such sessions can be completed in a two hour span. The session duration varies between speakers but is normally between seven to 13 minutes with an average duration of about 9 minutes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recording Speech", |
| "sec_num": "4." |
| }, |
| { |
| "text": "We aim to finish recording 20 hours from the eight speakers each by the 1st of October 2020. The dataset will thereafter be made available. Parallel to this, work on unit selection TTS and SPSS model recipes will be carried out and trained for the speakers that have reached the 20 hour goal. Work to improve Lobe is ongoing. Most importantly is the work on expanding the built-in quality control. We additionally aim to make Lobe more configurable with regards to the number of prompts in a session, sample rate, bit depth and so on. More features will also soon be added to Lobe to facilitate different types of data collections. Firstly, support for multi-speaker collections will be added. This is necessary as part of the Icelandic language technology programme is to collect 2 hours from 40 speakers each for voice mixing synthesis projects. Secondly, support for video capture will be added to facilitate audio-visual speech recognition data acquisition.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "5." |
| }, |
| { |
| "text": "https://voice.mozilla.org/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://flask.palletsprojects.com/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Deep voice: Real-time neural text-tospeech", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "\u00d6" |
| ], |
| "last": "Arik", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Chrzanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Coates", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Diamos", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Gibiansky", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Raiman", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 34th International Conference on Machine Learning", |
| "volume": "70", |
| "issue": "", |
| "pages": "195--204", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arik, S.\u00d6., Chrzanowski, M., Coates, A., Diamos, G., Gib- iansky, A., Kang, Y., Li, X., Miller, J., Ng, A., Raiman, J., et al. (2017). Deep voice: Real-time neural text-to- speech. In Proceedings of the 34th International Confer- ence on Machine Learning-Volume 70, pages 195-204. JMLR. org.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Joint-sequence models for grapheme-to-phoneme conversion", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Bisani", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "50", |
| "issue": "", |
| "pages": "434--451", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bisani, M. and Ney, H. (2008). Joint-sequence models for grapheme-to-phoneme conversion. Speech communica- tion, 50(5):434-451.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The database of modern icelandic inflection (beygingarl\u1ef3sing\u00edslensks n\u00fat\u00edmam\u00e1ls). Language Technology for Normalisation of Less-Resourced Languages", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Bjarnad\u00f3ttir", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bjarnad\u00f3ttir, K. (2012). The database of modern icelandic inflection (beygingarl\u1ef3sing\u00edslensks n\u00fat\u00edmam\u00e1ls). Lan- guage Technology for Normalisation of Less-Resourced Languages, page 13.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Almannaromur: An open icelandic speech corpus", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Gu\u00f0nason", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Kjartansson", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "J\u00f3hannsson", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Carstensd\u00f3ttir", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "H" |
| ], |
| "last": "Vilhj\u00e1lmsson", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Loftsson", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Helgad\u00f3ttir", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "M" |
| ], |
| "last": "J\u00f3hannsd\u00f3ttir", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00f6gnvaldsson", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Spoken Language Technologies for Under-Resourced Languages", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gu\u00f0nason, J., Kjartansson, O., J\u00f3hannsson, J., Carstensd\u00f3ttir, E., Vilhj\u00e1lmsson, H. H., Loftsson, H., Helgad\u00f3ttir, S., J\u00f3hannsd\u00f3ttir, K. M., and R\u00f6gnvaldsson, E. (2012). Almannaromur: An open icelandic speech corpus. In Spoken Language Technologies for Under- Resourced Languages.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Building ASR corpora using Eyra", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Gu\u00f0nason", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "P\u00e9tursson", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Kjaran", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Kl\u00fcpfel", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "B" |
| ], |
| "last": "Nikul\u00e1sd\u00f3ttir", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "INTERSPEECH", |
| "volume": "", |
| "issue": "", |
| "pages": "2173--2177", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gu\u00f0nason, J., P\u00e9tursson, M., Kjaran, R., Kl\u00fcpfel, S., and Nikul\u00e1sd\u00f3ttir, A. B. (2017). Building ASR corpora us- ing Eyra. In INTERSPEECH, pages 2173-2177.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Building an ASR corpus using Althingi's parliamentary speeches", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [ |
| "R" |
| ], |
| "last": "Helgad\u00f3ttir", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Kjaran", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "B" |
| ], |
| "last": "Nikul\u00e1sd\u00f3ttir", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Gu\u00f0nason", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "INTERSPEECH", |
| "volume": "", |
| "issue": "", |
| "pages": "2163--2167", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Helgad\u00f3ttir, I. R., Kjaran, R., Nikul\u00e1sd\u00f3ttir, A. B., and Gu\u00f0nason, J. (2017). Building an ASR corpus using Althingi's parliamentary speeches. In INTERSPEECH, pages 2163-2167.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Building transcribed speech corpora quickly and cheaply for many languages", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Hughes", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Nakajima", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Ha", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Vasu", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "J" |
| ], |
| "last": "Moreno", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Lebeau", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Eleventh Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hughes, T., Nakajima, K., Ha, L., Vasu, A., Moreno, P. J., and LeBeau, M. (2010). Building transcribed speech corpora quickly and cheaply for many languages. In Eleventh Annual Conference of the International Speech Communication Association.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "CMU ARCTIC databases for speech synthesis", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kominek", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Ver", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kominek, J., Black, A. W., and Ver, V. (2003). CMU ARC- TIC databases for speech synthesis.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "An icelandic pronunciation dictionary for tts", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "B" |
| ], |
| "last": "Nikul\u00e1sd\u00f3ttir", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Gu\u00f0nason", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00f6gnvaldsson", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "2018 IEEE Spoken Language Technology Workshop (SLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "339--345", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikul\u00e1sd\u00f3ttir, A. B., Gu\u00f0nason, J., and R\u00f6gnvaldsson, E. (2018). An icelandic pronunciation dictionary for tts. In 2018 IEEE Spoken Language Technology Workshop (SLT), pages 339-345. IEEE.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Language technology programme for icelandic 2019-2023", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "B" |
| ], |
| "last": "Nikul\u00e1sd\u00f3ttir", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Gu\u00f0nason", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "K" |
| ], |
| "last": "Ingason", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "R\u00f6gnvaldsson", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "R\u00f6gnvaldsson", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [ |
| "F" |
| ], |
| "last": "Sigur\u00f0sson", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Steingr\u00edmsson", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "LREC. LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikul\u00e1sd\u00f3ttir, A. B., Gu\u00f0nason, J., Ingason, A. K., R\u00f6gnvaldsson, H., R\u00f6gnvaldsson, E., Sigur\u00f0sson, E. F., and Steingr\u00edmsson, S. (2020). Language technology programme for icelandic 2019-2023. In LREC. LREC.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Eyra - speech data acquisition system for many languages", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Petursson", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Kl\u00fcpfel", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Gudnason", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Procedia Computer Science", |
| "volume": "81", |
| "issue": "", |
| "pages": "53--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Petursson, M., Kl\u00fcpfel, S., and Gudnason, J. (2016). Eyra- speech data acquisition system for many languages. Pro- cedia Computer Science, 81:53-60.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "M\u00e1lr\u00f3mur: A manually verified corpus of recorded icelandic speech", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Steingr\u00edmsson", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Gu\u00f0nason", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Helgad\u00f3ttir", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00f6gnvaldsson", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 21st Nordic Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "237--240", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steingr\u00edmsson, S., Gu\u00f0nason, J., Helgad\u00f3ttir, S., and R\u00f6gnvaldsson, E. (2017). M\u00e1lr\u00f3mur: A manually ver- ified corpus of recorded icelandic speech. In Proceed- ings of the 21st Nordic Conference on Computational Linguistics, pages 237-240.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Risam\u00e1lheild: A very large icelandic text corpus", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Steingr\u00edmsson", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Helgad\u00f3ttir", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "R\u00f6gnvaldsson", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Barkarson", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Gu\u00f0nason", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steingr\u00edmsson, S., Helgad\u00f3ttir, S., R\u00f6gnvaldsson, E., Barkarson, S., and Gu\u00f0nason, J. (2018). Risam\u00e1lheild: A very large icelandic text corpus. In Proceedings of the Eleventh International Conference on Language Re- sources and Evaluation (LREC 2018).", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Triphone clustering in finnish continuous speech recognition. Diplomity\u00f6", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Ursin", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ursin, M. (2002). Triphone clustering in finnish contin- uous speech recognition. Diplomity\u00f6, Teknillinen ko- rkeakoulu.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Merlin: An open source neural network speech synthesis system", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Watts", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "King", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "SSW", |
| "volume": "", |
| "issue": "", |
| "pages": "202--207", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wu, Z., Watts, O., and King, S. (2016). Merlin: An open source neural network speech synthesis system. In SSW, pages 202-207.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "Phonetical coverage at every insertion step of the greedy algorithm. (a) Randomly sampled script (b) The proposed script", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "num": null, |
| "text": "Heat maps that visualize the diphone distribution of a randomly sampled script and the proposed script. Both axis include all possible phonemes in the same order.", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "num": null, |
| "text": "The recording environment shown from inside the recording space. recording test. A single sentence is recorded and then analyzed. If the monitor value is not within the (-18dB;-12dB) range, the prompt manager changes the sound card level accordingly and records a new sound card level. At this point a session can start.", |
| "uris": null, |
| "type_str": "figure" |
| } |
| } |
| } |
| } |