| { |
| "paper_id": "Y14-1022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:44:56.021999Z" |
| }, |
| "title": "Emphasized Accent Phrase Prediction from Text for Advertisement Text-To-Speech Synthesis", |
| "authors": [ |
| { |
| "first": "Hideharu", |
| "middle": [], |
| "last": "Nakajima", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "NTT Corporation", |
| "location": { |
| "addrLine": "1-1 Hikarino-oka", |
| "postCode": "239-0847", |
| "settlement": "Yokosuka", |
| "region": "Kanagawa", |
| "country": "JAPAN" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Hideyuki", |
| "middle": [], |
| "last": "Mizuno", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "NTT Corporation", |
| "location": { |
| "addrLine": "1-1 Hikarino-oka", |
| "postCode": "239-0847", |
| "settlement": "Yokosuka", |
| "region": "Kanagawa", |
| "country": "JAPAN" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Sumitaka", |
| "middle": [], |
| "last": "Sakauchi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "NTT Corporation", |
| "location": { |
| "addrLine": "1-1 Hikarino-oka", |
| "postCode": "239-0847", |
| "settlement": "Yokosuka", |
| "region": "Kanagawa", |
| "country": "JAPAN" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Realizing expressive text-to-speech synthesis needs both text processing and the rendering of natural expressive speech. This paper focuses on the former as a front-end task in the production of synthetic speech, and investigates a novel method for predicting emphasized accent phrases from advertisement text information. For this purpose, we examine features that can be accurately extracted by text processing based on current Text-tospeech synthesis technologies. Among features, the word surface string of the main content and function words and the part-of-speech of main function words in an accent phrase are found to have higher potential on predicting whether the accent phrase should be emphasized or not through the calculation of mutual information between emphasis label and features of Japanese advertisement sentences. Experiments confirm that emphasized accent phrase prediction using support vector machine (SVM) offers encouraging accuracies for the system which requires emphasized accent phrase locations as context information to improve speech synthesis qualities. 1. expressing linguistic \"focus\": (e.g., \" Taro did.\" (as an answer to \"who did ...?\")) 2. expressing \"contrast\": (e.g., \"not A but B\") 3. expressing \"element of surprise\": (e.g., \"I heard he was sick, but he had much energy.\") 4. disambiguating grammatical structure: clarifying parallel and dependency structure (e.g., to distinguish \"{old men} and women\" from \"old {men and women}\" in \"old men and women\")", |
| "pdf_parse": { |
| "paper_id": "Y14-1022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Realizing expressive text-to-speech synthesis needs both text processing and the rendering of natural expressive speech. This paper focuses on the former as a front-end task in the production of synthetic speech, and investigates a novel method for predicting emphasized accent phrases from advertisement text information. For this purpose, we examine features that can be accurately extracted by text processing based on current Text-tospeech synthesis technologies. Among features, the word surface string of the main content and function words and the part-of-speech of main function words in an accent phrase are found to have higher potential on predicting whether the accent phrase should be emphasized or not through the calculation of mutual information between emphasis label and features of Japanese advertisement sentences. Experiments confirm that emphasized accent phrase prediction using support vector machine (SVM) offers encouraging accuracies for the system which requires emphasized accent phrase locations as context information to improve speech synthesis qualities. 1. expressing linguistic \"focus\": (e.g., \" Taro did.\" (as an answer to \"who did ...?\")) 2. expressing \"contrast\": (e.g., \"not A but B\") 3. expressing \"element of surprise\": (e.g., \"I heard he was sick, but he had much energy.\") 4. disambiguating grammatical structure: clarifying parallel and dependency structure (e.g., to distinguish \"{old men} and women\" from \"old {men and women}\" in \"old men and women\")", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The introduction of corpus-based speech synthesis methods such as unit selection synthesis ( (Hunt, et al., 1996) etc.) and Hidden Markov Model speech synthesis ( (Zen, et al., 2009) etc.) makes expressive speech synthesis possible if an adequate speech database is prepared. However, the synthesized speech often fails to recreate emphasis or phrase boundary tone, even though both are key characteristics of expressive speech. The location markers of emphasis and phrase boundary tone have been confirmed useful in improving expressive speech synthesis qualities; they form part of the context information for speech synthesis (Meng, et al., 2012; Maeno, et al., 2014; Strom, et al., 2007; Yu, et al., 2010) .", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 113, |
| "text": "(Hunt, et al., 1996)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 163, |
| "end": 182, |
| "text": "(Zen, et al., 2009)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 629, |
| "end": 649, |
| "text": "(Meng, et al., 2012;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 650, |
| "end": 670, |
| "text": "Maeno, et al., 2014;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 671, |
| "end": 691, |
| "text": "Strom, et al., 2007;", |
| "ref_id": null |
| }, |
| { |
| "start": 692, |
| "end": 709, |
| "text": "Yu, et al., 2010)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "For establishing Text-To-Speech (TTS) synthesis for expressive speech, it is necessary to predict locations of emphasis and phrase boundary tone from the input text. The phrase boundary tone occurs at the phrase end, and existence/non-existence of the tone can be accurately classified, from the text to be synthesized, by using machine learning approaches (Nakajima, et al., 2013; Ross, et al., 1996) . Thus, this paper focuses on the remaining target of emphasis positions. In this work, we use the word \"emphasis (emphasized)\" to denote portions that are perceptually more salient to the listeners in a sentence.", |
| "cite_spans": [ |
| { |
| "start": 357, |
| "end": 381, |
| "text": "(Nakajima, et al., 2013;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 382, |
| "end": 401, |
| "text": "Ross, et al., 1996)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In human speech, emphasis can be regrouped at least into four functions based on analysis in conventional literatures as (Hovy et. al, 2013; Sridhar et. al, 2008 ) (bold portions show emphasized words and phrases).", |
| "cite_spans": [ |
| { |
| "start": 121, |
| "end": 140, |
| "text": "(Hovy et. al, 2013;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 141, |
| "end": 161, |
| "text": "Sridhar et. al, 2008", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This paper focuses on items 1 to 3. For the purpose of establishing TTS for expressive speech, item 4, structural disambiguation, is hard to resolve when the text has ambiguities. On the other hand, it is not a problem when there is no ambiguity; the prosodic structure can be accurately fixed by following the clear structure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PACLIC 28 ! 171", |
| "sec_num": null |
| }, |
| { |
| "text": "Emphasis on location of focus, contrast, and element of surprise (items 1 to 3) are related to the novelty status of the information to be conveyed; status is normally obtained from the context. In the conversation domain, conversation history is the previous context. Consider, for example, the example of item 1. The query \"who?\" is answered by \"Taro\", which is new information to the questioner and is often focused on and emphasized in the responder's speech. In the story telling domain, the sentences before the current sentence form the context, and are the source for judging the novelty status of information in the current sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PACLIC 28 ! 171", |
| "sec_num": null |
| }, |
| { |
| "text": "In some domains, however, the previous context does not always exist, for example, as in sales pitches or advertisements in mass media services. Sales pitch sentences are composed by copywriters based on their belief of what consumers will find newsworthy and only the sentences are read aloud and broadcasted. The sentence does not include the background that copywriters considered before fixing the sales pitch. Thus, narrators, actors/actress, directors, or producers decode the sales pitch sentence to extract which portions should be emphasized when read aloud. This suggests that it is possible to predict emphasized portions from the words of the sentence being synthesized.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PACLIC 28 ! 171", |
| "sec_num": null |
| }, |
| { |
| "text": "This paper focuses on emphasis in Japanese advertisement sentences and defines accent phrases as the prediction unit, while words have been used as the unit for predicting emphasis in the conversation domain (Hovy et. al, 2013) . Exclamation marks are one of the characters indicating emphasis in written texts; they are often observed in advertisement sentences and must be a good cue for emphasis prediction. The expressive speech database, explained in Section 2, includes examples of Japanese emphasized words (in bold style) with exclamation marks (' ' denotes word delimiter and translations are indicated by parentheses): ex.1) ! (before that!) ex.2) ! (you can enjoy!) ex.3) 110 ! (more than 110 types!) ex.4) ! (don't need water exchange!) The words immediately before exclamation marks are not always emphasized as in the Japanese word sequences of ex.1 and 2. However, the marks must have influence on emphasized words beyond their intermediate neighbors. As units longer than words might effectively include this long distance influence and accent phrases are one of the important units for Japanese speech synthesis and some studies on Japanese speech synthesis have adopted accent phrases as a unit of emphasis and confirmed improvements in speech wave generation (Maeno, et al., 2014) , we adopt accent phrases as the prediction unit as well.", |
| "cite_spans": [ |
| { |
| "start": 208, |
| "end": 227, |
| "text": "(Hovy et. al, 2013)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1278, |
| "end": 1299, |
| "text": "(Maeno, et al., 2014)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PACLIC 28 ! 171", |
| "sec_num": null |
| }, |
| { |
| "text": "This paper proposes a method for predicting emphasized accent phrases from sales pitch sentences to establish expressive TTS. As far as we know, this is the first paper that proposes the emphasis prediction from Japanese sales pitch sentences and adopts accent phrases as the prediction unit. Section 2 describes the expressive speech database used in this paper. Section 3 analyzes the distributions of emphasized accent phrases in terms of linguistic expressions and their locations in both sentences and intonation phrases. Section 4 explains our method of predicting emphasized accent phrases and its experimental confirmation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PACLIC 28 ! 171", |
| "sec_num": null |
| }, |
| { |
| "text": "2 Expressive speech database 2.1 Target domain This paper targets sales pitch texts for expressive speech synthesis. Given the increase of Internetoriented advertisements, it is essential to establish technologies that can convert advertisement text to speech with emphasis in the appropriate positions to ensure that the advertisements reach the consumers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PACLIC 28 ! 171", |
| "sec_num": null |
| }, |
| { |
| "text": "As ambiguous and misleading messages are not suitable as advertisements, we can expect that sales pitch texts do not include ambiguities, and so we can focus research efforts on emphasis prediction. Sales pitch texts are written in Japanese and are Japanese sentences collected from advertisement pages on the Internet (Nakajima, et al., 2010) . These include expressions that appear frequently in sales as \" (now on sale)\" and \" (Yen)\" and describe impressions and explanations of commercial products. ", |
| "cite_spans": [ |
| { |
| "start": 319, |
| "end": 343, |
| "text": "(Nakajima, et al., 2010)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PACLIC 28 ! 171", |
| "sec_num": null |
| }, |
| { |
| "text": "Although human annotators can tag speech data with emphasis labels, research has showed little agreement between human annotators (Hovy et. al, 2013) , and thus prediction targets cannot be fixed. As a practical solution, we asked one human subject to act as a recording director and decide emphasized accent phrases with the guideline that \"labels are put at accent phrases that tend to be emphasized in commercial message conveyed through mass media.\" The sales pitch database (Nakajima, et al., 2010) includes 248 utterances, which are divided into 363 texts (hereafter, sentences) by punctuation marks, and include 2,359 accent phrases as in Table 1 . Emphasis labels were assigned to 853 accent phrases (36.2% of all accent phrases) as shown in Table 1 . As 89% of the labels coincided with the labels set by at least one of the 3 annotators (based on listening to speech data), the labels extracted from the text are considered appropriate as emphasized labels. As reference, we also labeled emphasized words in the emphasized accent phrases as in Table 1 .", |
| "cite_spans": [ |
| { |
| "start": 130, |
| "end": 149, |
| "text": "(Hovy et. al, 2013)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 479, |
| "end": 503, |
| "text": "(Nakajima, et al., 2010)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 646, |
| "end": 653, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 750, |
| "end": 757, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 1054, |
| "end": 1061, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Emphasis labels", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "As this study focuses on features contributing to emphasis prediction, we added correct linguistic features as follows: word boundaries, part-of-speech (POS), accent phrase (AP) boundaries, pause positions. These features can be accurately extracted by text processing modules in conventional TTS. The number of POS and lemma (Fuchi, et al., 1998) were 62 and 1,571, respectively.", |
| "cite_spans": [ |
| { |
| "start": 326, |
| "end": 347, |
| "text": "(Fuchi, et al., 1998)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features for analysis", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We also automatically extracted, from above features, main content and function word in each accent phrase by rules frequently used in Japanese dependency parsing studies ( (Imamura, et al., 2007) etc.) . We also used these features in defining the portion between pauses as \"intonation phrase (IP)\", and entered the following binary information:", |
| "cite_spans": [ |
| { |
| "start": 173, |
| "end": 202, |
| "text": "(Imamura, et al., 2007) etc.)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features for analysis", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "\u2022 whether the IP is at the sentence end or not, \u2022 whether the AP is at the end of IP or not, \u2022 existence/non-existence of exclamation marks, punctuation marks and pause at the end of the AP. By predetermined table look up, we also added", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features for analysis", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "\u2022 existence/non-existence of expressions on commercial products' information, evaluation, and prices in the AP, and \u2022 existence/non-existence of sales-appeal words and qualifying words in the AP. Each word in the utterance including multiple sentences is examined if the word is mentioned in previous sentences in the utterance and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features for analysis", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "\u2022 the existence/non-existence of words showing newness in the AP are added as another feature. Above features can be accurately assigned automatically because ambiguities are small. While semantic roles were used in (Hovy et. al, 2013) , they are not used in our research, because automatic semantic role labeling is still immature and its accuracy remains insufficient and because our aim is to establish TTS and requires mature text processing.", |
| "cite_spans": [ |
| { |
| "start": 216, |
| "end": 235, |
| "text": "(Hovy et. al, 2013)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features for analysis", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "As shown in Fig.1 , about 70 percent of the sentences in the database have more than 2 emphasized accent phrases. Unlike conversation (Hovy et. al, 2013) , sales pitch speech synthesis requires the extraction of multiple emphasized accent phrases per sentence.", |
| "cite_spans": [ |
| { |
| "start": 134, |
| "end": 153, |
| "text": "(Hovy et. al, 2013)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 17, |
| "text": "Fig.1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Emphasized accent phrase distributions", |
| "sec_num": "3" |
| }, |
| { |
| "text": "With a view to identify phrase location, emphasized accent phrase distribution is summarized in Table 2 . Rows differ based on whether IP is emphasized (Emphasized IP (E-IP) or Not Emphasized The breakdown of E-IP lies in the four rows at the bottom of Table 2 ; the shares do not differ significantly (26.1, 16.5, 20.5 and 36.8 %). For detailed analysis, Fig.2 summarizes the likelihood of emphasized accent phrase by location in and length of intonation phrase whose lengths range from 1 to 5 (5 clusters correspond to length of intonation phrase).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 96, |
| "end": 103, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 253, |
| "end": 260, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 356, |
| "end": 361, |
| "text": "Fig.2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Emphasized accent phrase distributions", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Upper number on the x axis denotes the location of emphasized accent phrase in each intonation phrase length. The larger the number is, the later in the intonation phrase does the emphasized accent phrase exist. Though later accent phrase locations showed higher likelihood of emphasized accent phrase, the likelihood values do not differ significantly. Thus, we decided to use whether the IP is at the sentence end or not and whether the AP is at the end of IP or not as location features in emphasized accent phrase distribution analysis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Emphasized accent phrase distributions", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We also measured the distance between two adjacent emphasized accent phrases; results are summarized in Fig. 3 . 90% of emphasized accent phrases occurred within 0 to 4 accent phrases from the previous emphasized location. Thus, at most, the former PACLIC 28 ! 174 Entropy H(Y ) 0.94 1 Word surface string of the main content word in the AP 0.64 2 Word surface string of the main function word in the AP 0.15 3 Part-of-speech of the main function word in the AP 0.12 4 Whether the IP is at the sentence end or not 0.07 5 Existence/non-existence of exclamation marks at the end of the AP 0.07 6 Existence/non-existence of sales-appeal words in the AP 0.05 7 Existence/non-existence of expressions on commercial products' evaluation in the AP 0.05 8 Part-of-speech of the main content word in the AP 0.04 9 Whether the AP is at the end of IP or not 0.02 10 Existence/non-existence of pause at the end of the AP 0.02 11 Existence/non-existence of expressions on commercial products' information in the AP 0.01 12 Parallel structure 0.01 13 Existence/non-existence of punctuation marks at the end of the the AP 0.01 14 Existence/non-existence of expressions on commercial products' prices in the AP 4 and latter 4 accent phrases of the accent phrase might be a sufficient feature scope for emphasized accent phrase prediction.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 104, |
| "end": 110, |
| "text": "Fig. 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Emphasized accent phrase distributions", |
| "sec_num": "3" |
| }, |
| { |
| "text": "To identify the promising features for emphasized accent phrase prediction, we also calculated the prediction potential of features (locations of accent phrases and linguistic expressions) based on the mutual information between those features and emphasis labels. Since the numbers of words and POS are large, we used the mutual information instead of the likelihood shown in Fig.2 . When Y denotes em-phasis label (emphasis or not), X each feature expression, H(Y ) entropy of Y , and H(Y |X) is the conditional entropy of Y given X, then mutual information is calculated as H(Y ) \u2212 H(Y |X). The higher the mutual information value is, the greater is the contribution to emphasis prediction. Table 3 lists prediction potentials in descending order with the first row showing entropy H(Y ). As the ratio of emphasized AP to not emphasized AP was almost 1 to 2, H(Y ) was 0.94 which is very high. Middle column in Table 3 lists the feature expressions mentioned so far and rightmost column shows mutual information values as prediction potential.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 377, |
| "end": 382, |
| "text": "Fig.2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 694, |
| "end": 701, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 914, |
| "end": 921, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Emphasized accent phrase distributions", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Word surface string of the main content word in the AP and word surface string and part-of-speech of the main function word in the AP showed higher mutual information (0.64, 0.15, 0.12, respectively) and are expected to contribute to emphasized accent phrase prediction. In the database, accent phrases accompanying exclamation marks at the end of the accent phrase are emphasized except for one sample, but too many accent phrases without the mark are emphasized, thus the mutual information was small (0.07). Though we also examined other binary features as \"whether \u2022 \u2022 \u2022 \" and \"existence/non-PACLIC 28 ! 175 Table 3 to confirm their contribution to prediction performance and the generality of features, their mutual information values were also small.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 612, |
| "end": 619, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Emphasized accent phrase distributions", |
| "sec_num": "3" |
| }, |
| { |
| "text": "4 Emphasized accent phrase prediction 4.1 Prediction method As more than 2 accent phrases are emphasized in an advertisement sentence as shown in Fig.1 , we decided that the proposed method predicts multiple emphasized accent phrases in a sentence. As there are features that had few samples but whose probabilities are higher like exclamation marks, we consider emphasized accent phrase prediction as a classification problem between the existence/nonexistence of emphasis. We used support vector machines (SVM) as classifiers and the features in Table 3 to establish and test the emphasized accent phrase prediction method.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 146, |
| "end": 151, |
| "text": "Fig.1", |
| "ref_id": null |
| }, |
| { |
| "start": 548, |
| "end": 555, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Emphasized accent phrase distributions", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The expressive speech database mentioned in section 2 were used for training and evaluating the SVM in 5-fold cross validation way. We used the polynomial kernel function of SVM and examined several parameter combinations of the kernel function (dimension and cost). Table 4 summarizes parameters and ranges. The dimension and cost are integers.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 267, |
| "end": 274, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental conditions", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Others are indexes showing locations of accent phrases. 'i' denotes the location index of the accent phrase to be classified to emphasized or not, '-m' the location index of 'm' preceding accent phrase from i and 'n' the location index of 'n' following accent phrase from i. As we can use only past prediction results, maximum integer is '-1' for the location index of past prediction results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental conditions", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For later description and discussion, F i+m i\u2212n denotes the features between (i \u2212 n) and (i + m) locations, H i\u2212h i\u2212n the history of past prediction results ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental conditions", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Predicted result\u015d EN Answers E A B N C D", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental conditions", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "between (i \u2212 n) and (i \u2212 h) locations, F i+m i+1 a \"future feature\", F i\u22121 i\u2212n a \"past feature,\" respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental conditions", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We used accuracy as the performance evaluation measure and evaluated the total accuracies of the proposed method using 5-fold cross validation. Accuracy is defined by the number of correctly predicted emphasis and not-emphasis (A + D in Table 5) divided by the sum of the number of all 4 prediction results (in addition to the above 2 correct cases, the 2 other cases are that emphasis is erroneously classified as not-emphasis (B) and vice versa (C)):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation measure", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Accuracy [%] = (A + D)/(A + B + C + D) \u00d7 100.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation measure", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We examined 12 combinations of dimension (1 to 4) and cost (1 to 3) of the kernel function. Use of larger dimensions means combining more features. Better accuracies were obtained by larger dimensions than smaller dimensions. Cost values did not derive significant changes in accuracies for the same kernel dimension. Thus, we fixed dimension 4 and cost 1 and examined several scopes of features and history lengths of past prediction results. Accuracy for test data varied from 74.1 to 77.4% under the feature scope changing from F i+4 i\u22124 to F i+1 i\u22121 and history changing from H i\u22121 i\u22124 to H i\u22121 i\u22121 . The smaller the feature scope and history length was, the better the accuracy was. As no use of future features F i+m i+1 decreased accuracies slightly (0.2 to 0.6 points), future features somewhat contributes to prediction. No use of past prediction result H i\u2212h i\u22121 derived both slight increase (0.1 to 1.0) and decrease (0.2 to 0.3) of accuracies, but balance between recall and precision of emphasized accent phrases became worse.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Based on these results and as we consider that both emphasized and not-emphasized cases should be correctly predicted, we chose using both future features and past prediction results. As a result, the best accuracy was 77.4% at F i+1 i\u22121 and H i\u22121 i\u22121 (-1 only), then recall and precision rates of emphasized accent phrase were 64.2% and 70.6%, respectively. Detailed prediction results were shown in Table 6 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 401, |
| "end": 408, |
| "text": "Table 6", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "As far as we know, there is no research for predicting emphasized accent phrases from Japanese advertisement text. As baseline calculations, if all the accent phrases are predicted emphasized (\u00ca), accuracy is 36.2% and the recall and precision of emphasized accent phrases are 100% and 36.2%, respectively. On the other hand, if all the accent phrases are predicted non-emphasized (N ), accuracy is 63.8%, then both recall and precision of emphasized accent phrases are 0%. Thus, the proposed method offered 13.6 points higher accuracy than these above forced predictions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Since Fig. 2 showed lowest likelihood of emphasized accent phrase at the top of each IP, we also examined another feature of whether the AP is at the top of IP or not. The feature showed smaller prediction potential 0.005 than the 9th feature in Table 3 (0.02) and did not offer prediction accuracy improvements.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 6, |
| "end": 12, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "This paper proposed a method for predicting which portions of an advertisement text should be emphasized; it uses only the text itself. The method uses accent phrases as the prediction unit and the features obtained by the text processing modules of cur-rent Text-to-speech synthesis systems. According to mutual information, features such as word surface string of the main content and function word and part-of-speech of the main function word offer higher prediction potential. Experiments showed the proposed method yielded encouraging accuracies for such an expressive TTS which uses emphasized accent phrase locations as a context information as (Maeno, et al., 2014) . Accuracy improvement was left as a future work.", |
| "cite_spans": [ |
| { |
| "start": 652, |
| "end": 673, |
| "text": "(Maeno, et al., 2014)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Japanese morphological analyzer using word co-occurrence: JTAG", |
| "authors": [ |
| { |
| "first": "Takeshi", |
| "middle": [], |
| "last": "Fuchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Shin'ichiro", |
| "middle": [], |
| "last": "Takagi", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proceedings of Coling-ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "409--413", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Takeshi Fuchi and Shin'ichiro Takagi. 1998. \"Japanese morphological analyzer using word co-occurrence: JTAG\" Proceedings of Coling-ACL, 409-413.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Analysis and Modeling of Focus in Context\" Proceedings of INTERSPEECH", |
| "authors": [ |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Krishna", |
| "middle": [], |
| "last": "Gopala", |
| "suffix": "" |
| }, |
| { |
| "first": "Alok", |
| "middle": [], |
| "last": "Anumanchipalli", |
| "suffix": "" |
| }, |
| { |
| "first": "Caroline", |
| "middle": [], |
| "last": "Parlikar", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Vaughn", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Lammert", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "W" |
| ], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Black", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "402--406", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dirk Hovy, Gopala Krishna Anumanchipalli, Alok Par- likar, Caroline Vaughn, Adam Lammert, Eduard Hovy, and Alan W. Black. 2013. \"Analysis and Modeling of Focus in Context\" Proceedings of INTERSPEECH, 402-406.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Unit selection in a concatenative speech synthesis system using a large speech database", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [ |
| "J" |
| ], |
| "last": "Hunt", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "373--376", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew J. Hunt and Alan W. Black. 1996. \"Unit selec- tion in a concatenative speech synthesis system using a large speech database,\" Proceedings of ICASSP, 373- 376.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Japanese dependency parsing using sequential labeling for semi-spoken language", |
| "authors": [ |
| { |
| "first": "Kenji", |
| "middle": [], |
| "last": "Imamura", |
| "suffix": "" |
| }, |
| { |
| "first": "Gen", |
| "middle": [ |
| "'" |
| ], |
| "last": "Kikui", |
| "suffix": "" |
| }, |
| { |
| "first": "Norihito", |
| "middle": [], |
| "last": "Yasuda", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "225--228", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenji Imamura, Gen'ichiro Kikui, and Norihito Yasuda. 2007. \"Japanese dependency parsing using sequen- tial labeling for semi-spoken language\" Proceedings of ACL, 225-228.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Prosodic variation enhancement using unsupervised context labeling for HMM-based expressive speech synthesis", |
| "authors": [ |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Maeno", |
| "suffix": "" |
| }, |
| { |
| "first": "Takashi", |
| "middle": [], |
| "last": "Nose", |
| "suffix": "" |
| }, |
| { |
| "first": "Takao", |
| "middle": [], |
| "last": "Kobayashi", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomoki", |
| "middle": [], |
| "last": "Koriyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Ijima", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideharu", |
| "middle": [], |
| "last": "Nakajima", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideyuki", |
| "middle": [], |
| "last": "Mizuno", |
| "suffix": "" |
| }, |
| { |
| "first": "Osamu", |
| "middle": [], |
| "last": "Yoshioka", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Speech Communication", |
| "volume": "57", |
| "issue": "", |
| "pages": "144--154", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu Maeno, Takashi Nose, Takao Kobayashi, Tomoki Ko- riyama, Yusuke Ijima, Hideharu Nakajima, Hideyuki Mizuno, and Osamu Yoshioka. 2014. \"Prosodic variation enhancement using unsupervised context labeling for HMM-based expressive speech synthesis,\" Speech Communication, 57: 144-154.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Hierarchical English emphatic speech synthesis based on HMM with limited training data", |
| "authors": [ |
| { |
| "first": "Fanbo", |
| "middle": [], |
| "last": "Meng", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyong", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Helen", |
| "middle": [], |
| "last": "Meng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jia", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Lianhong", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of INTERSPEECH, Mon.P2b.09", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fanbo Meng, Zhiyong Wu, Helen Meng, Jia Jia and Lianhong Cai. 2012. \"Hierarchical English emphatic speech synthesis based on HMM with limited training data,\" Proceedings of INTERSPEECH, Mon.P2b.09.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Creation and Analysis of a Japanese Speaking Style Parallel Database for Expressive Speech Synthesis", |
| "authors": [ |
| { |
| "first": "Hideharu", |
| "middle": [], |
| "last": "Nakajima", |
| "suffix": "" |
| }, |
| { |
| "first": "Noboru", |
| "middle": [], |
| "last": "Miyazaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Akihiko", |
| "middle": [], |
| "last": "Yoshida", |
| "suffix": "" |
| }, |
| { |
| "first": "Takashi", |
| "middle": [], |
| "last": "Nakamura", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideyuki", |
| "middle": [], |
| "last": "Mizuno", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of Oriental COCOSDA", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hideharu Nakajima, Noboru Miyazaki, Akihiko Yoshida, Takashi Nakamura, Hideyuki Mizuno. 2010. \"Cre- ation and Analysis of a Japanese Speaking Style Par- allel Database for Expressive Speech Synthesis\" Pro- ceedings of Oriental COCOSDA, paper id 30.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Which resemblance is useful to predict phrase boundary rise labels for Japanese expressive text-to-speech synthesis, numerically-expressed stylistic or distribution-based semantic", |
| "authors": [ |
| { |
| "first": "Hideharu", |
| "middle": [], |
| "last": "Nakajima", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideyuki", |
| "middle": [], |
| "last": "Mizuno", |
| "suffix": "" |
| }, |
| { |
| "first": "Osamu", |
| "middle": [], |
| "last": "Yoshioka", |
| "suffix": "" |
| }, |
| { |
| "first": "Satoshi", |
| "middle": [], |
| "last": "Takahashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of INTERSPEECH", |
| "volume": "", |
| "issue": "", |
| "pages": "1047--1051", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hideharu Nakajima, Hideyuki Mizuno, Osamu Yosh- ioka, and Satoshi Takahashi. 2013. \"Which resem- blance is useful to predict phrase boundary rise la- bels for Japanese expressive text-to-speech synthesis, numerically-expressed stylistic or distribution-based semantic?\" Proceedings of INTERSPEECH, 1047- 1051.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Prediction of abstract labels for speech synthesis", |
| "authors": [ |
| { |
| "first": "Ken", |
| "middle": [], |
| "last": "Ross", |
| "suffix": "" |
| }, |
| { |
| "first": "Mari", |
| "middle": [], |
| "last": "Ostendorf", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Computer Speech & Language", |
| "volume": "10", |
| "issue": "3", |
| "pages": "155--185", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ken Ross and Mari Ostendorf. 1996. \"Prediction of ab- stract labels for speech synthesis\" Computer Speech & Language, 10(3): 155-185.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Detecting prominence in conversational speech: pitch accent, givenness and focus", |
| "authors": [ |
| { |
| "first": "Virek", |
| "middle": [], |
| "last": "Kumar Rangarajan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Sridhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrikanth", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Narayanan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of Speech Prosody", |
| "volume": "", |
| "issue": "", |
| "pages": "453--456", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Virek Kumar Rangarajan Sridhar, Ani Nenkova, Shrikanth Narayanan, Dan Jurafsky. 2008. \"De- tecting prominence in conversational speech: pitch accent, givenness and focus\" Proceedings of Speech Prosody, 453-456.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Modelling prominence and emphasis improves unit-selection synthesis", |
| "authors": [ |
| { |
| "first": "Yolanda", |
| "middle": [], |
| "last": "Vazquez-Alvarez", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Brenier", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "King", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of INTERSPEECH", |
| "volume": "", |
| "issue": "", |
| "pages": "1282--1285", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yolanda Vazquez-Alvarez, Jason Brenier, Simon King, and Dan Jurafsky. 2007. \"Modelling prominence and emphasis improves unit-selection synthesis,\" Proceedings of INTERSPEECH, 1282-1285.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Word-level emphasis modelling in HMM-based speech synthesis", |
| "authors": [ |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Fran\u00e7ois", |
| "middle": [], |
| "last": "Mairesse", |
| "suffix": "" |
| }, |
| {
| "first": "Steve",
| "middle": [],
| "last": "Young",
| "suffix": ""
| }
| ], |
| "year": 2010, |
| "venue": "Proceedings of ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "4238--4241", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Yu, Fran\u00e7ois Mairesse, and Steve Young. 2010. \"Word-level emphasis modelling in HMM-based speech synthesis,\" Proceedings of ICASSP, 4238- 4241.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Statistical parametric speech synthesis", |
| "authors": [ |
| { |
| "first": "Heiga", |
| "middle": [], |
| "last": "Zen", |
| "suffix": "" |
| }, |
| { |
| "first": "Keiichi", |
| "middle": [], |
| "last": "Tokuda", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Speech Communication", |
| "volume": "51", |
| "issue": "11", |
| "pages": "1039--1064", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heiga Zen, Keiichi Tokuda and Alan W. Black. 2009. \"Statistical parametric speech synthesis,\" Speech Communication, 51(11): 1039-1064.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "Number of Emphasized Accent phrases in a sentence Accumulated Percentage [%] Sentence Frequency Figure 1: Sentence frequency associated with number of emphasized accent phrases in a sentence." |
| }, |
| "FIGREF1": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "Likelihood of emphasized accent phrase by location in intonation phrase and its length. IP (NE-IP)), whether IP exists at the end of sentence (Final IP (F-IP) or Not Final IP (NF-IP)), and whether AP exists at the end of IP (Final AP (F-AP) or Not Final AP (NF-AP)). Sample accent phrases are written in Japanese and divided by '/' and English translations for each accent phrase are written and divided by '/' in parentheses. The row of E-IP (Emphasized Intonation Phrase) shows that 78.4% of IPs have at least one emphasized AP." |
| }, |
| "FIGREF2": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "Distance between adjacent emphasized accent phrases." |
| }, |
| "TABREF0": { |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td/><td>accent phrase base count</td></tr><tr><td>emphasized</td><td>853</td></tr><tr><td>not-emphasized</td><td>1,506</td></tr><tr><td/><td>word base count</td></tr><tr><td>emphasized</td><td>1,010</td></tr><tr><td>not-emphasized</td><td>4,727</td></tr></table>", |
| "type_str": "table", |
| "text": "Emphasis labels" |
| }, |
| "TABREF1": { |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td colspan=\"2\">Location</td><td/><td colspan=\"3\">IP ratio (%) E-AP ratio (%) Samples</td></tr><tr><td colspan=\"2\">NE-IP</td><td/><td>21.6</td><td>0</td><td/></tr><tr><td/><td>E-IP</td><td/><td>78.4</td><td>100</td><td/></tr><tr><td/><td colspan=\"2\">NF-IP NF-AP</td><td/><td>26.1</td><td>/</td></tr><tr><td/><td/><td/><td/><td>(</td><td colspan=\"2\">soon / do it up</td><td>)</td></tr><tr><td/><td/><td>F-AP</td><td/><td>16.5</td><td/><td>/</td></tr><tr><td/><td/><td/><td/><td>(</td><td colspan=\"2\">cholesterol / person indicating higher</td><td>)</td></tr><tr><td/><td colspan=\"2\">F-IP NF-AP</td><td/><td>20.5</td><td>/</td><td>/</td><td>/</td></tr><tr><td/><td/><td/><td/><td colspan=\"3\">(effectively/stiffness/flexed/will be)</td></tr><tr><td/><td/><td>F-AP</td><td/><td>36.8</td><td>/</td></tr><tr><td/><td/><td/><td/><td>(</td><td colspan=\"2\">dry skin /do not cry!)</td></tr><tr><td>Likelihood of</td><td>Emphasized Accent Phrase</td><td/><td/><td/><td/><td>Location</td></tr><tr><td/><td>1</td><td>2</td><td>3</td><td>4</td><td/><td>5</td><td>Length</td></tr><tr><td/><td colspan=\"6\">Location of Emphasized Accent Phrase in each Intonation Phrase Length (1 to 5)</td></tr></table>", |
| "type_str": "table", |
| "text": "Distribution of emphasized accent phrases (IP=Intonation Phrase, AP=Accent Phrase, NE=Not Emphasized, E=Emphasized, F=Final, NF=Not Final), bold phrases in samples are emphasized accent phrases in both Japanese and translations" |
| }, |
| "TABREF2": { |
| "num": null, |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "Prediction potential" |
| }, |
| "TABREF4": { |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td>Parameters</td><td>Range</td></tr><tr><td>dimension of polynomial kernel</td><td>1 to 4</td></tr><tr><td>cost of polynomial kernel</td><td>1 to 3</td></tr><tr><td>location index of features</td><td>-4 to 4</td></tr><tr><td colspan=\"2\">location index of past prediction results -3 to -1</td></tr><tr><td>existence of \u2022 \u2022 \u2022 \" in</td><td/></tr></table>", |
| "type_str": "table", |
| "text": "Range of parameters" |
| }, |
| "TABREF5": { |
| "num": null, |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "Accuracy definition (\u00ca andN are Emphasized and Not emphasized accent phrases as prediction results, E and N are Emphasized and Not emphasized accent phrases as answers,respectively, A, B, C, D are counts for each case, Accuracy is defined as (A + D)/(A + B + C + D) \u00d7 100)" |
| }, |
| "TABREF6": { |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td/><td colspan=\"2\">Predicted results recall</td></tr><tr><td/><td colspan=\"2\">EN</td></tr><tr><td colspan=\"2\">Answers E 548</td><td>305 64.2%</td></tr><tr><td/><td>N 228</td><td>1278</td></tr><tr><td>precision</td><td>70.6%</td></tr><tr><td>accuracy</td><td/><td>77.4%</td></tr></table>", |
| "type_str": "table", |
| "text": "Best prediction results at F i+1 i\u22121 andH i\u22121 i\u22121 (\u00ca,N , E, N are the same as inTable 5)" |
| } |
| } |
| } |
| } |