| { |
| "paper_id": "2005", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:49:38.533580Z" |
| }, |
| "title": "Using Language Modelling to Integrate Speech Recognition with a Flat Semantic Analysis", |
| "authors": [ |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "B\u00fchler", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Ulm", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "buehler@it.e-technik.uni-ulm.de" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Minker", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Ulm", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "minker@it.e-technik.uni-ulm.de" |
| }, |
| { |
| "first": "Artha", |
| "middle": [], |
| "last": "Elciyanti", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Ulm", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "One-stage decoding as an integration of speech recognition and linguistic analysis into one probabilistic process is an interesting trend in speech research. In this paper, we present a simple one-stage decoding scheme that can be realised without the implementation of a specialized decoder, nor the use of complex language models. Instead, we reduce an HMM-based semantic analysis to the problem of deriving annotated versions of the conventional language model, while the acoustic model remains unchanged. We present experiments with the ATIS corpus (Price, 1990) in which the performance of the one-stage method is shown to be comparable with the traditional two-stage approach, while requiring a significantly smaller increase in language model size.", |
| "pdf_parse": { |
| "paper_id": "2005", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "One-stage decoding as an integration of speech recognition and linguistic analysis into one probabilistic process is an interesting trend in speech research. In this paper, we present a simple one-stage decoding scheme that can be realised without the implementation of a specialized decoder, nor the use of complex language models. Instead, we reduce an HMM-based semantic analysis to the problem of deriving annotated versions of the conventional language model, while the acoustic model remains unchanged. We present experiments with the ATIS corpus (Price, 1990) in which the performance of the one-stage method is shown to be comparable with the traditional two-stage approach, while requiring a significantly smaller increase in language model size.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In a spoken dialogue system, speech recognition and linguistic analysis play a decisive role for the overall performance of the system. Traditionally, word hypotheses produced by the automatic speech recognition (ASR) component are fed into a separate natural language understanding (NLU) module for deriving a semantic meaning representation. These semantic representations are the system's understanding of the user's intentions. Based on this knowledge the dialogue manager has to decide on the system reaction. Because speech recognition is a probabilistic pattern matching problem that usually does not generate one single possible result, hard decisions taken after the speech recognition process could cause significant loss of information that could be important for the parsing and other subsequent processing steps and may thus lead to avoidable system failures. One common way of avoiding this problem is the use of N-best lists or word lattices as output representations, but these may require more complex NLU processing and/or increased processing times. In this paper, we follow an alternative approach: integrating flat HMM-based semantic analysis with the speech recognition process, resulting in a one-stage recognition system that avoids hard decisions between ASR and NLU. The resulting system produces word hypotheses where each word is annotated with a semantic label from which a frame-based semantic representation may easily be constructed. Fig. 1 sketches the individual processes involved in our integrative approach. The shaded portions in the figure indicate the models and processing steps that will be modified by versions using semantic labels. This will lead to an overall architecture, where a separate semantic decoding step (5) becomes dispensable.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1466, |
| "end": 1472, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "One contribution of this work is to show that compared to other one-stage approaches (Thomae et al., 2003) such an integrated recognition system does not require a specialized decoder or complex language model support. Instead, basic bi-gram language models may be used. We achieve the integration by \"reducing\" the NLU part to language modelling whilst enriching the lexicon and language model with semantic information. Conventional basic language modelling techniques are capable of representing this information. We redefine the units used in the language model: instead of using \"plain\" words, these are annotated with additional information. Such an additional information may consist of semantic labels and context information. For each of these annotated variants of a word, the phonetic transcription of the \"plain\" word is used. Consequently, the ASR cannot decide which variant to choose on the basis of the acoustic model. No retraining of the acoustic model is necessary. The speech recogniser produces word hypotheses enriched with semantic labels.", |
| "cite_spans": [ |
| { |
| "start": 85, |
| "end": 106, |
| "text": "(Thomae et al., 2003)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The remainder of this paper is structured as follows: In the next section we give a brief overview of the Cambridge HTK software we used for our experiments with the ATIS corpus. In Section 3 we outline the HMM-based parsing method. The basic approach for adding information into the speech recogniser language model is described in Section 4. In Section 5 we discuss our experiments and present speech recognition results. Finally, we conclude by pointing out further possible improvements.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Speech recognition may be formulated as an optimisation problem: Given a sequence of observations O consisting of acoustic feature vectors, determine the sequence of words W , such that it maximizes the conditional probability P (W |O). Bayes' rule is used to replace this conditional probability which is not directly computable by the product of two components: P (O|W ), the acoustic model, and P (W ), the language model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acoustic Modelling and Speech Recognition Using HTK", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "[W ] opt = argmax W {P (W )P (O|W )}", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Acoustic Modelling and Speech Recognition Using HTK", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The Cambridge Hidden Markov Model Toolkit (HTK) (Young et al., 2004) can be used to build robust speaker-independent speech recognition systems. The tied acoustic model parameters are estimated by the forward-backward algorithm.", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 68, |
| "text": "(Young et al., 2004)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acoustic Modelling and Speech Recognition Using HTK", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The HTK Viterbi decoder can be used together with a probabilistic word network that may be computed from a finite state grammar or the bi-gram statistics of a text corpus. The decoder's token passing algorithm is able reference word : case frame or concept identifier case frame: set of cases related to a concept case:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acoustic Modelling and Speech Recognition Using HTK", |
| "sec_num": "2" |
| }, |
| { |
| "text": "attribute of a concept case marker:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acoustic Modelling and Speech Recognition Using HTK", |
| "sec_num": "2" |
| }, |
| { |
| "text": "surface structure indicator of a case case system: complete set of cases of the application Figure 2 : Semantic case grammar formalism.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 92, |
| "end": 100, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Acoustic Modelling and Speech Recognition Using HTK", |
| "sec_num": "2" |
| }, |
| { |
| "text": "to produce word hypotheses lattices or N-best lists of recognition results. Internally this word network is combined with a phonetic transcription dictionary to produce an expanded network of phoneme states. Usually, one phoneme or triphone is represented by five states.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acoustic Modelling and Speech Recognition Using HTK", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For our experiments with the ATIS corpus, the acoustic model is constructed in a conventional way. We use 4500 utterances to train a triphone recogniser with 8 Gaussian mixtures. A triphone count of 5929 physical triphones expands to 27619 logical ones. The acoustic model is used for both the two-stage and the one-stage experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acoustic Modelling and Speech Recognition Using HTK", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In the domain of spoken language information retrieval, spontaneous effects in speech are very important (Minker, 1999) . These include false starts, repetitions and illformed utterances. Thus it would be improvident to base the semantic extraction exclusively on a syntactic analysis of the input utterance. Parsing failures due to ungrammatical syntactic constructs may be reduced if those phrases containing important semantic information could be extracted whilst ignoring the non-essential or redundant parts of the utterance. Restarts and repeats frequently occur between the phrases. Poorly syntactic constructs often consist of well-formed phrases which are semantically meaningful. One approach to extract semantic information is based on case frames. The original concept of a case frame as described by Fillmore (Fillmore, 1968) is based on a set of universally applicable cases or case values. They express the relationship between a verb and its nouns. Bruce (Bruce, 1975) extended the Fillmore theory to any concept-based system and defined an appropriate semantic grammar whose formalism is given in Fig. 2 \u2022 price: this reference word identifies the concept airfare (other concepts may be: book, flight, ...)", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 119, |
| "text": "(Minker, 1999)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 972, |
| "end": 985, |
| "text": "(Bruce, 1975)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1115, |
| "end": 1121, |
| "text": "Fig. 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "HMM-Based Semantic Case Frame Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 from: case marker of the case from-city corresponding to the departure city San Francisco", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HMM-Based Semantic Case Frame Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 to: case marker of the case to-city corresponding to the arrival city Dallas", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HMM-Based Semantic Case Frame Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 class: case marker of the case flight-class corresponding to first", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HMM-Based Semantic Case Frame Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 case system: from, to, class, ...", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HMM-Based Semantic Case Frame Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The parsing process based on a semantic case grammar typically considers less than 50% of the example query to be semantically meaningful. The hesitations and false starts are ignored. The approach therefore appears well suited for natural language understanding components where the need for semantic guidance in parsing is especially relevant. Case frame analysis may be used in a rule-based case grammar. Here, we apply HMM-based modelling instead (Pieraccini et al., 1992; . In the frame-based representation, the semantic labelling does not consider all the words of the utterance, but only those related to the concept and its cases. However, in order to estimate the model parameters, each word of the utterance must have a corresponding semantic label. Thus, the additional label (null) is assigned to those words not used by the case frame analyzer for the specific application. A semantic sequence consists of the basic labels <concept>, (m:case), (v:case) and (null) corresponding respectively to the reference words, case markers, values and irrelevant words.", |
| "cite_spans": [ |
| { |
| "start": 451, |
| "end": 476, |
| "text": "(Pieraccini et al., 1992;", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HMM-Based Semantic Case Frame Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Relative occurrences of model states and observations are used to establish the Markov Model, whose topology needs to be fixed prior to training and decoding. Semantic labels are defined as the states s j . All states such as the examples (v:at-city), (null) and <ground-service> shown can follow each other; thus the model is ergodic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HMM-Based Semantic Case Frame Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In direct analogy to the speech recognition problem (equation 1), the decoding consists of maximizing the conditional probability P (S|W ) of some state sequence S given the observation sequence W :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HMM-Based Semantic Case Frame Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "[S] opt = argmax S {P (S)P (W |S)}", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "HMM-Based Semantic Case Frame Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Given the dimensionality of the sequence W , the exact computation of the likelihood P (W |S) is intractable. Again, bi-grams are a common approximation in order to robustly estimate the Markov Model parameters, the state transitions probabilities P (s j |s i ) and the observation symbol probability distribution P (w m |s j ) in state j.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HMM-Based Semantic Case Frame Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In contrast to speech recognition, the computation of the model parameters can be achieved through maximum likelihood estimation, i.e. by counting event occurrences. Usually a back-off and discounting strategy is applied in order to improve robustness in the face of unseen events.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HMM-Based Semantic Case Frame Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "An HMM-based parsing module may be conceived as a probabilistic finite state transducer that translates a sequence of words into a sequence of semantic labels. The semantic labels denote a word's function in the semantic representation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HMM-Based Semantic Case Frame Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Although the flat semantic model has known limitations with respect to the representation of long-term dependencies, for practical applications it is often sufficient. It has been shown that several methods, such as contextual observations and garbage models, exist that enhance the performance of HMM-based stochastic parsing models (Beuschel et al., 2004) .", |
| "cite_spans": [ |
| { |
| "start": 334, |
| "end": 357, |
| "text": "(Beuschel et al., 2004)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "HMM-Based Semantic Case Frame Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "As mentioned above, the language model P (W ) represents the probability of a state sequence. With the bigram approximation P (W ) \u2248 P (w i |w i\u22121 ) this probability becomes a transition probability between words in a word network. By adding information to the language model (cf. shaded parts of Fig. 1 ) we modify the word network in a way that instead of \"plain\" words as nodes the network should now contain \"annotated\" variants of these original words. The annotation then encodes some additional information that is relevant to the further processing in the dialogue system, but does not affect the pronunciation of the word. By introducing such labelled word variants, it is possible to encode some additional relations that exist between the labels rather than between the words.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 297, |
| "end": 303, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Adding Information to the Language Model", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Consider the following utterance and a corresponding labelling of each word with additional information: Show-(null) me-(null) ground-<ground-service> transportation-<ground-service> for-(null) Dallas-(v:at-city)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adding Information to the Language Model", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The word network computed from utterances of the latter form instead of plain texts will represent the fact that after a word labelled as (null), a city name labelled as (v:atcity) is much more likely than labelling as (v:from-city) or (v:to-city). In order to compute the modified version of the network it is only necessary to replace the words by their labelled variants in the training corpus and to compute the bi-gram statistics from this modified corpus (cf. step (2) in Fig. 1) .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 478, |
| "end": 485, |
| "text": "Fig. 1)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Adding Information to the Language Model", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For expanding the word network into a network of phoneme states as required by the speech recognition, it is necessary to modify the phonetic transcription dictionary accordingly: for each labelled variant of a word appearing in the labelled training texts, the respective unlabelled word entry is copied. The Viterbi decoder will now output sequences of annotated words (step (3) ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 371, |
| "end": 380, |
| "text": "(step (3)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Adding Information to the Language Model", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The language model may not only be enriched by semantic labels. Other information, such as the context of the word may also be used. A language model labelled with a context that consists of one word on the left is essentially a tri-gram model. There is a trade-off between what the network can express and its size. Using too many different labels for each word in the network may quickly result in word networks impractical for realtime use. For our experiments within the ATIS domain, Table 1 summarises the word network sizes for different labelling methods. Here, \"ASR\" refers to the original base-line unlabelled language model. \"ASR/Cl\" is a simple class-based language model with manually defined classes. A left context of one word was used in \"ASR/Co\", and combined with classes in \"ASR/CC\". These labelled versions may be used in the two-stage approach to improve the speech recognition results.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 488, |
| "end": 495, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Adding Information to the Language Model", |
| "sec_num": "4" |
| }, |
| { |
| "text": "\"ASR+\", \"ASR+Co\", \"ASR+N\" refer to semantically labelled language models. \"ASR+\" is directly trained on the semantically labelled training texts. \"ASR+Co\" furthermore includes a left context of one semantic label, whereas \"ASR+N\" includes sub-label numbering.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adding Information to the Language Model", |
| "sec_num": "4" |
| }, |
| { |
| "text": "As can be seen from the numbers, word classes as well as the semantic methods only incur a modest increase in network size. The word-based methods, however, significantly inflate the model. Although we have not systematically recorded the time necessary for recognizing the test set with these networks, it is fair to say the time escalates from minutes to hours.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adding Information to the Language Model", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The last column in Table 1 denotes the estimated average per-utterance processing time in seconds. The numbers were obtained on a Pentium 4 with 2.6 GHz speed and 1 GB of RAM running Linux.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 19, |
| "end": 26, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Adding Information to the Language Model", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For our experiments with the ATIS corpus, the stochastic parsing model is computed from 1500 utterances, manually annotated in a bootstrapping process. We use 13 semantic word classes (e.g. /weekday/, /number/, /month/, /cityname/). The semantic representation consists of 70 different labels. Splitting sequences of identical labels into numbered sub-labels results in 174 numbered labels. The semantic representation focuses on the identification of key slots, such as origin, destination and stop over locations, as well as airline names and flight numbers. Word sequences containing temporal information such as constraints on the departure or arrival time are not analysed in detail. Instead, all these words are marked with (v:arrive-time) or (v:depart-time), respectively. The test corpus consists of 418 utterances which were manually annotated with semantic labels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Speech Recognition Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For the two-stage approach different word-based language models (plain, class-based, context, combined) were used (cf. section 4). An N-best decoding was performed and 20 hypotheses were subsequently labelled by the stand-alone stochastic parser. After that, the result with the maximum combined probability value was chosen. In the one-stage approach, two refinements (context and numbering) were applied to the basic semantic language model. Tables 2 and 3 present the results of these experiments. They are based on word recognition and concept recognition performance, respectively. The columns titled \"Correct\" and \"Accuracy\" refer to word correct rate and word accuracy, as well as to their concept-level equivalents. The \"Sentence\" column lists the percentage of completely correctly decoded sentences. For the twostage approach, the numbers in Table 2 denote the performance of the speech recognition system alone (step (3) in Fig. 1 ). For the one-stage approach, the semantic labels were removed after decoding in order to obtain the plain word sequences. It can be seen that the word-based recognition benefits both from word-based additions to the language model, as well as from semantic labels in about the same rate. Table 3 summarizes the concept-level results. Here, the semantic labels are also compared against the reference. Numbers in sub-labels are ignored, however. The \"NLU\" row denotes the performance on perfectly recognized data, i.e. on the training transcriptions. One-stage integrated recognition produces competitive recognition rates when compared to the two-stage approach. Even though in the two-stage approach, each stage's representation can be fine-tuned separately.", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 103, |
| "text": "(plain, class-based, context, combined)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 852, |
| "end": 859, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 935, |
| "end": 941, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 1232, |
| "end": 1239, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Speech Recognition Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "It seems interesting to note a subtle difference between the decoding procedures of the two-stage and the one-stage architectures. In a stand-alone stochastic parser, Viterbi decoding is used for word-to-label correspondences. The probability of a transition from semantic state s i to s j is thus defined as the product P (w j |s j )P (s j |s i ), where P (w j |s j ) is the probability of observing w j in state s j . In contrast, if a labelled language model is used the transition probability is P (w j |w i ), where w i and w j are pairs of the actual words and their associated labels, so the surface form of the last word influences the transition as well (not only its label).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Speech Recognition Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "It can be shown that a flat HMM-based semantic analysis does not require a separate decoding stage. Instead it seems possible to use the speech recogniser's language model to represent the semantic state model, without compromising recognition in terms of word or slot error rate.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "For a stand-alone speech recognition component, it seems advantageous to use a class-based or context-based language model, since it improves the word recognition score. For the stochastic parsing, numbered sub-labels provide best results. With N-best decoding, the stochastic parser can be used to select the best overall hypothesis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "A number of improvements and extensions may be considered for the different processing stages. Firstly, instead of representing compound airport and city names such as \"New York City\" as word sequences, they could be entered in the dictionary as single words, which should avoid certain recognition errors. In addition, an equivalent of a class-based language model should be defined for semantically annotated language models. Also, contextual observations, i.e. the use of a class of manually defined context words could help the stochastic parser to address long-term dependencies that have so far proved difficult. Finally, the ATIS task results in relatively simple semantic structures and yields a limited vocabulary size. It would be interesting to apply our proposed techniques to a more complex domain, such as an appointment scheduling task , implying a more natural speech-based interaction. This would enable us to validate our approach on larger vocabulary sizes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Strategies for Optimizing a Stochastic Spoken Natural Language Parser", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Beuschel", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Minker", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "B\u00fchler", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of International Conference of Speech and Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2177--2180", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "C. Beuschel, W. Minker, and D. B\u00fchler. 2004. Strate- gies for Optimizing a Stochastic Spoken Natural Lan- guage Parser. In Proceedings of International Con- ference of Speech and Language Processing, ICSLP, pages 2177-2180, Jeju Island (Korea), October.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Case Systems for Natural Language", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Bruce", |
| "suffix": "" |
| } |
| ], |
| "year": 1975, |
| "venue": "Artificial Intelligence", |
| "volume": "6", |
| "issue": "", |
| "pages": "327--360", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "B. Bruce. 1975. Case Systems for Natural Language. Artificial Intelligence, 6:327-360.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The case for case", |
| "authors": [ |
| { |
| "first": ".", |
| "middle": [ |
| "J" |
| ], |
| "last": "Ch", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Fillmore", |
| "suffix": "" |
| } |
| ], |
| "year": 1968, |
| "venue": "Universals in Linguistic Theory", |
| "volume": "", |
| "issue": "", |
| "pages": "1--90", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ch. J. Fillmore. 1968. The case for case. In Em- mon Bach and Robert T. Harms, editors, Universals in Linguistic Theory, pages 1-90. Holt and Rinehart and Winston Inc.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Stochastically-based Semantic Analysis for Machine Translation", |
| "authors": [], |
| "year": null, |
| "venue": "Computer Speech and Language", |
| "volume": "13", |
| "issue": "2", |
| "pages": "177--194", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stochastically-based Semantic Analysis for Machine Translation. Computer Speech and Language, 13(2):177-194.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
"title": "Stochastically-based semantic analysis for ARISE - Automatic Railway Information Systems for Europe",
"authors": [
    {
        "first": "W",
        "middle": [],
        "last": "Minker",
        "suffix": ""
    }
],
"year": 1999,
"venue": "Grammars",
| "volume": "2", |
| "issue": "2", |
| "pages": "127--147", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "W. Minker. 1999. Stochastically-based semantic analy- sis for ARISE -Automatic Railway Information Sys- tems for Europe. Grammars, 2(2):127-147.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A Speech Understanding System Based on Statistical Representation of Semantics", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Pieraccini", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Tzoukermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Gorelov", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "L" |
| ], |
| "last": "Gauvain", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Levin", |
| "suffix": "" |
| }, |
| { |
| "first": "C-H", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "G" |
| ], |
| "last": "Wilpon", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "Proceedings of International Conference on Acoustics Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "193--196", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Pieraccini, E. Tzoukermann, Z. Gorelov, J.L. Gauvain, E. Levin, C-H. Lee, and J.G. Wilpon. 1992. A Speech Understanding System Based on Statistical Represen- tation of Semantics. In Proceedings of International Conference on Acoustics Speech and Signal Process- ing, ICASSP, pages 193-196, March.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Evaluation of Spoken Language Systems: The ATIS Domain", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Price", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Proceedings of DARPA Speech and Natural Language Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "91--95", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "P. Price. 1990. Evaluation of Spoken Language Systems: The ATIS Domain. In Proceedings of DARPA Speech and Natural Language Workshop, pages 91-95, June.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "A one-stage decoder for the interpretation of natural speech", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Thomae", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Fabian", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Lieb", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Ruske", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of International Conference of Speech and Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "56--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Thomae, T. Fabian, R. Lieb, and G. Ruske. 2003. A one-stage decoder for the interpretation of natural speech. In Proceedings of International Conference of Speech and Language Processing, ICSLP, pages 56- 64.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
"title": "The HTK Book (for HTK Version 3.2)",
"authors": [
    {
        "first": "S",
        "middle": [],
        "last": "Young",
        "suffix": ""
    },
    {
        "first": "G",
        "middle": [],
        "last": "Evermann",
        "suffix": ""
    },
    {
        "first": "D",
        "middle": [],
        "last": "Kershaw",
        "suffix": ""
    },
    {
        "first": "J",
        "middle": [],
        "last": "Odell",
        "suffix": ""
    },
    {
        "first": "D",
        "middle": [],
        "last": "Ollason",
        "suffix": ""
    },
    {
        "first": "D",
        "middle": [],
        "last": "Povey",
        "suffix": ""
    },
    {
        "first": "V",
        "middle": [],
        "last": "Valtchev",
        "suffix": ""
    },
    {
        "first": "P",
        "middle": [],
        "last": "Woodland",
        "suffix": ""
    }
],
"year": 2004,
"venue": "Cambridge University Engineering Department",
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Young, G. Evermann, D. Kershaw, J. Odell, D. Ol- lason, D. Povey, V. Valtchev, and P. Woodland, 2004. The HTK Book (for HTK Version 3.2). Cambridge Uni- versity Engineering Department.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Principal knowledge sources and models of speech recognition and semantic analysis. Shaded parts constitute the changes when using a one-stage approach. The numbers indicate the following computational steps:(1) acoustic model parameter estimation, (2) language modelling, (3) Viterbi acoustic decoding, (4) semantic model parameter estimation, (5) Viterbi semantic decoding.", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "Word network sizes for different labelling techniques. \"Expanded\" refers to the phoneme state network. t denotes estimated per utterance processing time.", |
| "content": "<table><tr><td>Method</td><td colspan=\"2\">Words</td><td colspan=\"2\">Expanded</td><td>t</td></tr><tr><td/><td colspan=\"3\">Nodes Arcs Nodes</td><td>Arcs</td><td>[s]</td></tr><tr><td>ASR</td><td>465</td><td colspan=\"3\">2,939 5,985 15,482 14.1</td></tr><tr><td>ASR/Cl</td><td>709</td><td colspan=\"3\">4,382 8,775 21,360 67.8</td></tr><tr><td colspan=\"5\">ASR/Co 3,603 14,126 46,043 84,464 15.9</td></tr><tr><td colspan=\"5\">ASR/CC 6,632 18,117 83,207 115,703 117.0</td></tr><tr><td colspan=\"2\">ASR+ 1,243</td><td colspan=\"3\">7,108 16,210 38,314 13.4</td></tr><tr><td colspan=\"5\">ASR+Co 2,269 10,272 29,535 59,209 20.8</td></tr><tr><td colspan=\"2\">ASR+N 1,556</td><td colspan=\"3\">7,966 19,516 42,996 14.7</td></tr></table>" |
| }, |
| "TABREF2": { |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "Word correctness results.", |
| "content": "<table><tr><td colspan=\"4\">Method Correct Accuracy Sentence</td></tr><tr><td/><td>[%]</td><td>[%]</td><td>[%]</td></tr><tr><td>ASR</td><td>82.66</td><td>67.20</td><td>20.56</td></tr><tr><td>ASR/Co</td><td>85.53</td><td>72.74</td><td>26.61</td></tr><tr><td>ASR/Cl</td><td>84.33</td><td>70.96</td><td>24.60</td></tr><tr><td>ASR/CC</td><td>85.43</td><td>72.68</td><td>27.42</td></tr><tr><td>ASR+</td><td>85.04</td><td>72.03</td><td>25.60</td></tr><tr><td>ASR+Co</td><td>85.02</td><td>71.90</td><td>25.60</td></tr><tr><td>ASR+N</td><td>85.13</td><td>72.16</td><td>25.81</td></tr></table>" |
| }, |
| "TABREF3": { |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "Concept correctness results.", |
| "content": "<table><tr><td colspan=\"4\">Method Correct Accuracy Sentence</td></tr><tr><td/><td>[%]</td><td>[%]</td><td>[%]</td></tr><tr><td>NLU</td><td>96.97</td><td>96.97</td><td>85.69</td></tr><tr><td>ASR</td><td>76.67</td><td>60.73</td><td>18.18</td></tr><tr><td>ASR/Co</td><td>78.62</td><td>65.92</td><td>24.80</td></tr><tr><td>ASR/Cl</td><td>78.69</td><td>66.97</td><td>24.60</td></tr><tr><td>ASR/CC</td><td>78.02</td><td>63.77</td><td>13.31</td></tr><tr><td>ASR+</td><td>77.72</td><td>64.80</td><td>21.57</td></tr><tr><td>ASR+Co</td><td>77.74</td><td>64.69</td><td>21.77</td></tr><tr><td>ASR+N</td><td>77.44</td><td>64.58</td><td>22.18</td></tr></table>" |
| } |
| } |
| } |
| } |