|
{ |
|
"paper_id": "H92-1015", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:28:43.479230Z" |
|
}, |
|
"title": "SPEECH UNDERSTANDING IN OPEN TASKS", |
|
"authors": [ |
|
{ |
|
"first": "Wayne", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University Pittsburgh", |
|
"location": { |
|
"postCode": "15213", |
|
"region": "PA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Xuedong", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University Pittsburgh", |
|
"location": { |
|
"postCode": "15213", |
|
"region": "PA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hsiao-Wuen", |
|
"middle": [], |
|
"last": "Hon", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University Pittsburgh", |
|
"location": { |
|
"postCode": "15213", |
|
"region": "PA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mei-Yuh", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University Pittsburgh", |
|
"location": { |
|
"postCode": "15213", |
|
"region": "PA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sheryl", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University Pittsburgh", |
|
"location": { |
|
"postCode": "15213", |
|
"region": "PA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Matessa", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University Pittsburgh", |
|
"location": { |
|
"postCode": "15213", |
|
"region": "PA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Fu-Hua", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University Pittsburgh", |
|
"location": { |
|
"postCode": "15213", |
|
"region": "PA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Stern", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University Pittsburgh", |
|
"location": { |
|
"postCode": "15213", |
|
"region": "PA" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The Air Traffic Information Service task is currently used by DARPA as a common evaluation task for Spoken Language Systems. This task is an example of open type tasks. Subjects are given a task and allowed to interact spontaneously with the system by voice. There is no fixed lexicon or grammar, and subjects are likely to exceed those used by any given system. In order to evaluate system performance on such tasks, a common corpus of training data has been gathered and annotated. An independent test corpus was also created in a similar fashion. This paper explains the techniques used in our system and the performance results on the standard set of tests used to evaluate systems.", |
|
"pdf_parse": { |
|
"paper_id": "H92-1015", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The Air Traffic Information Service task is currently used by DARPA as a common evaluation task for Spoken Language Systems. This task is an example of open type tasks. Subjects are given a task and allowed to interact spontaneously with the system by voice. There is no fixed lexicon or grammar, and subjects are likely to exceed those used by any given system. In order to evaluate system performance on such tasks, a common corpus of training data has been gathered and annotated. An independent test corpus was also created in a similar fashion. This paper explains the techniques used in our system and the performance results on the standard set of tests used to evaluate systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Our Spoken Language System uses a speech recognizer which is loosely coupled to a natural language understanding system. The SPHINX-II speech recognition system produces a single best hypothesis for the input. It uses a backed-off class bigram language model in decoding the input. This type of smoothed stochastic language model provides some flexibility when presented with unusual grammatical constructions. The single best hypothesis is passed to the natural language understanding system which uses flexible parsing techniques to cope with novel phrasings and misrecognitions. In addition to the basic speech recognition and natural language understanding modules, we have developed techniques to enhance the performance of each. We have developed an environmental robustness module to minimize the effects of changing environments on the recognition. We have also developed a system to use a knowledge base to asses and correct the parses produced by our natural language parser. We present each of the modules separately and discuss their evaluation results in order to understand how well the techniques perform. The authors on each line in the paper heading reflect those people who worked on each module respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SYSTEM OVERVIEW", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Our NL understanding system (Phoenix) is flexible at several levels. It uses a simple frame mechanism to represent task semantics. Frames are associated with the various types of actions that can be taken by the system. Slots in a frame represent the various pieces of information relevant to the action that may be specified by the subject. For example, the most frequently used frame is the one corresponding to a request to display some type of flight information. Slots in the frame specify what information is to be displayed (flights, fares, times, airlines, etc), how it is to be tabulated (a list, a count, etc) and the constraints that are to be used (date ranges, time ranges, price ranges, etc).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "FLEXIBLE PARSING", |
|
"sec_num": "2." |
|
}, |
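{
"text": "To make the frame-and-slot representation concrete, here is a minimal Python sketch of one plausible encoding; the frame, slot names and fillers are hypothetical illustrations, not the system's actual inventory:\n\n# A frame ties a set of semantic slots to an action; unfilled slots stay None.\nflight_info_frame = {\n    'action': 'display_flight_info',\n    'slots': {\n        'display': None,     # what to show: flights, fares, times, airlines, ...\n        'tabulate': None,    # how to show it: a list, a count, ...\n        'constraints': {},   # date ranges, time ranges, price ranges, ...\n    },\n}\n\n# Slots are filled as phrases are recognized:\nflight_info_frame['slots']['display'] = 'flights'\nflight_info_frame['slots']['constraints']['depart_time_range'] = ('14:00', None)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "FLEXIBLE PARSING",
"sec_num": "2."
},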
|
{ |
|
"text": "The Phoenix system uses recursive Iransition networks to specify word patterns (sequences of words) which correspond to semantic tokens understood by the system. A subset of tokens are considered as top-level tokens, which means they can be recognized independently of surrounding context. Nets call other nets to produce a semantic parse tree. The top-level tokens appear as slots in frame structures. The frames serve to associate a set of semantic tokens with a function. Information is often represented redundantly in different nets. Some nets represent more complex bindings between tokens, while others represent simple stand-alone values. In our system, slots (pattern specifications) can be at different levels in a hierarchy. Higher level slots can contain the information specified in several lower level slots. These higher level forms allow more specific relations between the lower level slots to be specified. For example, from denver arriving in dallas after two pm will have two parses,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "FLEXIBLE PARSING", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "[city] denver [ARRIVE_LOC] arnvmg is the preferred-interpretation. In picking which interpretation is correct, higher level slots are preferred to lower level ones because the associations be-tween concepts is more tightly bound, thus the second (correct) interpretation is picked here. The simple heuristic to select for the interpretation which has fewer slots (with the same number of words accounted for) allows the situation to be resolved correctly.", |
|
"cite_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 26, |
|
"text": "[ARRIVE_LOC]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "[DEPART LOC] from [de part_loc]", |
|
"sec_num": null |
|
}, |
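{
"text": "As a rough sketch of how nets call other nets to match the word patterns for a semantic token, consider the fragment below; the nets and patterns are invented for illustration, and a real recursive transition network implementation is more general:\n\nnets = {\n    '[arrive_loc]': [['arriving', 'in', '[city]']],\n    '[city]': [['denver'], ['dallas']],\n}\n\ndef match(net, words):\n    # Try each pattern of the net against a prefix of words; return the\n    # number of words consumed on success, None on failure. Pattern\n    # elements that name another net are matched by a recursive call.\n    for pattern in nets[net]:\n        i, ok = 0, True\n        for elem in pattern:\n            if elem in nets:\n                used = match(elem, words[i:])\n                if used is None:\n                    ok = False\n                    break\n                i += used\n            elif i < len(words) and words[i] == elem:\n                i += 1\n            else:\n                ok = False\n                break\n        if ok:\n            return i\n    return None\n\nprint(match('[arrive_loc]', 'arriving in dallas after two pm'.split()))  # -> 3",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "[DEPART_LOC] from [depart_loc]",
"sec_num": null
},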
|
{ |
|
"text": "The parser operates by matching the word patterns for tokens against the input text. A set of possible interpretations are pursued simultaneously. A subsumption algorithm is used to find the longest version of a phrase for efficiency purposes. As tokens (phrases) are recognized, they are added to frames to which they apply. The algorithm is basically a dynamic programming beam search. Many different frames, and several different versions of a frame, are pursued simultaneously. The score for each frame hypothesis is the number of words that it accounts for. At the end of an utterance the parser picks the best scoring frame as the result.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "[DEPART LOC] from [de part_loc]", |
|
"sec_num": null |
|
}, |
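{
"text": "A toy version of the selection step: every frame hypothesis is scored by the number of words it accounts for, and, per the heuristic described earlier, ties are broken in favor of the interpretation with fewer slots. The hypothesis tuples are illustrative:\n\ndef pick_frame(hypotheses):\n    # Each hypothesis is (frame_name, slots_filled, words_accounted_for).\n    # Most words wins; among equals, fewer slots is preferred.\n    return max(hypotheses, key=lambda h: (h[2], -h[1]))\n\nhyps = [\n    ('flight_info', 3, 9),  # flat reading: [ARRIVE_LOC] plus [DEPART_TIME]\n    ('flight_info', 2, 9),  # grouped reading: one higher level [ARRIVE] slot\n]\nprint(pick_frame(hyps))     # -> ('flight_info', 2, 9)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "[DEPART_LOC] from [depart_loc]",
"sec_num": null
},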
|
{ |
|
"text": "The parse is flexible at the slot level in that it allows slots to be filled independent of order. It is not necessary to represent all different orders in which the slot patterns could occur. Grammatical restarts and repeats are handled by overwriting a slot if the same slot is subsequently recognized again.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "[DEPART LOC] from [de part_loc]", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The pattern matches are also flexible because of the way the grammars are written. The patterns for a semantic token consist of mandatory words or tokens which are necessary to the meaning of the token and optional elements. The patterns are also written to overgenerate in ways that do not change the semantics. This overgeneration not only makes the pattern matches more flexible but also serves to make the networks smaller. For example, the nets are collapsed at points such that tense, number and case restrictions are not enforced. Articles A and AN are treated identically.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "[DEPART LOC] from [de part_loc]", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The slots in the best scoring frame are then used to build objects. In this process, all dates, times, names, etc. are mapped into a standard form for the routines that build the database query. The objects represent the information that was extracted from the utterance. There is also a currently active set of objects which represent constraints from previous utterances. The new objects created from the frame are merged with the current set of objects. At this step ellipsis and anaphora are resolved. Resolution of ellipsis and anaphora is relatively simple in this system. The slots in frames are semantic, thus we know the type of object needed for the resolution. For ellipsis, we add the new objects. For anaphora, we simply have to check that an object of that type already exists.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "[DEPART LOC] from [de part_loc]", |
|
"sec_num": null |
|
}, |
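{
"text": "A simplified picture of the context-merge step; because slots are semantic, ellipsis and anaphora reduce to operations keyed on object type. The object names are illustrative, not the system's actual types:\n\ndef merge_ellipsis(active, new_objects):\n    # Ellipsis: the new utterance supplies only some objects; merge them\n    # into the active context, overwriting objects of the same type.\n    merged = dict(active)\n    merged.update(new_objects)\n    return merged\n\ndef resolve_anaphor(active, obj_type):\n    # Anaphora: 'those flights' resolves only if an object of the\n    # required type already exists in the active context.\n    return active.get(obj_type)\n\ncontext = {'depart_loc': 'denver', 'arrive_loc': 'dallas'}\nprint(merge_ellipsis(context, {'display': 'fares'}))  # show me the fares\nprint(resolve_anaphor(context, 'arrive_loc'))         # -> 'dallas'",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "[DEPART_LOC] from [depart_loc]",
"sec_num": null
},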
|
{ |
|
"text": "Each frame has an associated function. After the information is extracted and objects built, the frame function is executed. This function takes the action appropriate for the frame. It builds a database query (if appropriate) from objects, sends it to SYBASE (the DataBase Management System we use) and displays output to the user. This system has been described in previous papers. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "[DEPART LOC] from [de part_loc]", |
|
"sec_num": null |
|
}, |
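{
"text": "To illustrate a frame function building a database query from the standardized objects (the table and column names below are invented, not the actual ATIS relations or our SQL generation code):\n\ndef build_query(objects):\n    # Map standardized objects to a SQL string for the DBMS.\n    cols = ', '.join(objects.get('display', ['*']))\n    where = ' AND '.join(f'{k} = {v!r}' for k, v in\n                         objects.get('constraints', {}).items())\n    return f'SELECT {cols} FROM flight' + (f' WHERE {where}' if where else '')\n\nprint(build_query({'display': ['airline_code', 'departure_time'],\n                   'constraints': {'from_airport': 'DEN', 'to_airport': 'DFW'}}))\n# SELECT airline_code, departure_time FROM flight WHERE from_airport = 'DEN' AND to_airport = 'DFW'",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "[DEPART_LOC] from [depart_loc]",
"sec_num": null
},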
|
{ |
|
"text": "The frame structures and patterns for the Recursive Transition Networks were developed by processing transcripts of subjects performing scenarios of the ATIS task. The data were gathered by several sites using Wizard paradigms. This is a paradigm where the subjects are told that they are using a speech recognition system in the task, but an unseen experimenter is actually controlling the responses to the subjects screen. The data were submitted to NIST and released by them. There have been three sets of training data released by NIST: ATIS0, ATIS1 and ATIS2. We used only data from these releases in developing our system. A subset of this data (approximately 5000 utterances) has been annotated with reference answers. We have used only a subset of the ATIS2 data, including all of the annotated data. The development test sets (for ATIS0 and ATIS1) were not included in the training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Natural Language Training Data", |
|
"sec_num": "2.1." |
|
}, |
|
{ |
|
"text": "A set of 980 utterances comprised of 123 sessions from 37 speakers was set aside as a test set. Transcripts of these utterances were processed by the systems to evaluate the performance of the Natural Language Understanding modules. This will provide an upper bound on the performance of the Spoken Language Systems, i.e. this represents the performance given perfect recognition. The utterances for sessions provided dialog interaction with a system, not just the processing of isolated utterances. All of the utterances were processed by the systems as dialogs. For result reporting purposes, the utterances were divided into three classes:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Natural Language Processing Results", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "\u2022 Class A -utterances requiring no context for interpretation", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Natural Language Processing Results", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "\u2022 Class D -utterances that can be interpreted only in the context of previous utterances", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Natural Language Processing Results", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "\u2022 Class X -utterances that for one reason or another were not considered answerable.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Natural Language Processing Results", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "Our results for processing the test set transcripts are shown in Table 1 . There were 402 utterances in Class A and 285 utterances in Class D for a combined total of 687 utterances. The remainder of the 980 utterances were Class X and thus were not scored. The database output of the system is scored. The percent correct figure is the percent of the utterances for which the system returned the (exactly) correct output from the database. The percent wrong is the percent of the utterances for which the system returned an answer from the database, but the answer was not correct. The percent NO_ANS is the percentage of the utterances that the system did not attempt to answer. The Weighted Error measure is computed as (2 * %Wrong) + %NO_ANSWER. These NL results (both percent correct and weighted error) were the best of any site reporting. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 72, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Natural Language Processing Results", |
|
"sec_num": "2.2." |
|
}, |
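{
"text": "The weighted error computation can be checked directly against the rows of Table 1 (small differences are rounding in the published figures):\n\ndef weighted_error(pct_wrong, pct_no_answer):\n    # Weighted Error = (2 * %Wrong) + %NO_ANSWER\n    return 2 * pct_wrong + pct_no_answer\n\nprint(weighted_error(11.4, 0.0))  # Class A:   22.8  (Table 1: 22.9)\nprint(weighted_error(19.6, 1.1))  # Class D:   ~40.3 (Table 1: 40.4)\nprint(weighted_error(14.8, 0.4))  # Class A+D: 30.0  (Table 1: 30.1)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Natural Language Processing Results",
"sec_num": "2.2."
},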
|
{ |
|
"text": "The purpose of evaluations is not only to measure current performance, but also to measure progress over time. A similar evaluation was conducted in February 1991.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to February 1991 system", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "For Class A data, our percent correct performance increased from 80.7 to 88.6. This means that the percentage of errors decreased from 19.3 to 11.4, representing a decrease in errors of 41 percent. The weighted error decreased from 36.0 to 22.9.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to February 1991 system", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "For Class D data, our percent correct increased from 60.5 to 79.3. The represents a decrease in errors of 48 percent. The weighted error was reduced from 115.8 to 40.4.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to February 1991 system", |
|
"sec_num": "2.3." |
|
}, |
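{
"text": "The quoted relative error reductions follow directly from the percent-correct figures:\n\ndef pct_error_reduction(old_correct, new_correct):\n    # Relative reduction in the error rate implied by two accuracies.\n    old_err, new_err = 100 - old_correct, 100 - new_correct\n    return 100 * (old_err - new_err) / old_err\n\nprint(round(pct_error_reduction(80.7, 88.6)))  # Class A -> 41 percent\nprint(round(pct_error_reduction(60.5, 79.3)))  # Class D -> 48 percent",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Comparison to February 1991 system",
"sec_num": "2.3."
},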
|
{ |
|
"text": "The basic algorithms used are the same as for previous versions of the system. The increase in performance came primarily from \u2022 Bug fixes (primarily to the SQL generation code)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to February 1991 system", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "\u2022 Extension of the semantics, grammar and lexicon from processing part of the ATIS2 training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to February 1991 system", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "\u2022 Improved context mechanism", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to February 1991 system", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "In our system, we use the NO_ANSWER response differently than other sites. If our results are compared to others, we output far fewer NO_ANSWER responses. This is because we use a different criteria for choosing not to answer. In order to optimize the weighted error measure, one would want to choose not to answer an utterance if the system believed that the input was not completely understood correctly, i.e. if it thought that the answer would not be completely correct. However, if the system chooses not to answer, it should ignore all information in the utterance. Since our goal is to build interactive spoken language understanding systems, we prefer a strategy that shows the user what is understood and engages in a clarification dialog with the user to get missing information or correct misunderstandings. For this procedure we need to retain the information that was understood from the utterance for dialog purposes. The user must also be clearly shown what was understood. Therefore, we only output a NO_ANSWER response when the system did not arrive at even a partial understanding of the utterance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Partial Understanding", |
|
"sec_num": "2.4." |
|
}, |
|
{ |
|
"text": "For our recognizer, we use the SPHINX-II speech recognition system. In comparison with the SPHINX system, the SPHINX-II system incorporates multiple dynamic features (extended from three codebooks to four), a speakernormalized front-end, sex-dependent semi-continuous hidden Markov models (which replace discrete models), and the shared-distribution representation (which replaces generalized between-word triphones).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SPEECH PROCESSING", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "[3] [4] For the Feb. 1992 ATIS evaluation, we used SPmNX-II (without the speaker normalization component) to construct vocabulary-independent models and adapted vocabularyindependent models with ATIS training data. The system used a backoff class bigram language model and a Viterbi beam search.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SPEECH PROCESSING", |
|
"sec_num": "3." |
|
}, |
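{
"text": "Schematically, a backed-off class bigram scores a word pair through the classes of its words and falls back to a weighted unigram estimate when the class bigram was never observed; every class, probability and weight below is invented for illustration:\n\n# P(w2 | w1) = P(c2 | c1) * P(w2 | c2) if the class bigram was observed,\n#              backoff(c1) * P(w2)     otherwise.\nword_class = {'denver': 'CITY', 'dallas': 'CITY', 'flights': 'FLIGHTS'}\np_class_bigram = {('FLIGHTS', 'CITY'): 0.2}\np_word_in_class = {'denver': 0.5, 'dallas': 0.5, 'flights': 1.0}\nbackoff_weight = {'CITY': 0.1}\np_unigram = {'denver': 0.01, 'dallas': 0.01, 'flights': 0.02}\n\ndef bigram_prob(w1, w2):\n    c1, c2 = word_class[w1], word_class[w2]\n    if (c1, c2) in p_class_bigram:\n        return p_class_bigram[(c1, c2)] * p_word_in_class[w2]\n    return backoff_weight.get(c1, 1.0) * p_unigram[w2]\n\nprint(bigram_prob('flights', 'denver'))  # observed class bigram: 0.1\nprint(bigram_prob('denver', 'dallas'))   # backed off: ~0.001",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "SPEECH PROCESSING",
"sec_num": "3."
},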
|
{ |
|
"text": "In order to efficiently share parameters across word models, the SPHINX-II system uses shared-distribution models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Training", |
|
"sec_num": "3.1." |
|
}, |
|
{ |
|
"text": "[5] The states in the phonetic HMMs are treated as the basic unit for modeling and are referred to as senones.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Training", |
|
"sec_num": "3.1." |
|
}, |
|
{ |
|
"text": "[4] There were 6500 senones in the systems. Vocabulary-independent acoustic models were trained on approximately 12,000 general English utterances. These models were used to initialize vocabulary specific models (the vocabulary-independent mapping table was used) which were then trained on the task-specific data. Approximately 10,000 utterances from the ATIS0, ATIS 1 and ATIS2 training sets were used in the adaptation training. The original vocabulary-independent models were then interpolated with the vocabulary-dependent models to give the adapted models used in the recognition.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Training", |
|
"sec_num": "3.1." |
|
}, |
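{
"text": "The adaptation ends with a linear interpolation of the two model sets; schematically, per output distribution (the weight is an assumption for illustration, since its actual value is not reported here):\n\ndef interpolate(p_dependent, p_independent, lam=0.7):\n    # Mix the task-adapted (vocabulary-dependent) estimate with the\n    # general English (vocabulary-independent) one; lam is hypothetical.\n    return [lam * pd + (1 - lam) * pi\n            for pd, pi in zip(p_dependent, p_independent)]\n\nprint(interpolate([0.8, 0.2], [0.6, 0.4]))  # -> approximately [0.74, 0.26]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acoustic Training",
"sec_num": "3.1."
},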
|
{ |
|
"text": "A backoff class bigram grammar was trained on a total of approximately 12,000 utterances from the same three NIST ATIS distributions. The grammar used a lexicon of 1389 words with 914 word classes defined. The system used seven models for non-speech events. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexicon and Language Model", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "The Speech recognition results for the test set are shown in Table 2 . The Error column is the sum of Substitutions, Insertions and Deletions. The output from the recognizer was then sent to the NL system to get the complete Spoken Language System results. These are shown in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 68, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 276, |
|
"end": 283, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "KNOWLEDGE BASED CORRECTION", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "For Class A data, our word error percentage was reduced from 28.7 to 10.4 representing a decrease in errors of 64 percent. The overall SLS error is a function of both the speech recognition and natural language errors. Our percentage of errors in SLS output decreased from 39 to 26 representing a decrease in errors of 33 percent. The weighted error decreased from 65.5 to 51.7.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to February 1991 system", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "For Class D data, our word error percentage was reduced from 26.9 to 14.5 representing a decrease in errors of 46 percent. Our percentage of errors in SLS output decreased from 61 to 44 representing a decrease in errors of 28 percent. The weighted error decreased from 116 to 87.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to February 1991 system", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "The increase in speech recognition performance came from using the SPHINX-II system where we used SPHINX in 1991. The primary differences are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to February 1991 system", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "\u2022 Semi-continuous shared-distribution HMMs replaced discrete HMM generalized triphones", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to February 1991 system", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "\u2022 Sex-dependent models were added", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to February 1991 system", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "\u2022 Added second order difference cepstrum codebook", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to February 1991 system", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "The MINDS-II SLS system is a back-end module which applies constraints derived from syntax, semantics, pragmatics, and applicable discourse context and discourse structure to detect and correct erroneous parses, skipped or overlooked information and out of domain requests. MINDS-II transcript processor is composed of a dialog module, an utterance analyzer and a domain constraints model. Input to the CMU MINDS-II NL system is the transcribed string, the parse produced by the PHOENIX caseframe parser and the parse matrix. The system first looks for out of domain requests by looking for otherwise reasonable domain objects and relations among objects not included in this application database. Second, it tries to detect and correct all misparses by searching for alternate interpretations of both strings and relations among identified domain concepts. Further unanswerable queries are detected in this phase, although the system cannot determine whether the queries are unanswerable because the speaker mis-spoke or intentionally requested extra-domain information. Third, the system evaluates all word strings not contained in the parsed representation to assess their potential importance and attempt to account for the information. Unaccounted for information detected includes interjections, regions with inadequate grammatical coverage and regions where the parser does not have the knowledge to include the information in the overall utterance interpretation. All regions containing interjections or on-line edits and corrections are deemed unimportant and passed over. When the system finds utterances with important unaccounted for information, it searches through the parse matrix to find all matches performed in the region. It then applies abductive reasoning and constraint satisfaction techniques to form a new interpretation of the utterance. Semantic and pragmatic knowledge is represented with multi-layered hierarchies of frames. Each knowledge layer contains multiple hierarchies and relations to other layers. Semantic information of similar granularity is represented in a single layer. base contains knowledge of objects, attributes, values, actions, events, complex events, plans and goals. Syntactic knowledge is represented as a set of rules. The discourse model makes use of current focus stack, inferred speaker goals and plans, and dialog principles which constrain \"what can come next\" in a variety of contexts. Goal and plan inference and tracking are performed. Constraints are derived by first applying syntactic constraints, constraining theses by utterance level semantic and pragmatic constraints followed by discourse level constraints when applicable. The system outputs either semantically inter\u00b0 preted utterances represented as variables and bindings for the database interface or error codes for \"No_Anwser\" items.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to February 1991 system", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "The system was trained using 115 dialogs, approximately 1000 of the utterances from the MADCOW ATIS-2 training. Previously, the system had been trained on the ATIS-0 training set. This system incorporates the SOUL utterance analysis system as well as a dialog module for the Feb92 benchmark tests.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to February 1991 system", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "Due to mechanical problems, the results from this test were submitted to NIST after the deadline for official submissions. Therefore, they were not scored by NIST and are not official benchmark results. However, the results were generated observing all procedures for benchmark tests. They were run on the official test set, without looking at the data first. One version control bug was fixed when the system crashed while running the test. No code was changed, we realized that the wrong version (an obsolete one) of one function was used, and we substituted the correct one. The results were scored using the most recent comparator software released by NIST and the official answers (after adjudication).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Based Processing Results", |
|
"sec_num": "4.1." |
|
}, |
|
{ |
|
"text": "This year we incorporated the Code-Word Dependent Cepstral Normalization (CDCN) procedure developed by Acero into the ATIS system. For the official ATIS evaluations we used the original version of this algorithm, as described in [6] . (Recent progress on this and similar algorithms for acoustical pre-processing of speech signals are described in elsewhere in these proceedings [7] .)", |
|
"cite_spans": [ |
|
{ |
|
"start": 229, |
|
"end": 232, |
|
"text": "[6]", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 379, |
|
"end": 382, |
|
"text": "[7]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ENVIRONMENTAL ROBUSTNESS", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "The recognition system used for the robust speech evaluation was identical to that with which the baseline results were obtained except that the CDCN algorithm was used to transform the cepstral coefficients in the test data so that they would most closely approximate the statistics of the ensemble of cepstra observed in the training environment. All incoming speech was processed with the CDCN algorithm, regardless of whether the testing environment was actually the standard Sennheiser close-talking microphone or the desktop Crown PCC-160 microphone, and the algorithm does not have explicit knowledge of the identity of the environment within which it is operating.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ENVIRONMENTAL ROBUSTNESS", |
|
"sec_num": "5." |
|
}, |
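{
"text": "CDCN estimates additive noise and codeword-dependent spectral corrections, which is well beyond a few lines; purely as intuition, the much simpler cepstral mean normalization below shows the general idea of mapping test cepstra toward training statistics (this is not the CDCN algorithm):\n\nimport numpy as np\n\ndef cepstral_mean_normalize(cepstra):\n    # cepstra: array of shape (frames, coefficients). Subtracting the\n    # per-utterance mean removes a stationary channel offset; CDCN goes\n    # further and estimates environment-dependent corrections per VQ\n    # codeword so that test cepstra match the training ensemble.\n    return cepstra - cepstra.mean(axis=0, keepdims=True)\n\nframes = np.random.randn(200, 12) + 3.0  # synthetic channel offset\nprint(cepstral_mean_normalize(frames).mean(axis=0).round(6))  # ~ all zeros",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "ENVIRONMENTAL ROBUSTNESS",
"sec_num": "5."
},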
|
{ |
|
"text": "Because of time constraints, we did not train the system used for the official robust-speech evaluations as thoroughly as the baseline system was trained. Specifically, the robust-speech system was trained on only 10,000 sentences from the ATIS domain, while the baseline system was trained on an additional 12,000 general English utterances as well. The acoustic models for the robustspeech system using CDCN were created by initializing the HMM training process with the models used in the baseline SPmNX-II system. The official evaluations were performed after only a single iteration through training data that was processed with the CDCN algorithm.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ENVIRONMENTAL ROBUSTNESS", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "The official speech recognition scores using the CDCN algorithm and the Sennheiser HMD-414 and Crown PCC-160 microphones are summarized in Table 4 . We summarize the word error scores for all 447 utterances that were recorded using both the Sennheiser HMD-414 and Crown PCC-160 microphones. For comparison purposes, we include figures for the baseline system on this subset of utterances, as well as figures for the system using the CDCN algorithm for the same sentences. We believe that the degradation in performance from 13.9% to 16.6% for these sentences using the close-talking Sennheiser HMD-414 microphone is at least in part a consequence of the more limited training of the system with the CDCN algorithm. We note that the change from the HMD-414 to the PCC-160 produces only a 30% degradation in error rate. Only two sites submitted data for the present robust speech evaluation, and CMU's percentage degradation in error rate in changing to the new testing environment, as Table 6 : Comparison of SLS performance of SPHINX-II with the CDCN algorithm on the 332 A+D sentences in the test set which were recorded using the PCC-160 microphone as well as the Sennheiser HMD-414.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 139, |
|
"end": 146, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}
|
], |
|
"eq_spans": [], |
|
"section": "ENVIRONMENTAL ROBUSTNESS", |
|
"sec_num": "5." |
|
}, |
|
|
{ |
|
"text": "Summary results for the corresponding SLS scores for the 332 Class A+D utterances that were recorded using the 3. Crown PCC-160 microphone are provided in Table 6 . Switching the testing environment from the Sennheiser HMD-414 to the Crown PCC-160 degraded the number of 4. correct SQL queries by only 21.8%, which corresponds to a degradation of 39.3% for the weighted error score. CMU was the only site to submit SLS data using the PCC-160 5. microphone for the official evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 162, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "2.", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The CMU Air Travel Information Service: Understanding Spontaneous Speech", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Proceedings of the DARPA Speech and Natural Language Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ward, W., \"The CMU Air Travel Information Service: Understanding Spontaneous Speech\", Proceedings of the DARPA Speech and Natural Language Workshop, June1990, pp. 127, 129.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Evaluation of the CMU ATIS System", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the DARPA Speech and Natural Language Workshop, Feb1991", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ward, W., \"Evaluation of the CMU ATIS System\", Proceedings of the DARPA Speech and Natural Language Workshop, Feb1991, pp. 101, 105.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Improved Acoustic Modeling for the SPHINX Speech Recognition System", |
|
"authors": [ |
|
{ |
|
"first": "Lee", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hon", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "345--348", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huang, Lee, Hon, and Hwang,, \"Improved Acoustic Modeling for the SPHINX Speech Recognition System\", ICASSP, 1991, pp. 345-348.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Subphonetic Modeling with Markov States -Senone", |
|
"authors": [ |
|
{ |
|
"first": "Huang", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hwang and Huang, \"Subphonetic Modeling with Markov States -Senone\", ICASSP, 1992.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Acoustic Classification of Phonetic Hidden Markov Models", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "Eurospeech Proceedings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hwang, M. and Huang X., \"Acoustic Classification of Phonetic Hidden Markov Models\", Eurospeech Proceedings, 1991.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Environmental Robusmess in Automatic Speech Recognition", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Acero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Stem", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "849--852", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Acero, A. and Stem, R. M., \"Environmental Robusmess in Automatic Speech Recognition\", ICASSP-90, April 1990, pp. 849-852.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Multiple Approaches to Robust Speech Recognition", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Stern", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F.-H", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Ohshima", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Sullivan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Acero", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "DARPA Speech and Natural Language Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stern, R. M., Liu, F.-H., Ohshima, Y., Sullivan, T. M., and Acero, A., \"Multiple Approaches to Robust Speech Recognition\", DARPA Speech and Natural Language Workshop, February 1992.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"content": "<table><tr><td/><td/><td colspan=\"2\">In [arrive loc]</td></tr><tr><td>[ city ]</td><td>dallas</td><td/><td>[DEPART_TIME ]</td></tr><tr><td colspan=\"2\">[depart_time_range]</td><td>after</td><td>[start_time]</td></tr><tr><td>[time] twopm</td><td/><td/></tr><tr><td>and</td><td/><td/></tr><tr><td colspan=\"4\">[DEPART LOC] from [depart loc] [city] den-</td></tr><tr><td colspan=\"4\">ver [ARRIVE] an/ving in [ar~ve loc] [city]</td></tr><tr><td colspan=\"4\">dallas [ a r rive_t ime_range ] after~ s t a rt_t ime ]</td></tr><tr><td>[time] twopm</td><td/><td/></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "The existence of the higher level slot [ARRIVE] allows this to be resolved. It allows the two lower level nets [arrive loc] and [arrive_time_range] to be specifically associated. The second parse which has [arrive loc] and [arrive time] as subnets of the slot [ARRIVE]" |
|
}, |
|
"TABREF1": { |
|
"content": "<table><tr><td>Class</td><td>% Correct</td><td>% Wrong</td><td>% NOANS</td><td>! Weighted Error</td></tr><tr><td>A+D</td><td>84.7</td><td>14.8</td><td>0.4</td><td>30.1</td></tr><tr><td>A</td><td>88.6</td><td>11.4</td><td>0.0</td><td>22.9</td></tr><tr><td>D</td><td>79.3</td><td>19.6</td><td>1.1</td><td>40.4</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Table 1: NL results from processing test set transcripts." |
|
}, |
|
"TABREF3": { |
|
"content": "<table><tr><td>Class</td><td>% Correct</td><td>% Wrong</td><td>% NOANS</td><td>Weighted Error</td></tr><tr><td>A+D</td><td>66.7</td><td>32.9</td><td>0.4</td><td>66.2</td></tr><tr><td>A</td><td>74.1</td><td>25.9</td><td>0.0</td><td>51.7</td></tr><tr><td>D</td><td>56.1</td><td>42.8</td><td>1.1</td><td>86.7</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "SPHINX-II Speech Recognition results." |
|
}, |
|
"TABREF4": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "SLS results from processing test set speech input." |
|
}, |
|
"TABREF5": { |
|
"content": "<table><tr><td>System</td><td>Class</td><td>% Correct</td><td>% Wrong</td><td>% NO_ANS</td><td>Weighted Error</td></tr><tr><td>Phoenix</td><td>A + D</td><td>66.7</td><td>32.9</td><td>0.4</td><td>66.2</td></tr><tr><td>MINDS-II</td><td>A + D</td><td>64.3</td><td>25.3</td><td>10.3</td><td>61.0</td></tr><tr><td/><td/><td/><td/><td/><td>The knowledge</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Table 4: UNOFFICIAL Comparison on MINDS-II and Phoenix results from processing test set speech input." |
|
}, |
|
"TABREF7": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Comparison of speech recognition performance of SPHINX-II with and without the CDCN algorithm on the 447 A+D+X sentences in the test set which were recorded using the PCC-160 microphone as well as the Sennheiser HMD-414." |
|
} |
|
} |
|
} |
|
} |