|
{ |
|
"paper_id": "Y05-1024", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:39:47.753773Z" |
|
}, |
|
"title": "Speech-Activated Text Retrieval System for Cellular Phones with Web Browsing Capability", |
|
"authors": [ |
|
{ |
|
"first": "Takahiro", |
|
"middle": [], |
|
"last": "Ikeda", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "NEC Corporation", |
|
"location": { |
|
"addrLine": "Nakahara-Ku", |
|
"postCode": "1753, 211-8666", |
|
"settlement": "Shimonumabe, Kawasaki", |
|
"region": "Kanagawa", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "t-ikeda@di.jp.nec.com" |
|
}, |
|
{ |
|
"first": "Shin-Ya", |
|
"middle": [], |
|
"last": "Ishikawa", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "NEC Corporation", |
|
"location": { |
|
"addrLine": "Nakahara-Ku", |
|
"postCode": "1753, 211-8666", |
|
"settlement": "Shimonumabe, Kawasaki", |
|
"region": "Kanagawa", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "s-ishikawa@dg.jp.ne.com" |
|
}, |
|
{ |
|
"first": "Kiyokazu", |
|
"middle": [], |
|
"last": "Miki", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "NEC Corporation", |
|
"location": { |
|
"addrLine": "Nakahara-Ku", |
|
"postCode": "1753, 211-8666", |
|
"settlement": "Shimonumabe, Kawasaki", |
|
"region": "Kanagawa", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "k-miki@bq.jp.nec.com" |
|
}, |
|
{ |
|
"first": "Fumihiro", |
|
"middle": [], |
|
"last": "Adachi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "NEC Corporation", |
|
"location": { |
|
"addrLine": "Nakahara-Ku", |
|
"postCode": "1753, 211-8666", |
|
"settlement": "Shimonumabe, Kawasaki", |
|
"region": "Kanagawa", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "f-adachi@aj.jp.nec.com" |
|
}, |
|
{ |
|
"first": "Ryosuke", |
|
"middle": [], |
|
"last": "Isotani", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "NEC Corporation", |
|
"location": { |
|
"addrLine": "Nakahara-Ku", |
|
"postCode": "1753, 211-8666", |
|
"settlement": "Shimonumabe, Kawasaki", |
|
"region": "Kanagawa", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "r-isotani@bp.jp.nec.com" |
|
}, |
|
{ |
|
"first": "Kenji", |
|
"middle": [], |
|
"last": "Satoh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "NEC Corporation", |
|
"location": { |
|
"addrLine": "Nakahara-Ku", |
|
"postCode": "1753, 211-8666", |
|
"settlement": "Shimonumabe, Kawasaki", |
|
"region": "Kanagawa", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "k-satoh@da.jp.nec.com" |
|
}, |
|
{ |
|
"first": "Akitoshi", |
|
"middle": [], |
|
"last": "Okumura", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "NEC Corporation", |
|
"location": { |
|
"addrLine": "Nakahara-Ku", |
|
"postCode": "1753, 211-8666", |
|
"settlement": "Shimonumabe, Kawasaki", |
|
"region": "Kanagawa", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "a-okumura@bx.jp.nec.com" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes a text retrieval system for cellular phones with Web browsing capability, which accepts spoken queries over the cellular phone and provides the search result on the cellular phone screen. This system recognizes spoken queries by large vocabulary continuous speech recognition (LVCSR), retrieves relevant document by text retrieval, and provides the search result on the World Wide Web by the integration of the Web and the voice systems. The text retrieval in this system improves the performance for spoken short queries by: 1) utilizing word pairs with dependency relations, 2) distinguishing affirmative and negative expressions, and 3) converging synonyms. The LVCSR in this system shows enough performance level for speech over the cellular phone with acoustic and language models derived from a query corpus with target contents. The system constructed for user's manual for a cellular phone navigates users to relevant passages for 81.4% of spoken queries.", |
|
"pdf_parse": { |
|
"paper_id": "Y05-1024", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes a text retrieval system for cellular phones with Web browsing capability, which accepts spoken queries over the cellular phone and provides the search result on the cellular phone screen. This system recognizes spoken queries by large vocabulary continuous speech recognition (LVCSR), retrieves relevant document by text retrieval, and provides the search result on the World Wide Web by the integration of the Web and the voice systems. The text retrieval in this system improves the performance for spoken short queries by: 1) utilizing word pairs with dependency relations, 2) distinguishing affirmative and negative expressions, and 3) converging synonyms. The LVCSR in this system shows enough performance level for speech over the cellular phone with acoustic and language models derived from a query corpus with target contents. The system constructed for user's manual for a cellular phone navigates users to relevant passages for 81.4% of spoken queries.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Cellular phones are now widely used and those with Web browsing capability are becoming very popular. Users can easily browse information provided on the World Wide Web such as news, weather, and traffic report with the cellular phone screen in mobile environment. However, obtaining necessary information from large database such as user's manual or travelers' guide is quite a task for users since searching for appropriate information from seas of data requires cumbersome key operations. I n m o s t cases, users have to carefully navigate through deep hierarchical structures of menus or have to type in complex combination of keys to enter some keywords.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Text retrieval by voice input is one of the solutions for this problem. This paper presents a telephone-based voice query retrieval system in Japanese which enables cellular phone users to search through the user's manual. This system accepts spoken queries over the cellular phone with large vocabulary continuous speech recognition (LVCSR) and retrieves relevant parts from the user's manual with text retrieval. T h e r e s u l t s a r e p r o v i d e d t o t h e u s e r a s a W e b p a g e b y s y n c h r o n o u s l y activating the Web and the voice systems (Yoshida et al., 2002) . Users can input queries without complicated keystrokes and can view the list of results on the cellular phone screen.", |
|
"cite_spans": [ |
|
{ |
|
"start": 566, |
|
"end": 588, |
|
"text": "(Yoshida et al., 2002)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "With respect to voice input systems, a large number of interactive voice responses (IVR) systems and spoken dialogue systems has been designed and developed over the years (Zue, 1997) . As for user's manual retrieval systems which accept voice input, Kawahara et al. (2003) has developed a spoken dialogue system for appliance manuals. However, they mainly focus on the dialogue strategy to select the appropriate result on screen-less systems such as VTR and FAX. On the other hand, retrieval methods for voice input have been examined on a TREC query set (Barnett et al., 1997; Crestani, 2000) . .", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 183, |
|
"text": "(Zue, 1997)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 251, |
|
"end": 273, |
|
"text": "Kawahara et al. (2003)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 557, |
|
"end": 579, |
|
"text": "(Barnett et al., 1997;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 580, |
|
"end": 595, |
|
"text": "Crestani, 2000)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "However, text retrieval in TREC mainly aims to search open domain documents from long queries, while our system is required to search closed domain documents such as user's manuals based on short queries spoken over the cellular phone.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "In order to apply text retrieval technique to speech-activated user's manual retrieval, we have investigated queries for searching manuals in addition to the text of the manuals from a linguistic viewpoint. We found that text retrieval for a user's manual has the following three difficulties.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "1) The difficulty of identifying passages in a user's manual based on an individual word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "2) The difficulty of distinguishing affirmative and negative sentences which mean two different features in the manual.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "3) The difficulty of retrieving appropriate passages for a query using words not appearing in the manual.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "This paper presents how we overcome these difficulties using three techniques: 1) utilizing word pairs with dependency relations, 2) distinguishing affirmative and negative expressions by auxiliary verbs, and 3) converging synonyms with synonym dictionary. The rest of the paper is organized as follows. Section 2 describes the system configuration of our speech-activated text retrieval system and how it works. Section 3 discusses the difficulties in text retrieval in our system and presents our proposed techniques in detail. Section 4 shows the developed prototype system and Section 5 reports its evaluation results. Finally Section 6 concludes the paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Our system receives spoken queries on the usage of the cellular phone and provides the list of relevant passages in the user's manual. In this paper, a passage denotes a part of the document corresponding to a feature in the user's manual. Figure 1 shows the configuration of our retrieval system. The telephone service module receives a phone call from the user. This module prepares the search operation by calling the LVCSR module, which recognizes the query spoken over the phone, and the text retrieval module, which provides the search result for the query. The telephone service module sends the list of the relevant passages to the Web service module, and then hangs up the phone. The Web service module provides the result to the user according to the user's request via the internet. We assume that the cellular phone screen displays about 30 letters per line and 15 lines of text according to the specifications of recent popular cellular phones in Japan. We assign top ten potential passages as the search result and display the title of them in order for the user to see with ease. Figure 2 shows the screen of the cellular phone displaying the search result.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 240, |
|
"end": 248, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 1095, |
|
"end": 1103, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Speech-Activated Text Retrieval System", |
|
"sec_num": "2." |
|
}, |
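
{

"text": "The module flow described above can be summarized with the following minimal Python sketch. This is an illustration only: the module objects, their recognize, search, and store_result methods, and the handle_voice_query function are hypothetical stand-ins, not the system's actual implementation.\n\ndef handle_voice_query(audio, lvcsr, retriever, web_service):\n    # Telephone service module: have the LVCSR module recognize the spoken query.\n    query_text = lvcsr.recognize(audio)\n    # Text retrieval module: rank passages and keep the top ten candidates.\n    top_passages = retriever.search(query_text)[:10]\n    # Hand the result list to the Web service module, which later renders it\n    # as a Web page when the user requests it from the cellular phone.\n    web_service.store_result(top_passages)\n    return top_passages",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Speech-Activated Text Retrieval System",

"sec_num": "2."

},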
|
{ |
|
"text": "This section describes how our system works. Our system works in Japanese, but in the following section, English translation is provided for the reader's convenience. In our system, the user obtains the relevant passage in the user's manual with the voice query according to the following steps.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example of Using the System", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "Step 1: The user first accesses the system's main page of our system with the cellular phone ( Figure 3 ). The page contains two hyperlinks along with brief instructions and query examples.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 104, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Example of Using the System", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "Step 2: The user follows the first link labeled \"Input query by voice.\" It is linked to the telephone service module, allowing the user to call the telephone service module.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example of Using the System", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "Step 3: The user inputs a query following the voice guidance from the system. The LVCSR module recognizes it and outputs the result text. The text retrieval module searches the user's manual from recognized text and outputs the top ten results. The user goes back to the main page after the telephone service module hangs up the phone.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example of Using the System", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "Step 4: The user follows the second link labeled \"Show search results,\" which is linked to our Web service module. Then the user views the result page which contains the title list of top ten results (each passage consists of a title and a body). Figure 4 shows the example of the result page responding to the voice query \"How to change my email address.\"", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 247, |
|
"end": 255, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Example of Using the System", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "Step 5: By selecting a title of a passage from the result list, the user retrieves the corresponding body of the passage ( Figure 5 ). If the result list contains no relevant passages, the user can go back to the homepage and re-enter a query by speech. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 131, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Example of Using the System", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "In general, user's manual of equipment explains all functions extensively. Since the phrasing used in a user's manual is often similar, expressions with small difference might appear in completely different entries. We have investigated queries for searching manuals in addition to the text of the manuals from a linguistic viewpoint and found that text retrieval for user's manual has the following three difficulties.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Problems on User's Manual Retrieval", |
|
"sec_num": "3.1." |
|
}, |
|
{ |
|
"text": "1) It is difficult to identify passages in a user's manual based on an individual word. For example, a word \"mail\" shows up in passages explaining various functions such as sending mails, receiving mails, composing mails, and many others. In order to overcome this difficulty, we need to use relations between words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Problems on User's Manual Retrieval", |
|
"sec_num": "3.1." |
|
}, |
|
{ |
|
"text": "2) It is difficult to distinguish affirmative and negative sentences based on independent words. Sentences with the same set of content words can mean two different features depending on whether the sentence is in the affirmative or in the negative. This is often true in manual writings where each function is described in pair: one activating and the other deactivating the function (ex. \"Sending the caller number\" and \"Not sending the caller number\"). In order to overcome this difficulty, we need to handle polarity indicated by auxiliary verbs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Problems on User's Manual Retrieval", |
|
"sec_num": "3.1." |
|
}, |
|
{ |
|
"text": "3) It is difficult to retrieve appropriate passages for a query using words not appearing in the manual. While the expression denoting an object is generally standardized in a user's manual, users often indicate the object with other expressions. In order to overcome this difficulty, we need to assimilate difference of various synonymous expressions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Problems on User's Manual Retrieval", |
|
"sec_num": "3.1." |
|
}, |
|
{ |
|
"text": "The system retrieves relevant passages from the user's manual with a word-based text retrieval method. The system generates indexes for content words in passages and obtains relevant passages from the words in the query based on Okapi BM25 probabilistic retrieval model without relevance feedback in principle (Robertson et al., 1993) . In this model, the weight W of a passage P for a query Q is defined as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 310, |
|
"end": 334, |
|
"text": "(Robertson et al., 1993)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Approaches for User's Manual Retrieval", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "\uf0e5 \uf0ce \uf03d Q T T TW W ) ( qtf k qtf k tf K k tf k w T TW \uf02b \uf0d7 \uf02b \uf0d7 \uf02b \uf0d7 \uf0d7 \uf02b \uf0d7 \uf03d 2 2 1 1 ) 1 ( ) 1 ( ) ( 5 . 0 5 . 0 log \uf02b \uf02b \uf02d \uf03d n n N w AVPL PL b b K \uf0d7 \uf02b \uf02d \uf03d ) 1 (", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Approaches for User's Manual Retrieval", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "Here T denotes a term in the query Q, N denotes the number of passages in the whole text, n denotes the number of passages containing the term T, tf denotes the frequency of occurrence of the term T within the passage P, qtf denotes the frequency of occurrence of the term T within the query Q, PL denotes the length of the passage P, and AVPL denotes the average length of all passages. k 1 , k 2 , and b are predefined constants.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Approaches for User's Manual Retrieval", |
|
"sec_num": "3.2." |
|
}, |
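
{

"text": "The following minimal Python sketch shows how the passage weight W can be computed from these definitions, assuming the reconstruction of the formula above and the constant values reported in Section 4.2 (k_1 = 100, k_2 = 1000, b = 0.3). It is an illustration only, not the authors' implementation; doc_freq is an assumed precomputed mapping from each term to the number of passages containing it.\n\nimport math\nfrom collections import Counter\n\ndef passage_weight(query_terms, passage_terms, passage_len, avg_passage_len,\n                   num_passages, doc_freq, k1=100.0, k2=1000.0, b=0.3):\n    # Okapi BM25-style weight W of a passage P for a query Q (Section 3.2).\n    tf_counts = Counter(passage_terms)\n    qtf_counts = Counter(query_terms)\n    K = (1.0 - b) + b * passage_len / avg_passage_len\n    W = 0.0\n    for T, qtf in qtf_counts.items():\n        tf = tf_counts.get(T, 0)\n        n = doc_freq.get(T, 0)  # number of passages containing the term T\n        w = math.log((num_passages - n + 0.5) / (n + 0.5))\n        TW = (w * ((k1 + 1.0) * tf) / (k1 * K + tf)\n                * ((k2 + 1.0) * qtf) / (k2 + qtf))\n        W += TW\n    return W",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Approaches for User's Manual Retrieval",

"sec_num": "3.2."

},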
|
{ |
|
"text": "In order to overcome the difficulties stated previously, we have expanded the retrieval model with the following three techniques.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Approaches for User's Manual Retrieval", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "This technique assigns larger weight for passages including the same word pairs with dependency relations as in the query. The system uses the following weight W wp , which is simple extension of W:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "1) Utilization of word pairs with dependency relations", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "W k W NP \uf0d7 \uf03d wp wp", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "1) Utilization of word pairs with dependency relations", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where NP denotes the number of word pairs which appear both in the passage P and the query Q with dependency relations. k wp is predefined constants. We detect the dependency between words by shallow dependency analysis without parsing. The system assigns depend-to and depend-from attributes to each word based on its part of speech and connects them according to the surrounding relationship (Satoh et al., 2003) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 394, |
|
"end": 414, |
|
"text": "(Satoh et al., 2003)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "1) Utilization of word pairs with dependency relations", |
|
"sec_num": null |
|
}, |
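
{

"text": "The boost can be sketched in Python as below, assuming the form W_wp = k_wp^NP * W reconstructed above with k_wp = 1.3 (Section 4.2). The adjacent-pair extractor is only a crude stand-in for the shallow dependency analysis, which assigns depend-to and depend-from attributes from parts of speech.\n\ndef dependency_pairs(words):\n    # Crude stand-in: treat adjacent content words as dependency-related pairs.\n    return {(a, b) for a, b in zip(words, words[1:])}\n\ndef word_pair_weight(W, query_words, passage_words, k_wp=1.3):\n    # W_wp = k_wp ** NP * W, where NP is the number of dependency-related\n    # word pairs shared by the query and the passage.\n    NP = len(dependency_pairs(query_words) & dependency_pairs(passage_words))\n    return (k_wp ** NP) * W",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "1) Utilization of word pairs with dependency relations",

"sec_num": null

},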
|
{ |
|
"text": "This technique assigns the different weight on the term according to the condition whether an auxiliary verb indicating negative polarity follows after the term. The system adds this condition to each word after morphological analysis, and distinguishes words with different conditions. The system uses the following weight W aux instead of W:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2) Distinction between the negative and the affirmative phrases by auxiliary verbs", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\uf028 \uf029 \uf028 \uf029 \uf0e5 \uf0e5 \uf0ce \uf02b \uf02d \uf0ce \uf02d \uf02b \uf02d \uf02b \uf0d7 \uf02b \uf02b \uf0d7 \uf02b \uf03d Q T Q T T TW k T TW T TW k T TW W ) ( ) ( ) ( ) ( aux aux aux", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2) Distinction between the negative and the affirmative phrases by auxiliary verbs", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where T + denotes the term T with this condition and Tdenotes the term T without this condition. k aux is predefined constants.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2) Distinction between the negative and the affirmative phrases by auxiliary verbs", |
|
"sec_num": null |
|
}, |
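
{

"text": "The sketch below illustrates this weighting under the reconstruction of W_aux above, with k_aux = 0.3 (Section 4.2). Here a term is represented as a (word, negated) pair, and term_weight is an assumed function returning the TW value of a term for the passage; this is an illustration, not the authors' code.\n\ndef polarity_weight(term_weight, query_terms, k_aux=0.3):\n    # Each query term scores fully against the passage term with the same\n    # polarity condition and with weight k_aux against the opposite polarity.\n    W_aux = 0.0\n    for word, negated in query_terms:\n        same = term_weight((word, negated))\n        opposite = term_weight((word, not negated))\n        W_aux += same + k_aux * opposite\n    return W_aux",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "2) Distinction between the negative and the affirmative phrases by auxiliary verbs",

"sec_num": null

},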
|
{ |
|
"text": "This technique assumes the occurrence of synonymous expressions for a word as the occurrence of the word itself in calculating the weight. The system converges various synonymous expressions into the standard expression by using predefined synonym dictionary. The system accepts a set of words with dependency relations as a synonymous expression in order to converge complex synonymous expressions. Table 1 shows an example of a synonym dictionary. An arrow sign denotes a dependency relation between words. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 400, |
|
"end": 407, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "3) Converging synonyms", |
|
"sec_num": null |
|
}, |
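
{

"text": "A minimal sketch of synonym convergence follows. The dictionary entries are illustrative romanizations in the spirit of Table 1, and the greedy matcher over adjacent words is a simplification of the system's handling of dependency-related word pairs.\n\nSYNONYMS = {\n    ('webu',): 'saito',                  # web -> site\n    ('homupeji',): 'saito',              # homepage -> site\n    ('moichido', 'kakeru'): 'ridaiaru',  # again -> call => redial\n}\n\ndef converge_synonyms(words):\n    # Replace listed expressions (longest match first) by the standard form\n    # before indexing and scoring.\n    out, i = [], 0\n    while i < len(words):\n        for length in (2, 1):\n            key = tuple(words[i:i + length])\n            if key in SYNONYMS:\n                out.append(SYNONYMS[key])\n                i += length\n                break\n        else:\n            out.append(words[i])\n            i += 1\n    return out",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "3) Converging synonyms",

"sec_num": null

},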
|
{ |
|
"text": "We have constructed a prototype system to search through the manuals for cellular phone users (Ishikawa et al., 2004) . The user's manual contains about 14,000 passages and consists of about 4,000 unique words. The prototype system works in real time according to the user's operation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 117, |
|
"text": "(Ishikawa et al., 2004)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Prototype System", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "A statistical language model (LM) with word and class n-gram estimates is used in our system. Word 3-gram is backed off to word 2-gram, and word 2-gram is backed off to class 2-gram. Partof-speech patterns are used as the classes of each word. The LM is trained on a text corpus of query samples for our target user's manual. Nouns in the manual document are added to the recognition dictionary apart from the training. A total of 15,000 queries were manually constructed and used for training the LM. The final LM for the prototype system has about 4,000 words in the recognition vocabulary, about 20,000 word 2gram entries, and about 40,000 word 3-gram entries.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Language Model", |
|
"sec_num": "4.1.1." |
|
}, |
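
{

"text": "The back-off order can be sketched as follows. This is a schematic illustration only: the probability tables and back-off weights are placeholders, whereas a real LM estimates them during training with proper discounting.\n\ndef ngram_prob(history, word, trigrams, bigrams, class_bigrams, word_class,\n               backoff3=0.4, backoff2=0.4):\n    # Word 3-gram backed off to word 2-gram, backed off to class 2-gram\n    # over part-of-speech patterns.\n    w1, w2 = history\n    if (w1, w2, word) in trigrams:\n        return trigrams[(w1, w2, word)]\n    if (w2, word) in bigrams:\n        return backoff3 * bigrams[(w2, word)]\n    c_prev, c_cur = word_class.get(w2), word_class.get(word)\n    return backoff3 * backoff2 * class_bigrams.get((c_prev, c_cur), 1e-7)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Language Model",

"sec_num": "4.1.1."

},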
|
{ |
|
"text": "A speech signal is sampled at 8kHz, with MFCC analysis frame rate of 10ms. Spectral subtraction (SS) is applied to remove stationary additive noises. The feature set includes MFCC, pitch, and energy with their time derivatives. The LVCSR decoder supports triphone HMMs with tree-based state clustering on phonetic contexts. The state emission probability is represented by Gaussian mixtures with diagonal covariance matrices. For the prototype system, Gender-dependent acoustic models were prepared by the training on the speech corpus with 200,000 sentences read by 1,385 speakers collected through telephone line.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Model", |
|
"sec_num": "4.1.2." |
|
}, |
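
{

"text": "A rough front-end sketch is given below, assuming the librosa library is available; it covers only the 8 kHz sampling, the 10 ms frame rate, a crude spectral subtraction, and MFCCs with time derivatives. The pitch and energy features and the actual noise estimation used in the system are omitted.\n\nimport numpy as np\nimport librosa\n\ndef front_end(signal, sr=8000):\n    # A 10 ms frame rate at 8 kHz corresponds to a hop of 80 samples.\n    stft = librosa.stft(signal, n_fft=256, hop_length=80)\n    mag, phase = np.abs(stft), np.angle(stft)\n    noise = mag[:, :10].mean(axis=1, keepdims=True)  # stationary noise estimate\n    clean = np.maximum(mag - noise, 0.0)             # spectral subtraction\n    denoised = librosa.istft(clean * np.exp(1j * phase), hop_length=80)\n    mfcc = librosa.feature.mfcc(y=denoised, sr=sr, n_mfcc=12, hop_length=80)\n    feats = np.vstack([mfcc, librosa.feature.delta(mfcc)])\n    return feats.T  # one feature vector per frame",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Acoustic Model",

"sec_num": "4.1.2."

},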
|
{ |
|
"text": "The LVCSR decoder recognizes the query utterances with the triphone acoustic model, the statistical language model, and a tree-structured word dictionary. It performs two-stage processing. On the first stage, input speech is decoded by frame-synchronous beam search to generate a word candidate graph using the acoustic model, 2-gram language model, and the word dictionary. On the second stage, the graph is searched to find the optimal word sequence using the 3-gram language model. Both male and female acoustic models are used and decoding is performed independently for each model except for the common beam pruning in every frame. Recognition results by male and female acoustic models are compared and the one with better score is used as the result. Gender-dependent models improve the recognition accuracy while curbing the increase of the computational amount by common beam pruning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "LVCSR Decoder", |
|
"sec_num": "4.1.3." |
|
}, |
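
{

"text": "The gender-dependent decoding and two-stage search can be summarized as in the sketch below. The decoder objects and their beam_search and rescore methods are hypothetical placeholders used only to show how the better-scoring hypothesis is selected.\n\ndef decode_query(features, male_decoder, female_decoder, lm_2gram, lm_3gram):\n    results = []\n    for decoder in (male_decoder, female_decoder):\n        # 1st stage: frame-synchronous beam search producing a word graph.\n        graph = decoder.beam_search(features, lm_2gram)\n        # 2nd stage: search the graph for the best path with the 3-gram LM.\n        hypothesis, score = decoder.rescore(graph, lm_3gram)\n        results.append((score, hypothesis))\n    # Keep the recognition result with the better score.\n    return max(results)[1]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "LVCSR Decoder",

"sec_num": "4.1.3."

},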
|
{ |
|
"text": "All the techniques described in Section 3.2 are implemented on the text retrieval module in the system. We fixed the constants as follows according to the preliminary experiments using query samples developed for training the LM:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Retrieval Module", |
|
"sec_num": "4.2." |
|
}, |
|
{ |
|
"text": "3 . 0 , 3 . 1 , 3 . 0 , 1000 , 100 aux wp 2 1 \uf03d \uf03d \uf03d \uf03d \uf03d k k b k k", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Retrieval Module", |
|
"sec_num": "4.2." |
|
}, |
|
{ |
|
"text": "We developed the synonym dictionary with about 500 entries to converge synonymous expressions used to describe cellular phone functions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Retrieval Module", |
|
"sec_num": "4.2." |
|
}, |
|
{ |
|
"text": "In order to evaluate the usefulness of our system, we have composed 150 new queries independently of the query corpus used for configuring the system. We have used 110 queries for evaluation, eliminating 40 queries without relevant passages in the manual. Table 2 shows some examples of the queries used for the evaluation. Each query contains 3.8 words in average.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 256, |
|
"end": 263, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "The retrieval success rate, which we adopted as a criterion, measures how well the system is able to provide a relevant passage within the top predefined number of result passages. We have calculated the retrieval success rates at 1, 5, and 10 passages for several conditions. In order to discuss the effect of each technique presented in Section 3.2, we first present the result for transcriptions of the queries among the following text retrieval methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5." |
|
}, |
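
{

"text": "For reference, the criterion can be computed as in the following minimal sketch: a query counts as a success if at least one relevant passage appears within the top k returned passages.\n\ndef retrieval_success_rate(ranked_lists, relevant_sets, k=10):\n    hits = sum(\n        1 for ranked, relevant in zip(ranked_lists, relevant_sets)\n        if any(p in relevant for p in ranked[:k])\n    )\n    return hits / len(ranked_lists)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation",

"sec_num": "5."

},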
|
{ |
|
"text": "Method BL: This is the baseline method with no techniques applied. Method WP: This method utilizes word pairs with dependency relations. Method WP+AUX: This method distinguishes between the negative and the affirmative phrases by auxiliary verbs in addition to the method WP. Method ALL: This method converges synonyms in addition to the method WP+AUX. This is the same condition as the prototype system. Table 3 summarizes the result. The result shows each of the three techniques has contributed to the improvement of the retrieval success rate. Especially, converging synonyms enhances the performance as derived from the difference between methods WP+AUX and ALL. Next we present the performance of the total system. Table 4 shows the result for 660 utterances of the queries by 18 speakers where the LVCSR module and the text retrieval module in the prototype system are used. The retrieval success rates for utterances are almost the same as those for transcription. Since the cellular phones used in this system can display about 10 lines on the average, the 10th retrieval rate represents the rate of successfully delivering the passage requested by the user. The result shows that the system designed for cellular phone user's manual was able to direct user to appropriate information at 81.4%, which is sufficient for practical use.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 405, |
|
"end": 412, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 721, |
|
"end": 728, |
|
"text": "Table 4", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "In this paper, we presented a voice query retrieval system in Japanese applied to document search on user's manual for cellular phones with Web access capability. The system recognizes user's naturally spoken queries over the cellular phone by LVCSR and retrieves the relevant passages by text retrieval and then provides the output on the cellular phone screen. In order to improve the performance for spoken short queries, we apply three techniques into text retrieval: 1) utilizing word pairs with dependency relations, 2) distinguishing affirmative and negative expressions, and 3) converging synonyms. With respect to LVCSR for speech over the cellular phone, we adopt acoustic and language models derived from a query corpus for the target user's manual. The evaluation on the system designed for cellular phone user's manual shows that the system is able to direct users to appropriate data at 81.4% of the time, if the matching passage exists in the manual.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "Our next step is to apply this system to different contents such as travelers' guide and customer surveys. We plan to clarify the problems for different contents and to enhance the portability of this system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6." |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Experiments in Spoken Queries for Document Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Barnett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Anderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Broglio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Hudson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Kuo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proceedings of Eurospeech'97", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1323--1326", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barnett, J., S. Anderson, J. Broglio, M. Singh, R. Hudson, and S. W. Kuo. 1997. Experiments in Spoken Queries for Document Retrieval. Proceedings of Eurospeech'97, pp.1323-1326.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Word recognition errors and relevance feedback in spoken query processing", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Crestani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the Fourth International Conference on Flexible Query Answering Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "267--281", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Crestani, F. 2000. Word recognition errors and relevance feedback in spoken query processing. Proceedings of the Fourth International Conference on Flexible Query Answering Systems, pp.267-281.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Speechactivated Text Retrieval System for Multimodal Cellular Phones", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Ishikawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Ikeda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Miki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Adachi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Isotani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Iso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Okumura", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ishikawa, S., T. Ikeda, K. Miki, F. Adachi, R. Isotani, K. Iso, and A. Okumura. 2004. Speech- activated Text Retrieval System for Multimodal Cellular Phones. Proceedings of ICASSP 2004.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Spoken Dialogue System for Queries on Appliance Manuals using Hierarchical Confirmation Strategy", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kawahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Ito", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Komatani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of Eurospeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1701--1704", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kawahara, T., R. Ito, and K. Komatani. 2003. Spoken Dialogue System for Queries on Appliance Manuals using Hierarchical Confirmation Strategy. Proceedings of Eurospeech 2003, pp.1701-1704.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Okapi at TREC-3. Proceedings of the 3rd Text Retrieval Conference (TREC-3)", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Robertson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Walker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Hancock-Beaulieu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Gatford", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "109--126", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robertson, S. E., S. Walker, S. Jones, M. Hancock-Beaulieu, and M. Gatford. 1995. Okapi at TREC-3. Proceedings of the 3rd Text Retrieval Conference (TREC-3), pp.109-126.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Design and Development of Japanese Processing Middleware for Customer Relationship Management", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Satoh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Ikeda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Nakata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Osada", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the 9th Annual Meeting of The Association for Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "109--112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Satoh, K., T. Ikeda, T. Nakata, and S. Osada. 2003. Design and Development of Japanese Processing Middleware for Customer Relationship Management. Proceedings of the 9th Annual Meeting of The Association for Natural Language Processing, pp.109-112 (in Japanese).", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Human-Voice Interface", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Yoshida", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Hagane", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Hatazaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Iso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Hattori", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "NEC Research & Development", |
|
"volume": "43", |
|
"issue": "1", |
|
"pages": "33--36", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoshida, K., H. Hagane, K. Hatazaki, K. Iso, and H. Hattori. 2002. Human-Voice Interface. NEC Research & Development, 43(1), pp.33-36.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Conversational Interfaces: Advances and Challenges. Proceedings of Eurospeech '97", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Zue", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--18", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zue, V. 1997. Conversational Interfaces: Advances and Challenges. Proceedings of Eurospeech '97, KN9-18.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "The configuration of the prototype system.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"text": "The screen of the cellular phone displaying the search result.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"text": "The main page of our system.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"text": "The result page displaying the title list of top ten results for the query.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"text": "The body of the passage displayed when the user selects the title in", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF5": { |
|
"text": "Figure 4", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF6": { |
|
"text": ".", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"content": "<table><tr><td>Standard Expression</td><td colspan=\"2\">Synonymous Expressions</td></tr><tr><td>saito</td><td>webu</td><td>h\u00f4mup\u00eaji</td></tr><tr><td>(site)</td><td>(web)</td><td>(homepage)</td></tr><tr><td>chakushin'on</td><td>chakushinmerod\u00ee</td><td>yobidashion</td></tr><tr><td>(ringtone)</td><td>(ring melody)</td><td>(phone beep)</td></tr><tr><td>ridaiaru</td><td colspan=\"2\">m\u00f4ichido \uf0ae kakeru</td></tr><tr><td>(redial)</td><td colspan=\"2\">(again \uf0ae call)</td></tr></table>", |
|
"text": "An example of a synonym dictionary", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"content": "<table><tr><td>Shashin-o m\u00earu-de okuritai</td></tr><tr><td>(I want to send a picture via email)</td></tr><tr><td>Aikon-o desukutoppu-ni t\u00f4roku shitai</td></tr><tr><td>(I want to register a new icon on the desktop)</td></tr><tr><td>Jushin-shita m\u00earu-o minagara henshin m\u00earu-o sakusei-suru h\u00f4h\u00f4</td></tr><tr><td>(How to write a reply mail while looking at the incoming mail)</td></tr></table>", |
|
"text": "Examples of queries used for evaluation.", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"content": "<table><tr><td>Number of</td><td colspan=\"4\">Retrieval Success Rate for Transcriptions</td></tr><tr><td>Result</td><td/><td/><td/><td/></tr><tr><td>Passages</td><td>BL</td><td>WP</td><td>WP+AUX</td><td>ALL</td></tr><tr><td>1</td><td>40.0%</td><td>42.7%</td><td>44.5%</td><td>49.1%</td></tr><tr><td>5</td><td>65.5%</td><td>69.1%</td><td>70.0%</td><td>77.3%</td></tr><tr><td>10</td><td>73.6%</td><td>73.6%</td><td>74.5%</td><td>87.3%</td></tr></table>", |
|
"text": "The retrieval success rate for the transcriptions of queries.", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"content": "<table><tr><td>Number of</td><td>Retrieval Success Rate</td></tr><tr><td>Result Passages</td><td>for Utterances</td></tr><tr><td>1</td><td>44.3%</td></tr><tr><td>5</td><td>72.5%</td></tr><tr><td>10</td><td>81.4%</td></tr></table>", |
|
"text": "The retrieval success rate for the utterances of queries.", |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |