{ "paper_id": "N09-1005", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T14:42:08.373364Z" }, "title": "Learning Phoneme Mappings for Transliteration without Parallel Data", "authors": [ { "first": "Sujith", "middle": [], "last": "Ravi", "suffix": "", "affiliation": { "laboratory": "", "institution": "University of Southern California Information Sciences Institute Marina del Rey", "location": { "postCode": "90292", "region": "California" } }, "email": "sravi@isi.edu" }, { "first": "Kevin", "middle": [], "last": "Knight", "suffix": "", "affiliation": { "laboratory": "", "institution": "University of Southern California Information Sciences Institute Marina del Rey", "location": { "postCode": "90292", "region": "California" } }, "email": "knight@isi.edu" } ], "year": "", "venue": null, "identifiers": {}, "abstract": "We present a method for performing machine transliteration without any parallel resources. We frame the transliteration task as a decipherment problem and show that it is possible to learn cross-language phoneme mapping tables using only monolingual resources. We compare various methods and evaluate their accuracies on a standard name transliteration task.", "pdf_parse": { "paper_id": "N09-1005", "_pdf_hash": "", "abstract": [ { "text": "We present a method for performing machine transliteration without any parallel resources. We frame the transliteration task as a decipherment problem and show that it is possible to learn cross-language phoneme mapping tables using only monolingual resources. We compare various methods and evaluate their accuracies on a standard name transliteration task.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Abstract", "sec_num": null } ], "body_text": [ { "text": "Transliteration refers to the transport of names and terms between languages with different writing systems and phoneme inventories. 
Recently there has been a large amount of interesting work in this area, and the literature has outgrown being citable in its entirety. Much of this work focuses on backtransliteration, which tries to restore a name or term that has been transported into a foreign language. Here, there is often only one correct target spelling-for example, given jyon.kairu (the name of a U.S. Senator transported to Japanese), we must output \"Jon Kyl\", not \"John Kyre\" or any other variation.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "There are many techniques for transliteration and back-transliteration, and they vary along a number of dimensions:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "\u2022 phoneme substitution vs. character substitution", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "\u2022 heuristic vs. generative vs. discriminative models", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "\u2022 manual vs. 
automatic knowledge acquisition", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "We explore the third dimension, where we see several techniques in use:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "\u2022 Manually-constructed transliteration models, e.g., (Hermjakob et al., 2008) .", "cite_spans": [ { "start": 53, "end": 77, "text": "(Hermjakob et al., 2008)", "ref_id": "BIBREF39" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "\u2022 Models constructed from bilingual dictionaries of terms and names, e.g., (Knight and Graehl, 1998; Huang et al., 2004; Haizhou et al., 2004; Zelenko and Aone, 2006; Yoon et al., 2007; Li et al., 2007; Karimi et al., 2007; Sherif and Kondrak, 2007b; Goldwasser and Roth, 2008b) .", "cite_spans": [ { "start": 75, "end": 100, "text": "(Knight and Graehl, 1998;", "ref_id": "BIBREF43" }, { "start": 101, "end": 120, "text": "Huang et al., 2004;", "ref_id": "BIBREF40" }, { "start": 121, "end": 142, "text": "Haizhou et al., 2004;", "ref_id": "BIBREF38" }, { "start": 143, "end": 166, "text": "Zelenko and Aone, 2006;", "ref_id": "BIBREF59" }, { "start": 167, "end": 185, "text": "Yoon et al., 2007;", "ref_id": "BIBREF58" }, { "start": 186, "end": 202, "text": "Li et al., 2007;", "ref_id": "BIBREF47" }, { "start": 203, "end": 223, "text": "Karimi et al., 2007;", "ref_id": "BIBREF41" }, { "start": 224, "end": 250, "text": "Sherif and Kondrak, 2007b;", "ref_id": "BIBREF53" }, { "start": 251, "end": 278, "text": "Goldwasser and Roth, 2008b)", "ref_id": "BIBREF35" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "\u2022 Extraction of parallel examples from bilingual corpora, using bootstrap dictionaries e.g., (Sherif and Kondrak, 2007a; Goldwasser and Roth, 2008a) .", "cite_spans": [ { "start": 93, "end": 120, "text": "(Sherif and Kondrak, 
2007a;", "ref_id": "BIBREF52" }, { "start": 121, "end": 148, "text": "Goldwasser and Roth, 2008a)", "ref_id": "BIBREF34" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "\u2022 Extraction of parallel examples from comparable corpora, using bootstrap dictionaries, and temporal and word co-occurrence, e.g., Klementiev and Roth, 2008) .", "cite_spans": [ { "start": 132, "end": 158, "text": "Klementiev and Roth, 2008)", "ref_id": "BIBREF42" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "\u2022 Extraction of parallel examples from web queries, using bootstrap dictionaries, e.g., (Nagata et al., 2001; Oh and Isahara, 2006; Kuo et al., 2006; Wu and Chang, 2007) .", "cite_spans": [ { "start": 88, "end": 109, "text": "(Nagata et al., 2001;", "ref_id": "BIBREF48" }, { "start": 110, "end": 131, "text": "Oh and Isahara, 2006;", "ref_id": "BIBREF49" }, { "start": 132, "end": 149, "text": "Kuo et al., 2006;", "ref_id": "BIBREF46" }, { "start": 150, "end": 169, "text": "Wu and Chang, 2007)", "ref_id": "BIBREF56" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "\u2022 Comparing terms from different languages in phonetic space, e.g., Goldberg and Elhadad, 2008) .", "cite_spans": [ { "start": 68, "end": 95, "text": "Goldberg and Elhadad, 2008)", "ref_id": "BIBREF33" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "In this paper, we investigate methods to acquire transliteration mappings from non-parallel sources. We are inspired by previous work in unsupervised learning for natural language, e.g. 
(Yarowsky, 1995; WFSA -A", "cite_spans": [ { "start": 186, "end": 202, "text": "(Yarowsky, 1995;", "ref_id": "BIBREF57" }, { "start": 203, "end": 203, "text": "", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "English word sequence English sound sequence ( SPENCER ABRAHAM ) ( S P EH N S ER EY B R AH HH AE M )", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "WFST -B", "sec_num": null }, { "text": "Japanese sound sequence", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "WFST -C WFST -D", "sec_num": null }, { "text": "( \u30b9 \u30da \u30f3 \u30b5 \u30fc \u30fb \u30a8 \u30fc \u30d6 \u30e9 \u30cf \u30e0 )", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "WFST -C WFST -D", "sec_num": null }, { "text": "Japanese katakana sequence ( S U ", "cite_spans": [ { "start": 27, "end": 32, "text": "( S U", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "WFST -C WFST -D", "sec_num": null }, { "text": "Figure 1: Model used for back-transliteration of Japanese katakana names and terms into English. The model employs a four-stage cascade of weighted finite-state transducers (Knight and Graehl, 1998) . Goldwater and Griffiths, 2007) , and we are also inspired by cryptanalysis-we view a corpus of foreign terms as a code for English, and we attempt to break the code.", "cite_spans": [ { "start": 173, "end": 198, "text": "(Knight and Graehl, 1998)", "ref_id": "BIBREF43" }, { "start": 201, "end": 231, "text": "Goldwater and Griffiths, 2007)", "ref_id": "BIBREF36" } ], "ref_spans": [], "eq_spans": [], "section": "P E N S A A E E B U R A H A M U )", "sec_num": null }, { "text": "We follow (Knight and Graehl, 1998) in tackling back-transliteration of Japanese katakana expressions into English. 
Knight and Graehl (1998) developed a four-stage cascade of finite-state transducers, shown in Figure 1 .", "cite_spans": [ { "start": 10, "end": 35, "text": "(Knight and Graehl, 1998)", "ref_id": "BIBREF43" }, { "start": 116, "end": 140, "text": "Knight and Graehl (1998)", "ref_id": "BIBREF43" } ], "ref_spans": [ { "start": 210, "end": 218, "text": "Figure 1", "ref_id": null } ], "eq_spans": [], "section": "Background", "sec_num": "2" }, { "text": "\u2022 WFSA A -produces an English word sequence w with probability P(w) (based on a unigram word model).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Background", "sec_num": "2" }, { "text": "\u2022 WFST B -generates an English phoneme sequence e corresponding to w with probability P(e|w).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Background", "sec_num": "2" }, { "text": "\u2022 WFST C -transforms the English phoneme sequence into a Japanese phoneme sequence j according to a model P(j|e).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Background", "sec_num": "2" }, { "text": "\u2022 WFST D -writes out the Japanese phoneme sequence into Japanese katakana characters according to a model P(k|j).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Background", "sec_num": "2" }, { "text": "Using the cascade in the reverse (noisy-channel) direction, they are able to translate new katakana names and terms into English. They report 36% error in translating 100 U.S. Senators' names, and they report exceeding human transliteration performance in the presence of optical scanning noise.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Background", "sec_num": "2" }, { "text": "The only transducer that requires parallel training data is WFST C. 
Knight and Graehl (1998) take several thousand phoneme string pairs, automatically align them with the EM algorithm (Dempster et al., 1977) , and construct WFST C from the aligned phoneme pieces.", "cite_spans": [ { "start": 68, "end": 92, "text": "Knight and Graehl (1998)", "ref_id": "BIBREF43" }, { "start": 184, "end": 207, "text": "(Dempster et al., 1977)", "ref_id": "BIBREF32" } ], "ref_spans": [], "eq_spans": [], "section": "Background", "sec_num": "2" }, { "text": "We re-implement their basic method by instantiating a densely-connected version of WFST C with all 1-to-1 and 1-to-2 phoneme connections between English and Japanese. Phoneme bigrams that occur fewer than 10 times in a Japanese corpus are omitted, and we omit 1-to-3 connections. This initial WFST C model has 15320 uniformly weighted parameters. We then train the model on 3343 phoneme string pairs from a bilingual dictionary, using the EM algorithm. EM immediately reduces the connections in the model to those actually observed in the parallel data, and after 14 iterations, there are only 188 connections left with P(j|e) \u2265 0.01. We use this trained WFST C model and apply it to the U.S. Senator name transliteration task (which we update to the 2008 roster). We obtain 40% error, roughly matching the performance observed in (Knight and Graehl, 1998) .", "cite_spans": [ { "start": 831, "end": 856, "text": "(Knight and Graehl, 1998)", "ref_id": "BIBREF43" } ], "ref_spans": [], "eq_spans": [], "section": "Background", "sec_num": "2" }, { "text": "The task of this paper is to learn the mappings in Figure 2 , but without parallel data, and to test those mappings in end-to-end transliteration. We imagine our problem as one faced by monolingual English speaker wandering around Japan, reading a multitude of katakana signs, listening to people speak Japanese, and eventually deciphering those signs into English. 
To mis-quote Warren Weaver: \"When I look at a corpus of Japanese katakana, I say to myself, this is really written in English, but it has been coded in some strange symbols. I will now proceed to decode.\" Our larger motivation is to move toward easily-built transliteration systems for all language pairs, regardless of parallel resources. While Japanese/English transliteration has its own particular features, we believe it is a reasonable starting point. e j P(j|e) e j P(j|e) e j P(j|e) e j P(j|e) e j P(j|e) e j P(j|e) e j P(j|e) e j P(j|e) ", "cite_spans": [], "ref_spans": [ { "start": 51, "end": 59, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "Task and Data", "sec_num": "3" }, { "text": "A A CH I D O CH E N J I N E B A D A W A K O B I A A A K U P U R A Z A CH E S : W A N K A PP U A A N D O : O P U T I K U S U W A N T E N P O A A T I S U T O D E K O R A T I B U : W A S E R I N A A T O S E R I N A P I S U T O N D E T O M O R U T O P I I T A A Y U N I O N A I A N B I R U E P I G U R A M U P I KK A A Y U N I TT O SH I S U T E M U A I D I I D O E R A N D O P I N G U U Y U U A I K E N B E R I I : P I P E R A J I N A M I D O : A J I A K A PP U J Y A I A N TS U P I S A : A J I T O J Y A Z U P I U R A Z E N E R A R U E A K O N A K A SH I A K O O S U : P O I N T O Z E R O A K U A M Y U U Z E U M U : Z O N B I I Z U :", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Task and Data", "sec_num": "3" }, { "text": ": : : : : : :", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Task and Data", "sec_num": "3" }, { "text": "Figure 3: Some Japanese phoneme sequences generated from the monolingual katakana corpus using WFST D.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Task and Data", "sec_num": "3" }, { "text": "Our monolingual resources are:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Task and Data", "sec_num": "3" }, { "text": "\u2022 43717 unique Japanese katakana sequences 
collected from web newspaper data. We split multi-word katakana phrases on the center-dot (\"\u2022\") character, and select a final corpus of 9350 unique sequences. We add monolingual Japanese versions of the 2008 U.S. Senate roster. 1", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Task and Data", "sec_num": "3" }, { "text": "\u2022 The CMU pronunciation dictionary of English, 1 We use \"open\" EM testing, in which unlabeled test data is allowed to be part of unsupervised training. However, no parallel data is allowed.", "cite_spans": [ { "start": 47, "end": 48, "text": "1", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Task and Data", "sec_num": "3" }, { "text": "with 112,151 entries.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Task and Data", "sec_num": "3" }, { "text": "\u2022 The English gigaword corpus. Knight and Graehl (1998) already use frequently-occurring capitalized words to build the WFSA A component of their four-stage cascade.", "cite_spans": [ { "start": 31, "end": 55, "text": "Knight and Graehl (1998)", "ref_id": "BIBREF43" } ], "ref_spans": [], "eq_spans": [], "section": "Task and Data", "sec_num": "3" }, { "text": "We seek to use our English knowledge (derived from 2 and 3) to decipher the Japanese katakana corpus (1) into English. Figure 3 shows a portion of the Japanese corpus, which we transform into Japanese phoneme sequences using the monolingual resource of WFST D. We note that the Japanese phoneme inventory contains 39 unique (\"ciphertext\") symbols, compared to the 40 English (\"plaintext\") phonemes.", "cite_spans": [], "ref_spans": [ { "start": 119, "end": 127, "text": "Figure 3", "ref_id": null } ], "eq_spans": [], "section": "Task and Data", "sec_num": "3" }, { "text": "Our goal is to compare and evaluate the WFST C model learnt under two different scenarios-(a) using parallel data, and (b) using monolingual data. 
For each experiment, we train only the WFST C model and then apply it to the name transliteration task-decoding 100 U.S. Senator names from Japanese to English using the automata shown in Figure 1 . For all experiments, we keep the rest of the models in the cascade (WFSA A, WFST B, and WFST D) unchanged. We evaluate on whole-name error-rate (maximum of 100/100) as well as normalized word edit distance, which gives partial credit for getting the first or last name correct.", "cite_spans": [], "ref_spans": [ { "start": 335, "end": 343, "text": "Figure 1", "ref_id": null } ], "eq_spans": [], "section": "Task and Data", "sec_num": "3" }, { "text": "Non-Parallel Data", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Acquiring Phoneme Mappings from", "sec_num": "4" }, { "text": "Our main data consists of 9350 unique Japanese phoneme sequences, which we can consider as a single long sequence j. As suggested by Knight et al (2006) , we explain the existence of j as the result of someone initially producing a long English phoneme sequence e, according to P(e), then transforming it into j, according to P(j|e). The probability of our observed data P(j) can be written as:", "cite_spans": [ { "start": 133, "end": 152, "text": "Knight et al (2006)", "ref_id": "BIBREF45" } ], "ref_spans": [], "eq_spans": [], "section": "Acquiring Phoneme Mappings from", "sec_num": "4" }, { "text": "P (j) =", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Acquiring Phoneme Mappings from", "sec_num": "4" }, { "text": "e P (e) \u2022 P (j|e)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Acquiring Phoneme Mappings from", "sec_num": "4" }, { "text": "We take P(e) to be some fixed model of monolingual English phoneme production, represented as a weighted finite-state acceptor (WFSA). P(j|e) is implemented as the initial, uniformly-weighted WFST C described in Section 2, with 15320 phonemic connections. 
We next maximize P(j) by manipulating the substitution table P(j|e), aiming to produce a result such as shown in Figure 2 . We accomplish this by composing the English phoneme model P(e) WFSA with the P(j|e) transducer. We then use the EM algorithm to train just the P(j|e) parameters (inside the composition that predicts j), and guess the values for the individual phonemic substitutions that maximize the likelihood of the observed data P(j). 2 We allow EM to run until the P(j) likelihood ratio between subsequent training iterations reaches 0.9999, and we terminate early if 200 iterations are reached.", "cite_spans": [ { "start": 702, "end": 703, "text": "2", "ref_id": null } ], "ref_spans": [ { "start": 369, "end": 377, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "Acquiring Phoneme Mappings from", "sec_num": "4" }, { "text": "Finally, we decode our test set of U.S. Senator names. Following Knight et al (2006) , we stretch out the P(j|e) model probabilities after decipherment training and prior to decoding our test set, by cubing their values.", "cite_spans": [ { "start": 65, "end": 84, "text": "Knight et al (2006)", "ref_id": "BIBREF45" } ], "ref_spans": [], "eq_spans": [], "section": "Acquiring Phoneme Mappings from", "sec_num": "4" }, { "text": "Decipherment under the conditions of transliteration is substantially more difficult than solving letter-substitution ciphers (Knight et al., 2006; Ravi and Knight, 2008; Ravi and Knight, 2009) or phoneme-substitution ciphers (Knight and Yamada, 1999) . 
This is because the target table contains significant non-determinism, and because each symbol has multiple possible fertilities, which introduces uncertainty about the length of the target string.", "cite_spans": [ { "start": 126, "end": 147, "text": "(Knight et al., 2006;", "ref_id": "BIBREF45" }, { "start": 148, "end": 170, "text": "Ravi and Knight, 2008;", "ref_id": "BIBREF50" }, { "start": 171, "end": 193, "text": "Ravi and Knight, 2009)", "ref_id": "BIBREF51" }, { "start": 226, "end": 251, "text": "(Knight and Yamada, 1999)", "ref_id": "BIBREF44" } ], "ref_spans": [], "eq_spans": [], "section": "Acquiring Phoneme Mappings from", "sec_num": "4" }, { "text": "Clearly, we can design P(e) in a number of ways. We might expect that the more the system knows about English, the better it will be able to decipher the Japanese. Our baseline P(e) is a 2-gram phoneme model trained on phoneme sequences from the CMU dictionary. The second row (2a) in Figure 4 shows results when we decipher with this fixed P(e). This approach performs poorly and gets all the Senator names wrong.", "cite_spans": [], "ref_spans": [ { "start": 285, "end": 293, "text": "Figure 4", "ref_id": null } ], "eq_spans": [], "section": "Baseline P(e) Model", "sec_num": "4.1" }, { "text": "When training under non-parallel conditions, we find that we would like to keep our WFST C model small, rather than instantiating a fully-connected model. In the supervised case, parallel training allows the trained model to retain only those connections which were observed from the data, and this helps eliminate many bad connections from the model. In the unsupervised case, there is no parallel data available to help us make the right choices.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Consonant Parity", "sec_num": "4.2" }, { "text": "We therefore use prior knowledge and place a consonant-parity constraint on the WFST C model. 
Prior to EM training, we throw out any mapping from the P(j|e) substitution model that does not have the same number of English and Japanese consonant phonemes. This is a pattern that we observe across a range of transliteration tasks. Here are ex-", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Consonant Parity", "sec_num": "4.2" }, { "text": "Name Transliteration Error whole-name error norm. edit distance 1 e \u2192 j = { 1-to-1, 1-to-2 } 40 25.9 + EM aligned with parallel data 2a e \u2192 j = { 1-to-1, 1-to-2 } 100 100.0 + decipherment training with 2-gram English P(e) 2b e \u2192 j = { 1-to-1, 1-to-2 } 98 89.8 + decipherment training with 2-gram English P(e) + consonant-parity 2c e \u2192 j = { 1-to-1, 1-to-2 } 94 73.6 + decipherment training with 3-gram English P(e) + consonant-parity 2d e \u2192 j = { 1-to-1, 1-to-2 } 77 57.2 + decipherment training with a word-based English model + consonant-parity 2e e \u2192 j = { 1-to-1, 1-to-2 } 73 54.2 + decipherment training with a word-based English model + consonant-parity + initialize mappings having consonant matches with higher probability weights Figure 4 : Results on name transliteration obtained when using the phonemic substitution model trained under different scenarios-(1) parallel training data, (2a-e) using only monolingual resources.", "cite_spans": [], "ref_spans": [ { "start": 739, "end": 747, "text": "Figure 4", "ref_id": null } ], "eq_spans": [], "section": "Phonemic Substitution Model", "sec_num": null }, { "text": "amples of mappings where consonant parity is violated:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Phonemic Substitution Model", "sec_num": null }, { "text": "K => a N => e e EH => s a EY => n", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Phonemic Substitution Model", "sec_num": null }, { "text": "Modifying the WFST C in this way leads to better decipherment tables and slightly better results for the U.S. Senator task. 
Normalized edit distance drops from 100 to just under 90 (row 2b in Figure 4 ).", "cite_spans": [], "ref_spans": [ { "start": 192, "end": 200, "text": "Figure 4", "ref_id": null } ], "eq_spans": [], "section": "Phonemic Substitution Model", "sec_num": null }, { "text": "Row 2c in Figure 4 shows decipherment results when we move to a 3-gram English phoneme model for P(e). We notice considerable improvements in accuracy. On the U.S. Senator task, normalized edit distance drops from 89.8 to 73.6, and whole-name error decreases from 98 to 94. When we analyze the results from deciphering with a 3-gram P(e) model, we find that many of the Japanese phoneme test sequences are decoded into English phoneme sequences (such as \"IH K R IH N\" and \"AE G M AH N\") that are not valid words. This happens because the models we used for decipherment so far have no knowledge of what constitutes a globally valid English sequence. To help the phonemic substitution model learn this information automatically, we build a word-based P(e) from English phoneme sequences in the CMU dictionary and use this model for decipherment train-ing. The word-based model produces complete English phoneme sequences corresponding to 76,152 actual English words from the CMU dictionary. The English phoneme sequences are represented as paths through a WFSA, and all paths are weighted equally. We represent the word-based model in compact form, using determinization and minimization techniques applicable to weighted finite-state automata. This allows us to perform efficient EM training on the cascade of P(e) and P(j|e) models. 
Under this scheme, English phoneme sequences resulting from decipherment are always analyzable into actual words.", "cite_spans": [], "ref_spans": [ { "start": 10, "end": 18, "text": "Figure 4", "ref_id": null } ], "eq_spans": [], "section": "Better English Models", "sec_num": "4.3" }, { "text": "Row 2d in Figure 4 shows the results we obtain when training our WFST C with a word-based English phoneme model. Using the word-based model produces the best result so far on the phonemic substitution task with non-parallel data. On the U.S. Senator task, word-based decipherment outperforms the other methods by a large margin. It gets 23 out of 100 Senator names exactly right, with a much lower normalized edit distance (57.2). We have managed to achieve this performance using only monolingual data. This also puts us within reach of the parallel-trained system's performance (40% whole-name errors, and 25.9 word edit distance error) without using a single English/Japanese pair for training.", "cite_spans": [], "ref_spans": [ { "start": 10, "end": 18, "text": "Figure 4", "ref_id": null } ], "eq_spans": [], "section": "Better English Models", "sec_num": "4.3" }, { "text": "To summarize, the quality of the English phoneme e j P(j|e) e j P(j|e) e j P(j|e) e j P(j|e) e j P(j|e) e j P(j|e) e j P(j|e) e j P(j|e) model used in decipherment training has a large effect on the learnt P(j|e) phonemic substitution table (i.e., probabilities for the various phoneme mappings within the WFST C model), which in turn affects the quality of the back-transliterated English output produced when decoding Japanese. Figure 5 shows the phonemic substitution table learnt using word-based decipherment. The mappings are reasonable, given the lack of parallel data. 
They are not entirely correct-for example, the mapping \"S \u2192 s u\" is there, but \"S \u2192 s\" is missing.", "cite_spans": [], "ref_spans": [ { "start": 430, "end": 438, "text": "Figure 5", "ref_id": "FIGREF1" } ], "eq_spans": [], "section": "Better English Models", "sec_num": "4.3" }, { "text": "Sample end-to-end transliterations are illustrated in Figure 6 . The figure shows how the transliteration results from non-parallel training improve steadily as we use stronger decipherment techniques. We note that in one case (LAUTENBERG), the decipherment mapping table leads to a correct answer where the mapping table derived from parallel data does not. Because parallel data is limited, it may not contain all of the necessary mappings.", "cite_spans": [], "ref_spans": [ { "start": 54, "end": 62, "text": "Figure 6", "ref_id": null } ], "eq_spans": [], "section": "Better English Models", "sec_num": "4.3" }, { "text": "Monolingual corpora are more easily available than parallel corpora, so we can use increasing amounts of monolingual Japanese training data during decipherment training. The table below shows that using more Japanese training data produces better transliteration results when deciphering with the word-based English model.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Size of Japanese Training Data", "sec_num": "4.4" }, { "text": "Error on name transliteration task (# of phoneme sequences) whole-name error normalized word edit distance 4,674 87 69.7 9,350 77 57.2 Figure 6 : Results for end-to-end name transliteration. This figure shows the correct answer, the answer obtained by training mappings on parallel data (Knight and Graehl, 1998) , and various answers obtained by deciphering nonparallel data. 
Method 1 uses a 2-gram P(e), Method 2 uses a 3-gram P(e), and Method 3 uses a word-based P(e).", "cite_spans": [ { "start": 287, "end": 312, "text": "(Knight and Graehl, 1998)", "ref_id": "BIBREF43" } ], "ref_spans": [ { "start": 135, "end": 143, "text": "Figure 6", "ref_id": null } ], "eq_spans": [], "section": "Japanese training data", "sec_num": null }, { "text": "So far, the P(j|e) connections within the WFST C model were initialized with uniform weights prior to EM training. It is a known fact that the EM algorithm does not necessarily find a global minimum for the given objective function. If the search space is bumpy and non-convex as is the case in our problem, EM can get stuck in any of the local minima depending on what weights were used to initialize the search. Different sets of initialization weights can lead to different convergence points during EM training, or in other words, depending on how the P(j|e) probabilities are initialized, the final P(j|e) substitution table learnt by EM can vary. We can use some prior knowledge to initialize the probability weights in our WFST C model, so as to give EM a good starting point to work with. 
Instead of using uniform weights, in the P(j|e) model we set higher weights for the mappings where English and Japanese sounds share common consonant phonemes.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "P(j|e) Initialization", "sec_num": "4.5" }, { "text": "For example, mappings such as:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "P(j|e) Initialization", "sec_num": "4.5" }, { "text": "N => n N => a n D => d D => d o", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "P(j|e) Initialization", "sec_num": "4.5" }, { "text": "are weighted X (a constant) times higher than other mappings such as:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "P(j|e) Initialization", "sec_num": "4.5" }, { "text": "N => b N => r D => B", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "P(j|e) Initialization", "sec_num": "4.5" }, { "text": "EY => a a in the P(j|e) model. In our experiments, we set the value X to 100.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "P(j|e) Initialization", "sec_num": "4.5" }, { "text": "Initializing the WFST C in this way results in EM learning better substitution tables and yields slightly better results for the Senator task. Normalized edit distance drops from 57.2 to 54.2, and the wholename error is also reduced from 77% to 73% (row 2e in Figure 4 ).", "cite_spans": [], "ref_spans": [ { "start": 260, "end": 268, "text": "Figure 4", "ref_id": null } ], "eq_spans": [], "section": "P(j|e) Initialization", "sec_num": "4.5" }, { "text": "We saw earlier (in Section 4.4) that using more monolingual Japanese training data yields improvements in decipherment results. Similarly, we hypothesize that using more monolingual English data can drive the decipherment towards better transliteration results. On the English side, we build different word-based P(e) models, each trained on different amounts of data (English phoneme sequences from the CMU dictionary). 
The table below shows that deciphering with a word-based English model built from more data produces better transliteration results. This yields the best transliteration results on the Senator task with non-parallel data, getting 34 out of 100 Senator names exactly right.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Size of English Training Data", "sec_num": "4.6" }, { "text": "It is possible to improve our results on the U.S. Senator task further using external monolingual resources. Web counts are frequently used to automatically re-rank candidate lists for various NLP tasks (Al-Onaizan and Knight, 2002) . We extract the top 10 English candidates produced by our wordbased decipherment method for each Japanese test name. Using a search engine, we query the entire English name (first and last name) corresponding to each candidate, and collect search result counts. We then re-rank the candidates using the collected Web counts and pick the most frequent candidate as our choice.", "cite_spans": [ { "start": 219, "end": 232, "text": "Knight, 2002)", "ref_id": "BIBREF31" } ], "ref_spans": [], "eq_spans": [], "section": "Re-ranking Results Using the Web", "sec_num": "4.7" }, { "text": "For example, France Murkowski gets only 1 hit on Google, whereas Frank Murkowski gets 135,000 hits. Re-ranking the results in this manner lowers the whole-name error on the Senator task from 66% to 61%, and also lowers the normalized edit distance from 49.3 to 48.8. 
However, we do note that re-ranking using Web counts produces similar improvements in the case of parallel training as well and lowers the whole-name error from 40% to 24%.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Re-ranking Results Using the Web", "sec_num": "4.7" }, { "text": "So, the re-ranking idea, which is simple and requires only monolingual resources, seems like a nice strategy to apply at the end of transliteration experiments (during decoding), and can result in further gains on the final transliteration performance.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Re-ranking Results Using the Web", "sec_num": "4.7" }, { "text": "We also present decipherment results when using comparable corpora for training the WFST C model. We use English and Japanese phoneme sequences derived from a parallel corpus containing 2,683 phoneme sequence pairs to construct comparable corpora (such that for each Japanese phoneme sequence, the correct back-transliterated phoneme sequence is present somewhere in the English data) and apply the same decipherment strategy using a word-based English model. The table below compares the transliteration results for the U.S. Senator task, when using comparable versus non-parallel data for decipherment training. 
While training on comparable corpora does have benefits and reduces the whole-name error to 59% on the Senator task, it is encouraging to see that our best decipherment results using only non-parallel data come close (66% error).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Comparable versus Non-Parallel Corpora", "sec_num": "5" }, { "text": "English/Japanese Corpora Error on name transliteration task (# of phoneme sequences) whole-name error normalized word edit distance Comparable Corpora 59 41.8 (English = 2,608 Japanese = 2,455) Non-Parallel Corpora 66 49.3 (English = 98,000 Japanese = 9,350)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Comparable versus Non-Parallel Corpora", "sec_num": "5" }, { "text": "We have presented a method for attacking machine transliteration problems without parallel data. We developed phonemic substitution tables trained using only monolingual resources and demonstrated their performance in an end-to-end name transliteration task. We showed that consistent improvements in transliteration performance are possible with the use of strong decipherment techniques, and our best system achieves significant improvements over the baseline system. 
In future work, we would like to develop more powerful decipherment models and techniques, and we would like to harness the information available from a wide variety of monolingual resources, and use it to further narrow the gap between parallel-trained and non-parallel-trained approaches.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion", "sec_num": "6" }, { "text": "In our experiments, we use the Carmel finite-state transducer package(Graehl, 1997), a toolkit with an algorithm for EM training of weighted finite-state transducers.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null } ], "back_matter": [ { "text": "This research was supported by the Defense Advanced Research Projects Agency under SRI International's prime Contract Number NBCHD040058.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Acknowledgements", "sec_num": "7" } ], "bib_entries": { "BIBREF1": { "ref_id": "b1", "title": "/0*\" 1&\"&''*' 12)%*,#+ 3\"&#%#%$", "authors": [ { "first": "!\"#$#", "middle": [], "last": "%&' (", "suffix": "" } ], "year": null, "venue": "", "volume": "", "issue": "", "pages": "45678--45687", "other_ids": {}, "num": null, "urls": [], "raw_text": "!\"#$#%&' ()\"\"*+,-.%/0*\" 1&\"&''*' 12)%*,#+ 3\"&#%#%$ 45678-9::;", "links": null }, "BIBREF7": { "ref_id": "b7", "title": "*@6?A.B) D:*>)CA88A.B) C=2@)C6", "authors": [], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "*@6?A.B) D:*>)CA88A.B) C=2@)C6.", "links": null }, "BIBREF8": { "ref_id": "b8", "title": "<=>?& @3A# R!FG1K-JL=GH", "authors": [], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "<=>?& @3A# R!FG1K-JL=GH-", "links": null }, "BIBREF9": { "ref_id": "b9", "title": "C=?) 
D:!:.)CA76.)", "authors": [ { "first": "R!fg1k-Jl=gh @", "middle": [], "last": "=<", "suffix": "" } ], "year": null, "venue": "", "volume": "6", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "R!FG1K-JL=GH @=<;6)8:C=?) D:!:.)CA76.)", "links": null }, "BIBREF10": { "ref_id": "b10", "title": "D=@.)!F6AFF6? RGSS C6.D:9A.)", "authors": [], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "R!FG1K-J <2?& B#C5# RGSS-JLH5.A.H-RGSS-JLH5.A.H D=@.)!F6AFF6? RGSS C6.D:9A.)", "links": null }, "BIBREF12": { "ref_id": "b12", "title": "F8A.2) F?:.*6)", "authors": [], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "<.)F8A.2) F?:.*6)", "links": null }, "BIBREF17": { "ref_id": "b17", "title": "/0*\" 1&\"&''*' 12)%*,#+ 3\"&#%#%$", "authors": [ { "first": "!\"#$#", "middle": [], "last": "%&' (", "suffix": "" } ], "year": null, "venue": "", "volume": "", "issue": "", "pages": "45678--45687", "other_ids": {}, "num": null, "urls": [], "raw_text": "!\"#$#%&' ()\"\"*+,-.%/0*\" 1&\"&''*' 12)%*,#+ 3\"&#%#%$ 45678-9::;", "links": null }, "BIBREF21": { "ref_id": "b21", "title": "6)*<=9=) F1GH .JI", "authors": [], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "!:*6)*<=9=) F1GH .JI. 
,-'.& /00", "links": null }, "BIBREF22": { "ref_id": "b22", "title": "HLGM :*:)>: >=?6:)6:;2)", "authors": [ { "first": "=", "middle": [], "last": "Hlgm-", "suffix": "" } ], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "=.HLGM-.7.7.- =.HLGM :*:)>: >=?6:)6:;2)", "links": null }, "BIBREF26": { "ref_id": "b26", "title": "*@6?A.B) D:*>)CA88A.B) C=2@ <=>?& @3A# R!FG1K-JL=GH", "authors": [], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "*@6?A.B) D:*>)CA88A.B) C=2@ <=>?& @3A# R!FG1K-JL=GH-", "links": null }, "BIBREF27": { "ref_id": "b27", "title": "C=?) D:!:.)CA76.)", "authors": [ { "first": "R!fg1k-Jl=gh @", "middle": [], "last": "=<", "suffix": "" } ], "year": null, "venue": "", "volume": "6", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "R!FG1K-JL=GH @=<;6)8:C=?) D:!:.)CA76.)", "links": null }, "BIBREF28": { "ref_id": "b28", "title": "D=@.)!F6AFF6? RGSS C6.D:9A.) RGSS", "authors": [], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "R!FG <2?& B#C5# RGSS-JLH5.A.H-RGSS-JLH5.A.H D=@.)!F6AFF6? RGSS C6.D:9A.) RGSS", "links": null }, "BIBREF29": { "ref_id": "b29", "title": "F8A.2) F?:.*6)", "authors": [], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "<.)F8A.2) F?:.*6)", "links": null }, "BIBREF31": { "ref_id": "b31", "title": "Translating named entities using monolingual and bilingual resources", "authors": [ { "first": "K", "middle": [], "last": "Al-Onaizan", "suffix": "" }, { "first": "", "middle": [], "last": "Knight", "suffix": "" } ], "year": 2002, "venue": "Proc. of ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Al-Onaizan and K. Knight. 2002. 
Translating named entities using monolingual and bilingual resources. In Proc. of ACL.", "links": null }, "BIBREF32": { "ref_id": "b32", "title": "Maximum likelihood from incomplete data via the EM algorithm", "authors": [ { "first": "A", "middle": [ "P" ], "last": "Dempster", "suffix": "" }, { "first": "N", "middle": [ "M" ], "last": "Laird", "suffix": "" }, { "first": "D", "middle": [ "B" ], "last": "Rubin", "suffix": "" } ], "year": 1977, "venue": "Journal of the Royal Statistical Society Series", "volume": "39", "issue": "4", "pages": "1--38", "other_ids": {}, "num": null, "urls": [], "raw_text": "A. P. Dempster, N. M. Laird, and D. B. Rubin. 1977. Maximum likelihood from incomplete data via the EM algorithm. Journal of the Royal Statistical Society Se- ries, 39(4):1-38.", "links": null }, "BIBREF33": { "ref_id": "b33", "title": "Identification of transliterated foreign words in Hebrew script", "authors": [ { "first": "Y", "middle": [], "last": "Goldberg", "suffix": "" }, { "first": "M", "middle": [], "last": "Elhadad", "suffix": "" } ], "year": 2008, "venue": "Proc. of CICLing", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Y. Goldberg and M. Elhadad. 2008. Identification of transliterated foreign words in Hebrew script. In Proc. of CICLing.", "links": null }, "BIBREF34": { "ref_id": "b34", "title": "Active sample selection for named entity transliteration", "authors": [ { "first": "D", "middle": [], "last": "Goldwasser", "suffix": "" }, { "first": "D", "middle": [], "last": "Roth", "suffix": "" } ], "year": 2008, "venue": "Proc. of ACL/HLT Short Papers", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "D. Goldwasser and D. Roth. 2008a. Active sample se- lection for named entity transliteration. In Proc. 
of ACL/HLT Short Papers.", "links": null }, "BIBREF35": { "ref_id": "b35", "title": "Transliteration as constrained optimization", "authors": [ { "first": "D", "middle": [], "last": "Goldwasser", "suffix": "" }, { "first": "D", "middle": [], "last": "Roth", "suffix": "" } ], "year": 2008, "venue": "Proc. of EMNLP", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "D. Goldwasser and D. Roth. 2008b. Transliteration as constrained optimization. In Proc. of EMNLP.", "links": null }, "BIBREF36": { "ref_id": "b36", "title": "A fully Bayesian approach to unsupervised part-of-speech tagging", "authors": [ { "first": "S", "middle": [], "last": "Goldwater", "suffix": "" }, { "first": "L", "middle": [], "last": "Griffiths", "suffix": "" }, { "first": "T", "middle": [], "last": "", "suffix": "" } ], "year": 2007, "venue": "Proc. of ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "S. Goldwater and L. Griffiths, T. 2007. A fully Bayesian approach to unsupervised part-of-speech tagging. In Proc. of ACL.", "links": null }, "BIBREF37": { "ref_id": "b37", "title": "Carmel finite-state toolkit", "authors": [ { "first": "J", "middle": [], "last": "", "suffix": "" } ], "year": 1997, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "J. Graehl. 1997. Carmel finite-state toolkit.", "links": null }, "BIBREF38": { "ref_id": "b38", "title": "A joint sourcechannel model for machine transliteration", "authors": [ { "first": "L", "middle": [], "last": "Haizhou", "suffix": "" }, { "first": "Z", "middle": [], "last": "Min", "suffix": "" }, { "first": "S", "middle": [], "last": "Jian", "suffix": "" } ], "year": 2004, "venue": "Proc. of ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "L. Haizhou, Z. Min, and S. Jian. 2004. A joint source- channel model for machine transliteration. In Proc. 
of ACL.", "links": null }, "BIBREF39": { "ref_id": "b39", "title": "Name translation in statistical machine translation-learning when to transliterate", "authors": [ { "first": "U", "middle": [], "last": "Hermjakob", "suffix": "" }, { "first": "K", "middle": [], "last": "Knight", "suffix": "" }, { "first": "H", "middle": [], "last": "Daume", "suffix": "" } ], "year": 2008, "venue": "Proc. of ACL/HLT", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "U. Hermjakob, K. Knight, and H. Daume. 2008. Name translation in statistical machine translation-learning when to transliterate. In Proc. of ACL/HLT.", "links": null }, "BIBREF40": { "ref_id": "b40", "title": "Improving named entity translation combining phonetic and semantic similarities", "authors": [ { "first": "S", "middle": [], "last": "Huang", "suffix": "" }, { "first": "A", "middle": [], "last": "Vogel", "suffix": "" }, { "first": "", "middle": [], "last": "Waibel", "suffix": "" } ], "year": 2004, "venue": "Proc. of HLT/NAACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Huang, S. Vogel, and A. Waibel. 2004. Improving named entity translation combining phonetic and se- mantic similarities. In Proc. of HLT/NAACL.", "links": null }, "BIBREF41": { "ref_id": "b41", "title": "Collapsed consonant and vowel models: New approaches for English-Persian transliteration and backtransliteration", "authors": [ { "first": "S", "middle": [], "last": "Karimi", "suffix": "" }, { "first": "F", "middle": [], "last": "Scholer", "suffix": "" }, { "first": "A", "middle": [], "last": "Turpin", "suffix": "" } ], "year": 2007, "venue": "Proc. of ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "S. Karimi, F. Scholer, and A. Turpin. 2007. Col- lapsed consonant and vowel models: New ap- proaches for English-Persian transliteration and back- transliteration. In Proc. 
of ACL.", "links": null }, "BIBREF42": { "ref_id": "b42", "title": "Named entity transliteration and discovery in multilingual corpora", "authors": [ { "first": "A", "middle": [], "last": "Klementiev", "suffix": "" }, { "first": "D", "middle": [], "last": "Roth", "suffix": "" } ], "year": 2008, "venue": "Learning Machine Translation", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "A. Klementiev and D. Roth. 2008. Named entity translit- eration and discovery in multilingual corpora. In Learning Machine Translation. MIT press.", "links": null }, "BIBREF43": { "ref_id": "b43", "title": "Machine transliteration", "authors": [ { "first": "K", "middle": [], "last": "Knight", "suffix": "" }, { "first": "J", "middle": [], "last": "Graehl", "suffix": "" } ], "year": 1998, "venue": "Computational Linguistics", "volume": "24", "issue": "4", "pages": "599--612", "other_ids": {}, "num": null, "urls": [], "raw_text": "K. Knight and J. Graehl. 1998. Machine transliteration. Computational Linguistics, 24(4):599-612.", "links": null }, "BIBREF44": { "ref_id": "b44", "title": "A computational approach to deciphering unknown scripts", "authors": [ { "first": "K", "middle": [], "last": "Knight", "suffix": "" }, { "first": "K", "middle": [], "last": "Yamada", "suffix": "" } ], "year": 1999, "venue": "Proc. of the ACL Workshop on Unsupervised Learning in Natural Language Processing", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "K. Knight and K. Yamada. 1999. A computational ap- proach to deciphering unknown scripts. In Proc. 
of the ACL Workshop on Unsupervised Learning in Natural Language Processing.", "links": null }, "BIBREF45": { "ref_id": "b45", "title": "Unsupervised analysis for decipherment problems", "authors": [ { "first": "K", "middle": [], "last": "Knight", "suffix": "" }, { "first": "A", "middle": [], "last": "Nair", "suffix": "" }, { "first": "N", "middle": [], "last": "Rathod", "suffix": "" }, { "first": "K", "middle": [], "last": "Yamada", "suffix": "" } ], "year": 2006, "venue": "Proc. of COLING/ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "K. Knight, A. Nair, N. Rathod, and K. Yamada. 2006. Unsupervised analysis for decipherment problems. In Proc. of COLING/ACL.", "links": null }, "BIBREF46": { "ref_id": "b46", "title": "Learning transliteration lexicons from the web", "authors": [ { "first": "J", "middle": [], "last": "Kuo", "suffix": "" }, { "first": "H", "middle": [], "last": "Li", "suffix": "" }, { "first": "Y", "middle": [], "last": "Yang", "suffix": "" } ], "year": 2006, "venue": "Proc. of ACL/COLING", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "J. Kuo, H. Li, and Y. Yang. 2006. Learning translitera- tion lexicons from the web. In Proc. of ACL/COLING.", "links": null }, "BIBREF47": { "ref_id": "b47", "title": "Semantic transliteration of personal names", "authors": [ { "first": "H", "middle": [], "last": "Li", "suffix": "" }, { "first": "C", "middle": [], "last": "Sim", "suffix": "" }, { "first": "K", "middle": [], "last": "", "suffix": "" }, { "first": "J", "middle": [], "last": "Kuo", "suffix": "" }, { "first": "M", "middle": [], "last": "Dong", "suffix": "" } ], "year": 2007, "venue": "Proc. of ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "H. Li, C. Sim, K., J. Kuo, and M. Dong. 2007. Semantic transliteration of personal names. In Proc. 
of ACL.", "links": null }, "BIBREF48": { "ref_id": "b48", "title": "Using the web as a bilingual dictionary", "authors": [ { "first": "M", "middle": [], "last": "Nagata", "suffix": "" }, { "first": "T", "middle": [], "last": "Saito", "suffix": "" }, { "first": "K", "middle": [], "last": "Suzuki", "suffix": "" } ], "year": 2001, "venue": "Proc. of the ACL Workshop on Data-driven Methods in Machine Translation", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "M. Nagata, T. Saito, and K. Suzuki. 2001. Using the web as a bilingual dictionary. In Proc. of the ACL Work- shop on Data-driven Methods in Machine Translation.", "links": null }, "BIBREF49": { "ref_id": "b49", "title": "Mining the web for transliteration lexicons: Joint-validation approach", "authors": [ { "first": "J", "middle": [], "last": "Oh", "suffix": "" }, { "first": "H", "middle": [], "last": "Isahara", "suffix": "" } ], "year": 2006, "venue": "Proc. of the IEEE/WIC/ACM International Conference on Web Intelligence", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "J. Oh and H. Isahara. 2006. Mining the web for translit- eration lexicons: Joint-validation approach. In Proc. of the IEEE/WIC/ACM International Conference on Web Intelligence.", "links": null }, "BIBREF50": { "ref_id": "b50", "title": "Attacking decipherment problems optimally with low-order n-gram models", "authors": [ { "first": "S", "middle": [], "last": "Ravi", "suffix": "" }, { "first": "K", "middle": [], "last": "Knight", "suffix": "" } ], "year": 2008, "venue": "Proc. of EMNLP", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "S. Ravi and K. Knight. 2008. Attacking decipherment problems optimally with low-order n-gram models. In Proc. 
of EMNLP.", "links": null }, "BIBREF51": { "ref_id": "b51", "title": "Probabilistic methods for a Japanese syllable cipher", "authors": [ { "first": "S", "middle": [], "last": "Ravi", "suffix": "" }, { "first": "K", "middle": [], "last": "Knight", "suffix": "" } ], "year": 2009, "venue": "Proc. of the International Conference on the Computer Processing of Oriental Languages (ICCPOL)", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "S. Ravi and K. Knight. 2009. Probabilistic methods for a Japanese syllable cipher. In Proc. of the International Conference on the Computer Processing of Oriental Languages (ICCPOL).", "links": null }, "BIBREF52": { "ref_id": "b52", "title": "Bootstrapping a stochastic transducer for arabic-english transliteration extraction", "authors": [ { "first": "T", "middle": [], "last": "Sherif", "suffix": "" }, { "first": "G", "middle": [], "last": "Kondrak", "suffix": "" } ], "year": 2007, "venue": "Proc. of ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "T. Sherif and G. Kondrak. 2007a. Bootstrapping a stochastic transducer for arabic-english transliteration extraction. In Proc. of ACL.", "links": null }, "BIBREF53": { "ref_id": "b53", "title": "Substring-based transliteration", "authors": [ { "first": "T", "middle": [], "last": "Sherif", "suffix": "" }, { "first": "G", "middle": [], "last": "Kondrak", "suffix": "" } ], "year": 2007, "venue": "Proc. of ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "T. Sherif and G. Kondrak. 2007b. Substring-based transliteration. In Proc. 
of ACL.", "links": null }, "BIBREF54": { "ref_id": "b54", "title": "Named entity transliteration with comparable corpora", "authors": [ { "first": "R", "middle": [], "last": "Sproat", "suffix": "" }, { "first": "T", "middle": [], "last": "Tao", "suffix": "" }, { "first": "C", "middle": [], "last": "Zhai", "suffix": "" } ], "year": 2006, "venue": "Proc. of ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "R. Sproat, T. Tao, and C. Zhai. 2006. Named entity transliteration with comparable corpora. In Proc. of ACL.", "links": null }, "BIBREF55": { "ref_id": "b55", "title": "Unsupervised named entity transliteration using temporal and phonetic correlation", "authors": [ { "first": "T", "middle": [], "last": "Tao", "suffix": "" }, { "first": "S", "middle": [], "last": "Yoon", "suffix": "" }, { "first": "A", "middle": [], "last": "Fister", "suffix": "" }, { "first": "R", "middle": [], "last": "Sproat", "suffix": "" }, { "first": "C", "middle": [], "last": "Zhai", "suffix": "" } ], "year": 2006, "venue": "Proc. of EMNLP", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "T. Tao, S. Yoon, A. Fister, R. Sproat, and C. Zhai. 2006. Unsupervised named entity transliteration using tem- poral and phonetic correlation. In Proc. of EMNLP.", "links": null }, "BIBREF56": { "ref_id": "b56", "title": "Learning to find English to Chinese transliterations on the web", "authors": [ { "first": "J", "middle": [], "last": "Wu", "suffix": "" }, { "first": "S", "middle": [], "last": "Chang", "suffix": "" }, { "first": "J", "middle": [], "last": "", "suffix": "" } ], "year": 2007, "venue": "Proc. of EMNLP/CoNLL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "J. Wu and S. Chang, J. 2007. Learning to find English to Chinese transliterations on the web. In Proc. 
of EMNLP/CoNLL.", "links": null }, "BIBREF57": { "ref_id": "b57", "title": "Unsupervised word sense disambiguation rivaling supervised methods", "authors": [ { "first": "D", "middle": [], "last": "Yarowsky", "suffix": "" } ], "year": 1995, "venue": "Proc. of ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "D. Yarowsky. 1995. Unsupervised word sense disam- biguation rivaling supervised methods. In Proc. of ACL.", "links": null }, "BIBREF58": { "ref_id": "b58", "title": "Multilingual transliteration using feature based phonetic method", "authors": [ { "first": "K", "middle": [], "last": "Yoon", "suffix": "" }, { "first": "R", "middle": [], "last": "Kim", "suffix": "" }, { "first": "", "middle": [], "last": "Sproat", "suffix": "" } ], "year": 2007, "venue": "Proc. of ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Yoon, K. Kim, and R. Sproat. 2007. Multilingual transliteration using feature based phonetic method. In Proc. of ACL.", "links": null }, "BIBREF59": { "ref_id": "b59", "title": "Discriminative methods for transliteration", "authors": [ { "first": "D", "middle": [], "last": "Zelenko", "suffix": "" }, { "first": "C", "middle": [], "last": "Aone", "suffix": "" } ], "year": 2006, "venue": "Proc. of EMNLP", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "D. Zelenko and C. Aone. 2006. Discriminative methods for transliteration. In Proc. of EMNLP.", "links": null } }, "ref_entries": { "FIGREF0": { "text": "Figure 2 shows the phonemic substitution table learnt from parallel training.", "type_str": "figure", "num": null, "uris": null }, "FIGREF1": { "text": "Phonemic substitution table learnt from non-parallel corpora. For each English phoneme, only the top ten mappings with P(j|e) > 0.01 are shown.", "type_str": "figure", "num": null, "uris": null } } } }