|
{ |
|
"paper_id": "Y14-1032", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:44:47.478360Z" |
|
}, |
|
"title": "Improving Statistical Machine Translation Accuracy Using Bilingual Lexicon Extraction with Paraphrases", |
|
"authors": [ |
|
{ |
|
"first": "Chenhui", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Kyoto University", |
|
"location": {} |
|
}, |
|
"email": "chu@nlp.ist.i.kyoto-u.ac.jp" |
|
}, |
|
{ |
|
"first": "Toshiaki", |
|
"middle": [], |
|
"last": "Nakazawa", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "nakazawa@pa.jst.jp" |
|
}, |
|
{ |
|
"first": "Sadao", |
|
"middle": [], |
|
"last": "Kurohashi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Kyoto University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Statistical machine translation (SMT) suffers from the accuracy problem that the translation pairs and their feature scores in the translation model can be inaccurate. The accuracy problem is caused by the quality of the unsupervised methods used for translation model learning. Previous studies propose estimating comparable features for the translation pairs in the translation model from comparable corpora, to improve the accuracy of the translation model. Comparable feature estimation is based on bilingual lexicon extraction (BLE) technology. However, BLE suffers from the data sparseness problem, which makes the comparable features inaccurate. In this paper, we propose using paraphrases to address this problem. Paraphrases are used to smooth the vectors used in comparable feature estimation with BLE. In this way, we improve the quality of comparable features, which can improve the accuracy of the translation model thus improve SMT performance. Experiments conducted on Chinese-English phrase-based SMT (PBSMT) verify the effectiveness of our proposed method. 1 Scarceness of parallel corpora also leads to the low coverage of the translation model (which we call the coverage problem of SMT), however we do not tackle this in this paper.", |
|
"pdf_parse": { |
|
"paper_id": "Y14-1032", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Statistical machine translation (SMT) suffers from the accuracy problem that the translation pairs and their feature scores in the translation model can be inaccurate. The accuracy problem is caused by the quality of the unsupervised methods used for translation model learning. Previous studies propose estimating comparable features for the translation pairs in the translation model from comparable corpora, to improve the accuracy of the translation model. Comparable feature estimation is based on bilingual lexicon extraction (BLE) technology. However, BLE suffers from the data sparseness problem, which makes the comparable features inaccurate. In this paper, we propose using paraphrases to address this problem. Paraphrases are used to smooth the vectors used in comparable feature estimation with BLE. In this way, we improve the quality of comparable features, which can improve the accuracy of the translation model thus improve SMT performance. Experiments conducted on Chinese-English phrase-based SMT (PBSMT) verify the effectiveness of our proposed method. 1 Scarceness of parallel corpora also leads to the low coverage of the translation model (which we call the coverage problem of SMT), however we do not tackle this in this paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In statistical machine translation (SMT) (Brown et al., 1993) , the translation model is automatically learned form parallel corpora in an unsupervised way. The translation model contains translation pairs with their features scores. SMT suffers from the accuracy problem that the translation model may be inaccurate, meaning that the translation pairs and their features scores may be inaccurate. The accuracy problem is caused by the quality of the unsupervised method used for translation model learning, which always correlates with the amount of parallel corpora. Increasing the amount of parallel corpora is a possible way to improve the accuracy, however parallel corpora remain a scarce resource for most language pairs and domains. 1 Accuracy also can be improved by filtering out the noisy translation pairs from the translation model, however meanwhile we may lose some good translation pairs, thus the coverage of the translation model may decrease. A good solution to improve the accuracy while keeping the coverage is estimating new features for the translation pairs from comparable corpora (which we call comparable features), to make the translation model more discriminative thus more accurate.", |
|
"cite_spans": [ |
|
{ |
|
"start": 41, |
|
"end": 61, |
|
"text": "(Brown et al., 1993)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Previous studies use bilingual lexicon extraction (BLE) technology to estimate comparable features (Klementiev et al., 2012; Irvine and Callison-Burch, 2013a) . They extend traditional BLE that estimates similarity for bilingual word pairs on comparable corpora, to translation pairs in the translation model of SMT. The similarity scores of the translation pairs are used as comparable features. These comparable features are combined with the original features used in SMT, which can provide additional information to distinguish good and bad translation pairs. A major problem of previous studies is that they do not deal with the data sparseness problem that BLE suffers from. BLE uses vector representations for word PACLIC 28 ! 263 pairs to compare the similarity between them. Data sparseness makes the vector representations sparse (e.g., the vector of a low frequent word tends to have many zero entries), thus they do not always reliably represent the meanings of words. Therefore, the similarity of word pairs can be inaccurate. Smoothing technology has been proposed to address the data sparseness problem for BLE. Pekar et al. (2006) smooth the vectors of words with their distributional nearest neighbors, however distributional nearest neighbors can have different meanings and thus introduce noise. Andrade et al. (2013) use synonym sets in WordNet to smooth the vectors of words, however WordNet is not available for every language. More importantly, both studies work for words, which are not suitable for comparable feature estimation. The reason is that translation pairs can also be phrases (Koehn et al., 2003) or syntactic rules (Galley et al., 2004) etc., depending on what kind of SMT models we use.", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 124, |
|
"text": "(Klementiev et al., 2012;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 125, |
|
"end": 158, |
|
"text": "Irvine and Callison-Burch, 2013a)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1127, |
|
"end": 1146, |
|
"text": "Pekar et al. (2006)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1315, |
|
"end": 1336, |
|
"text": "Andrade et al. (2013)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1612, |
|
"end": 1632, |
|
"text": "(Koehn et al., 2003)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1652, |
|
"end": 1673, |
|
"text": "(Galley et al., 2004)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose using paraphrases to address the data sparseness problem of BLE for comparable feature estimation. A paraphrase is a restatement of the meaning of a word, phrase or syntactic rule etc., therefore it is suitable for the data sparseness problem. We generate paraphrases from the parallel corpus used for translation model learning. Then, we use the paraphrases to smooth the vectors of the translation pairs in the translation model for comparable feature estimation. Smoothing is done by learning vectors that combine the vectors of the original translation pairs with the vectors of their paraphrases. The smoothed vectors can overcome the data sparseness problem, making the vectors more accurately represent the meanings of the translation pairs. In this way, we improve the quality of comparable features, which can improve the accuracy of the translation model thus improve SMT performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We conduct experiments on Chinese-English Phrase-based SMT (PBSMT) (Koehn et al., 2003) . 2 Experimental results show that our proposed method can improve SMT performance, compared to the previous studies that estimate comparable features without dealing with the data sparseness problem of BLE (Klementiev et al., 2012; Irvine and Callison-Burch, 2013a) . The results verify the effectiveness of using BLE together with paraphrases for the accuracy problem of SMT.", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 87, |
|
"text": "(Koehn et al., 2003)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 90, |
|
"end": 91, |
|
"text": "2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 295, |
|
"end": 320, |
|
"text": "(Klementiev et al., 2012;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 321, |
|
"end": 354, |
|
"text": "Irvine and Callison-Burch, 2013a)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "From the pioneering work of (Rapp, 1995) , BLE from comparable corpora has been studied for a long time. BLE is based on the distributional hypothesis (Harris, 1954) , stating that words with similar meaning have similar distributions across languages. Contextual similarity (Rapp, 1995) , topical similarity (Vuli\u0107 et al., 2011) and temporal similarity (Klementiev and Roth, 2006) can be important clues for BLE. Orthographic similarity may also be used for BLE for some similar language pairs (Koehn and Knight, 2002) . Moreover, some studies try to use the combinations of different similarities for BLE (Irvine and Callison-Burch, 2013b; Chu et al., 2014) . To address the data sparseness problem of BLE, smoothing technology has been proposed (Pekar et al., 2006; Andrade et al., 2013) . BLE can be used to address the accuracy problem of SMT, which estimates comparable features for the translation pairs in the translation model (Klementiev et al., 2012) . BLE also can be used to address the coverage problem of SMT, which mines translations for the unknown words or phrases in the translation model from comparable corpora (Daume III and Jagarlamudi, 2011; Irvine et al., 2013) . Moreover, studies have been conducted to address the accuracy and coverage problems of SMT simultaneously with BLE (Irvine and Callison-Burch, 2013a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 40, |
|
"text": "(Rapp, 1995)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 151, |
|
"end": 165, |
|
"text": "(Harris, 1954)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 275, |
|
"end": 287, |
|
"text": "(Rapp, 1995)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 329, |
|
"text": "(Vuli\u0107 et al., 2011)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 354, |
|
"end": 381, |
|
"text": "(Klementiev and Roth, 2006)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 495, |
|
"end": 519, |
|
"text": "(Koehn and Knight, 2002)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 607, |
|
"end": 641, |
|
"text": "(Irvine and Callison-Burch, 2013b;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 642, |
|
"end": 659, |
|
"text": "Chu et al., 2014)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 748, |
|
"end": 768, |
|
"text": "(Pekar et al., 2006;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 769, |
|
"end": 790, |
|
"text": "Andrade et al., 2013)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 936, |
|
"end": 961, |
|
"text": "(Klementiev et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1132, |
|
"end": 1165, |
|
"text": "(Daume III and Jagarlamudi, 2011;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1166, |
|
"end": 1186, |
|
"text": "Irvine et al., 2013)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1304, |
|
"end": 1338, |
|
"text": "(Irvine and Callison-Burch, 2013a)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bilingual Lexicon Extraction (BLE) for SMT", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Our study focuses on addressing the accuracy problem of SMT with BLE. We use paraphrases to address the data sparseness problem of BLE for comparable feature estimation, which makes the comparable features more accurate.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bilingual Lexicon Extraction (BLE) for SMT", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Many methods have been proposed to use paraphrases for SMT, mainly for the coverage problem. One method is paraphrasing unknown words or phrases in the translation model (Callison-Burch et al., 2006; Razmara et al., 2013; Marton et al., 2009) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 199, |
|
"text": "(Callison-Burch et al., 2006;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 221, |
|
"text": "Razmara et al., 2013;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 222, |
|
"end": 242, |
|
"text": "Marton et al., 2009)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrases for SMT", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "PACLIC 28 ! 264 f e \u03c6(f |e) lex(f |e) \u03c6(e|f ) lex(e|f ) Alignment \u4e1a unemployment figures 0.3 0.0037 0.0769 0.0018 0-0 1-1 \u4e1a number of unemployed 0.1333 0.0188 0.1025 0.0041 1-0 1-1 0-2 \u4e1a", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrases for SMT", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": ". unemployment was 0.3333 0.0015 0.0256 6.8e-06 0-1 1-1 1-2 \u4e1a unemployment and bringing 1 0.0029 0.0256 5.4e-07 0-0 1-0 Table 1 : An example of the accuracy problem in PBSMT. The correct translations of \" \u4e1a (unemployment) (number of people)\" are in bold. The incorrect phrase pairs are extracted because \" (number of people)\" is incorrectly aligned to \"unemployment\", and their feature scores are incorrect.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 127, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Paraphrases for SMT", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Another method is constructing a paraphrase lattice for the tuning and testing data, and performing lattice decoding (Du et al., 2010; Bar and Dershowitz, 2014) . Paraphrases also can be incorporated as additional training data, which may improve both coverage and accuracy of SMT (Pal et al., 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 134, |
|
"text": "(Du et al., 2010;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 135, |
|
"end": 160, |
|
"text": "Bar and Dershowitz, 2014)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 281, |
|
"end": 299, |
|
"text": "(Pal et al., 2014)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrases for SMT", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Previous studies require external data in addition to the parallel corpus used for SMT for paraphrase generation to make their methods effective. These paraphrases can be generated from external parallel corpora (Callison-Burch et al., 2006; Du et al., 2010) , or monolingual corpora based on distributional similarity (Marton et al., 2009; Razmara et al., 2013; Pal et al., 2014; Bar and Dershowitz, 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 212, |
|
"end": 241, |
|
"text": "(Callison-Burch et al., 2006;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 242, |
|
"end": 258, |
|
"text": "Du et al., 2010)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 319, |
|
"end": 340, |
|
"text": "(Marton et al., 2009;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 362, |
|
"text": "Razmara et al., 2013;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 363, |
|
"end": 380, |
|
"text": "Pal et al., 2014;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 381, |
|
"end": 406, |
|
"text": "Bar and Dershowitz, 2014)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrases for SMT", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Our study differs from previous studies in using paraphrases for smoothing the vectors of BLE, which is used for comparable feature estimation that can improve the accuracy of SMT. Another difference is that our proposed method is effective when only using the paraphrases generated from the parallel corpus used for SMT, while previous studies require external data for paraphrase generation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrases for SMT", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In this paper, we conduct experiments on PBSMT (Koehn et al., 2003) . Here, we give a brief overview of PBSMT, and explain the accuracy problem of PB-SMT.", |
|
"cite_spans": [ |
|
{ |
|
"start": 47, |
|
"end": 67, |
|
"text": "(Koehn et al., 2003)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Accuracy Problem of Phrase-based SMT (PBSMT)", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In PBSMT, the translation model is represented as a phrase table, containing phrase pairs together with their feature scores. 3 The phrase pairs are extracted based on unsupervised word alignments, whose quality always correlates with the amount of the parallel corpus. Inverse and direct phrase translation probabilities \u03c6(f |e) and \u03c6(e|f ), inverse and direct lexical weighting lex(f |e) and lex(e|f ) are used as features for the phrase table. Phrase translation probabilities are calculated via maximum likelihood estimation, which counts how often a source phrase f is aligned to target phrase e in the parallel corpus, and vise versa. Lexical weighting is the average word translation probability calculated using internal word alignments of a phrase pair, which is used to smooth the overestimation of the phrase translation probabilities. Other typical features such as the reordering model features and the n-gram language model features are also used in PBSMT. These features are combined in a log linear model, and their weights are tuned using a small size of parallel sentences. During decoding, these features together with their tuned weights are used to produce new translations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 127, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Accuracy Problem of Phrase-based SMT (PBSMT)", |
|
"sec_num": "3" |
|
}, |
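
{

"text": "To make this concrete, the following is a minimal Python sketch (our own illustration, not the Moses implementation; the function name and input format are assumptions) of how the direct phrase translation probability \u03c6(e|f) is obtained by maximum likelihood estimation from extracted phrase-pair counts:\n\nfrom collections import Counter\n\ndef phrase_translation_probs(extracted_pairs):\n    # extracted_pairs: list of (f, e) phrase pairs, one entry per\n    # extraction from the word-aligned parallel corpus.\n    pairs = list(extracted_pairs)\n    pair_counts = Counter(pairs)\n    f_counts = Counter(f for f, _ in pairs)\n    # Maximum likelihood estimate: phi(e|f) = count(f, e) / count(f).\n    return {(f, e): c / f_counts[f] for (f, e), c in pair_counts.items()}\n\nThe inverse probability \u03c6(f|e) is obtained symmetrically by conditioning on the target phrase e.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Accuracy Problem of Phrase-based SMT (PBSMT)",

"sec_num": null

},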
|
{ |
|
"text": "One problem of PBSMT is that the phrase pairs and their feature scores in the phrase table may be inaccurate. One reason for this is the quality of the word alignment. Another reason is that the translation probabilities of rare word and phrase pairs tend to be grossly overestimated. Sparseness of the parallel corpus leads to word alignment errors and overestimations, which result in inaccurate phrase pairs and feature scores. Table 1 shows an example of phrase pairs and feature scores taken from the phrase table constructed in our experiments (See Section 5 for the details of the experiments), which contains inaccurate phrase pairs. Figure 1 shows an overview of our proposed method. We construct a phrase table from a parallel corpus following (Koehn et al., 2003) . Because this phrase table may be inaccurate, we estimate comparable features from comparable corpora following (Klementiev et al., 2012; Irvine and Callison-Burch, 2013a) . These comparable features are appended to the original phrase table, to address the accuracy PACLIC 28", |
|
"cite_spans": [ |
|
{ |
|
"start": 754, |
|
"end": 774, |
|
"text": "(Koehn et al., 2003)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 888, |
|
"end": 913, |
|
"text": "(Klementiev et al., 2012;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 914, |
|
"end": 947, |
|
"text": "Irvine and Callison-Burch, 2013a)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 431, |
|
"end": 438, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 642, |
|
"end": 650, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Accuracy Problem of Phrase-based SMT (PBSMT)", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "! 265 !\"#$%&%'()* +\"&$\"&%! , -*...*) -*...*/0, -.) -1**()20, -.) -1*3** , 4 *...*) 4 *...*/0, 4 .) 4 1**()20, 4 .) 4 1*3** , 5 *...*) 5 *...*/0, 5 .) 5 1**()20, 5 .) 5 1*3** \"#\"*\"* 6%&%(()(*+\"&$78* 9-0, -:) -1**940, -:) -1*3** 9-0, 4 :) 4 1**940, 4 :) 4 1*3** 9-0, 5 :) 5 1**940, 5 :) 5 1*3** \"#\"*\"* , -*...*, 4 *...*$0, -., 4 1* , -*...*, 5 *...*$0, -., 5 1* \"#\"*\"* 6;&%8)*<%'()* 6%&%$;&%8)! ) -*...*) 4 *...*$0) -.) 4 1* ) -*...*) 5 *...*$0) -.) 5 1* \"#\"*\"*", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "=>?@'%8)A*+\"#$%&%'()* ,)%<7&)*)8B#%B\"C* 0D)+<\"&*8#\"\"<;ECF* GE<;*$%&%$;&%8)81* problem of PBSMT. Comparable feature estimation is based on BLE, which suffers from the data sparseness problem. We propose using paraphrases to address this problem. We generate phrasal level paraphrases for both the source and target language from the parallel corpus. Then we use the paraphrases to smooth the vectors of the source and target phrases used for comparable feature estimation respectively. Smoothing is done by learning a vector that combines the original vector of a phrase with the vectors of its paraphrases. The smoothed vectors can represent the meanings of phrase pairs more accurately. Finally, we compute the similarity of phrase pairs based on the smoothed source and target vectors. In this way, we improve the quality of comparable features, which can improve the accuracy of the phrase table thus improve SMT performance. Details of paraphrase generation, comparable feature estimation and vector smoothing with paraphrases will be described in Section 4.1, 4.2 and 4.3 respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In this paper, we generate both source and target phrasal level paraphrases from the parallel corpus used for SMT 4 through bilingual pivoting (Bannard and Callison-Burch, 2005) . The idea of this method is that if two source phrases f 1 and f 2 are translated to the same target phrase e, we can assume that f 1 and f 2 are a paraphrase pair. Probability of this paraphrase pair can be assigned by marginalizing over 4 Paraphrases also can be generated from external parallel corpora and monolingual corpora, however we leave it as future work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 177, |
|
"text": "(Bannard and Callison-Burch, 2005)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 418, |
|
"end": 419, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase Generation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "all shared target translations e in the parallel corpus, defined as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase Generation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(f 1 |f 2 ) = e \u03c6(f 1 |e)\u03c6(e|f 2 )", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Paraphrase Generation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "where, \u03c6(f 1 |e) and \u03c6(e|f 2 ) are phrase translation probability. Target paraphrases can be generated in a similar way. Note that word alignment errors can also lead to incorrect paraphrase generation. For example, \"unemployment figures\" and \"unemployment and bringing\" in Table 1 might be generated as a paraphrase pair. However, this kind of noisy pairs can be easily pruned according to their low probabilities.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 274, |
|
"end": 281, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Paraphrase Generation", |
|
"sec_num": "4.1" |
|
}, |
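
{

"text": "As a concrete illustration of Equation (1), the following minimal Python sketch (our own illustration, not the Joshua implementation; the variable names and table formats are assumptions) pivots through shared target phrases to obtain paraphrase probabilities:\n\nfrom collections import defaultdict\n\ndef pivot_paraphrases(phi_f_given_e, phi_e_given_f):\n    # phi_f_given_e[(f, e)] = phi(f|e); phi_e_given_f[(e, f)] = phi(e|f).\n    # Both tables come from the phrase table of the parallel corpus.\n    by_pivot = defaultdict(list)\n    for (f1, e), p in phi_f_given_e.items():\n        by_pivot[e].append((f1, p))  # group phi(f1|e) by the pivot e\n    para = defaultdict(float)\n    for (e, f2), p_e_f2 in phi_e_given_f.items():\n        for f1, p_f1_e in by_pivot[e]:\n            # Equation (1): p(f1|f2) = sum_e phi(f1|e) * phi(e|f2)\n            para[(f1, f2)] += p_f1_e * p_e_f2\n    # Drop trivial self-paraphrases; low-probability pairs can be\n    # pruned afterwards (cf. the log-probability threshold in Section 5).\n    return {pair: p for pair, p in para.items() if pair[0] != pair[1]}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Paraphrase Generation",

"sec_num": null

},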
|
{ |
|
"text": "Following (Klementiev et al., 2012 ; Irvine and Callison-Burch, 2013a), we estimate contextual, topical and temporal similarities as comparable features. However, we do not use orthographic similarity as comparable feature, because we experiment on Chinese-English, which is not an orthographically similar language pair.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 34, |
|
"text": "(Klementiev et al., 2012", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparable Feature Estimation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Besides phrasal features, we also estimate lexical features following (Klementiev et al., 2012; Irvine and Callison-Burch, 2013a) . The lexical features are the average similarity scores of word pairs over all possible word alignments across two phrases. They are used to smooth the phrasal features, like the lexical weighting in PBSMT. However, they only can slightly alleviate the sparseness of phrasal features, because individual words also suffer from the data sparseness problem.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 95, |
|
"text": "(Klementiev et al., 2012;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 96, |
|
"end": 129, |
|
"text": "Irvine and Callison-Burch, 2013a)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparable Feature Estimation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In the following sections, we describe the meth-ods to estimate contextual, topical and temporal features in detail.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparable Feature Estimation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Contextual feature is the contextual similarity of a phrase pair. Contextual similarity is based on the distributional hypothesis on context, stating that phrases with similar meaning appear in similar contexts across languages. From the pioneering work of (Rapp, 1995) , contextual similarity has been used for BLE for a long time.", |
|
"cite_spans": [ |
|
{ |
|
"start": 257, |
|
"end": 269, |
|
"text": "(Rapp, 1995)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual feature", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the literature, different definitions of context have been proposed for BLE, such as window-based context, sentence-based context and syntax-based context etc. In this paper, we use window-based context, and leave the comparison of using different definitions of context as future work. Given a phrase, we count all its immediate context words, with a window size of 4 (2 preceding words and 2 following words). We build a context by collecting the counts in a bag of words fashion, namely we do not distinguish the positions that the context words appear in. The number of dimensions of the constructed vector is equal to the vocabulary size. We further reweight each component in the vector by multiplying by the IDF score following (Garera et al., 2009; Chu et al., 2014) , which is defined as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 738, |
|
"end": 759, |
|
"text": "(Garera et al., 2009;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 760, |
|
"end": 777, |
|
"text": "Chu et al., 2014)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual feature", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "IDF (t, D) = log |D| 1 + |{d \u2208 D : t \u2208 d}| (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual feature", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where |D| is the total number of documents in the corpus, and |{d \u2208 D : t \u2208 d}| denotes number of documents where the term t appears. 5 We model the source and target vectors using the method described above, and project the source vector onto the vector space of the target language using a seed dictionary. The contextual similarity of the phrase pair is the similarity of the vectors, which is computed using cosine similarity defined as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 135, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual feature", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Cos(f, e) = K k=1 F k \u00d7 E k K k=1 (F k ) 2 \u00d7 K k=1 (E k ) 2", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Contextual feature", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where f and e are the source and target phrases, F and E are the projected source vector and target vector, K is the number of dimensions of the vectors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual feature", |
|
"sec_num": null |
|
}, |
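
{

"text": "A minimal sketch of this contextual similarity computation (our own illustration; the assumptions are that the corpus is a list of token lists and that seed_dict maps source words to target words):\n\nimport math\nfrom collections import Counter\n\ndef context_vector(corpus, phrase, window=2):\n    # Bag-of-words counts of the words within 2 positions on either\n    # side of each occurrence of the phrase (window size 4 in total).\n    n = len(phrase)\n    vec = Counter()\n    for sent in corpus:\n        for i in range(len(sent) - n + 1):\n            if sent[i:i + n] == phrase:\n                vec.update(sent[max(0, i - window):i])\n                vec.update(sent[i + n:i + n + window])\n    return vec\n\ndef idf_reweight(vec, idf):\n    # Multiply each component by its IDF score (Equation 2).\n    return {w: c * idf.get(w, 0.0) for w, c in vec.items()}\n\ndef project(src_vec, seed_dict):\n    # Project the source vector onto the target vocabulary using the\n    # seed dictionary; words missing from the dictionary are dropped.\n    proj = Counter()\n    for w, v in src_vec.items():\n        if w in seed_dict:\n            proj[seed_dict[w]] += v\n    return proj\n\ndef cosine(u, v):\n    # Cosine similarity of two sparse vectors (Equation 3).\n    dot = sum(x * v.get(w, 0.0) for w, x in u.items())\n    nu = math.sqrt(sum(x * x for x in u.values()))\n    nv = math.sqrt(sum(x * x for x in v.values()))\n    return dot / (nu * nv) if nu and nv else 0.0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Contextual feature",

"sec_num": null

},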
|
{ |
|
"text": "Topical feature is the topical similarity of a phrase pair. Topical similarity uses the distributional hy-pothesis on topics, stating that two phrases are potential translation candidates if they are often present in the same cross-lingual topics and not observed in other cross-lingual topics (Vuli\u0107 et al., 2011) . Vuli\u0107 et al. (2011) propose using bilingual topic model based method to estimate topical similarity. However, this method is not scalable for large data sets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 294, |
|
"end": 314, |
|
"text": "(Vuli\u0107 et al., 2011)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 317, |
|
"end": 336, |
|
"text": "Vuli\u0107 et al. (2011)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topical feature", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this paper, we estimate topical feature in a scalable way following (Klementiev et al., 2012) . We treat an article pair aligned by interlanguage links in Wikipedia as a topic aligned pair. For a phrase pair, we build source and target topical occurrence vectors by counting their occurrences in its corresponding language articles. The number of dimensions of the constructed vector is equal to the number of aligned article pairs, and each dimension is the number of times that the phrase appears in the corresponding article. The similarity of the phrase pair is computed as the similarity of the source and target vectors using cosine similarity (Equation 3).", |
|
"cite_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 96, |
|
"text": "(Klementiev et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topical feature", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Temporal feature is the temporal similarity of a phrase pair. The intuition of temporal similarity is that news stories across languages tend to discuss the same world events on the same day, and the occurrences of a translated phrase pair over time tend to spike on the same dates (Klementiev and Roth, 2006; Klementiev et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 282, |
|
"end": 309, |
|
"text": "(Klementiev and Roth, 2006;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 310, |
|
"end": 334, |
|
"text": "Klementiev et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Temporal feature", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We estimate temporal feature following (Klementiev and Roth, 2006; Klementiev et al., 2012) . For a phrase pair, we build source and target temporal occurrence vectors by counting their occurrences in equally sized temporal bins, which are sorted from the set of time-stamped documents in the comparable corpus. We set the window size of a bin to 1 day. Therefore the number of dimensions of the constructed vector is equal to the number of days spanned by the corpus, and each dimension is the number of times that the phrase appears in the corresponding bin. The similarity of the phrase pair is computed as the similarity of the source and target vectors using cosine similarity (Equation 3).", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 66, |
|
"text": "Roth, 2006;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 67, |
|
"end": 91, |
|
"text": "Klementiev et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Temporal feature", |
|
"sec_num": null |
|
}, |
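
{

"text": "The topical and temporal occurrence vectors reduce to the same counting scheme; a minimal sketch (our own illustration), where bins is a list of aligned Wikipedia articles or of one-day batches of news text, each given as a token list (an assumption about the input format):\n\ndef occurrence_vector(phrase, bins):\n    # One dimension per bin (aligned article pair or one-day bin);\n    # each entry is the number of occurrences of the phrase in that bin.\n    n = len(phrase)\n    return [sum(1 for i in range(len(tokens) - n + 1)\n                if tokens[i:i + n] == phrase)\n            for tokens in bins]\n\nThe topical or temporal feature of a phrase pair is then the cosine similarity (Equation 3) of the source and target occurrence vectors built over the aligned bins.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Temporal feature",

"sec_num": null

},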
|
{ |
|
"text": "Data sparseness results in sparse representations of the vectors, therefore the similarity of the phrase pair can be inaccurate. We propose using paraphrases to PACLIC 28 ! 267 Phrase Paraphrase tampered being tampered an appropriation appropriation 11th 11th . so many years many years first thing first thing that mass media , media , Table 2 : Examples of overlaps between a phrase and its paraphrase.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 177, |
|
"end": 260, |
|
"text": "Phrase Paraphrase tampered being tampered an appropriation appropriation 11th", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 343, |
|
"end": 350, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Vector Smoothing with Paraphrases", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "smooth both the source and target vectors, to deal with the data sparseness problem. After smoothing, the vectors can more accurately represent the phrases. We compute the similarity of the phrase pair based on the smoothed source and target vectors, and use it as comparable features for PBSMT. One problem of using paraphrases for smoothing is that a phrase and its paraphrase may overlap. Table 2 shows some examples of overlaps between a phrase and its paraphrase generated from the parallel corpus we use. The vector of the overlapped paraphrase contains overlapped information of the vector of the original phrase. Therefore, it is necessary to consider overlap when using paraphrases for vector smoothing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Vector Smoothing with Paraphrases", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "There are three types of vectors (context, topical and temporal occurrence vectors) need to be smoothed. The method for smoothing context vector is different from topical and temporal occurrence vectors, because the components in context vector are different. Topical and temporal occurrence vectors can be smoothed using the same method, because the components of both vectors are occurrence information. The following sections describe the methods to smooth the context vector, and topical and temporal occurrence vectors respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Vector Smoothing with Paraphrases", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We smooth the context vector of a phrase x with the following equation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Vector Smoothing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "X \u2032 = f (x) f (x) + n j=1 f (x j ) \u2022X+ n i=1 f (x i ) f (x) + n j=1 f (x j ) \u2022 p(x i |x) \u2022 \u23a7 \u23aa \u23a8 \u23aa \u23a9 X i \\X (x \u2282 x i ) X i \u2212 X (x \u2283 x i ) X i (otherwise)", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Context Vector Smoothing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where X \u2032 is the smoothed context vector, X is the context vector of x, n is the number of paraphrases that x has, X i is the context vector of paraphrase x i , p(x i |x) is the probability that x i is a paraphrase of x. f (x) is the frequency of x in the corpus, and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Vector Smoothing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "f (x) f (x)+ n j=1 f (x j )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Vector Smoothing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "is the frequency weight for x. Frequency weight is also used for the paraphrases in a similar way. The frequency weight is proposed by Andrade et al. (2013) when using synonyms to smooth the context vector of a word. They show that using the frequency information of words as weights performs better than simple summation of the vectors. For the overlap problem between x and x i , we do the following:", |
|
"cite_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 156, |
|
"text": "Andrade et al. (2013)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Vector Smoothing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 If x \u2282 x i namely x is contained in x i , we use", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Vector Smoothing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "the context words that exist in X i but do not exist in X for smoothing, which is X i \\X;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Vector Smoothing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 If x \u2283 x i namely x contains x i , we remove the overlapped contextual information between X i and X for smoothing, which is X i \u2212 X;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Vector Smoothing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Otherwise, we use X i for smoothing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Vector Smoothing", |
|
"sec_num": null |
|
}, |
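
{

"text": "A minimal sketch of Equation (4) with the three overlap cases (our own illustration; the assumptions are that vectors are sparse dicts and that paraphrase information is passed as tuples):\n\ndef is_subphrase(a, b):\n    # True if token sequence a occurs inside token sequence b.\n    return any(b[i:i + len(a)] == a for i in range(len(b) - len(a) + 1))\n\ndef smooth_context_vector(x, X, freq_x, paraphrases):\n    # paraphrases: list of (x_i, p(x_i|x), X_i, f(x_i)) tuples, where\n    # x and x_i are token lists and X, X_i are sparse context vectors.\n    total = freq_x + sum(f_i for _, _, _, f_i in paraphrases)\n    smoothed = {w: (freq_x / total) * v for w, v in X.items()}\n    for x_i, p_i, X_i, f_i in paraphrases:\n        if is_subphrase(x, x_i):  # x contained in x_i: use X_i \\ X\n            contrib = {w: v for w, v in X_i.items() if w not in X}\n        elif is_subphrase(x_i, x):  # x contains x_i: use X_i - X\n            contrib = {w: v - X.get(w, 0.0) for w, v in X_i.items()}\n        else:  # no overlap: use X_i as is\n            contrib = X_i\n        weight = (f_i / total) * p_i\n        for w, v in contrib.items():\n            smoothed[w] = smoothed.get(w, 0.0) + weight * v\n    return smoothed",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Context Vector Smoothing",

"sec_num": null

},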
|
{ |
|
"text": "We smooth the topical and temporal occurrence vectors of a phrase x with the following equation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topical and Temporal Occurrence Vectors Smoothing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "X \u2032 = X + n i=1 p(x i |x) \u2022 \u23a7 \u23aa \u23a8 \u23aa \u23a9 0 (x \u2282 x i ) X i \u2212 X (x \u2283 x i ) X i (otherwise)", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Topical and Temporal Occurrence Vectors Smoothing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where X \u2032 is the smoothed occurrence vector, X is the occurrence vector of x, n is the number of paraphrases that x has, X i is the occurrence vector of paraphrase x i , p(x i |x) is the probability that x i is a paraphrase of x. For the overlap problem between x and x i , we do the following:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topical and Temporal Occurrence Vectors Smoothing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 If x \u2282 x i , we do not use X i for smoothing, because X already contains the occurrence information in X i ;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topical and Temporal Occurrence Vectors Smoothing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 If x \u2283 x i , we remove the overlapped occurrence information between X i and X for smoothing, which is X i \u2212 X;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topical and Temporal Occurrence Vectors Smoothing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Otherwise, we use X i for smoothing. Examples of the three types of vectors before and after smoothing are shown in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 125, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Topical and Temporal Occurrence Vectors Smoothing", |
|
"sec_num": null |
|
}, |
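
{

"text": "Equation (5) admits an analogous sketch for the dense occurrence vectors (our own illustration, reusing is_subphrase from the previous sketch):\n\ndef smooth_occurrence_vector(x, X, paraphrases):\n    # X: occurrence vector (one entry per bin);\n    # paraphrases: list of (x_i, p(x_i|x), X_i) tuples.\n    smoothed = list(X)\n    for x_i, p_i, X_i in paraphrases:\n        if is_subphrase(x, x_i):\n            continue  # x in x_i: X already counts these occurrences\n        if is_subphrase(x_i, x):\n            contrib = [v - u for v, u in zip(X_i, X)]  # X_i - X\n        else:\n            contrib = X_i\n        smoothed = [s + p_i * v for s, v in zip(smoothed, contrib)]\n    return smoothed",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Topical and Temporal Occurrence Vectors Smoothing",

"sec_num": null

},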
|
{ |
|
"text": "Before smoothing After smoothing Context <rising: 2.37, economic: 0, recession: 3.94 ><rising: 0.03, economic: 0.06, recession: 0.04 > Topical <Topic1: 0, Topic2: 1, Topic3: 0 > <Topic1: 0.12, Topic2: 1.27, Topic3: 0.05 > Temporal <Date1: 1, Date2: 0, Date3: 6 > <Date1: 1.25, Date2: 0.08, Date3: 6.38 > Table 3 : Examples of the three types of vectors for the phrase \"unemployment figures\" before and after smoothing.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 304, |
|
"end": 311, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "PACLIC 28 ! 268", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our experiments, we compared our proposed method with (Klementiev et al., 2012) . We estimated comparable features from comparable corpora using the method of (Klementiev et al., 2012) and our proposed method respectively. We appended the comparable features to the phrase table, and evaluated the two methods in the perspective of SMT performance. We conducted experiments on Chinese-English data. In all our experiments, we preprocessed the data by segmenting Chinese sentences using a segmenter proposed by Chu et al. (2012) , and tokenizing English sentences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 82, |
|
"text": "(Klementiev et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 162, |
|
"end": 187, |
|
"text": "(Klementiev et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 513, |
|
"end": 530, |
|
"text": "Chu et al. (2012)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We conducted Chinese-to-English translation experiments. The parallel corpus we used is from Chinese-English NIST open MT. 6 The \"NIST\" column of Table 4 shows the statistics of this parallel corpus. For decoding, we used the state-of-theart PBSMT toolkit Moses (Koehn et al., 2007) with default options, except for the phrase length limit (7\u21923) following (Klementiev et al., 2012) . We trained a 5-gram language model on the English side of the parallel corpus using the SRILM toolkit 7 with interpolated Kneser-Ney discounting, and used it for all the experiments. We used NIST open MT 2002 and 2003 data sets for tuning and testing, containing 878 and 919 sentence pairs respectively. Note that both MT 2002 and 2003 data sets contain 4 references for each Chinese sentence. Tuning was performed by minimum error rate training (MERT) (Och, 2003) , and it was re-run for every experiment. textual feature was estimated on the parallel corpus. We treated the two sides of the parallel corpus as independent monolingual corpora, following (Haghighi et al., 2008; Klementiev et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 124, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 262, |
|
"end": 282, |
|
"text": "(Koehn et al., 2007)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 356, |
|
"end": 381, |
|
"text": "(Klementiev et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 837, |
|
"end": 848, |
|
"text": "(Och, 2003)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1039, |
|
"end": 1062, |
|
"text": "(Haghighi et al., 2008;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1063, |
|
"end": 1087, |
|
"text": "Klementiev et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 146, |
|
"end": 153, |
|
"text": "Table 4", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "SMT Settings", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Contextual feature estimation requires a seed dictionary. The seed dictionary we used is NIST Chinese-English translation lexicon Version 3.0, 8 containing 82k entries. The temporal feature was estimated on Chinese 9 and English 10 Gigaword version 5.0. We used the afp, cna and xin sections with date range 1994/05-2010/12 of the corpora. The topical feature was estimated on Chinese and English Wikipedia data. We downloaded Chinese 11 (2012/09/21) and English 12 (2012/10/01) Wikipedia database dumps.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparable Feature Estimation Settings", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We used an open-source Python script 13 to extract and clean the text from the dumps. We aligned the articles on the same topic in Chinese-English Wikipedia via the interlanguage links. We estimated comparable features for the unique phrase pairs used for tuning and testing. These phrase pairs were extracted from the entire phrase table constructed from the parallel corpus, by checking all the source phrases in the tuning and testing data sets. We call these phrase pairs the filtered phrase table. Table 5 shows the statistics of the fil- Table 6 : Statistics the generated paraphrases for the phrases and individual words inside the phrases in the filtered phrase table.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 503, |
|
"end": 510, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 544, |
|
"end": 551, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparable Feature Estimation Settings", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "tered phrase table. We can see that each Chinese phrase has a large number of translations on average especially for the lower order n-gram phrases, which can indicate the inaccuracy of the filtered phrase table.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparable Feature Estimation Settings", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our proposed method requires paraphrases for vector smoothing. We used Joshua (Ganitkevitch et al., 2012) to generate both Chinese and English paraphrases from the parallel corpus. We kept the paraphrase pairs that satisfy logp(x 1 |x 2 ) > \u22127 and logp(x 2 |x 1 ) > \u22127 14 for smoothing, where p(x 1 |x 2 ) is the probability that x 1 is a paraphrase of x 2 , and p(x 2 |x 1 ) is the probability that x 2 is a paraphrase of x 1 . Table 6 shows the statistics of the paraphrase generation results for the Chinese and English phrases, and individual words inside the phrases in the filtered phrase table.", |
|
"cite_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 105, |
|
"text": "(Ganitkevitch et al., 2012)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 429, |
|
"end": 436, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparable Feature Estimation Settings", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that, for some phrase pairs, their comparable feature scores may be 0, because of data sparseness. In that case, we set their comparable features to a small positive number of 1e \u2212 07. Table 7 : BLEU-4 scores for Chinese-to-English translation experiments (\" \u2020\" and \" \u2021\" denote that the result is significantly better than \"Baseline\" at p < 0.01 and \"Kle-mentiev+\" at p < 0.05 respectively)", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 197, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparable Feature Estimation Settings", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We report results on the test set using caseinsensitive BLEU-4 score and four references. Table 7 shows the results of Chinese-to-English translation experiments. \"Baseline\" denotes the baseline system that does not use comparable features. \"Klementiev+\" denotes the system that appends the comparable features estimated following (Klementiev et al., 2012) to the phrase table. \"Proposed\" denotes the system that uses the comparable features estimated by our proposed method. \"+Contextual\", \"+Topical\" and \"+Temporal\" denote the systems that append contextual, topical and temporal features respectively. \"+All\" denotes the system that appends all the three types of features. The significance test was performed using the bootstrap resampling method proposed by Koehn (2004) . We can see that \"Klementiev+\" does not always outperform \"Baseline\". The reason for this is that the comparable features estimated by (Klementiev et al., 2012) are inaccurate. \"Proposed\" performs significantly better than both \"Baseline\" and \"Kle-mentiev+\". The reason for this is that \"Proposed\" deals with the data sparseness problem of BLE for comparable feature estimation, making the features more accurate thus improve the SMT performance. As for different comparable features of \"Proposed\", \"+Contextual\", \"+Topical\" and \"+Temporal\" are all helpful, and combining them can be more effective. The results verify the effectiveness of our proposed method for the accuracy problem of PBSMT.", |
|
"cite_spans": [ |
|
{ |
|
"start": 331, |
|
"end": 356, |
|
"text": "(Klementiev et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 763, |
|
"end": 775, |
|
"text": "Koehn (2004)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 912, |
|
"end": 937, |
|
"text": "(Klementiev et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 97, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We also investigated the comparable features estimated by the method of (Klementiev et al., 2012) and our proposed method. Based on our investigation, most comparable features estimated by our proposed method are more accurate than the ones estimated by the method of (Klementiev et al., 2012 Table 8 : Examples of comparable feature scores estimated by the method of (Klementiev et al., 2012) (above the bold line) and our proposed method (below the bold line) for the phrase pairs shown in Table 1 (\"con\", \"top\" and \"tem\" denote phrasal contextual, topical and temporal features respectively, \"con lex\", \"top lex\" and \"tem lex\" denote lexical contextual, topical and temporal features respectively).", |
|
"cite_spans": [ |
|
{ |
|
"start": 72, |
|
"end": 97, |
|
"text": "(Klementiev et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 268, |
|
"end": 292, |
|
"text": "(Klementiev et al., 2012", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 293, |
|
"end": 300, |
|
"text": "Table 8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 492, |
|
"end": 499, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "ture scores estimated for the phrase pairs shown in Table 1 . Table 8 shows the comparable feature scores estimated by the method of (Klementiev et al., 2012) (above the bold line) and our proposed method (below the bold line). We can see that the method of (Klementiev et al., 2012) suffers from the data sparseness problem. Many of the feature scores are 1e \u2212 07, and many of the feature scores for the correct translations (\"unemployment figures\" and \"number of unemployed\") are lower than the incorrect ones (\". unemployment was\" and \"unemployment and bringing\"). Our proposed method addresses the data sparseness problem by using paraphrases for vector smoothing. We can see that, after smoothing the feature scores can more accurately distinguish the good translations from the bad ones.", |
|
"cite_spans": [ |
|
{ |
|
"start": 258, |
|
"end": 283, |
|
"text": "(Klementiev et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 59, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 62, |
|
"end": 69, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In this paper, we proposed using BLE together with paraphrases to address the accuracy problem of SMT. The translation pairs and their feature scores in the translation model of SMT can be inaccurate, because of the quality of the unsupervised methods used for translation model learning. Estimating comparable features from comparable corpora with BLE has been proposed for the accuracy problem of SMT. However, BLE suffers from the data sparseness problem, which makes the comparable features inaccurate. We proposed using paraphrases to address this problem. Paraphrases were used to smooth the vectors used in comparable feature estimation with BLE. Experiments conducted on Chinese-English PBSMT verified the effective-ness of our proposed method. As future work, firstly we plan to generate paraphrases from external parallel corpora and monolingual corpora, where as in this paper we used the paraphrases generated from the parallel corpus used for SMT. Secondly, in this paper we estimated contextual features from the parallel corpus, however in the future we plan to estimate it from comparable corpora. Finally, since our proposed method should be language independent and can be applied to other SMT models, we plan to conduct experiments on other language pairs and SMT models to verify this.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Our proposed method can also be applied to other language pairs and SMT models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that in PBSMT, the definition of a phrase also includes a single word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Since there are no document bounds in the corpus we used to estimate contextual feature, we treated every 100 sentences as one document.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We also tried other pruning thresholds, and this threshold showed the best performance in the preliminary experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported by the Japan Society for the Promotion of Science (JSPS) Grant-in-Aid for JSPS Fellows. We also thank the anonymous reviewers for their valuable comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgement", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Translation acquisition using synonym sets", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Andrade", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masaki", |
|
"middle": [], |
|
"last": "Tsuchida", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takashi", |
|
"middle": [], |
|
"last": "Onishi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Ishikawa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of NAACL-HLT 2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "655--660", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Andrade, Masaki Tsuchida, Takashi Onishi, and Kai Ishikawa. 2013. Translation acquisition using synonym sets. In Proceedings of NAACL-HLT 2013, pages 655-660.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Paraphrasing with bilingual parallel corpora", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Bannard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of ACL 2005", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "597--604", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Bannard and Chris Callison-Burch. 2005. Para- phrasing with bilingual parallel corpora. In Proceed- ings of ACL 2005, pages 597-604.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Inferring paraphrases for a highly inflected language from a monolingual corpus", |
|
"authors": [ |
|
{ |
|
"first": "Kfir", |
|
"middle": [], |
|
"last": "Bar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nachum", |
|
"middle": [], |
|
"last": "Dershowitz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of CICLing", |
|
"volume": "8404", |
|
"issue": "", |
|
"pages": "245--256", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kfir Bar and Nachum Dershowitz. 2014. Inferring para- phrases for a highly inflected language from a mono- lingual corpus. In Proceedings of CICLing 2014, pages 8404:2:245-256.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "The mathematics of statistical machine translation: Parameter estimation", |
|
"authors": [ |
|
{ |

"first": "Peter", |

"middle": [ |

"F." |

], |

"last": "Brown", |

"suffix": "" |

}, |

{ |

"first": "Stephen", |

"middle": [ |

"A." |

], |

"last": "Della Pietra", |

"suffix": "" |

}, |

{ |

"first": "Vincent", |

"middle": [ |

"J." |

], |

"last": "Della Pietra", |

"suffix": "" |

}, |

{ |

"first": "Robert", |

"middle": [ |

"L." |

], |

"last": "Mercer", |

"suffix": "" |

} |
|
], |
|
"year": 1993, |
|
"venue": "", |
|
"volume": "19", |
|
"issue": "", |
|
"pages": "263--312", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter F. Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and Robert L. Mercer. 1993. The mathe- matics of statistical machine translation: Parameter es- timation. Association for Computational Linguistics, 19(2):263-312.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Improved statistical machine translation using paraphrases", |
|
"authors": [ |
|
{ |

"first": "Chris", |

"middle": [], |

"last": "Callison-Burch", |

"suffix": "" |

}, |

{ |

"first": "Philipp", |

"middle": [], |

"last": "Koehn", |

"suffix": "" |

}, |

{ |

"first": "Miles", |

"middle": [], |

"last": "Osborne", |

"suffix": "" |

} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of NAACL-HLT 2006", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "17--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Callison-Burch, Philipp Koehn, and Miles Os- borne. 2006. Improved statistical machine transla- tion using paraphrases. In Proceedings of NAACL- HLT 2006, pages 17-24.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Exploiting shared Chinese characters in Chinese word segmentation optimization for Chinese-Japanese machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Chenhui", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toshiaki", |
|
"middle": [], |
|
"last": "Nakazawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daisuke", |
|
"middle": [], |
|
"last": "Kawahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sadao", |
|
"middle": [], |
|
"last": "Kurohashi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of EAMT 2012", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "35--42", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chenhui Chu, Toshiaki Nakazawa, Daisuke Kawahara, and Sadao Kurohashi. 2012. Exploiting shared Chi- nese characters in Chinese word segmentation opti- mization for Chinese-Japanese machine translation. In Proceedings of EAMT 2012, pages 35-42.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Iterative bilingual lexicon extraction from comparable corpora with topical and contextual knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Chenhui", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toshiaki", |
|
"middle": [], |
|
"last": "Nakazawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sadao", |
|
"middle": [], |
|
"last": "Kurohashi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of CICLing", |
|
"volume": "8404", |
|
"issue": "", |
|
"pages": "296--309", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chenhui Chu, Toshiaki Nakazawa, and Sadao Kuro- hashi. 2014. Iterative bilingual lexicon extraction from comparable corpora with topical and contextual knowledge. In Proceedings of CICLing 2014, pages 8404:2:296-309.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Domain adaptation for machine translation by mining unseen words", |
|
"authors": [ |
|
{ |

"first": "Hal", |

"middle": [], |

"last": "Daume", |

"suffix": "III" |

}, |
|
{ |
|
"first": "Jagadeesh", |
|
"middle": [], |
|
"last": "Jagarlamudi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of ACL-HLT 2011", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "407--412", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hal Daume III and Jagadeesh Jagarlamudi. 2011. Do- main adaptation for machine translation by mining un- seen words. In Proceedings of ACL-HLT 2011, pages 407-412.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Facilitating translation using source language paraphrase lattices", |
|
"authors": [ |
|
{ |
|
"first": "Jinhua", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Andy", |

"middle": [], |

"last": "Way", |

"suffix": "" |

} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of EMNLP 2010", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "420--429", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinhua Du, Jie Jiang, and Andy Way. 2010. Facilitating translation using source language paraphrase lattices. In Proceedings of EMNLP 2010, pages 420-429.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "What's in a translation rule", |
|
"authors": [ |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Hopkins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Daniel Marcu Susan Dumais and Salim Roukos", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "273--280", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michel Galley, Mark Hopkins, Kevin Knight, and Daniel Marcu. 2004. What's in a translation rule? In Daniel Marcu Susan Dumais and Salim Roukos, ed- itors, Proceedings of NAACL-HLT 2004, pages 273- 280.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Joshua 4.0: Packing, pro, and paraphrases", |
|
"authors": [ |
|
{ |
|
"first": "Juri", |
|
"middle": [], |
|
"last": "Ganitkevitch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Weese", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Post", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of WMT 2012", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "283--291", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Juri Ganitkevitch, Yuan Cao, Jonathan Weese, Matt Post, and Chris Callison-Burch. 2012. Joshua 4.0: Packing, pro, and paraphrases. In Proceedings of WMT 2012, pages 283-291.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Improving translation lexicon induction from monolingual corpora via dependency contexts and part-of-speech equivalences", |
|
"authors": [ |
|
{ |
|
"first": "Nikesh", |
|
"middle": [], |
|
"last": "Garera", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Yarowsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "129--137", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikesh Garera, Chris Callison-Burch, and David Yarowsky. 2009. Improving translation lexicon induc- tion from monolingual corpora via dependency con- texts and part-of-speech equivalences. In Proceedings of CoNLL 2009, pages 129-137.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Learning bilingual lexicons from monolingual corpora", |
|
"authors": [ |
|
{ |
|
"first": "Aria", |
|
"middle": [], |
|
"last": "Haghighi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taylor", |
|
"middle": [], |
|
"last": "Berg-Kirkpatrick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of ACL-HLT 2008", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "771--779", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aria Haghighi, Percy Liang, Taylor Berg-Kirkpatrick, and Dan Klein. 2008. Learning bilingual lexicons from monolingual corpora. In Proceedings of ACL- HLT 2008, pages 771-779.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Distributional structure. Word", |
|
"authors": [ |
|
{ |

"first": "Zellig", |

"middle": [ |

"S." |

], |

"last": "Harris", |

"suffix": "" |

} |
|
], |
|
"year": 1954, |
|
"venue": "", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "146--162", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zellig S. Harris. 1954. Distributional structure. Word, 10(23):146-162.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Combining bilingual and comparable corpora for low resource machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Ann", |
|
"middle": [], |
|
"last": "Irvine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of WMT 2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "262--270", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ann Irvine and Chris Callison-Burch. 2013a. Combin- ing bilingual and comparable corpora for low resource machine translation. In Proceedings of WMT 2013, pages 262-270.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Supervised bilingual lexicon induction with multiple monolingual signals", |
|
"authors": [ |
|
{ |
|
"first": "Ann", |
|
"middle": [], |
|
"last": "Irvine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of NAACL-HLT 2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "518--523", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ann Irvine and Chris Callison-Burch. 2013b. Supervised bilingual lexicon induction with multiple monolingual signals. In Proceedings of NAACL-HLT 2013, pages 518-523.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Monolingual marginal matching for translation model adaptation", |
|
"authors": [ |
|
{ |
|
"first": "Ann", |
|
"middle": [], |
|
"last": "Irvine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Quirk", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Hal", |

"middle": [], |

"last": "Daum\u00e9", |

"suffix": "III" |

} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of EMNLP 2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1077--1088", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ann Irvine, Chris Quirk, and Hal Daum\u00e9 III. 2013. Monolingual marginal matching for translation model adaptation. In Proceedings of EMNLP 2013, pages 1077-1088.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Weakly supervised named entity transliteration and discovery from multilingual comparable corpora", |
|
"authors": [ |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Klementiev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of COLING-ACL 2006", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "817--824", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandre Klementiev and Dan Roth. 2006. Weakly supervised named entity transliteration and discovery from multilingual comparable corpora. In Proceed- ings of COLING-ACL 2006, pages 817-824.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Toward statistical machine translation without parallel corpora", |
|
"authors": [ |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Klementiev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ann", |
|
"middle": [], |
|
"last": "Irvine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Yarowsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of EACL 2012", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "130--140", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandre Klementiev, Ann Irvine, Chris Callison- Burch, and David Yarowsky. 2012. Toward statistical machine translation without parallel corpora. In Pro- ceedings of EACL 2012, pages 130-140.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Learning a translation lexicon from monolingual corpora", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the ACL-02 Workshop on Unsupervised Lexical Acquisition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn and Kevin Knight. 2002. Learning a translation lexicon from monolingual corpora. In Pro- ceedings of the ACL-02 Workshop on Unsupervised Lexical Acquisition, pages 9-16.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Statistical phrase-based translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franz", |
|
"middle": [ |
|
"Josef" |
|
], |
|
"last": "Och", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of NAACL-HLT 2003, NAACL '03", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "48--54", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Franz Josef Och, and Daniel Marcu. 2003. Statistical phrase-based translation. In Proceed- ings of NAACL-HLT 2003, NAACL '03, pages 48-54.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Moses: Open source toolkit for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Bertoldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brooke", |
|
"middle": [], |
|
"last": "Cowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wade", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Moran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |

{ |

"first": "Ondrej", |

"middle": [], |

"last": "Bojar", |

"suffix": "" |

}, |

{ |

"first": "Alexandra", |

"middle": [], |

"last": "Constantin", |

"suffix": "" |

}, |

{ |

"first": "Evan", |

"middle": [], |

"last": "Herbst", |

"suffix": "" |

} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of ACL 2007", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ondrej Bojar, Alexandra Con- stantin, and Evan Herbst. 2007. Moses: Open source toolkit for statistical machine translation. In Proceed- ings of ACL 2007, pages 177-180.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Statistical significance tests for machine translation evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of EMNLP 2004", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "388--395", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn. 2004. Statistical significance tests for machine translation evaluation. In Dekang Lin and Dekai Wu, editors, Proceedings of EMNLP 2004, pages 388-395.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Improved statistical machine translation using monolingually-derived paraphrases", |
|
"authors": [ |
|
{ |
|
"first": "Yuval", |
|
"middle": [], |
|
"last": "Marton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of EMNLP 2009", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "381--390", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuval Marton, Chris Callison-Burch, and Philip Resnik. 2009. Improved statistical machine translation using monolingually-derived paraphrases. In Proceedings of EMNLP 2009, pages 381-390.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Minimum error rate training in statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Franz Josef", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of ACL 2003", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "160--167", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franz Josef Och. 2003. Minimum error rate training in statistical machine translation. In Proceedings of ACL 2003, pages 160-167.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Role of paraphrases in pb-smt", |
|
"authors": [ |
|
{ |
|
"first": "Santanu", |
|
"middle": [], |
|
"last": "Pal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pintu", |
|
"middle": [], |
|
"last": "Lohar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sudip Kumar", |
|
"middle": [], |
|
"last": "Naskar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of CICLing", |
|
"volume": "8404", |
|
"issue": "", |
|
"pages": "245--256", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Santanu Pal, Pintu Lohar, and Sudip Kumar Naskar. 2014. Role of paraphrases in pb-smt. In Proceedings of CICLing 2014, pages 8404:2:245-256.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Finding translations for lowfrequency words in comparable corpora", |
|
"authors": [ |
|
{ |
|
"first": "Viktor", |
|
"middle": [], |
|
"last": "Pekar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Mitkov", |
|
"suffix": "" |
|
}, |

{ |

"first": "Dimitar", |

"middle": [], |

"last": "Blagoev", |

"suffix": "" |

}, |

{ |

"first": "Andrea", |

"middle": [], |

"last": "Mulloni", |

"suffix": "" |

} |
|
], |
|
"year": 2006, |
|
"venue": "Machine Translation", |
|
"volume": "20", |
|
"issue": "4", |
|
"pages": "247--266", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Viktor Pekar, Ruslan Mitkov, Dimitar Blagoev, and An- drea Mulloni. 2006. Finding translations for low- frequency words in comparable corpora. Machine Translation, 20(4):247-266.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Identifying word translations in non-parallel texts", |
|
"authors": [ |
|
{ |
|
"first": "Reinhard", |
|
"middle": [], |
|
"last": "Rapp", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Proceedings of ACL 1995", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "320--322", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reinhard Rapp. 1995. Identifying word translations in non-parallel texts. In Proceedings of ACL 1995, pages 320-322.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Graph propagation for paraphrasing out-of-vocabulary words in statistical machine translation", |
|
"authors": [ |
|
{ |

"first": "Majid", |

"middle": [], |

"last": "Razmara", |

"suffix": "" |

}, |

{ |

"first": "Maryam", |

"middle": [], |

"last": "Siahbani", |

"suffix": "" |

}, |

{ |

"first": "Reza", |

"middle": [], |

"last": "Haffari", |

"suffix": "" |

}, |

{ |

"first": "Anoop", |

"middle": [], |

"last": "Sarkar", |

"suffix": "" |

} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of ACL 2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1105--1115", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Majid Razmara, Maryam Siahbani, Reza Haffari, and Anoop Sarkar. 2013. Graph propagation for para- phrasing out-of-vocabulary words in statistical ma- chine translation. In Proceedings of ACL 2013, pages 1105-1115.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Identifying word translations from comparable corpora using latent topic models", |
|
"authors": [ |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wim", |
|
"middle": [ |
|
"De" |
|
], |
|
"last": "Smet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Francine", |
|
"middle": [], |
|
"last": "Moens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of ACL-HLT 2011", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "479--484", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ivan Vuli\u0107, Wim De Smet, and Marie-Francine Moens. 2011. Identifying word translations from comparable corpora using latent topic models. In Proceedings of ACL-HLT 2011, pages 479-484.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Overview of our proposed method.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"num": null, |
|
"text": "", |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"3\">shows the statistics of the comparable data</td></tr><tr><td colspan=\"3\">used for comparable feature estimation. The con-</td></tr><tr><td>6 LDC2007T02,</td><td>LDC2002T01,</td><td>LDC2003T17,</td></tr><tr><td colspan=\"3\">LDC2004T07, HK News part of LDC2004T08, LDC2005T10</td></tr><tr><td colspan=\"2\">and LDC2006T04 7 http://www.speech.sri.com/projects/srilm</td><td/></tr></table>" |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Statistics of the comparable data used for comparable feature estimation.", |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"num": null, |
|
"text": "\u2021 46.10 \u2020 \u2021 46.00 \u2020 \u2021 46.26 \u2020", |
|
"html": null, |
|
"content": "<table><tr><td>System</td><td colspan=\"3\">+Contextual+Topical+Temporal +All</td></tr><tr><td>Baseline</td><td/><td>45.45</td></tr><tr><td colspan=\"2\">Klementiev+ 43.69</td><td>45.72</td><td>45.05 45.92</td></tr><tr><td>Proposed</td><td>45.56</td><td/></tr></table>" |
|
} |
|
} |
|
} |
|
} |