|
{ |
|
"paper_id": "Y15-1003", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:41:39.232715Z" |
|
}, |
|
"title": "Translation of Unseen Bigrams by Analogy Using an SVM Classifier", |
|
"authors": [ |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Waseda University", |
|
"location": { |
|
"addrLine": "2-7 Hibikino, Wakamatsu-ku", |
|
"postCode": "808-0135", |
|
"settlement": "Kitakyushu", |
|
"region": "Fukuoka", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Lyu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Waseda University", |
|
"location": { |
|
"addrLine": "2-7 Hibikino, Wakamatsu-ku", |
|
"postCode": "808-0135", |
|
"settlement": "Kitakyushu", |
|
"region": "Fukuoka", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yves", |
|
"middle": [], |
|
"last": "Lepage", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Waseda University", |
|
"location": { |
|
"addrLine": "2-7 Hibikino, Wakamatsu-ku", |
|
"postCode": "808-0135", |
|
"settlement": "Kitakyushu", |
|
"region": "Fukuoka", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "yves.lepage@waseda.jp" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Detecting language divergences and predicting possible sub-translations is one of the most essential issues in machine translation. Because of the existence of translation divergences, it is impractical to translate straightforwardly from a source sentence into a target sentence while keeping a high degree of accuracy and without additional information. In this paper, we investigate the problem from an emerging and special point of view: bigrams and their corresponding translations. We first profile corpora and explore the constituents of bigrams in the source language. Then we translate unseen bigrams based on proportional analogy and filter the outputs using a Support Vector Machine (SVM) classifier. The experiment results also show that even a small set of features from analogies can provide meaningful information in translating by analogy.",
|
"pdf_parse": { |
|
"paper_id": "Y15-1003", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Detecting language divergences and predicting possible sub-translations is one of the most essential issues in machine translation. Because of the existence of translation divergences, it is impractical to translate straightforwardly from a source sentence into a target sentence while keeping a high degree of accuracy and without additional information. In this paper, we investigate the problem from an emerging and special point of view: bigrams and their corresponding translations. We first profile corpora and explore the constituents of bigrams in the source language. Then we translate unseen bigrams based on proportional analogy and filter the outputs using a Support Vector Machine (SVM) classifier. The experiment results also show that even a small set of features from analogies can provide meaningful information in translating by analogy.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Over the last decade, phrase-based statistical machine translation (Koehn et al., 2003) systems have demonstrated that they can produce reasonable quality when ample training data is available, especially for language pairs with similar word order. However, the PB-SMT model has not yet been capable of satisfying the various translation tasks for very different languages (Isozaki et al., 2010) . The existence of translation divergences makes the straightforward transfer from source sentences into target sentences hard. Though many previous pieces of work (Dorr, 1994; Habash et al., 2002; Dorr et al., 2004) have attempted to take account for divergences and to deal with this linguistic problem using various translation approaches. This paper further inquires the topic.", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 87, |
|
"text": "(Koehn et al., 2003)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 395, |
|
"text": "(Isozaki et al., 2010)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 560, |
|
"end": 572, |
|
"text": "(Dorr, 1994;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 573, |
|
"end": 593, |
|
"text": "Habash et al., 2002;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 594, |
|
"end": 612, |
|
"text": "Dorr et al., 2004)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Since sentence consists of bigrams, instead of analysing the syntactic structures of the whole sentence or part of the sentence as in (Ding and Palmer, 2005) , we explore the possibilities of translating unseen bigrams based on an analogy learning method. We investigate the coverage of translated bigrams in the test set and inspect the probability of translating a bigram using analogy. Analogical learning has been investigated by several authors. To cite a few, Lepage et al. (2005) showed that proportional analogy can capture some syntactic and lexical structures across languages. Langlais et al. (2007) investigated the more specific task of translating unseen words. Bayoudh et al. (2007) explored generating new learning examples from very scarce original learning data using analogy to train an SVM classifier. Dandapat et al. (2010) performed transliteration by analogical learning for English-to-Hindi.", |
|
"cite_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 157, |
|
"text": "(Ding and Palmer, 2005)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 466, |
|
"end": 486, |
|
"text": "Lepage et al. (2005)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 588, |
|
"end": 610, |
|
"text": "Langlais et al. (2007)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 676, |
|
"end": 697, |
|
"text": "Bayoudh et al. (2007)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 822, |
|
"end": 844, |
|
"text": "Dandapat et al. (2010)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In the issue of translation using analogy, one of the main drawbacks that should be addressed is the problem of \"over-generative\". Analogy is able to capture most divergences of translation in most cases, yet it generates a great number of solutions that are ungrammatical and incorrect. In this paper, we propose to translate unseen bigrams by reconstructing them with the principle of analogy learning. In machine learning, SVMs have been shown to be efficient in performing non-linear classification. By specifying the features used in the experiment, we employ an SVM classifier to quickly filter the solutions output by the analogy solver. The final goal of this research is to explore the possibility of translation using analogy and point out a feasible way to solve the problem of \"over-generative\".",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The remainder of this paper is organized as follows: Section 2 describes basic notions in alignment and analogy. In Section 3, we explore the classification of bigrams and their contributions to the whole corpus and report some profiling results. Section 4 presents our approach, based on analogy, and describes how to process the data and extract examples for training an SVM classifier. We also evaluate the results using some standard measures. Finally, in Section 5, conclusions and perspectives are presented.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this section, from a theoretical point of view, we study the categories of word alignment in translating. Given a sentence, various alignments of bigram exist. The following is an example of nonmonotonic alignments where alignment links are crossing between parallel sentences (Japanese and English): e: He 1 saw 2 a cat 3 with a long 4 tail 5 . j: Kare ha 1 nagai 4 sippo no 5 neko wo 3 mita 2 . e: He long tail of cat saw", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic notions 2.1 Alignment classification", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this example, e means an original English sentence in parallel texts, j means a Japanese sentence, and\u1ebd means an amended English sentence which is better for translation parameter training with j. The phrases with the same index are aligned. Based on these two sentences, different categories of alignments have been identified. For each category, examples are given: According to whether the translation is continuous or not, we divide the alignments into 2 categories: 1. both the n-gram and its translation in the target language are continuous. 2. the translation in the target language contains gaps because of syntactic divergence (Dorr et al., 2004) . We define \"[X]\" to stand for gaps in the target side as denoted by (Chiang, 2005) (1) long tail to nagai sippo. -Bigram-to-unigram the bigram corresponds to a unigram, e.g., (3) a cat to neko. -Crossing-N-gram the translation is continuous, but in a different order, e.g., (2) cat with to no neko.", |
|
"cite_spans": [ |
|
{ |
|
"start": 640, |
|
"end": 659, |
|
"text": "(Dorr et al., 2004)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 729, |
|
"end": 743, |
|
"text": "(Chiang, 2005)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic notions 2.1 Alignment classification", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 Discontinuous Alignment -Bigram-to-N-gram-with-gaps a large number of translations in the target language are not continuous. This is a common phenomenon, as illustrated by (4): he saw to kara wa [X] mita. -Crossing-N-gram-with-gaps the bigram was aligned with discontinuous words with gaps in the middle; at the same time, the translation is in a different order, e.g., (5): sippo no neko to cat [X] tail.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic notions 2.1 Alignment classification", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this section, we describe employing analogy to deal with diverse alignments for bigram translation. We follow (Turney, 2006) to describe the basic notions of proportional analogy used in this work. Verbal analogies are often written A : B :: C : D. They meaning A is to B as C is to D. For example: annual taxes : annual statistics :: the taxes : the statistics The above example can be understood as follows: we reconstruct an unseen bigram annual taxes by a triple of known bigrams. All the elements in the unseen bigram is taken by similarity from the second (annual statistics) and third (the taxes) known bigrams and put together by difference with the fourth known bigram (the statistics). The definition of proportional analogy that we use in this paper is drawn from (Lepage, 1998) and we focus in this study on formal proportional analogies. A 4-tuple of n-grams A, B, C and D is said to be a proportional analogy if the following 3 constraints are verified. The lengths of the n-grams may be different, but should meet the following constraints:", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 127, |
|
"text": "(Turney, 2006)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 778, |
|
"end": 792, |
|
"text": "(Lepage, 1998)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proportional analogy", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "1. |A| a + |D| a = |C| a + |B| a , \u2200a 2. d(A, B) = d(C, D) 3. d(A, C) = d(B, D)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proportional analogy", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where d is the edit distance that counts the minimal number of insertions and deletions that are necessary to transform a string into another string. |A| a is the number of occurrences of the word a in the n-gram A. This approach in fact still works well on different lengths of n-grams. However, this method is a necessary condition but not a sufficient one when applied to the translation issue.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proportional analogy", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "As for bilingual translation using analogy, Denoual et al. (2007) presented a parallelopiped view on translating unknown words using analogy, we expand it to bigrams (see Figure 2 ). Suppose that we want to translate the following bigram (English): annual taxes into French, in order to translate the unknown bigram, bilingual proportional analogy requires a triple of source bigrams and corresponding translations. This procedure can be splitted into 2 steps:", |
|
"cite_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 65, |
|
"text": "Denoual et al. (2007)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 179, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proportional analogy", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "1. reconstruct unseen bigram with a triple of source bigrams 2. translate using analogy", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proportional analogy", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Given a bigram, it can be reconstructed using other n-grams via different reconstruction patterns. For instance, we can rebuild the bigram: annual taxes in following several ways:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bigram reconstruction", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Pattern 1: ab : ac :: db : dc annual income statistics annual taxes is reconstructed with different n-grams extracted from the training corpus. Beside these 5 Patterns, analogy in general can capture other various patterns in natural language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bigram reconstruction", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "We restrict to Pattern 1 in reconstructing of source bigrams because this Pattern contains more information of context and crossing-language alignment. On the contrary, we allow all Patterns in the target side as we want to collect as many translations as possible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bigram reconstruction", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The problem that we define is, given an unseen bigram A in the source languages, supposing we have known an alignment between n-gram and its translation which is represented by a, we want to find the appropriate template T i , to adapt the synchronous analogy and finally generate the target\u00c3 successfully. We formalize analogical deduction as following:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation by analogy", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "A : B i :: C j : x (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation by analogy", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Assume the previous analogical equation has a solution x. We define the case when x belongs to the training set as \"reconstructible\". \u03d5(.) is the translation function; bidirectional analogical deduction also requires repeating this operation with all target translations corresponding to the source bigrams in the opposite direction. In other words, it satisfies the following equation:",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation by analogy", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2203(B m , C n , D k ) \u2208 \u03d5(B i ) \u00d7 \u03d5(C i ) \u00d7 \u03d5(x)/ (2) \u2203y/y : B m :: C n : D k", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Translation by analogy", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "We define \"bidirectional reconstructible\" as the case when, given an unseen bigram as input, the system finally outputs the solution y. In this model, a stands for the alignment between a source-language bigram and its translation in the target language, a \u21d4 (X, X ). If the alignment (A, y) appears in the test set (as \u2203y \u2208 \u03d5(A)), we recognize the output as the translation, called an \"attested translation\".",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation by analogy", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "The Figure 3 describes this procedure and Figure 4 shows the details about constituents of bigrams. Since the proceeding of the whole produce of analogical derivation is very time-consuming, in order to evaluate the ceiling coverage of \"attested translation\", we conduct the synchronous parsing for fast obtaining the examples. It is easy to obtain the alignments between A and A in the test set with some automatic aligners. From a bigram A and its translation A , for each elements in source side and with all relevant of bigrams B, C from the source part of the bicorpus, if there also exists the translations B , C , we can reduce the remaining D and D which is described as following formula:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 4, |
|
"end": 12, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 42, |
|
"end": 51, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Translation by analogy", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "(A, A ):(B i , B m )::(C j , C n )\u21d2(D, D )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation by analogy", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "If finally we find D and D at the end of this equation are linked, we consider that from A it can arrive to A successfully.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation by analogy", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "We first profile the test set by exploring the proportion of unseen bigrams in the source language. Then we investigate the reconstructibility/bidirectional reconstructibility of unseen bigrams in the source language. Finally, we estimate the maximum number of attested translation bigrams using this analogy-based approach.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data profiling", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We use the Europarl Corpora 1 (Koehn, 2005) to prepare the classification examples used to train and test the SVM classifier. We split the corpus into two parts: a training set and a test set. A set of 100,000 sentences which lengths less than 30 with the French translation are extracted as the training set. We also sample a set of 10,000 sentences from the remaining corpus not contained in training set as the test set. This corpus only offers aligned texts, however, it does not provide word alignment information for each language pair. Table 1 shows some statistic of bigrams and the proportion of unseen bigrams in the experiment data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 43, |
|
"text": "(Koehn, 2005)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 543, |
|
"end": 550, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data preprocessing", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Before reconstructing, we preprocess to obtain word-to-word alignments. Our work is based on the dominant method to obtain word alignment, which trained from the Expectation Maximization (EM) algorithm. To extract the word alignment, EM algorithm will be utilized to train the bilingual corpus for several iterations, and then phrase pairs that are consistent with this word alignment will be extracted. We align the words automatically relying on the GIZA++ 2 (Och et al., 2003) implementation of the IBM Models in Moses toolkit (Koehn et al., 2007) , running the algorithm in both directions, source to target and target to source.",

"cite_spans": [

{

"start": 461,

"end": 479,

"text": "(Och et al., 2003)",

"ref_id": null

},

{

"start": 530,

"end": 550,
|
"text": "(Koehn et al., 2007)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word-to-word alignment", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The heuristics applied to obtain a symmetrized alignment in this step is grow-diag-final-and, it starts with the intersection of directional word alignments and enrich it with alignment points from the union. We employ this algorithm to obtained alignment, and from that we extract the continuous bigrams and their aligned targets directly from the alignment files. At same time, an aligned test set was build as the golden reference using the same approach. \"aligned\" means it is aligned by GIZA++. Table 3 : Distribution of bigrams, e.g., unaligned and aligned in the training data. More than 90% of unseen bigrams can be reconstructed.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 500, |
|
"end": 507, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word-to-word alignment", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Though most of the bigrams are reconstructible, not all bigrams belonging to this set can really generate a solution (case of BR) that is the same as the aligned translations in the target language. That is a quite interesting and widespread phenomenon in most cases (case of \u00acBR). We implement bilingual synchronizing parsing to quickly search the reusable and useful templates (case of attested translation). As a matter of fact, though not all final solutions are acceptable, we are aiming to bound the amount of successful analogies in total. The statistics are provided in the following.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reconstructiblity", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Templates Table 4 : Distribution of bigrams, e.g., attested translation and unattested translation using analogy, it means more than 3/4 (66.37%+11.16%) of bigrams are attested translation only referring to the training data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 17, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Negative Examples", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "T s : (B i , B m ) ,(C j , C n ), (D, D )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Negative Examples", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Since the proportional analogy for translation mapping is the necessary condition but not sufficient, identifying the correct translation via proportional analogy with some machine learning approaches is very necessary. In the following, we will describe how we collect the examples and from them to extract the features to train the SVM classifier. It implements the estimating-processing by using the specified features: independent features from (A, A ) as well as relative features from analogical templates of (B i , B m ), (C j , C n ), (D, D ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SVM Classifier", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "For classifying the outputs as correct translation or not, the software LIBSVM 3 (Chang et al., 2012) is used, which is an integrated software package that comes with scripts that automate normalization of the features and optimization of the \u03b3 and C parameters. We still need to restrict the features to feed it for training.",
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 101, |
|
"text": "(Chang et al., 2012)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "3.4.1" |
|
}, |
|
{ |
|
"text": "Lexical Weighting: the direct lexical weighting P lex (e|f ) and inverse lexical weighting P lex (f |e) for (A, A ). Given a word alignment a, we apply the formula of IBM Model 1 to compute the lexical translation probability of a phrase e given the foreign phrase f as (Koehn et al., 2003) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 270, |
|
"end": 290, |
|
"text": "(Koehn et al., 2003)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "P lex (e|f, a) = I i=1 1 {j|(i, j) \u2208 a} \u2200(i,j)\u2208a w(e i |f j ) (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Here, we compute the score as the following equation without the word alignment:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "P lex (e|f ) = 1 I I i=1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "log max", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "{j|\u2200(i,j)\u2208a}", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "{w(e i |f j )}", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(5) Length: the lengths of A in words, '[X]' should not be recognized as a word, because it can be \u03b5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Frequency: we compile the data with the suffix array for fast searching (Lopez, 2007) . We calculate the frequency of occurrence for each n-gram generated by analogy in French (with/without gaps). The complete French subset of Europarl corpus is used as the reference.", |
|
"cite_spans": [ |
|
{ |
|
"start": 72, |
|
"end": 85, |
|
"text": "(Lopez, 2007)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Reference (French MutualInformation: It is considered as the most widely used measure in extraction of collocations. We only compute the score only for A as following:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "I(X) = log p(w 1 , w 2 , .., w m ) m i=1 p(w i )", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Relative Features LexicalWeight: the lexical weightings of (B i , B m ), (C j , C n ) and (D, D ) in both directions (direct lexical weighting P lex (e|f ) and inverse phrase translation probabilities P lex (f |e).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Blue triangles stand positive examples and red circles stand negative examples. We found that the output with the balanced template in lexical weighting does not mean it has the larger probability to be a positive examples.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Length: the lengths of B m , C n and D in words, \"[X]\" should not be recognized as a word, because it can be \u03b5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Frequency: the occurrences of B i , C j and D and same to targets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Dice's coefficient: Dice coefficient measures the presence/absence of data between two phrases, where |X| and |Y | are the number of words in set X and Y , respectively, and |X \u2229Y | is the number of words shared by the two sets. We use the following formula to compute the score of Dice coefficient among B , C and D , e.g.:",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Dice(X, Y ) = 2|X \u2229 Y | |X| + |Y |", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "MutualInformation: This measures the mutual dependence of co-occurring phrases.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "x stands for the word in the source bigram and y stands for the word in the solution of the analogy. p(x, y) is the word-to-word translation probability. p(.) is the probability distribution function.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "I(X, Y ) = x,y p(x, y) log p(x, y) p(x)p(y)", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "\u2022 Independent Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As we treat verifying analogy output as a binary classification problem, we obtained various outputs from analogy engine for each bigram. \u03d5(.) is the translation function, we label the training examples as in (5):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem formulation", |
|
"sec_num": "3.4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y = 1, if A \u2208 \u03d5(A) 0, if A / \u2208 \u03d5(A)", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Problem formulation", |
|
"sec_num": "3.4.2" |
|
}, |
|
{ |
|
"text": "Each instance is associated with a set of features that have been discussed in the previous section.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem formulation", |
|
"sec_num": "3.4.2" |
|
}, |
|
{ |
|
"text": "The bilingual-crossing examples are generated by the previous script depends on the alignment output by GIZA++. During training of the SVM classifier, positive and negative instances of examples are generated from the subset of attested translation and unusable templates in the middle of analogy proceeding. We also build a test set to validate the accuracy of such a classifier. Test 1k 1k 2k Training 5k 5k 10k Table 7 : Size of the examples used as the test set and training set in the experiment.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 381, |
|
"end": 430, |
|
"text": "Test 1k 1k 2k Training 5k 5k 10k Table 7", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental settings", |
|
"sec_num": "3.4.3" |
|
}, |
|
{ |
|
"text": "To test the performance of our approach we focus on the accuracy of the results. We first sample 2k examples as test data (as in Table 7 ). During training the SVM classifier determines a maximum margin hyperplane between the positive and negative examples. We measure the quality of the classification by precision and recall. Let C be the set of output predictions. We standardly define precision P, recall R and F-measure as in (10):", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 136, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.4.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P = C tp C tp + C f p , R = C tp C tp + C f n , F = 2P R P + R", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Evaluation", |
|
"sec_num": "3.4.4" |
|
}, |
|
{ |
|
"text": "It should be noted that the number of examples for training are different for the systems of different language pairs. Because we are interested in the possibilities of found translation, we used the standard accuracy measure to evaluate the performance of classifier on the test set:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.4.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "accuracy = C tp + C tn C", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "Evaluation", |
|
"sec_num": "3.4.4" |
|
}, |
|
{ |
|
"text": "where C tp is the counts of true-positive and C tn is the counts of true-negative. C is the total counts of candidates. We show the details of evaluation scores in Table 8 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 164, |
|
"end": 171, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.4.4" |
|
}, |
|
{ |
|
"text": "In this paper we have performed an empirical investigation on translating unseen bigrams in MT by employing an analogy-based method, which has never been explored. We investigated the maximum possible coverage of bilingually reconstructible bigrams in the test set and the probability that a bigram is an attested translation by using analogy. As can be noticed from the presented results, after incorporating the features of the templates which are used in analogy derivation, the performance of the SVM classifier improves. In other words, the analogical information has a positive effect on classification.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future works", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Though the accuracy is not as high as we expected, there are some reasons that can explain it: first, the alignment output by GIZA++ is still far from completely correct, and second, the features used are very simple. Moreover, without any contextual information, this result should be acceptable. The results suggest that lexical weighting and mutual information contribute most to identifying the correct translation.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future works", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Another point that should be addressed is that bigram translation is the most difficult task in analogy-based machine translation. If a bigram is an attested translation, it will unquestionably help the translation of longer n-grams.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future works", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Future work should focus on identifying proper longer chunk/phrase translations using a similar approach.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future works", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "http://www.statmt.org/europarl/archives.html#v3 PACLIC 29", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.statmt.org/moses/giza/GIZA++.html PACLIC 29", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.csie.ntu.edu.tw/ cjlin/libsvm/ PACLIC 29", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work is supported in part by the China Scholarship Council (CSC) under CSC Grant No. 201406890026. We also thank the anonymous reviewers for their insightful comments.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Measures of the amount of ecologic association between species", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Dice", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1945, |
|
"venue": "Ecology", |
|
"volume": "26", |
|
"issue": "3", |
|
"pages": "297--302", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dice, L.R. 1945. Measures of the amount of ecologic association between species. Ecology, Vol.26, No.3, pp.297-302.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Machine translation divergences: A formal description and proposed solution", |
|
"authors": [ |
|
{ |
|
"first": "Bonnie", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Dorr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Computational Linguistics", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "597--633", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bonnie J. Dorr. 1994. Machine translation divergences: A formal description and proposed solution. Compu- tational Linguistics.20.4: pp.597-633.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Translating unknown Words by Analogical Learning", |
|
"authors": [ |
|
{ |
|
"first": "Philippe", |
|
"middle": [], |
|
"last": "Langlais", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Patry", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "EMNLP/CoNLL'07", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "877--886", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philippe Langlais and Alexandre Patry 2007. Trans- lating unknown Words by Analogical Learning. In EMNLP/CoNLL'07, pages 877-886, Prague, Czech Republic.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Mitigating problems in analogy-based ebmt with smt and vice versa: a case study with named entity transliteration", |
|
"authors": [ |
|
{ |
|
"first": "Sandipan", |
|
"middle": [], |
|
"last": "Dandapat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Morrissey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sudip", |
|
"middle": [], |
|
"last": "Kumar Naskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harold", |
|
"middle": [], |
|
"last": "Somers", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "24th Pacific Asia Conference on Language Information and Computation (PACLIC'10)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "365--372", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sandipan Dandapat, Sara Morrissey, Sudip Kumar Naskar, and Harold Somers. 2010. Mitigating prob- lems in analogy-based ebmt with smt and vice versa: a case study with named entity transliteration. In 24th Pacific Asia Conference on Language Information and Computation (PACLIC'10), pages 365-372, Sendai, Japan.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Using bigrams in text categorization", |
|
"authors": [ |
|
{ |
|
"first": "Ron", |
|
"middle": [], |
|
"last": "Bekkerman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Allan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "1003", |
|
"issue": "", |
|
"pages": "1--2", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ron Bekkerman and James Allan. Using bigrams in text categorization. Department of Computer Science, University of Massachusetts, Amherst 1003 (2004): 1- 2.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Stochastic inversion transduction grammars and bilingual parsing of parallel corpora. Computational linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Dekai", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "", |
|
"volume": "23", |
|
"issue": "", |
|
"pages": "377--403", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dekai Wu. 1997. Stochastic inversion transduction grammars and bilingual parsing of parallel corpora. Computational linguistics.23.3: pp.377-403.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Solving analogies on words: an algorithm", |
|
"authors": [ |
|
{ |
|
"first": "Yves", |
|
"middle": [], |
|
"last": "Lepage", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics and 17th International Conference on Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yves Lepage. 1998. Solving analogies on words: an algorithm. Proceedings of the 36th Annual Meet- ing of the Association for Computational Linguistics and 17th International Conference on Computational Linguistics-Volume 1. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Improved cross-language retrieval using backoff translation", |
|
"authors": [ |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douglas", |
|
"middle": [], |
|
"last": "Oard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gina", |
|
"middle": [], |
|
"last": "Levow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the Proceedings of the First International Conference on Human Language Technology Research (HLT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philip Resnik, Douglas Oard and Gina Levow. 2001. Im- proved cross-language retrieval using backoff transla- tion. In Proceedings of the Proceedings of the First International Conference on Human Language Tech- nology Research (HLT).", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Handling translation divergences: Combining statistical and symbolic techniques in generation-heavy machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Nizar", |
|
"middle": [], |
|
"last": "Habash", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Dorr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nizar Habash and Bonnie Dorr. 2002. Handling transla- tion divergences: Combining statistical and symbolic techniques in generation-heavy machine translation. Springer Berlin Heidelberg.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "A systematic comparison of various statistical alignment models", |
|
"authors": [ |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Franz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franz Josef Och and Hermann Ney. 2003. A system- atic comparison of various statistical alignment mod- els. Computational linguistics29.1, pp.19-51.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "A best-first alignment algorithm for automatic extraction of transfer mappings from bilingua corpora. Recent advances in example-based machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Arul", |
|
"middle": [], |
|
"last": "Menezes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Stephen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "421--442", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arul Menezes and Stephen D. Richardson. 2003. A best-first alignment algorithm for automatic extrac- tion of transfer mappings from bilingua corpora. Re- cent advances in example-based machine translation. Springer Netherlands. pp.421-442.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Divergence Unraveling for Word Alignment of Parallel Corpora", |
|
"authors": [ |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Dorr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Natural Language Engineering", |
|
"volume": "1", |
|
"issue": "1", |
|
"pages": "1--17", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bonnie Dorr, Necip Fazil Ayan and Nizar Habash. 2004. Divergence Unraveling for Word Alignment of Parallel Corpora. Natural Language Engineering, 1 (1), pp.1- 17.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Statistical phrase-based translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Franz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the 2003 Conference of the North American Chapter of the Association for Computational Linguistics on Human Language Technology", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Franz Josef Och and Daniel Marcu 2003. Statistical phrase-based translation. Proceedings of the 2003 Conference of the North American Chapter of the Association for Computational Linguistics on Hu- man Language Technology-Volume 1.Association for Computational Linguistics", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Kenji Yamada, Philippe Langlais and Arne Mauser", |
|
"authors": [ |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Simard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Cancedda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bruno", |
|
"middle": [], |
|
"last": "Cavestro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Dymetman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Gaussier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cyril", |
|
"middle": [], |
|
"last": "Goutte", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the Human Language Technology Conference and Conference on Empirical Methods in Natural Language Processing (HLT-EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "755--762", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michel Simard, Nicola Cancedda, Bruno Cavestro, Marc Dymetman, Eric Gaussier, Cyril Goutte, Kenji Ya- mada, Philippe Langlais and Arne Mauser. 2005. Translating with non-contiguous phrases In Proceed- ings of the Human Language Technology Conference and Conference on Empirical Methods in Natural Language Processing (HLT-EMNLP). Association for Computational Linguistics, pp.755-762.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Scaling phrase-based statistical machine translation to larger corpora and longer phrases", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Bannard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josh", |
|
"middle": [], |
|
"last": "Schroeder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Callison-Burch, Colin Bannard and Josh Schroeder. 2005. Scaling phrase-based statistical machine trans- lation to larger corpora and longer phrases. Proceed- ings of the 43rd Annual Meeting on Association for Computational Linguistics. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Machine translation using probabilistic synchronous dependency insertion grammars", |
|
"authors": [ |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Ding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuan Ding and Martha Palmer. 2005. Machine trans- lation using probabilistic synchronous dependency in- sertion grammars. Proceedings of the 43rd Annual Meeting on Association for Computational Linguis- tics. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Corpusbased learning of analogies and semantic relations. Machine Learning60", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Turney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Littman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "251--278", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter D. Turney and Michael L. Littman. 2005. Corpus- based learning of analogies and semantic relations. Machine Learning60.1-3 (2005): pp.251-278.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Europarl: A parallel corpus for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "MT summit", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn. 2005. Europarl: A parallel corpus for statistical machine translation. MT summit. Vol. 5", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "N-gram-based versus phrasebased statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Crego", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Costa-Jussa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Mari\u00f1o", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Fonollosa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the International Workshop on Spoken Language Technology (IWSLT'05)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. M. Crego, M. R. Costa-Jussa, J. B. Mari\u00f1o and J. A. Fonollosa. 2005. N-gram-based versus phrasebased statistical machine translation In Proceedings of the International Workshop on Spoken Language Technol- ogy (IWSLT'05).", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "An Ngram-based statistical machine translation decoder", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Josep", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Crego", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Jos\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adri\u00e0", |
|
"middle": [], |
|
"last": "Mari\u00f1o", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gispert", |
|
"middle": [], |
|
"last": "De", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proc. of the 9th European Conference on Speech Communication and Technology (Interspeech'05)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Josep M. Crego, Jos\u00e9 B. Mari\u00f1o and Adri\u00e0 de Gispert. 2005. An Ngram-based statistical machine transla- tion decoder. Proc. of the 9th European Confer- ence on Speech Communication and Technology (In- terspeech'05).", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A hierarchical phrase-based model for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Chiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics.Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Chiang. 2005. A hierarchical phrase-based model for statistical machine translation Proceedings of the 43rd Annual Meeting on Association for Computa- tional Linguistics.Association for Computational Lin- guistics", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Similarity of semantic relations Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Turney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "379--416", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter D. Turney 2006. Similarity of semantic relations Computational Linguistics.32(2):pp.379-416.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Word-based alignment, phrase-based translation: What's the link", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proc. of AMTA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Lopez and Philip Resnik. 2006. Word-based alignment, phrase-based translation: What's the link. Proc. of AMTA.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Analogical translation of unknown words in a statistical machine translation framework", |
|
"authors": [ |
|
{ |
|
"first": "Etienne", |
|
"middle": [ |
|
"Denoual" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": ". ; Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Bertoldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brooke", |
|
"middle": [], |
|
"last": "Cowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wade", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Moran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual Meeting of the ACL on Interactive Poster and Demonstration Sessions. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Etienne Denoual. 2007. Analogical translation of unknown words in a statistical machine translation framework. Proceedings of Machine Translation Sum- mit XI. Copenhagen Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, et al. 2007. Moses: Open source toolkit for statistical machine translation. In Proceedings of the 45th Annual Meeting of the ACL on Interactive Poster and Demonstration Sessions. Association for Compu- tational Linguistics, pp.177-180", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Learning a classifier with very few examples: analogy based and knowledge based generation of new examples for character recognition. Machine Learning", |
|
"authors": [ |
|
{ |
|
"first": "Sabri", |
|
"middle": [], |
|
"last": "Bayoudh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harold", |
|
"middle": [], |
|
"last": "Mouch\u00e8re", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Miclet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "527--534", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sabri Bayoudh, Harold Mouch\u00e8re, Laurent Miclet and E. Anquetil. 2007. Learning a classifier with very few examples: analogy based and knowledge based gener- ation of new examples for character recognition. Ma- chine Learning: ECML 2007, pp.527-534.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Hierarchical Phrase-Based Translation with Suffix Arrays", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "976--985", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Lopez. 2007. Hierarchical Phrase-Based Transla- tion with Suffix Arrays. EMNLP-CoNLL, pp.976-985", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "A uniform approach to analogies, synonyms, antonyms, and associations", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Turney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 22nd International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "527--534", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter D Turney. 2008. A uniform approach to analo- gies, synonyms, antonyms, and associations. Proceed- ings of the 22nd International Conference on Compu- tational Linguistics (Coling 2008), pp.527-534.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Using an SVM Classifier to Improve the Extraction of Bilingual Terminology from Wikipedia. User-Contributed Knowledge and Artificial Intelligence: An Evolving Synergy", |
|
"authors": [ |
|
{ |
|
"first": "Maike", |
|
"middle": [], |
|
"last": "Erdmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kotaro", |
|
"middle": [], |
|
"last": "Nakayama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takahiro", |
|
"middle": [], |
|
"last": "Hara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shojiro", |
|
"middle": [], |
|
"last": "Nishio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maike Erdmann, Kotaro Nakayama, Takahiro Hara and Shojiro Nishio . 2009. Using an SVM Classifier to Improve the Extraction of Bilingual Terminology from Wikipedia. User-Contributed Knowledge and Artifi- cial Intelligence: An Evolving Synergy, pp.15.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "The 'purest' EBMT system ever built: no variables, no templates, no training, examples, just examples, only examples", |
|
"authors": [ |
|
{ |
|
"first": "Yves", |
|
"middle": [], |
|
"last": "Lepage", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Etienne", |
|
"middle": [], |
|
"last": "Denoual", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the MT Summit X, Second Workshop on Example-Based Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "81--90", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yves Lepage and Etienne Denoual. 2005. The 'purest' EBMT system ever built: no variables, no templates, no training, examples, just examples, only examples. Proceedings of the MT Summit X, Second Workshop on Example-Based Machine Translation, pp.81-90.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "The structure of unseen trigrams and its application to language models: A first investigation", |
|
"authors": [ |
|
{ |
|
"first": "Yves", |
|
"middle": [], |
|
"last": "Lepage", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Gosme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adrien", |
|
"middle": [], |
|
"last": "Lardilleux", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Universal Communication Symposium (IUCS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "944--952", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yves Lepage, Julien Gosme and Adrien Lardilleux. 2010. The structure of unseen trigrams and its appli- cation to language models: A first investigation. Uni- versal Communication Symposium (IUCS), 2010 4th International. IEEE. pp.944-952.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Automatic evaluation of translation quality for distant language pairs", |
|
"authors": [ |
|
{ |
|
"first": "Hideki", |
|
"middle": [], |
|
"last": "Isozaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsutomu", |
|
"middle": [], |
|
"last": "Hirao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Duh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katsuhito", |
|
"middle": [], |
|
"last": "Sudoh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hajime", |
|
"middle": [], |
|
"last": "Tsukada", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "944--952", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hideki Isozaki, Tsutomu Hirao, Kevin Duh, Katsuhito Sudoh, and Hajime Tsukada. 2010. Automatic evaluation of translation quality for distant language pairs. Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing Association for Computational Linguistics. pp.944-952.",
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Automatic Bilingual Phrase Extraction from Comparable Corpora. COLING", |
|
"authors": [ |
|
{ |
|
"first": "Ahmet", |
|
"middle": [], |
|
"last": "Aker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Gaizauskas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "23--32", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ahmet Aker, Yang Feng and Robert J. Gaizauskas. 2012. Automatic Bilingual Phrase Extraction from Comparable Corpora. COLING. pp.23-32.",
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "LIBSVM: a library for support vector machines", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "ACM transactions on intelligent systems and technology", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chang, C. C., and C. J. Lin. 2012. LIBSVM: a library for support vector machines. ACM transactions on intelligent systems and technology. 2: 27: 1-27: 27.",
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Various Alignments found in the experiment corpus, \"[X]\" stands for gaps between words.",
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"text": "View of the harmonization parallelopiped: four terms in each language form a monolingual proportional analogy. ab : b :: ac : c annual taxes : taxes :: annual statistics : statistics Pattern 3: ab : a :: db : d annual taxes : annual :: the taxes : the Pattern 4: ab : db :: ac : dc annual taxes : the taxes :: annual statistics : the statistics Pattern 5: ab : aeb :: ac : aec annual taxes :", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"text": ": B \u2032 m :: C \u2032 n : D \u2032 k Bilingual analogical reduction for the bigram from the input annual taxes (English) to the output imp\u00f4ts annuels (French), the related analogous and its translation are indicated in the figure.",
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"text": "Logic binary tree for the problem of analogy and bidirectional analogy in the source language, \"not found alignment\" means the known bigrams that have not been aligned in the training set.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF2": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Statistics on the English-French parallel corpus used for the training and test sets, it also indicates the statistics of unseen bigrams in the test set.", |
|
"num": null |
|
}, |
|
"TABREF4": { |
|
"content": "<table><tr><td/><td/><td colspan=\"2\">bigrams proportion</td></tr><tr><td>known</td><td>\u00ac found alignment found alignment</td><td>995 45,527</td><td>1.45% 66.37%</td></tr><tr><td>unseen</td><td>reconstructible unreconstructible</td><td>20,056 2,022</td><td>29.14% 2.95%</td></tr><tr><td>Total</td><td/><td>68,600</td><td>100.00%</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Statistics on the aligned and unaligned bigrams in data, it also indicates GIZA++ can not align all words in the source language after grow-diag-final-and.", |
|
"num": null |
|
}, |
|
"TABREF6": { |
|
"content": "<table><tr><td/><td/><td colspan=\"2\">reconstructible</td></tr><tr><td/><td/><td>BR</td><td>\u00acBR</td></tr><tr><td/><td colspan=\"2\">attested unattested</td><td>total</td></tr><tr><td>bigrams</td><td>7,659</td><td colspan=\"2\">10,347 18,006 2,050</td></tr><tr><td colspan=\"2\">proportion 11.16%</td><td colspan=\"2\">15.09% 26.25% 2.99%</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Samples of bigrams and related analogical templates, according (B i , B m ), (C j , C n ), (D, D ), the translation A is produced. Both positive and negative examples are presented in the table.", |
|
"num": null |
|
}, |
|
"TABREF8": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Statistics on the French monolingual corpus used as reference.", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |