|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:06:43.185030Z" |
|
}, |
|
"title": "The JHU Submission to the 2020 Duolingo Shared Task on Simultaneous Translation and Paraphrase for Language Education", |
|
"authors": [ |
|
{ |
|
"first": "Huda", |
|
"middle": [], |
|
"last": "Khayrallah", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Johns Hopkins University \u00a7 University of Maryland", |
|
"location": { |
|
"settlement": "College Park" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Bremerman", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Johns Hopkins University \u00a7 University of Maryland", |
|
"location": { |
|
"settlement": "College Park" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Arya", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Mccarthy", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Johns Hopkins University \u00a7 University of Maryland", |
|
"location": { |
|
"settlement": "College Park" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Murray", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Johns Hopkins University \u00a7 University of Maryland", |
|
"location": { |
|
"settlement": "College Park" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Winston", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Johns Hopkins University \u00a7 University of Maryland", |
|
"location": { |
|
"settlement": "College Park" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Post", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Johns Hopkins University \u00a7 University of Maryland", |
|
"location": { |
|
"settlement": "College Park" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper presents the Johns Hopkins University submission to the 2020 Duolingo Shared Task on Simultaneous Translation and Paraphrase for Language Education (STAPLE). We participated in all five language tasks, placing first in each. Our approach involved a language-agnostic pipeline of three components: (1) building strong machine translation systems on general-domain data, (2) finetuning on Duolingo-provided data, and (3) generating n-best lists which are then filtered with various score-based techniques. In addition to the language-agnostic pipeline, we attempted a number of linguistically-motivated approaches, with, unfortunately, little success. We also find that improving BLEU performance of the beam-search generated translation does not necessarily improve on the task metric-weighted macro F1 of an n-best list.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper presents the Johns Hopkins University submission to the 2020 Duolingo Shared Task on Simultaneous Translation and Paraphrase for Language Education (STAPLE). We participated in all five language tasks, placing first in each. Our approach involved a language-agnostic pipeline of three components: (1) building strong machine translation systems on general-domain data, (2) finetuning on Duolingo-provided data, and (3) generating n-best lists which are then filtered with various score-based techniques. In addition to the language-agnostic pipeline, we attempted a number of linguistically-motivated approaches, with, unfortunately, little success. We also find that improving BLEU performance of the beam-search generated translation does not necessarily improve on the task metric-weighted macro F1 of an n-best list.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The Duolingo 2020 STAPLE Shared Task (Mayhew et al., 2020) focuses on generating a comprehensive set of translations for a given sentence, translating from English into Hungarian, Japanese, Korean, Portuguese, and Vietnamese. The formulation of this task ( \u00a72) differs from the conventional machine translation setup: instead of the n-gram match (BLEU) against a single reference, sentence-level exact match is computed between a list of proposed candidates and a weighted list of references (as in Figure 1 ). The set of references is drawn from Duolingo's language-teaching app. Any auxiliary data is allowed for building systems, including existing very-large parallel corpora for translation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 58, |
|
"text": "(Mayhew et al., 2020)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 499, |
|
"end": 507, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our approach begins with strong MT systems ( \u00a73) which are fine-tuned on Duolingo-provided data ( \u00a74). We then generate large n-best lists, from which we select our final candidate list ( \u00a75). Our Figure 1: An example English source sentence with its weighted Portuguese target translations. The objective of the task is to recover the list of references, and performance is measured by a weighted F-score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "entries outperform baseline weighted F1 scores by a factor of 2 to 10 and are ranked first in the official evaluation for every language pair ( \u00a76.2). In addition to our system description, we perform additional analysis ( \u00a77). We find that stronger BLEU performance of the beam-search generated translation is not indicative of improvements on the task metric-weighted macro F1 of a set of hypotheses-and suggest this should encourage further research on how to train NMT models when n-best lists are needed ( \u00a77.1). We perform detailed analysis on our output ( \u00a77.2), which led to additional development on English-Portuguese ( \u00a78.1). We also present additional linguistically-informed methods which we experimented with but which ultimately did not improve performance ( \u00a78).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Data We use data provided by the STAPLE shared task (Mayhew et al., 2020) . This data consists of a single English prompt sentence or phrase paired with multiple translations in the target lan-hu ja ko pt vi total prompts 4,000 2,500 2,500 4,000 3,500 mean translations 63 342 280 132 56 median translations 36 192 154 68 30 STD. translations 66 362 311 150 62 Table 1 : Statistics over the Duolingo-provided data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 73, |
|
"text": "(Mayhew et al., 2020)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 246, |
|
"end": 381, |
|
"text": "3,500 mean translations 63 342 280 132 56 median translations 36 192 154 68 30 STD. translations 66 362 311 150 62 Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "guage. These translations come from courses intended to teach English to speakers of other languages; the references are initially generated by trained translators, and augmented by verified user translations. Each translation is associated with a relative frequency denoting how often it is selected by Duolingo users. Table 1 shows the total number of prompts provided as well as the mean, median, and standard deviation of the number of translations per training prompt. All of the provided task data is lower-cased. For each language pair, we created an internal split of the Duolingo-provided training data: 100 training prompts for use in validating the MT system (JHU-VALID), another 100 intended for model selection (JHU-DEV), 1 and a 300-prompt test set for candidate selection (JHU-TEST). The remaining data (JHU-TRAIN) was used for training the MT models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 320, |
|
"end": 327, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The official metric is weighted macro F 1 . This is defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation metric", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Weighted Macro F 1 = s\u2208S Weighted F 1 (s) |S| ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation metric", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where S is all prompts in the test corpus. The weighted F1 is computed with a weighted recall, where TP s are the true positives for a prompt s, and FN s are the false negatives for a prompt s:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation metric", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "WTP s = t\u2208TP s weight(t) WFN s = t\u2208FN s weight(t) Weighted Recall(s) = WTP s WTP s + WFN s .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation metric", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that recall is weighted (according to weights provided with the gold data), but precision is not. Evaluation is conducted on lowercased text with the punctuation removed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation metric", |
|
"sec_num": null |
|
}, |
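
{

"text": "To make the metric concrete, the following is a minimal sketch of the scoring in Python (not the official scorer; it assumes hypotheses and references are already lowercased with punctuation removed):\n\ndef weighted_f1(hypotheses, gold):\n    # hypotheses: set of candidate strings for one prompt\n    # gold: dict mapping each reference string to its weight\n    tp = {t for t in hypotheses if t in gold}\n    precision = len(tp) / len(hypotheses) if hypotheses else 0.0\n    wtp = sum(gold[t] for t in tp)  # weighted true positives\n    wfn = sum(w for t, w in gold.items() if t not in hypotheses)  # weighted false negatives\n    recall = wtp / (wtp + wfn) if wtp + wfn else 0.0  # weighted recall\n    if precision + recall == 0.0:\n        return 0.0\n    return 2 * precision * recall / (precision + recall)\n\ndef weighted_macro_f1(corpus):\n    # corpus: list of (hypotheses, gold) pairs, one per prompt s in S\n    return sum(weighted_f1(h, g) for h, g in corpus) / len(corpus)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation metric",

"sec_num": null

},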
|
{ |
|
"text": "Additional data for our systems was obtained from Opus (Tiedemann, 2012) . 2 We removed duplicate bitext pairs, then reserved 3k random pairs from each dataset to create a validation, development, and test sets of 1k sentence each. The validation dataset is used as held-out data to determine when to stop training the MT system. 3 Table 2 shows the amount of training data used from each source.", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 72, |
|
"text": "(Tiedemann, 2012)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 75, |
|
"end": 76, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 332, |
|
"end": 339, |
|
"text": "Table 2", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data and preprocessing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The Duolingo data (including the evaluation data) is all lowercased. Since our approach is to overgenerate candidates and filter, we want to avoid glutting the decoder beam with spurious cased variants. For this reason, we lowercase all text on both the source and (where relevant) target sides prior to training. However, it is worth noting that this has a drawback, as source case can provide a signal towards meaning and word-sense disambiguation (e.g., apple versus Apple).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and preprocessing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "After lowercasing, we train separate Sentence-Piece models (Kudo and Richardson, 2018) on the source and target sides of the bitext, for each language. We train a regularized unigram model (Kudo, 2018) with a vocabulary size of 5,000 and a character coverage of 0.995. When applying the model, we set \u03b1 = 0.5. No other preprocessing was applied.", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 86, |
|
"text": "(Kudo and Richardson, 2018)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 201, |
|
"text": "(Kudo, 2018)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and preprocessing", |
|
"sec_num": null |
|
}, |
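
{

"text": "As an illustration, this preprocessing can be reproduced with the SentencePiece Python bindings (file names here are placeholders; one model is trained per side and per language):\n\nimport sentencepiece as spm\n\nfor side in ('train.lc.en', 'train.lc.pt'):  # lowercased source / target text\n    spm.SentencePieceTrainer.train(\n        input=side,\n        model_prefix=side,\n        model_type='unigram',\n        vocab_size=5000,\n        character_coverage=0.995,\n    )\n\n# subword regularization when applying the model (alpha = 0.5)\nsp = spm.SentencePieceProcessor(model_file='train.lc.en.model')\npieces = sp.encode('she has bread', out_type=str,\n                   enable_sampling=True, alpha=0.5, nbest_size=-1)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data and preprocessing",

"sec_num": null

},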
|
{ |
|
"text": "We used fairseq (Ott et al., 2019) to train standard Transformer (Vaswani et al., 2017 ) models with 6 encoder and decoder layers, a model size of 512, feed forward layer size of 2048, and 8 attention heads, and a dropout of 0.1. We used an effective batch size of 200k tokens. 4 We concatenated the development data across test sets, and quit training when validation perplexity had failed to improve for 10 consecutive checkpoints.", |
|
"cite_spans": [ |
|
{ |
|
"start": 16, |
|
"end": 34, |
|
"text": "(Ott et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 65, |
|
"end": 86, |
|
"text": "(Vaswani et al., 2017", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 278, |
|
"end": 279, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation models", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We trained two sets of models: MODEL1 was trained on just the data above the line in (Lison and Tiedemann, 2016) 252,622k 13,097k 8,840k 196,960k 20,298k Tatoeba (tatoeba.org) 580k 1,537k -1,215k 16k WikiMatrix (Schwenk et al., 2019) 5,682k 9,013k 2,598k 45,147k 17,427k JW300 (Agi\u0107 and Vuli\u0107, 2019) 19,378k 34,325k 32,356k 39,023k 11,233k QED (Abdelali et al., 2014) 5,693k 9,064k 9,992k 8,542k 5,482k ", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 112, |
|
"text": "(Lison and Tiedemann, 2016)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 233, |
|
"text": "(Schwenk et al., 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 277, |
|
"end": 299, |
|
"text": "(Agi\u0107 and Vuli\u0107, 2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 367, |
|
"text": "(Abdelali et al., 2014)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation models", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "After training general-domain machine translation models, we fine-tune them on the Duolingo data. 5 The Duolingo data pairs single prompts with up to hundreds of weighted translations; we turned this into bitext in three ways:", |
|
"cite_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 99, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fine-Tuning", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 1-best: the best translation per prompt.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fine-Tuning", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 all: each translation paired with its prompt.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fine-Tuning", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 up-weighted: all possible translations with an additional 1, 9, or 99 copies of the 1best translation (giving the 1-best translation a weight of 2x, 10x, or 100x the others). 6", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fine-Tuning", |
|
"sec_num": "4" |
|
}, |
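
{

"text": "A minimal sketch (with illustrative data structures) of building the three bitext variants from one prompt and its weight-sorted translations:\n\ndef make_bitext(prompt, translations, mode, extra_copies=9):\n    # translations: list of (target, weight) pairs, sorted by weight descending\n    # mode: '1best', 'all', or 'upweighted'\n    best = translations[0][0]\n    if mode == '1best':\n        return [(prompt, best)]\n    pairs = [(prompt, t) for t, _ in translations]\n    if mode == 'upweighted':\n        # add 1, 9, or 99 extra copies of the 1-best translation,\n        # giving it 2x, 10x, or 100x the weight of the others\n        pairs += [(prompt, best)] * extra_copies\n    return pairs",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Fine-Tuning",

"sec_num": "4"

},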
|
{ |
|
"text": "We fine-tune with dropout of 0.1, and an effective batch size of 160k tokens. We sweep learning rates of 1 \u00d7 10 \u22124 and 5 \u00d7 10 \u22124 . We withhold a relatively high percentage of the Duolingo training data for internal development (500 prompts total, which ranged from to 12.5 to 20% of the provided data), so we also train systems using all the released data (with none withheld), taking hyperparameters learned from our splits (number of fine-tuning epochs, candidate selection parameters, etc).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fine-Tuning", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "From the models trained on general-domain data ( \u00a73) and refined on in-domain data ( \u00a74), we generate 1,000-best translations. For each translation, fairseq provides word-level and length-normalized log-probability scores, which all serve as grist for the next stage of our pipeline: candidate selection. 5 Training on the Duolingo data directly was less effective. 6 A better method might be to train using the weights to weight the sentences in training as available in Marian (Junczys-Dowmunt et al., 2018) but that was not available in fairseq, so we improvised.", |
|
"cite_spans": [ |
|
{ |
|
"start": 305, |
|
"end": 306, |
|
"text": "5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 366, |
|
"end": 367, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Candidate Generation and Selection", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For Portuguese only, we experimented with ensembling multiple fine-tuned models in two ways: (a) using models from different random seeds, and (b) using different types of systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensembling", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "As a baseline, we extract hypotheses from the n-best list using the provided my_cands_extract.py script. 7 which simply extracts the same number of hypotheses, k, per prompt. To determine how many hypotheses to retain from the model's n-best list, we conduct a sweep over k on JHU-TEST and select the best k per language pair based on weighted macro F1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 106, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Selecting top k hypotheses", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We propose to use the log probability scores directly and choose a cutoff point based on the top score for each prompt.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probability score thresholding", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We consider a multiplicative threshold on the probabilities of the hypothesis, relative to the best hypothesis. For example, if the threshold value is \u22120.40, for a prompt where the top hypothesis logprobability is \u22121.20, any hypothesis from the top 1000 with a log-probability greater than or equal to \u22121.60 will be selected. 8 As in \u00a75.2, we sweep over this threshold value for each language pair and choose the value that results in the highest weighted macro F1 score from JHU-TEST. Table 4 : The weighted macro F1 on JHU-TEST for MODEL2 and fine-tuned variants for Japanese and Korean. Candidates are extracted from the n-best list using the proposed probability score thresholding ( \u00a75.3).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 486, |
|
"end": 493, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Probability score thresholding", |
|
"sec_num": "5.3" |
|
}, |
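
{

"text": "A minimal sketch of this selection rule (a multiplicative threshold on probabilities is an additive offset on log-probabilities):\n\ndef select_by_threshold(scored_hyps, threshold=-0.40):\n    # scored_hyps: list of (hypothesis, log_prob) pairs from the n-best list\n    best = max(lp for _, lp in scored_hyps)\n    cutoff = best + threshold  # e.g. -1.20 + (-0.40) = -1.60\n    return [h for h, lp in scored_hyps if lp >= cutoff]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Probability score thresholding",

"sec_num": "5.3"

},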
|
{ |
|
"text": "We present results of our different methods on our internal development set in \u00a76.1 and present our official evaluation performance in \u00a76.2. Table 3 shows the weighted macro F1 performance on JHU-TEST for MODEL1 and fine-tuned variants. Candidates are extracted from the n-best list using the proposed probability score thresholding ( \u00a75.3). Fine-tuning improves performance (except for fine-tuning on just the 1-best translation in Hungarian). For all language pairs, the best finetuning performance came from training on the upweighted training data, where we trained on all possible translations with the 1-best up-weighted 10 times. For Japanese and Korean 9 MODEL2 (Table 4), all types of fine-tuning improve weighted F1, but for both language pairs, the best finetuning variant matches that of MODEL1. Table 5 shows the weighted macro F1 on JHU-TEST for two methods of selecting candidates from the n-best list. The first line is the baseline top k hypothesis selection ( \u00a75.2), the second is our pro-posed probability score thresholding ( \u00a75.3). The best fine-tuned system is shown with each selection method for each language pair. The proposed probability score thresholding improves performance over the baseline top k candidate selection by 2-3.3 F1 points.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 148, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 808, |
|
"end": 815, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In Table 6 , we present the final results of our submission on the official test set (DUO-TEST). Our systems ranked first in all language pairs, with improvements of 0.1 to 9.2 over the next best teams. We denote in parenthesis the improvement over the next best team's system on DUO-TEST. We also report the score that our system achieved on our internal test set (JHU-TEST).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 6", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Official evaluation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "For Hungarian and Vietnamese, our winning submission was MODEL1 fine-tuned on the upweighted Duolingo data (1-best repeated 10x) with a learning rate of 1 \u00d7 10 \u22124 . For Japanese, our winning submission was MODEL2 fine-tuned on the up-weighted Duolingo data (1-best repeated 10x) with a learning rate of 5 \u00d7 10 \u22124 . For Korean, our winning submission was MODEL2 fine-tuned on the up-weighted Duolingo data (1-best repeated 10x) with a learning rate of 1 \u00d7 10 \u22124 , but without en \u2192 x hu ja ko pt vi top k hypothesis selection ( \u00a75.2) 54.6 29.5 35.6 50.0 51.0 Probability score thresholding ( \u00a75.3) 56.6 32.1 38.8 52.8 54.3 Table 5 : The weighted macro F1 on JHU-TEST for two methods of selecting candidates from the n-best list: baseline top k hypothesis selected (discussed in \u00a75.2), and our proposed probability score thresholding ( \u00a75.3). The best finetuned system is shown with each selection method for each language pair. any internal development data withheld. 10 For Portuguese, our winning submission was an ensemble of 3 systems. We began with MODEL1 fine-tuned on the up-weighted Duolingo data with a learning rate of 1 \u00d7 10 \u22124 . We used fairseq's default ensembling to ensemble 3 systems trained on all the translations of each Duolingo prompt, with the 1-best data repeated a total of 2x, 10x, and 100x for each system. While we submitted slightly different systems for each language pair, the following worked well overall: Fine-tuning on the Duolingo data was crucial. This is a domain adaptation taskthe Duolingo data differs greatly from the standard MT bitext we pretrain on, such as Europarl proceedings, GlobalVoices news, Subtitles, or Wikipedia text. 11 Taking advantage of the relative weights of the training translations and upweighting the best one was also helpful across the board. We suspect that using the weights in training directly (as opposed to our hack of upweight- 10 As described in \u00a74, we first fine-tune a system and use our internal splits for model selection from checkpoints and threshold selection. Then we apply all the same parameters to fine-tune a system with no data withheld. This was better than with holding data only for en-ko (on DUO-DEV). Since this en-ko system was trained on JHU-TEST, Table 6 reports the JHU-TEST results on the corresponding system that withheld that data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1900, |
|
"end": 1902, |
|
"text": "10", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 621, |
|
"end": 628, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2241, |
|
"end": 2248, |
|
"text": "Table 6", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Official evaluation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "11 In addition to style differences, the Duolingo sentences are much shorter on average. ing the best translation) would likely improve performance further. 12", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Official evaluation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We perform qualitative and quantitative analyses of our output, which informed our own work and will motivate future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In Figure 2 , we plot macro weighted F1 on JHU-TEST against BLEU score 13 on JHU-DEV for finetuned systems for each language. It is clear that this BLEU score did not identify the best performing system according to the macro weighted F1 metric. For example, performance on beam search BLEU could be improved by further finetuning systems that had already been fine-tuned on all translations of each prompt on just the 1best translation of each prompt, but that degraded the task performance. In fact, the systems that performed best on macro weighted F1 in Hungarian and Korean were over 20 BLEU behind the highest BLEU score for those languages (and the top BLEU scoring systems did poorly on the task metric).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "BLEU vs. Macro Weighted F1", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "While this phenomenon may be an artifact of these particular metrics, we suspect this is indicative of an interesting topic for further research. MT models trained with NLL are trained to match a 1hot prediction, which may make their output distributions poorly calibrated (Ott et al., 2018; Kumar and Sarawagi, 2019; Desai and Durrett, 2020) . More research is needed for strong conclusions, but our initial analysis suggests that training on the more diverse data improves quality of a deep nbest list of translations at the expense of the top beam search output. This may be important in cases where an n-best list of translations is being generated for a downstream NLP task. The data for this task was unique in that it provided diverse translations for a given prompt. In most cases where this type of data is not available, training towards a distribution (rather than a single target word), as is done in word-level knowledge distillation (Buciluundefined et al., 2006; Hinton et al., 2015; Kim and Rush, 2016) may prove useful to introduce the diversity needed for a strong n-best list of translations. This can be done either towards a distribution of the base model when fine-tuning (Dakwale and Monz, 2017; Khayrallah et al., 2018) or towards the distribution of an auxiliary model, such as a paraphraser (Khayrallah et al., 2020).", |
|
"cite_spans": [ |
|
{ |
|
"start": 273, |
|
"end": 291, |
|
"text": "(Ott et al., 2018;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 292, |
|
"end": 317, |
|
"text": "Kumar and Sarawagi, 2019;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 318, |
|
"end": 342, |
|
"text": "Desai and Durrett, 2020)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 947, |
|
"end": 977, |
|
"text": "(Buciluundefined et al., 2006;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 978, |
|
"end": 998, |
|
"text": "Hinton et al., 2015;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 999, |
|
"end": 1018, |
|
"text": "Kim and Rush, 2016)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1194, |
|
"end": 1218, |
|
"text": "(Dakwale and Monz, 2017;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1219, |
|
"end": 1243, |
|
"text": "Khayrallah et al., 2018)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BLEU vs. Macro Weighted F1", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "In each language, we performed a qualitative error analysis by manually inspecting the difference between the gold and system translations for prompts with lowest weighted recall on JHU-TEST.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative error analysis", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "Our systems were often incapable of expressing target language nuance absent from the source language. For example, for the prompt \"we have asked many times.\", a gold translation was '\u79c1\u305f \u3061\u306f\u4f55\u5ea6\u3082\u5c0b\u306d\u3066\u3057\u307e\u3063\u305f' whereas our system output '\u79c1\u305f\u3061\u306f\u4f55\u5ea6\u3082\u5c0b\u306d\u307e\u3057\u305f'. The gold translations often included the \u3066\u3057\u307e\u3063\u305f verb ending, which conveys a nuance similar to perfect aspect. The prompt's scenario would lead many Japanese users to use this nuanced ending when translating, but our system produces valid but less natural translations that do not appear in the references.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative error analysis", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "Another issue is vocabulary choice on a more general level. Often there are several ways to translate certain words or phrases, but our systems prefer the less common version. For example, a com-mon translation of 'please' in Portuguese is 'por favor', which appears in the high-weighted gold translations. Another possible translation, 'por obs\u00e9quio', which our system seemed to prefer, appears in much lower-weighted translations. Another example is the translation of 'battery' in Korean. The high-weighted references include the common word for battery ('\u1100 \u1165 \u11ab \u110c \u1165 \u11ab \u110c \u1175') but only lower-weighted references include '\u1107 \u1162\u1110 \u1165\u1105 \u1175', which was preferred by our system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative error analysis", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "Our system also struggled with polysemous prompt words. For example, for the prompt \"cups are better than glasses.\", our system output translations like '\u110f \u1165 \u11b8\u110b \u1175 \u110b \u1161 \u11ab\u1100 \u1167 \u11bc\u1103 \u1173 \u11af\u1107 \u1169\u1103 \u1161 \u1102 \u1161 \u11ba\u1103 \u1161' , using \u110b \u1161 \u11ab \u1100 \u1167 \u11bc (eyeglasses), instead of translations like '\u110f \u1165 \u11b8 \u110b \u1175 \u110b \u1172\u1105 \u1175\u110c \u1161 \u11ab\u1107 \u1169\u1103 \u1161 \u1102 \u1161 \u11ba\u1103 \u1161' , using \u110b \u1172\u1105 \u1175\u110c \u1161 \u11ab (drinking glasses). The systems seem to be incapable of considering the context, \"cups\" in this case, for the ambiguity resolution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative error analysis", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "A final class of our system's errors is grammatical errors. For example, for the prompt \"every night, the little sheep dreams about surfing.\", the gold translations included sentences like 'toda noite a pequena ovelha sonha com surfe' whereas our system output sentences like 'toda noite as ovelhas pequenas sonham com surfe'. The error was that our output included 'ovelhas' (plural sheep), but the gold translations all used 'ovelha' (single sheep).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative error analysis", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "We also find cases where our system produces valid translations but is penalized because these are not among the gold translations. We consider these cases as a result of an \"incomplete\" gold set with missing paradigms. 14 For example, the Vietnamese pronouns for 'he' and 'she' can vary according to age (in relation to the speaker). From youngest to oldest, some pronouns for 'she' are 'ch\u1ecb \u1ea5y', 'c\u00f4 \u1ea5y', and 'b\u00e0 \u1ea5y'. For several of the prompts, the gold outputs only include some of these pronouns despite all being valid. In the prompt \"she has bread\", only the first two pronouns are present even though a translation representing the sentence as an older woman having bread should be equally valid. We also find this missing pronoun slot problem in Portuguese (references only using 'voc\u00ea' and not 'tu' for translations of 'you') and Japanese (only using '\u3042\u306a \u305f' and not '\u541b' for translations of 'you').", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Missing paradigm slots in Duolingo data", |
|
"sec_num": "7.3" |
|
}, |
|
{ |
|
"text": "We could not easily predict when slots would be missing. Because the data comes from Duolingo courses, we believe this may depend on the prompt's depth in the learning tree. As earlier lessons are studied by more users, we suspect they are also more likely to contain more complete gold translation sets due to more users submitting additional valid translations. This makes it difficult to assess the success of our models and distinguish \"true errors\" from valid hypotheses that are marked incorrect.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Missing paradigm slots in Duolingo data", |
|
"sec_num": "7.3" |
|
}, |
|
{ |
|
"text": "We explored additional methods both for selecting candidates from an n-best lists and for generating additional candidates based on an n-best list. While they did not improve performance and were not included in our final submission, we discuss the methods and the analyses learned from them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "What Didn't Work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Our error analysis revealed that our systems often output sentences that were not incorrect, but not optimized for the Duolingo task. For example, many of our top candidates for translations of \"please\" in Portuguese used por obs\u00e9quio, which is a very formal version, instead of the more common por favor. While both versions were valid for the prompts, the gold translations with por favor were weighted higher, so we would desire models to prefer this translation. We interpret this as domain mismatch between the STAPLE data and our MT training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Moore-Lewis filtering", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "To filter out such bad candidates, we experimented with cross-entropy language model filtering (Moore and Lewis, 2010). This takes two language models: a (generally large) out-of-domain language model (OD), and a (typically small) indomain language model (ID), and uses the difference in normalized cross-entropy from these two models to score sentences. Sentences with good OD scores and poor ID scores are likely out-ofdomain and can be discarded based on a score threshold.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Moore-Lewis filtering", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "Experimenting on Portuguese, we used KenLM (Heafield, 2011) to train a Kneser-Ney-smoothed 5-gram model on the Portuguese side of the MT training data (Table 2 ) as the OD model and a 3-gram model on the Duolingo Portuguese data (ID). These were used to score all candidates t as ", |
|
"cite_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 59, |
|
"text": "(Heafield, 2011)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 151, |
|
"end": 159, |
|
"text": "(Table 2", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Moore-Lewis filtering", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "score(t) = p ID (t) \u2212 p OD (t).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Moore-Lewis filtering", |
|
"sec_num": "8.1" |
|
}, |
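
{

"text": "A minimal sketch of this scoring with the kenlm Python bindings (model paths are illustrative; kenlm reports log10 probabilities, which we length-normalize per word plus the end-of-sentence token):\n\nimport kenlm\n\nid_lm = kenlm.Model('duolingo.pt.3gram.arpa')  # in-domain (ID)\nod_lm = kenlm.Model('opus.pt.5gram.arpa')      # out-of-domain (OD)\n\ndef moore_lewis_score(sentence):\n    n = len(sentence.split()) + 1  # +1 for the end-of-sentence token\n    return (id_lm.score(sentence) - od_lm.score(sentence)) / n",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Moore-Lewis filtering",

"sec_num": "8.1"

},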
|
{ |
|
"text": "We swept thresholds and minimum prompt lengths on our JHU-TEST data, and found with a threshold of \u22121.50 on 7word prompts and longer performed the best. Moore-Lewis filtering was originally designed for more coarse-grained selection of training data. We suspect (but did not have time to test) that a better idea is therefore to apply this upstream, using it to help select data used to train the generaldomain MT system (Axelrod et al., 2011) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 421, |
|
"end": 443, |
|
"text": "(Axelrod et al., 2011)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Moore-Lewis filtering", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "Extending the probability score thresholding ( \u00a75.3), we consider incorporating a score from a reverse model that represents the probability that the original prompt was generated by the candidate. The reverse model score is also used in Dual Conditional Cross-Entropy Filtering when selecting clean data from noisy corpora (Junczys-Dowmunt, 2018), and for re-scoring n-best lists in MMI decoding (Li et al., 2016) We train base and fine-tuned reverse systems for the five language pairs and use them to score the output translations. We compute the combined score of a hypothesis given a prompt as the arithmetic mean of the forward and backward log probability scores and use them in the probability score thresholding algorithm from \u00a75.3. We find that after sweeping across threshold values, incorporating the reverse score performs slightly worse overall than the standard thresholding method for every language.", |
|
"cite_spans": [ |
|
{ |
|
"start": 397, |
|
"end": 414, |
|
"text": "(Li et al., 2016)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dual conditional thresholding", |
|
"sec_num": "8.2" |
|
}, |
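
{

"text": "A minimal sketch of the combined score, which then replaces the forward score in the thresholding rule of \u00a75.3:\n\ndef combined_score(fwd_logprob, rev_logprob):\n    # forward: log p(translation | prompt); reverse: log p(prompt | translation)\n    return 0.5 * (fwd_logprob + rev_logprob)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dual conditional thresholding",

"sec_num": "8.2"

},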
|
{ |
|
"text": "The Duolingo data generally consists of simple language, which means we did not expect to see novel phrases in the references that were not in our training corpora. We used this idea to filter hypotheses that had any n-grams that didn't appear in our training data. Our hope was that this would catch rare formulations or ungrammatical sentences, e.g. cachorro preta, which has the wrong gender on the adjective. However, even using bigrams caused this method to filter out too many hypotheses and hurt F1 performance. Part-of-speech filtering Although the language used in Duolingo is relatively simple, the number of unique types turned out to be quite large. However the number part-of-speech (POS) tags is small. Instead of filtering based on words, we count n-grams of POS tags, hoping to remove ungrammatical sentences with tags such as DET DET.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "N-gram filtering", |
|
"sec_num": "8.3" |
|
}, |
|
{ |
|
"text": "In our experiments, this did not actually exclude any hypotheses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "N-gram filtering", |
|
"sec_num": "8.3" |
|
}, |
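
{

"text": "A minimal sketch of this family of filters, shown here for the lexical bigram variant (the POS variant runs the same check over tag sequences):\n\ndef ngrams(tokens, n):\n    return zip(*(tokens[i:] for i in range(n)))\n\ndef build_seen(training_sentences, n=2):\n    seen = set()\n    for sent in training_sentences:\n        seen.update(ngrams(sent.split(), n))\n    return seen\n\ndef keep(hypothesis, seen, n=2):\n    # drop the hypothesis if it contains any n-gram unseen in training\n    return all(g in seen for g in ngrams(hypothesis.split(), n))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "N-gram filtering",

"sec_num": "8.3"

},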
|
{ |
|
"text": "In between the extremes of large number of types using raw lexical forms and few types using POS tags is to leverage open class words or additional morphological information. We morphologically tag the dataset with the Stanford NLP toolkit (Qi et al., 2018) , then represent each sentence either by its words, its POS tags, its morphological tags, or words for closed-class items and tags for openclass items, as shown in Table 8 . This too resulted in few hypotheses being filtered and did not impact F1 performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 240, |
|
"end": 257, |
|
"text": "(Qi et al., 2018)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 422, |
|
"end": 429, |
|
"text": "Table 8", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Open class words and morphology", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As the Duolingo data was generated by language learners, we also considered filtering sentences by the difficulty of the words within. Experimenting with Japanese, we examined the grade level of kanji 15 in each sentence. Ignoring non-kanji characters, the average grade level per sentence on the STAPLE training data was 3.77, indicating a 3 rd -4 th grade level. Future work could consider filtering by other measures such as the coreness of a word (Wu et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 451, |
|
"end": 468, |
|
"text": "(Wu et al., 2020)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Filtering by difficulty level", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Inspired by query expansion in information retrieval, we post-edit either by consider morphological variants in situations of underspecification, substituting forms in different scripts (for Japanese), or replacing long-form number names with numerals. We found these ineffective because Morphological expansions English is morphologically poorer than 4 target languages. As an example, the English word 'you' may be translated into Portuguese as 'tu ', 'voc\u00ea', 'voc\u00eas', or 'v\u00f3s', to consider only nominative forms. We can thus generate three additional candidates by altering the morphosyntax (and maintaining grammatical concord) while keeping the meaning intact. Evaluating in Portuguese and Vietnamese, we find that this is ineffective (see \u00a77.3). Consider Vietnamese. It is a morphologically isolating and zero-marking language, so concord between constituents is not overtly marked. This leaves us fairly free to swap out morphological variants of pronouns: there may be difference in age, connotation, or register, but the overt semantics of the English prompt are preserved. All swapping transformations in Table 9 give poorer performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 451, |
|
"end": 480, |
|
"text": "', 'voc\u00ea', 'voc\u00eas', or 'v\u00f3s',", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1115, |
|
"end": 1122, |
|
"text": "Table 9", |
|
"ref_id": "TABREF11" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Generation via post-editing", |
|
"sec_num": "8.4" |
|
}, |
|
{ |
|
"text": "Hiragana replacement Japanese has three different writing systems-hiragana, katakana, and kanji-and sometimes a word written in kanji is considered an acceptable translation when written in hiragana. For example, the Japanese word for \"child\" is \u5b50\u4f9b when written with kanji, but an acceptable alternative is the hiragana \u3053\u3069\u3082 . We experiment with expanding translation candidates by replacing Japanese kanji with pronunciations from a furigana (hiragana pronunciation) dictionary but this method did not improve performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation via post-editing", |
|
"sec_num": "8.4" |
|
}, |
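
{

"text": "A minimal sketch of this expansion, assuming a furigana dictionary that maps kanji forms to hiragana readings (the single entry here is illustrative):\n\nfurigana = {'\u5b50\u4f9b': '\u3053\u3069\u3082'}  # kanji -> hiragana for 'child'\n\ndef expand_hiragana(candidate):\n    variants = {candidate}\n    for kanji, kana in furigana.items():\n        variants |= {v.replace(kanji, kana) for v in variants}\n    return variants",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Generation via post-editing",

"sec_num": "8.4"

},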
|
{ |
|
"text": "Numeral replacement For sentences containing numbers, the list of accepted translations often contains Arabic numbers, in addition to numbers in the native language. For example, 'o senhor smith vir\u00e1 no dia dez de julho' and 'o senhor smith vir\u00e1 no dia 10 de julho.' are both gold translations of \"mr. smith will come on july tenth.\" We experiment with replacing native numbers with Arabic numerals in Japanese, Portuguese, and Vietnamese. This did not improve weighted F1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation via post-editing", |
|
"sec_num": "8.4" |
|
}, |
|
{ |
|
"text": "Our approach was general, borrowing from best practices in machine translation. We built large, general-domain MT systems that were then finetuned on in-domain data. We then followed an \"overgenerate and filter\" approach that made effective use of the scores from the systems to find a per-prompt truncation of large n-best lists produced from these systems. These techniques performed very well, ranking first in all five language pairs. We expect that further refinement and exploration of standard MT techniques-as well as techniques that we were unsuccessful with ( \u00a78)would bring further improvements that would accrue generally across languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "At the same time, the Duolingo shared task is distinct from machine translation in subtle but important ways: presenting simpler, shorter sentences and a 0-1 objective. While we were not able to get additional gains from linguistic insights, we don't see these failures as conclusive indictments of those techniques, but instead as invitations to look deeper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "However, we discovered that BLEU did not correlate well enough with task performance to be used for this. See \u00a77.1 for more analysis and discussion.3 Machine Translation SystemsWe began by building high-quality state-of-the-art machine translation systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "opus.nlpl.eu 3 The other two were reserved for unanticipated use cases that never materialized.4 (batch size 4000) \u00d7 (2 GPUs) \u00d7 (update interval 25)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "github.com/duolingo/ duolingo-sharedtask-2020/blob/ 626239b78621af96fbb324e678cca17b3dd4e470/ my_cands_extract.py 8 In other words, we set a threshold of exp{\u22120.40} on the likelihood ratio.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "These were the two languages where MODEL2 improved fine-tuning performance compared to MODEL1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This feature does exist in Marian (Junczys-Dowmunt et al., 2018) but not in Fairseq.13 Computed against the 1-best translation of each prompt.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The task website notes this phenomenon. It calls the set of targets 'comprehensive', though not 'exhaustive'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank Najoung Kim for remarks on Korean, An Nguyen for remarks on Vietnamese and Vinicius C. Costa for remarks on Portuguese, and Doug Oard for general advice.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The AMARA corpus: Building parallel language resources for the educational domain", |
|
"authors": [ |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Abdelali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hassan", |
|
"middle": [], |
|
"last": "Sajjad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Vogel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1856--1862", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ahmed Abdelali, Francisco Guzman, Hassan Sajjad, and Stephan Vogel. 2014. The AMARA corpus: Building parallel language resources for the educa- tional domain. In Proceedings of the Ninth Interna- tional Conference on Language Resources and Eval- uation (LREC'14), pages 1856-1862, Reykjavik, Iceland. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "JW300: A widecoverage parallel corpus for low-resource languages", |
|
"authors": [ |
|
{ |
|
"first": "Zeljko", |
|
"middle": [], |
|
"last": "Agi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3204--3210", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1310" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zeljko Agi\u0107 and Ivan Vuli\u0107. 2019. JW300: A wide- coverage parallel corpus for low-resource languages. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3204-3210, Florence, Italy. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Domain adaptation via pseudo in-domain data selection", |
|
"authors": [ |
|
{ |
|
"first": "Amittai", |
|
"middle": [], |
|
"last": "Axelrod", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "355--362", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amittai Axelrod, Xiaodong He, and Jianfeng Gao. 2011. Domain adaptation via pseudo in-domain data selection. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Process- ing, pages 355-362, Edinburgh, Scotland, UK. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Model compression", |
|
"authors": [ |
|
{ |
|
"first": "Cristian", |
|
"middle": [], |
|
"last": "Buciluundefined", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Caruana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandru", |
|
"middle": [], |
|
"last": "Niculescu-Mizil", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 12th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD '06", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "535--541", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/1150402.1150464" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cristian Buciluundefined, Rich Caruana, and Alexan- dru Niculescu-Mizil. 2006. Model compression. In Proceedings of the 12th ACM SIGKDD Inter- national Conference on Knowledge Discovery and Data Mining, KDD '06, page 535-541, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Finetuning for neural machine translation with limited degradation across in-and out-of-domain data", |
|
"authors": [ |
|
{ |
|
"first": "Praveen", |
|
"middle": [], |
|
"last": "Dakwale", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christof", |
|
"middle": [], |
|
"last": "Monz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 16th Machine Translation Summit", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "156--169", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Praveen Dakwale and Christof Monz. 2017. Fine- tuning for neural machine translation with limited degradation across in-and out-of-domain data. In Proceedings of the 16th Machine Translation Sum- mit (MT-Summit 2017), pages 156-169.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Calibration of pre-trained transformers", |
|
"authors": [ |
|
{ |
|
"first": "Shrey", |
|
"middle": [], |
|
"last": "Desai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Durrett", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shrey Desai and Greg Durrett. 2020. Calibration of pre-trained transformers.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "KenLM: Faster and smaller language model queries", |
|
"authors": [ |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the Sixth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "187--197", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenneth Heafield. 2011. KenLM: Faster and smaller language model queries. In Proceedings of the Sixth Workshop on Statistical Machine Translation, pages 187-197, Edinburgh, Scotland. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Distilling the knowledge in a neural network", |
|
"authors": [ |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. 2015. Distilling the knowledge in a neural network.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Dual conditional cross-entropy filtering of noisy parallel corpora", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Third Conference on Machine Translation: Shared Task Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "888--895", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-6478" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt. 2018. Dual conditional cross-entropy filtering of noisy parallel corpora. In Proceedings of the Third Conference on Machine Translation: Shared Task Papers, pages 888-895, Belgium, Brussels. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Marian: Fast neural machine translation in C++", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Grundkiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomasz", |
|
"middle": [], |
|
"last": "Dwojak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Neckermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Seide", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulrich", |
|
"middle": [], |
|
"last": "Germann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alham", |
|
"middle": [], |
|
"last": "Fikri Aji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikolay", |
|
"middle": [], |
|
"last": "Bogoychev", |
|
"suffix": "" |
|
}, |
|
{

"first": "Andr\u00e9",

"middle": [

"F",

"T"

],

"last": "Martins",

"suffix": ""

},

{

"first": "Alexandra",

"middle": [],

"last": "Birch",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of ACL 2018, System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "116--121", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-4020" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt, Roman Grundkiewicz, Tomasz Dwojak, Hieu Hoang, Kenneth Heafield, Tom Neckermann, Frank Seide, Ulrich Germann, Alham Fikri Aji, Nikolay Bogoychev, Andr\u00e9 F. T. Martins, and Alexandra Birch. 2018. Marian: Fast neural machine translation in C++. In Proceedings of ACL 2018, System Demonstrations, pages 116- 121, Melbourne, Australia. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Regularized training objective for continued training for domain adaptation in neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Huda", |
|
"middle": [], |
|
"last": "Khayrallah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Thompson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Duh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2nd Workshop on Neural Machine Translation and Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "36--44", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-2705" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huda Khayrallah, Brian Thompson, Kevin Duh, and Philipp Koehn. 2018. Regularized training objective for continued training for domain adaptation in neu- ral machine translation. In Proceedings of the 2nd Workshop on Neural Machine Translation and Gen- eration, pages 36-44, Melbourne, Australia. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Simulated multiple reference training improves low-resource machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Huda", |
|
"middle": [], |
|
"last": "Khayrallah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Thompson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Post", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.14524" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huda Khayrallah, Brian Thompson, Matt Post, and Philipp Koehn. 2020. Simulated multiple reference training improves low-resource machine translation. arXiv preprint arXiv:2004.14524.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Sequencelevel knowledge distillation", |
|
"authors": [ |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1317--1327", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1139" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoon Kim and Alexander M. Rush. 2016. Sequence- level knowledge distillation. In Proceedings of the 2016 Conference on Empirical Methods in Natu- ral Language Processing, pages 1317-1327, Austin, Texas. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Europarl: A parallel corpus for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "MT summit", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "79--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn. 2005. Europarl: A parallel corpus for statistical machine translation. In MT summit, vol- ume 5, pages 79-86. Citeseer.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Subword regularization: Improving neural network translation models with multiple subword candidates", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "66--75", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1007" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo. 2018. Subword regularization: Improv- ing neural network translation models with multiple subword candidates. In Proceedings of the 56th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 66- 75, Melbourne, Australia. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Sentence-Piece: A simple and language independent subword tokenizer and detokenizer for neural text processing", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "66--71", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-2012" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo and John Richardson. 2018. Sentence- Piece: A simple and language independent subword tokenizer and detokenizer for neural text processing. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 66-71, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Calibration of encoder decoder models for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Aviral", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunita", |
|
"middle": [], |
|
"last": "Sarawagi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aviral Kumar and Sunita Sarawagi. 2019. Calibration of encoder decoder models for neural machine trans- lation. CoRR, abs/1903.00802.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A diversity-promoting objective function for neural conversation models", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Brockett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "110--119", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-1014" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2016. A diversity-promoting ob- jective function for neural conversation models. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 110-119, San Diego, California. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "OpenSub-titles2016: Extracting large parallel corpora from movie and TV subtitles", |
|
"authors": [ |
|
{ |
|
"first": "Pierre", |
|
"middle": [], |
|
"last": "Lison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "923--929", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pierre Lison and J\u00f6rg Tiedemann. 2016. OpenSub- titles2016: Extracting large parallel corpora from movie and TV subtitles. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 923-929, Por- toro\u017e, Slovenia. European Language Resources As- sociation (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Simultaneous translation and paraphrase for language education", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Mayhew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klinton", |
|
"middle": [], |
|
"last": "Bicknell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Brust", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Mcdowell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Monroe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Burr", |
|
"middle": [], |
|
"last": "Settles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the ACL Workshop on Neural Generation and Translation (WNGT). Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Mayhew, Klinton Bicknell, Chris Brust, Bill McDowell, Will Monroe, and Burr Settles. 2020. Si- multaneous translation and paraphrase for language education. In Proceedings of the ACL Workshop on Neural Generation and Translation (WNGT). Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Intelligent selection of language model training data", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Moore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the ACL 2010 Conference Short Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "220--224", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert C. Moore and William Lewis. 2010. Intelli- gent selection of language model training data. In Proceedings of the ACL 2010 Conference Short Pa- pers, pages 220-224, Uppsala, Sweden. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Analyzing uncertainty in neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc'aurelio", |
|
"middle": [], |
|
"last": "Ranzato", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 35th International Conference on Machine Learning", |
|
"volume": "80", |
|
"issue": "", |
|
"pages": "3956--3965", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Myle Ott, Michael Auli, David Grangier, and Marc'Aurelio Ranzato. 2018. Analyzing uncer- tainty in neural machine translation. In Proceed- ings of the 35th International Conference on Ma- chine Learning, volume 80 of Proceedings of Ma- chine Learning Research, pages 3956-3965, Stock- holmsm\u00e4ssan, Stockholm Sweden. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "fairseq: A fast, extensible toolkit for sequence modeling", |
|
"authors": [ |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexei", |
|
"middle": [], |
|
"last": "Baevski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "48--53", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-4009" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, extensible toolkit for sequence modeling. In Proceedings of the 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics (Demonstrations), pages 48-53, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Universal dependency parsing from scratch", |
|
"authors": [ |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Dozat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuhao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "160--170", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K18-2016" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng Qi, Timothy Dozat, Yuhao Zhang, and Christo- pher D. Manning. 2018. Universal dependency pars- ing from scratch. In Proceedings of the CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies, pages 160-170, Brus- sels, Belgium. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Wikimatrix: Mining 135m parallel sentences in 1620 language pairs from wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuo", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongyu", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Holger Schwenk, Vishrav Chaudhary, Shuo Sun, Hongyu Gong, and Francisco Guzm\u00e1n. 2019. Wikimatrix: Mining 135m parallel sentences in 1620 language pairs from wikipedia. CoRR, abs/1907.05791.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Parallel data, tools and interfaces in OPUS", |
|
"authors": [ |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2214--2218", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00f6rg Tiedemann. 2012. Parallel data, tools and inter- faces in OPUS. In Proceedings of the Eighth In- ternational Conference on Language Resources and Evaluation (LREC'12), pages 2214-2218, Istanbul, Turkey. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Multilingual dictionary based construction of core vocabulary", |
|
"authors": [ |
|
{ |
|
"first": "Winston", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Garrett", |
|
"middle": [], |
|
"last": "Nicolai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Yarowsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4204--4210", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Winston Wu, Garrett Nicolai, and David Yarowsky. 2020. Multilingual dictionary based construction of core vocabulary. In Proceedings of The 12th Lan- guage Resources and Evaluation Conference, pages 4204-4210, Marseille, France. European Language Resources Association.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "andar ali? eu posso andar pra l\u00e1?", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"text": "Macro Weighted F1 (JHU-TEST) vs. BLEU (JHU-DEV) for a variety of fine-tuned systems for each language pair. The two metrics are not well correlated within a language pair.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"content": "<table><tr><td>,</td></tr></table>", |
|
"text": "", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"text": "Number of English word tokens for all datasets used to train the baseline MT models. Just the data above the line was used to train the MODEL1 baseline, all the data was used to train the MODEL2 baseline.", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"content": "<table><tr><td colspan=\"2\">en \u2192 x</td><td>ja</td><td>ko</td></tr><tr><td colspan=\"2\">MODEL2</td><td colspan=\"2\">16.8 12.5</td></tr><tr><td>fine-tune on:</td><td colspan=\"3\">JHU-TRAIN: 1-best JHU-TRAIN: all upweighted JHU-TRAIN: all + 1x 1-best upweighted JHU-TRAIN: all + 9x 1-best upweighted JHU-TRAIN: all + 99x 1-best 31.0 33.4 18.4 18.7 31.5 38.0 30.3 38.0 32.1 38.8</td></tr></table>", |
|
"text": "The weighted macro F1 on JHU-TEST for MODEL1 and fine-tuned variants. Candidates are extracted from the n-best list using the proposed probability score thresholding ( \u00a75.3).", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"content": "<table/>", |
|
"text": "", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF7": { |
|
"content": "<table/>", |
|
"text": "Moore-Lewis filtering for Pt (macro F1).", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF9": { |
|
"content": "<table/>", |
|
"text": "Preprocessing operations for filtering on one Portuguese gold output for the prompt do they have five girls?, organized from most specific to most general.", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF10": { |
|
"content": "<table><tr><td>Strategy</td><td colspan=\"2\">P RW WF1macro</td></tr><tr><td>Baseline</td><td>26.91 69.70</td><td>34.49</td></tr><tr><td>Add 1;PL</td><td>26.62 69.84</td><td>34.27</td></tr><tr><td>Add 3;SG;MASC</td><td>23.97 70.19</td><td>33.42</td></tr><tr><td>Add 3;SG;FEM</td><td>24.69 70.49</td><td>33.51</td></tr><tr><td>Add 3;PL</td><td>22.28 69.75</td><td>31.89</td></tr><tr><td>Add most frequent 'she'</td><td>26.77 69.84</td><td>34.38</td></tr><tr><td>Swap most common 'he's</td><td>26.71 69.82</td><td>34.37</td></tr><tr><td colspan=\"2\">Swap 2 nd most common 'he's 26.90 69.71</td><td>34.47</td></tr><tr><td colspan=\"2\">Swap 3 rd most common 'he's 26.88 69.71</td><td>34.45</td></tr></table>", |
|
"text": "15 Specified by the Japanese Ministry of Education and annotated in edrdg.org/wiki/index.php/KANJIDIC_ Project", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF11": { |
|
"content": "<table><tr><td>: Effect of pronoun-based augmentation on met-</td></tr><tr><td>rics in Vietnamese, computed on JHU-TEST. All strate-</td></tr><tr><td>gies improve recall and weighted recall, but they cause</td></tr><tr><td>precision and F1 to decrease.</td></tr><tr><td>several acceptable translations were not present in</td></tr><tr><td>the ground truth dataset (see \u00a77.3).</td></tr></table>", |
|
"text": "", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |