|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:06:37.546841Z" |
|
}, |
|
"title": "Expand and Filter: CUNI and LMU Systems for the WNGT 2020 Duolingo Shared Task", |
|
"authors": [ |
|
{ |
|
"first": "Jind\u0159ich", |
|
"middle": [], |
|
"last": "Libovick\u00fd", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "LMU Munich", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "libovicky@cis.lmu.de" |
|
}, |
|
{ |
|
"first": "Zden\u011bk", |
|
"middle": [], |
|
"last": "Kasner", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Charles University", |
|
"location": { |
|
"settlement": "Prague", |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jind\u0159ich", |
|
"middle": [], |
|
"last": "Helcl", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Charles University", |
|
"location": { |
|
"settlement": "Prague", |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Du\u0161ek", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Charles University", |
|
"location": { |
|
"settlement": "Prague", |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "odusek@ufal.mff.cuni.cz" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We present our submission to the Simultaneous Translation And Paraphrase for Language Education (STAPLE) challenge. We used a standard Transformer model for translation, with a crosslingual classifier predicting correct translations on the output n-best list. To increase the diversity of the outputs, we used additional data to train the translation model, and we trained a paraphrasing model based on the Levenshtein Transformer architecture to generate further synonymous translations. The paraphrasing results were again filtered using our classifier. While the use of additional data and our classifier filter were able to improve results, the paraphrasing model produced too many invalid outputs to further improve the output quality. Our model without the paraphrasing component finished in the middle of the field for the shared task, improving over the best baseline by a margin of 10-22% weighted F1 absolute.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We present our submission to the Simultaneous Translation And Paraphrase for Language Education (STAPLE) challenge. We used a standard Transformer model for translation, with a crosslingual classifier predicting correct translations on the output n-best list. To increase the diversity of the outputs, we used additional data to train the translation model, and we trained a paraphrasing model based on the Levenshtein Transformer architecture to generate further synonymous translations. The paraphrasing results were again filtered using our classifier. While the use of additional data and our classifier filter were able to improve results, the paraphrasing model produced too many invalid outputs to further improve the output quality. Our model without the paraphrasing component finished in the middle of the field for the shared task, improving over the best baseline by a margin of 10-22% weighted F1 absolute.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The usual goal of machine translation (MT) is to generate a single correct translation of a source sentence. Neural machine translation (NMT; Bahdanau et al., 2015; Vaswani et al., 2017 ) models a conditional distribution over possible target sentences given a source sentence, and uses beamsearch decoding as a heuristic to get one or more translations. However, the number of possible correct translations is often vast in comparison (Bojar et al., 2013) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 164, |
|
"text": "Bahdanau et al., 2015;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 165, |
|
"end": 185, |
|
"text": "Vaswani et al., 2017", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 436, |
|
"end": 456, |
|
"text": "(Bojar et al., 2013)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The STAPLE challenge (Mayhew et al., 2020) poses the problem of MT slightly differently. Here, the goal is to generate as many correct translations as possible. Knowing many correct translations can be useful e.g. for automatic scoring in tools for language education, such as Duolingo. 1 On one hand, the learners should be guided to use the more common formulations, on the other hand, they should not be penalized for providing a correct but unusual answer.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 42, |
|
"text": "(Mayhew et al., 2020)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We present a pipeline of two systems in our submission to the STAPLE challenge. As the first step, we use a standard NMT model trained with additional, carefully filtered data. The NMT output n-best lists are filtered using a classifier. Second, we use a Levenshtein Transformer model (Gu et al., 2019) to generate paraphrases of the outputs of the first model. Again, the outputs of the Levenshtein Transformer are filtered using another classifier.", |
|
"cite_spans": [ |
|
{ |
|
"start": 285, |
|
"end": 302, |
|
"text": "(Gu et al., 2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The paper is structured as follows. Our training datasets are described in detail in Section 2. We describe the two models in Sections 3 and 4 respectively. We conduct experiments with all five target languages in the challenge, i.e. Hungarian, Japanese, Korean, Vietnamese and Portuguese. The source language is English in all setups. The experiment settings are shown in Section 5. The results (Section 6) show that the Levenshtein Transformer paraphrase generator cannot easily improve on the filtered NMT output n-best list.", |
|
"cite_spans": [ |
|
{ |
|
"start": 396, |
|
"end": 407, |
|
"text": "(Section 6)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The data in the STAPLE shared task comes from the Duolingo language learning platform. This represents a specific domain with a limited number of mostly simple sentences targeted at learners, using a limited vocabulary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "STAPLE Dataset", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Each source (English) sentence comes with a list of valid translations in the target language, ranging from a few up to hundreds of paraphrases. All of the valid translations are further annotated with a probability score indicating how frequent a given variant is. Statistics of the data are given in Table 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 302, |
|
"end": 309, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "STAPLE Dataset", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We held out 200 source sentences with all their translations as our internal validation dataset. We use this dataset for validating the translation models and for estimating the filtering thresholds.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "STAPLE Dataset", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "For training the translation model, we obtained out-of-domain parallel corpora from the OPUS collection (Tiedemann, 2012) for all target languages, ParaCrawl (Espl\u00e0 et al., 2019) for Portuguese and Hungarian, and JParaCrawl (Morishita et al., 2019) for Japanese. We applied FastText language identifier (Joulin et al., 2016b,a) to clean the corpora. Furthermore, we filtered out sentence pairs with a length ratio that differs from the estimated mean ratio by more than 2.5 times the standard deviation. As in-domain training data, we mix data from the STAPLE training dataset and the Tatoeba 2 corpus (part of the OPUS collection). To balance the underrepresentation of the in-domain data in the training dataset, we oversample both the STAPLE dataset (200\u00d7) and the Tatoeba dataset (10\u00d7).", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 121, |
|
"text": "(Tiedemann, 2012)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 158, |
|
"end": 178, |
|
"text": "(Espl\u00e0 et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 224, |
|
"end": 248, |
|
"text": "(Morishita et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 327, |
|
"text": "(Joulin et al., 2016b,a)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional data for MT", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We use the combined mixed-domain parallel corpora for training backtranslation models (Sennrich et al., 2016) and an XLM-R-based domain classifier. The classifier is trained to predict whether a target sentence came from the STAPLE training dataset, conditioning on the source sentence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 109, |
|
"text": "(Sennrich et al., 2016)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional data for MT", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The monolingual data consists of Wikipedia, WMT NewsCrawl (Barrault et al., 2019) for Hungarian, Japanese and Portuguese, Leipzig Corpora NewsCrawl (Goldhahn et al., 2012) for all languages and the jpWaC corpus for Japanese (Erjavec et al., 2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 81, |
|
"text": "(Barrault et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 148, |
|
"end": 171, |
|
"text": "(Goldhahn et al., 2012)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 224, |
|
"end": 246, |
|
"text": "(Erjavec et al., 2008)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional data for MT", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We filtered both the monolingual and parallel data using the domain classifier. The classifier has over 99% accuracy on balanced data. We set a permissive threshold for keeping the sentence pair to 10 \u22125 . Based on preliminary experiments, we include only a single correct translation from the STAPLE training set into the machine translation training data. This had a slightly positive effect on translation quality. Also, the n-best lists obtained by machine-translating the STAPLE dataset are more representative and thus more suitable for training the classifier for n-best list filtering than if we included all translations from the training set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional data for MT", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Our pipeline starts with the Transformer model (Vaswani et al., 2017) trained on the provided dataset enriched with additional data (see Section 2). This provides initial translations of the source sentence on the output n-best list, which are further filtered.", |
|
"cite_spans": [ |
|
{ |
|
"start": 47, |
|
"end": 69, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation Model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We train a crosslingual classifier which predicts whether a translation in the MT output n-best list is correct (given the source sentence). Using the trained translation model, we first generate large n-best lists for all English sentences in the original training data. Next, we label each generated sentence whether it is a positive or a negative sample (based on the reference data). Finally, we create a balanced mix of negative and positive samples. Since the n-best lists contained much more incorrect translations, we oversample the list of correct translations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Filtering Classifier", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We use XLM-RoBERTa as our sentence classifier model (XLM-R; Conneau et al., 2020), specifically the pretrained variant available in the Hug-gingFace Transformers library 3 (Wolf et al., 2019) . We finetune this model on the balanced mix of the correct and incorrect translations for a given sentence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 191, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Filtering Classifier", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "During inference, we generate an n-best list for a given source sentence, and we apply the classifier to filter out the sentences which are labeled as incorrect using a threshold value. The n-best list size and the threshold are hyperparameters of the method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Filtering Classifier", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "As an additional step to increase the number of valid translations produced, we train a target- Table 2 : Number of sentences in the parallel and monolingual data used for training the MT systems. The data in the second column were used for training the backtranslation systems, the last column corresponds to final translation systems.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 103, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Paraphrasing Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "language paraphrasing model. Rather than generating a translation directly from the source sentence, the model refines existing translations in order to produce new ones. The model is based on the Levenshtein Transformer (LevT; Gu et al., 2019) , which is a sequence generation model based on the Transformer (Vaswani et al., 2017) architecture. Instead of leftto-right autoregressive generation, LevT generates sequences in an arbitrary order using two basic operations -insertion and deletion. Using an initial sequence as a starting point, LevT is able to perform sequence refinement.", |
|
"cite_spans": [ |
|
{ |
|
"start": 228, |
|
"end": 244, |
|
"text": "Gu et al., 2019)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 331, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrasing Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "LevT iteratively applies three policies represented by fully-connected neural network layers on top of the last layer of a Transformer decoder:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "1. Deletion policy \u03c0 del removes tokens from the sequence;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "2. Placeholder policy \u03c0 plh inserts placeholders into the sequence;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "3. Insertion policy \u03c0 ins replaces placeholders with tokens from the vocabulary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The policies are trained to follow oracle policies. Given a source sequence X and a target sequence Y , L(X,Y ) is a minimum sequence of edit operations (delete and insert) that transform X to Y . Its length is equal to the Levenshtein distance (Levenshtein, 1966) between X and Y . The operations in L(X,Y ) define oracle policies for \u03c0 del , \u03c0 plh and \u03c0 ins .", |
|
"cite_spans": [ |
|
{ |
|
"start": 245, |
|
"end": 264, |
|
"text": "(Levenshtein, 1966)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "There are two other possible training strategies: Either training the insertion policy to repair a target sentence with randomly dropped tokens, or training the deletion policy to refine the output from the insertion policy. However, we do not use these strategies for our model. In the first case, we did not find it beneficial for the model performance. In the second case, the option does not fit together with our inference scheme as described in Section 4.2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "To train the model to gradually produce more diverse, but still valid paraphrases, we provided the model with training paraphrase pairs with minimum edit distance. We represent the set of paraphrases as a complete graph with edges weighted by the Levenshtein distance (see Figure 1 ). We construct a minimum spanning tree of this graph and use the sentence pairs from the spanning tree edges as training examples.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 273, |
|
"end": 281, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Formally, a training example for LevT is a tuple (E, X 0 , X,Y ) where E is the original English sentence, X 0 is the gold translation, X is the source node and Y is the target node. (E, X 0 ) is processed by the encoder, X is used as the initial sequence for the decoder and Y acts as the ground truth. We do not use any additional data for LevT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In the original LevT formulation, inference is done by applying the model over the initial sequence for several iterations. This approach aims to produce a single output translation and the intermediate translations are deemed to be incorrect. We redefine the generation process as state-space search, considering translations in each step as potentially correct and accepting the translations based on the classifier score. We also repurpose the deletion policy as paraphrasing policy, which gives us the possibility to generate multiple translations in each step. Similarly to the original LevT architecture, the encoder output is grounding the translations in the source sentence throughout the inference process.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The process can start from an arbitrary number of initial translations -in our case, the initial translations are the filtered outputs from the MT system. We put all the initial translations in a queue. In each step, we pop a translation from the queue, and we let the deletion policy mark all tokens suitable for paraphrasing. To expand our search space, we gen- erate multiple versions of the partially deleted sentence using all possible combinations of selected tokens, which are individually processed by the placeholder and insertion policies. We put the output translations in the queue and repeat the process until the queue is empty or we reach a preset limit on the number of generated sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Similarly to our NMT model, we filter relevant translations using a classifier built on top of XLM-R (see Section 3). In this case, the negative examples for the classifier are generated from LevT. We use the classifier's predictions to accept only translations passing a preset threshold. Moreover, we use the scores predicted by the classifier (probability of translation correctness) to define the priority of the translations in the queue, thus making it a pri-ority queue. Figure 2 shows an example of a single step of the inference algorithm with filtering.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 478, |
|
"end": 486, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Filtering", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We use the official evaluation metric for the STA-PLE challenge, which is the weighted macro F1 score, computed by exact match with respect to the set of all valid translations for a given source sentence. The weighted F1 is a compound of unweighted precision and weighted recall, where the weight is determined based on each translation's probability (see Section 2.1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metric", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We train the translation model using the Marian toolkit 4 (Junczys-Dowmunt et al., 2018) . We use the Transformer Base hyperparameters, i.e., model dimension 512, feed-forward layer dimension 2048, 8 attention heads with a head dimension of 64. All models use SentencePiece-based (Kudo and Richardson, 2018) vocabulary of 32k units. Japanese was tokenized using UDPipe (Straka and Strakov\u00e1, 2017) , other languages were processed with SentencePiece without tokenization. The model is trained using the Adam optimizer (Kingma and Ba, 2015), with Noam learning schedule (Vaswani et al., 2017) with 8,000 warmup steps and initial learning rate 3 \u2022 10 \u22124 , dropout rate 0.1, label smoothing 0.1 and gradient clipping at 5.0. We set the training batch size to 4,096 tokens.", |
|
"cite_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 88, |
|
"text": "(Junczys-Dowmunt et al., 2018)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 280, |
|
"end": 307, |
|
"text": "(Kudo and Richardson, 2018)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 369, |
|
"end": 396, |
|
"text": "(Straka and Strakov\u00e1, 2017)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 568, |
|
"end": 590, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation Model", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We base our paraphrasing model on the Levenshtein Transformer as implemented in fairseq 5 (Ott 157 et al., 2019) . We replace the Transformer encoder with the pretrained XLM-R Base with 12 layers and 8 attention heads, keeping the vanilla Transformer decoder with 6 layers and 8 attention heads. We use the output layer of XLM-R as the decoder output layer and finetune it together with the last four layers of XLM-R (freezing the rest of XLM-R parameters). We train a separate decoder for each policy and employ the early exit as described by Gu et al. (2019) by using only the features from the third layer of the decoder for the deletion and placeholder policies.", |
|
"cite_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 112, |
|
"text": "(Ott 157 et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 544, |
|
"end": 560, |
|
"text": "Gu et al. (2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrasing Model", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "The model is optimized using the Adam optimizer, with fixed learning rate 10 \u22125 , batch size 1,500 tokens, dropout rate 0.5 and label smoothing 0.1. We set the maximum number of placeholders for each position at 3 instead of 256.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrasing Model", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We experiment with various decoding strategies. For deletion and placeholder policies, we introduce a penalty parameter preventing the policy from producing the zero (unchanged) outcome. This proved beneficial in particular for the deletion policy, as it frequently did not mark any tokens for deletion, thus limiting the search process. Alternatively, we force the policy to produce an outcome by selecting top k results with the highest score. In both cases, we find that limiting the number of placeholders generated at the same position to 1 helps to prevent excessive sentence length.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrasing Model", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "For the insertion policy, we give up the nonautoregressivity and replace the placeholders in the left-to-right order, re-running the decoder in each step. This acts as a supplement for the fact that the insertion policy cannot repeatedly interact with the deletion policy in a single state-space search step.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrasing Model", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Our experiments show that it is difficult to find a set of decoding parameters which would consistently produce meaningful output. For generating paraphrases, we find it useful to use the penalty strategy described above and tune the penalty separately for each language. Producing less paraphrases generally leads to better results, as it tends to limit the amount of incorrect output. On the contrary, we use the top-k strategy for generating the classifier training data, as it produces negative samples more robustly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrasing Model", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "The results on the blind development and test data are shown in Table 3. Our translation model was able to bring considerable improvements over the provided baseline models -10-22% weighted F1 absolute due to increase in both precision and recall. We observed the highest improvements for Portuguese. Overall, our model tends to finish in the middle of the field.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Experiments with training data filtering showed that a smaller training set with better selected sentences leads to better trasnslation quality than using a larger general domain corpus (see Table 4 ), although the general corpus was double in the number of sentences (see Table 2 ). The optimal beam size for filtering only roughly corresponds to the average number of paraphrases in the data (see Table 1 ). Figure 3 shows the effect of beam size on the output quality. Without filtering, the precision quickly drops with increasing beam size. The filtering can partially compensate for the precision loss, however, at the expense of decreasing recall, too.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 191, |
|
"end": 198, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 273, |
|
"end": 280, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 399, |
|
"end": 406, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 410, |
|
"end": 418, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Despite our extensive efforts, the paraphrasing model did not bring substantial improvements over the translation model. Increase in recall typically came with lower precision and lower weighted F1 score. Even with the specific classifier training and its overall accuracy over 99%, the LevT output was too noisy to be precisely filtered. The only improvement was achieved for Portuguese, where the final paraphrasing and filtering setting resulted in slightly higher precision and similar recall. However, the improvement was mostly influenced by the second round of filtering (on the paraphrasing output). Therefore, we did not include the paraphrasing output in our primary submission.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We suppose that the inefficiency of the LevT model is caused by a mismatch between model training criteria and its application: the individual policies are trained separately, but need to complement each other to achieve good results. Moreover, the loss computed independently for each paraphrase may prevent the model from learning to generate multiple paraphrases. The shortage of useful paraphrases may be also caused by the lack of additional training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We presented our submission to the 2020 STA-PLE translation and paraphrasing shared task. Our model is based on the Transformer architecture, used additional carefully selected training data, a XLM-R-based classifier to filter MT output beams, and an optional paraphrasing component based on the Levenshtein Transformer approach. The MT Table 3 : Results for both system variants and all languages. \"MT\" denotes the variant without paraphrasing (final official result), \"+Para\" is the system with paraphrasing. Metrics: Rk = official competition rank (with the number of valid submissions), Pre = precision (%), W-R = weighted recall (%), W-F1 = weighted F1 (%), \u2206base = weighted F1 difference w. r. t. the best baseline model (% absolute), \u2206best = w. r. t. the overall winner (% absolute). Table 4 : Translation quality measured by the weighted F1 score for general training (Gen.) data and domainspecific (Dom.) training data measured on our validation set with beam size 10 compared with the task baseline (AWS) and best filtered beam with beam size. model with the filter was able to gain substantial increases over the baseline, but did not reach the top places in the challenge. The paraphrasing component's output proved too noisy to bring any substantial benefits -we only observed minor improvements in Portuguese. Therefore, the paraphrasing was not included in our primary submission. Improving the paraphrasing model could be an interesting direction of future work. The amount of incorrect output could be reduced by better ac-counting for deletion and insertion policy interplay. However, computing the loss independently for each paraphrase may still hinder the ability of the model to generate multiple paraphrases for a single sentence. It may be thus necessary to rethink the training objective and tie it together with the inference process.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 337, |
|
"end": 344, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 791, |
|
"end": 798, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "https://www.duolingo.com", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://tatoeba.org/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/huggingface/ transformers", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://marian-nmt.github.io/ 5 https://github.com/pytorch/fairseq", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "3rd International Conference on Learning Representations (ICLR2015)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In 3rd Inter- national Conference on Learning Representations (ICLR2015), San Diego, CA, USA.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Findings of the 2019 conference on machine translation (WMT19)", |
|
"authors": [ |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marta", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Costa-Juss\u00e0", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Federmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Fishel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yvette", |
|
"middle": [], |
|
"last": "Graham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Huck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christof", |
|
"middle": [], |
|
"last": "Monz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mathias", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "1--61", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-5301" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lo\u00efc Barrault, Ond\u0159ej Bojar, Marta R. Costa-juss\u00e0, Christian Federmann, Mark Fishel, Yvette Gra- ham, Barry Haddow, Matthias Huck, Philipp Koehn, Shervin Malmasi, Christof Monz, Mathias M\u00fcller, Santanu Pal, Matt Post, and Marcos Zampieri. 2019. Findings of the 2019 conference on machine transla- tion (WMT19). In Proceedings of the Fourth Con- ference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 1-61, Florence, Italy. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Scratching the surface of possible translations", |
|
"authors": [ |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matou\u0161", |
|
"middle": [], |
|
"last": "Mach\u00e1\u010dek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ale\u0161", |
|
"middle": [], |
|
"last": "Tamchyna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Zeman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of TSD 2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-642-40585-3_59" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ond\u0159ej Bojar, Matou\u0161 Mach\u00e1\u010dek, Ale\u0161 Tamchyna, and Daniel Zeman. 2013. Scratching the surface of pos- sible translations. In Proc. of TSD 2013, Lecture Notes in Artificial Intelligence, Berlin / Heidelberg. Z\u00e1pado\u010desk\u00e1 univerzita v Plzni, Springer Verlag.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Unsupervised cross-lingual representation learning at scale", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kartikay", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Wenzek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of ACL 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In Proceedings of ACL 2020. ArXiv: 1911.02116.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Japanese web corpus with difficulty levels jpWaC-l 1.0. Slovenian language resource repository CLARIN", |
|
"authors": [ |
|
{ |
|
"first": "Toma\u017e", |
|
"middle": [], |
|
"last": "Erjavec", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [ |
|
"Hmeljak" |
|
], |
|
"last": "Sangawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshiko", |
|
"middle": [], |
|
"last": "Kawamura", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Toma\u017e Erjavec, Kristina Hmeljak Sangawa, and Yoshiko Kawamura. 2008. Japanese web corpus with difficulty levels jpWaC-l 1.0. Slovenian lan- guage resource repository CLARIN.SI.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "ParaCrawl: Web-scale parallel corpora for the languages of the EU", |
|
"authors": [ |
|
{ |
|
"first": "Miquel", |
|
"middle": [], |
|
"last": "Espl\u00e0", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Forcada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gema", |
|
"middle": [], |
|
"last": "Ram\u00edrez-S\u00e1nchez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of Machine Translation Summit XVII", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "118--119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Miquel Espl\u00e0, Mikel Forcada, Gema Ram\u00edrez-S\u00e1nchez, and Hieu Hoang. 2019. ParaCrawl: Web-scale paral- lel corpora for the languages of the EU. In Proceed- ings of Machine Translation Summit XVII Volume 2: Translator, Project and User Tracks, pages 118-119, Dublin, Ireland. European Association for Machine Translation.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Building large monolingual dictionaries at the leipzig corpora collection: From 100 to 200 languages", |
|
"authors": [ |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Goldhahn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Eckart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Uwe", |
|
"middle": [], |
|
"last": "Quasthoff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "759--765", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dirk Goldhahn, Thomas Eckart, and Uwe Quasthoff. 2012. Building large monolingual dictionaries at the leipzig corpora collection: From 100 to 200 lan- guages. In Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12), pages 759-765, Istanbul, Turkey. Euro- pean Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Levenshtein transformer", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changhan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junbo", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11179--11189", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, Changhan Wang, and Junbo Zhao. 2019. Levenshtein transformer. In Advances in Neural In- formation Processing Systems, pages 11179-11189.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Matthijs Douze, H\u00e9rve J\u00e9gou, and Tomas Mikolov. 2016a. Fasttext.zip: Compressing text classification models", |
|
"authors": [ |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1612.03651" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Armand Joulin, Edouard Grave, Piotr Bojanowski, Matthijs Douze, H\u00e9rve J\u00e9gou, and Tomas Mikolov. 2016a. Fasttext.zip: Compressing text classification models. arXiv preprint arXiv:1612.03651.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Bag of tricks for efficient text classification", |
|
"authors": [ |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1607.01759" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov. 2016b. Bag of tricks for efficient text classification. arXiv preprint arXiv:1607.01759.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Marian: Fast neural machine translation in C++", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Grundkiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomasz", |
|
"middle": [], |
|
"last": "Dwojak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Neckermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Seide", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulrich", |
|
"middle": [], |
|
"last": "Germann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alham", |
|
"middle": [], |
|
"last": "Fikri Aji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikolay", |
|
"middle": [], |
|
"last": "Bogoychev", |
|
"suffix": "" |
|
}, |
|
{

"first": "Andr\u00e9",

"middle": [

"F",

"T"

],

"last": "Martins",

"suffix": ""

},

{

"first": "Alexandra",

"middle": [],

"last": "Birch",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of ACL 2018, System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "116--121", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt, Roman Grundkiewicz, Tomasz Dwojak, Hieu Hoang, Kenneth Heafield, Tom Neckermann, Frank Seide, Ulrich Germann, Alham Fikri Aji, Nikolay Bogoychev, Andr\u00e9 F. T. Martins, and Alexandra Birch. 2018. Marian: Fast neural machine translation in C++. In Proceedings of ACL 2018, System Demonstrations, pages 116- 121, Melbourne, Australia. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{

"first": "Diederik",

"middle": [

"P"

],

"last": "Kingma",

"suffix": ""

},

{

"first": "Jimmy",

"middle": [],

"last": "Ba",

"suffix": ""

}
|
], |
|
"year": 2015, |
|
"venue": "3rd International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd Inter- national Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "SentencePiece: A simple and language independent subword tokenizer and detokenizer for neural text processing", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "66--71", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo and John Richardson. 2018. SentencePiece: A simple and language independent subword tok- enizer and detokenizer for neural text processing. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 66-71, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Binary codes capable of correcting deletions, insertions, and reversals", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Vladimir I Levenshtein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1966, |
|
"venue": "Soviet physics doklady", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "707--710", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vladimir I Levenshtein. 1966. Binary codes capable of correcting deletions, insertions, and reversals. In Soviet physics doklady, volume 10, pages 707-710.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Simultaneous translation and paraphrase for language education", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Mayhew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klinton", |
|
"middle": [], |
|
"last": "Bicknell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Brust", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Mcdowell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Monroe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Burr", |
|
"middle": [], |
|
"last": "Settles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the ACL Workshop on Neural Generation and Translation (WNGT). ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Mayhew, Klinton Bicknell, Chris Brust, Bill McDowell, Will Monroe, and Burr Settles. 2020. Si- multaneous translation and paraphrase for language education. In Proceedings of the ACL Workshop on Neural Generation and Translation (WNGT). ACL.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Jparacrawl: A large scale web-based englishjapanese parallel corpus", |
|
"authors": [ |
|
{ |
|
"first": "Makoto", |
|
"middle": [], |
|
"last": "Morishita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Suzuki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masaaki", |
|
"middle": [], |
|
"last": "Nagata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Makoto Morishita, Jun Suzuki, and Masaaki Nagata. 2019. Jparacrawl: A large scale web-based english- japanese parallel corpus. CoRR, abs/1911.10668.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "fairseq: A fast, extensible toolkit for sequence modeling", |
|
"authors": [ |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexei", |
|
"middle": [], |
|
"last": "Baevski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of NAACL-HLT 2019: Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, extensible toolkit for sequence modeling. In Proceedings of NAACL-HLT 2019: Demonstrations.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Improving neural machine translation models with monolingual data", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "86--96", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Improving neural machine translation mod- els with monolingual data. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 86-96, Berlin, Germany. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Tokenizing, POS tagging, lemmatizing and parsing UD 2.0 with UDPipe", |
|
"authors": [ |
|
{ |
|
"first": "Milan", |
|
"middle": [], |
|
"last": "Straka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jana", |
|
"middle": [], |
|
"last": "Strakov\u00e1", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "88--99", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K17-3009" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Milan Straka and Jana Strakov\u00e1. 2017. Tokenizing, POS tagging, lemmatizing and parsing UD 2.0 with UDPipe. In Proceedings of the CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Univer- sal Dependencies, pages 88-99, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Parallel data, tools and interfaces in OPUS", |
|
"authors": [ |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2214--2218", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00f6rg Tiedemann. 2012. Parallel data, tools and inter- faces in OPUS. In Proceedings of the Eight In- ternational Conference on Language Resources and Evaluation (LREC'12), pages 2214-2218, Istanbul, Turkey. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "6000--6010", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems 30, pages 6000-6010, Long Beach, CA, USA. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Huggingface's transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R'emi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Brew", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.03771" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R'emi Louf, Morgan Funtow- icz, and Jamie Brew. 2019. Huggingface's trans- formers: State-of-the-art natural language process- ing. ArXiv preprint arXiv:1910.03771.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"text": "A minimum spanning tree of the graph of Portuguese translations for \"The site is open.\". Levenshtein distance (the sum of delete and insert operations) is 2 for solid green lines and 6 for the dashed yellow line.", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"text": "An example of a step of the inference algorithm of the paraphrasing model. A translation is taken from the queue Q and expanded using LevT policies. Results are scored by the classifier C; the translations scoring above the threshold thr = 0.5 are accepted. The translations are grounded in the source sentence by the encoder output.", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"text": "Precision, weighted recall and weighted F1 score for NMT decoding with different beam sizes and with and without beam filtering.", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"text": "", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"text": "", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td>: STAPLE training data statistics (target lan-</td></tr><tr><td>guage set, number of source sentences (prompts), num-</td></tr><tr><td>ber of paraphrases (translations), average number of</td></tr><tr><td>paraphrases per source).</td></tr></table>" |
|
}, |
|
"TABREF2": { |
|
"text": "50.65 50.66 46.25 +16.40 -13.58 4/7 51.04 45.77 43.49 +15.39 -11.91 +Para -49.73 50.57 45.75 +15.90 -14.08 -49.91 45.66 43.01 +14.91 -12.39 Japanese MT 5/10 39.02 19.36 21.50 +17.25 -10.09 5/8 36.70 19.93 21.28 +16.97 -10.49 +Para-38.27 19.32 21.34 +17.09 -10.25 -35.76 19.87 21.08 +16.77 -10.69", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td>Target</td><td>System</td><td/><td>Development</td><td>Test</td></tr><tr><td/><td/><td>Rk Pre</td><td>W-R W-F1 \u2206base \u2206best</td><td>Rk Pre</td><td>W-R W-F1 \u2206base \u2206best</td></tr><tr><td colspan=\"4\">Hungarian 4/9 Korean MT MT 4/8 40.05 21.57 22.21 +16.76 -19.16 +Para -39.65 21.06 21.87 +16.42 -19.50</td><td>4/6 38.94 19.35 20.58 +15.71 -19.77 -38.42 18.92 20.29 +15.42 -20.06</td></tr><tr><td>Portuguese</td><td>MT +Para</td><td colspan=\"2\">8/14 49.75 46.78 42.74 +21.59 -13.00 -50.72 46.60 43.22 +22.07 -12.52</td><td>6/10 49.88 43.81 40.84 +19.54 -14.26 -50.94 43.44 41.18 +19.88 -13.92</td></tr><tr><td>Vietnamese</td><td>MT +Para</td><td colspan=\"2\">3/6 52.27 37.36 38.26 +11.47 -16.47 -53.17 36.39 37.68 +10.89 -17.05</td><td>3/5 51.59 36.84 37.71 +12.32 -17.85 -52.09 36.35 37.34 +11.95 -18.22</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |