|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:09:46.353699Z" |
|
}, |
|
"title": "Should we find another model?: Improving Neural Machine Translation Performance with ONE-Piece Tokenization Method without Model Modification", |
|
"authors": [ |
|
{ |
|
"first": "Chanjun", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Korea University", |
|
"location": { |
|
"country": "South Korea" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sugyeong", |
|
"middle": [], |
|
"last": "Eo", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Korea University", |
|
"location": { |
|
"country": "South Korea" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hyeonseok", |
|
"middle": [], |
|
"last": "Moon", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Korea University", |
|
"location": { |
|
"country": "South Korea" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Heuiseok", |
|
"middle": [], |
|
"last": "Lim", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Korea University", |
|
"location": { |
|
"country": "South Korea" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Most of the recent natural language processing (NLP) studies are based on the pretrainfinetuning approach (PFA). However for small and medium-sized industries with insufficient hardware, there are many limitations in servicing latest PFA based NLP application software, due to slow speed and insufficient memory. Since these approaches generally require large amounts of data, it is much more difficult to service with PFA especially for low-resource languages. We propose a new tokenization method, ONE-Piece, to address this limitation. ONE-Piece combines morphologically-aware subword tokenization and vocabulary communicating method, which has not been carefully considered before. Our proposed method can also be utilized without modifying the model structure. We experiment by applying ONE-Piece to Korean, a morphologically-rich and low-resource language. We revealed that ONE-Piece with vanilla transformer model can achieve comparable performance to the current Korean-English machine translation state-of-the-art model. Recent studies using pretrain-finetuning approach (PFA) technique have achieved state-of-the-art (SOTA) performance in many natural language processing (NLP) tasks and are becoming the latest trend (", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Most of the recent natural language processing (NLP) studies are based on the pretrainfinetuning approach (PFA). However for small and medium-sized industries with insufficient hardware, there are many limitations in servicing latest PFA based NLP application software, due to slow speed and insufficient memory. Since these approaches generally require large amounts of data, it is much more difficult to service with PFA especially for low-resource languages. We propose a new tokenization method, ONE-Piece, to address this limitation. ONE-Piece combines morphologically-aware subword tokenization and vocabulary communicating method, which has not been carefully considered before. Our proposed method can also be utilized without modifying the model structure. We experiment by applying ONE-Piece to Korean, a morphologically-rich and low-resource language. We revealed that ONE-Piece with vanilla transformer model can achieve comparable performance to the current Korean-English machine translation state-of-the-art model. Recent studies using pretrain-finetuning approach (PFA) technique have achieved state-of-the-art (SOTA) performance in many natural language processing (NLP) tasks and are becoming the latest trend (", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The trend of model research based on PFA raises two problems. First, it is hard to expect a similar performance for the low-resource setting. This is because most studies based on the PFA technique rely on large amounts of data (Zoph et al., 2016) . But for low-resource languages, it is difficult to provide the comparable amount of data required by recent papers. Second, it is necessary to overturn the existing model and pre-train a new model from scratch to create a PFA-based model that follows the latest research trends.", |
|
"cite_spans": [ |
|
{ |
|
"start": 228, |
|
"end": 247, |
|
"text": "(Zoph et al., 2016)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Since the PFA-based model requires many parameters, companies without adequate server or graphic processing unit (GPU) environments may have many difficulties in configuring the service environment and utilizing the latest model (Park et al., 2020b) . Therefore, new approaches are required to ensure high performance for low-resource languages and companies lacking extensive server and GPU environments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 229, |
|
"end": 249, |
|
"text": "(Park et al., 2020b)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To solve this problem, many researches are being conducted on the way of improving the performance of NLP application software without changing the model through data pre and post-processing, typically in machine translation (Pal et al., 2016; Currey et al., 2017; Banerjee and Bhattacharyya, 2018; Koehn et al., 2018; Kudo, 2018; Park et al., 2020b) . Reflecting this trend, we conducted a study on an optimized tokenization that can improve the performance of neural machine translation (NMT) without changing the model. We propose two perspectives for optimized tokenization. First, we analyze the limitations of byte pair encoding (BPE) (Sennrich et al., 2015) and sentencepiece (Kudo and Richardson, 2018) , which can easily be applied to various languages. Due to its language-agnostic characteristic, these methods are currently used as the defaults in language model research and existing tokenization methods. However, there are 7,111 languages around the world. More than 50 million people speak 25 languages as their mother tongue that have various morphological characteristics such as isolating language, agglutinative language, and fusional language. Considering this, it seems hard to assert that applying sentencepiece and BPE always produce the best performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 225, |
|
"end": 243, |
|
"text": "(Pal et al., 2016;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 244, |
|
"end": 264, |
|
"text": "Currey et al., 2017;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 265, |
|
"end": 298, |
|
"text": "Banerjee and Bhattacharyya, 2018;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 299, |
|
"end": 318, |
|
"text": "Koehn et al., 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 319, |
|
"end": 330, |
|
"text": "Kudo, 2018;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 331, |
|
"end": 350, |
|
"text": "Park et al., 2020b)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 641, |
|
"end": 664, |
|
"text": "(Sennrich et al., 2015)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 683, |
|
"end": 710, |
|
"text": "(Kudo and Richardson, 2018)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Second, we focus on the problem that there is not enough discussion about the corpus used in tokenizer training. Several studies that applied BPE and sentencepiece use a merged bilingual corpus, that combines two language corpora into one, when training its tokenizer (Song et al., 2019; Liu et al., 2020) . However in these studies, merged bilingual corpus is utilized without sufficient comparative analysis.", |
|
"cite_spans": [ |
|
{ |
|
"start": 268, |
|
"end": 287, |
|
"text": "(Song et al., 2019;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 288, |
|
"end": 305, |
|
"text": "Liu et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this study, tokenization methods which leveraging merged bilingual corpora and separate bilingual corpora are denoted as Vocabulary Communicating (VC) and Vocabulary Separating (VS), respectively. We denote VC and VS as vocabulary methods and compare the performance of each method in NMT. In other words, we further figure out the optimal tokenization method through comparative experiments on various tokenization methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "All the experiments are made on a Korean dataset, which is a representative of low-resource and morphologically rich language (MRL). In particular, we propose ONE-Piece that combines the VC method and morphological segmentation followed by sentencepiece. Through comparative experiments with tokenization methods currently used in NLP research, such as BPE and sentencepiece, we revealed that ONE-Piece can encourage the optimal performance in Korean-English machine translation. The contributions of our study are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We proposed a new subword tokenization method, ONE-Piece, which leveraging morphological segmentation and vocabulary communicating method. Through ONE-Piece, we can obtain better performance than the existing tokenization methods such as BPE and sentencepiece. \u2022 Based on linguistic analysis, we showed that constructing corpus for training tokenizer is an important factor that has a critical influence on machine translation performance. \u2022 We presented a new viewpoint for pre-processing that can improve translation performance without modifying model structure. Our proposal consid-ered industrial service and demonstrated high speed and performance without using PFA.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This study proposes an optimal tokenization method for improving machine translation performance from the viewpoints of morphological segmentation and vocabulary method. We derive an optimal tokenization method for Korean-English machine translation by conducting a case study that combines the morphological segmentation and vocabulary method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Korean is classified as an agglutinative language according to its type of morphemes. Due to the nature of agglutinative languages, one word can comprises substantive (noun/pronoun/numeral) followed by postposition, or the stem followed by the ending. Table 1 shows the result of tokenizing Korean sentences through BPE (Sennrich et al., 2015) , sentencepiece (Kudo and Richardson, 2018) , and morphological segmentation using MeCab-ko.", |
|
"cite_spans": [ |
|
{ |
|
"start": 320, |
|
"end": 343, |
|
"text": "(Sennrich et al., 2015)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 387, |
|
"text": "(Kudo and Richardson, 2018)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 252, |
|
"end": 259, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Morphologically-Aware SentencePiece", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In the case of BPE and sentencepiece, the postpositions '\uac00 (ga), \ub294 (neun), \ub97c (leul), \uc758 (ui), \uc778 (in)' have not been properly separated from the substantives. This failures in separating the postpositions from the substantives can lead to mistranslation of entities and grammartically incorrect translation. Generally, the postposition indicates the grammatical relationship to the substantive and plays an important role in organizing the meaning of words. Therefore, miss-separating the postpositions can lead to the incorrect translation of the whole sentence, and misunderstanding of the semantic relationship.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morphologically-Aware SentencePiece", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Also, in the case of BPE and sentencepiece, the entities (red-common noun, blue-proper noun) are over-tokenized. Both methods tokenize sentences based on frequency and probability without considering linguistic characteristics. This can lead to inappropriate segmentation between substantives and postpositions, or between stems and endings. These problems can be alleviated by employing morphological segmentation. In this study, we quantitatively analyze the effect of morphological segmentation in NMT, and propose the optimal method of leveraging it by combining sentencepiece.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morphologically-Aware SentencePiece", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We use Konlpy (Park and Cho, 2014) for morphological segmentation of Korean sentences. Konlpy Target Sentence BPE sentencepiece MeCab-ko The number of diagnoses started to soar, just as Lorna and Judith predicted, indeed hoped, that it would \uc9c4\ub2e8/ \uc22b\uc790\ub294/ \uae09 \uc99d@@/\ud588\uace0/ \ub85c@@/ \ub098\uc640/ \uc8fc@@/\ub514@@/ \uc2a4\uac00/ \uc608\uc0c1@@/\ud588\uace0/ \uc9c4@@/\uc2e4\ub85c/ \uadf8\ub4e4\uc774/ \ubc14\ub7ac@@/\ub358/ \uac83\ucc98\ub7fc", |
|
"cite_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 34, |
|
"text": "(Park and Cho, 2014)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 129, |
|
"text": "Target Sentence BPE sentencepiece", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Why MeCab-ko?", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Instead of blaming parents for causing autism, Asperger framed it as a lifelong, polygenetic disability \uc790\ud3d0@@/\uc99d\uc744/ \ubd80\ubaa8\uc758/ \ud0d3@@/\uc73c\ub85c/ \ub3cc\ub9ac\ub294/ \ub300\uc2e0/ \uc544\uc2a4@@/\ud37c@@/ \uac70\ub294/ \uadf8\uac83\uc744/ \uc7a5\uae30 \uc801\uc778/ \ub2e4@@/\uae30@@/ \uc6d0\uc758/ \uc7a5\uc560@@/\ub85c is an open-source Korean morphological analyzer package which provides 6 morphological analyzers: MeCab-ko, Kkma, Komoran, Hannanum, Okt, and Twitter. In this study, we select an analyzer that shows the best performance among them by experimenting morphological analysis for up to 1 M characters. In particular, since inference speed is a very important factor in the industry field, we focused on the time required for morphological analysis. The inference time required for each analyzer is shown in Figure 1 . As shown in Figure 1 , MeCab-ko shows the best results compared to other morphological analyzers. It takes 0.3353 secs in processing 1 M characters. Additionally, through experiments on different number of characters, we can see that MeCab-ko conducts analysis of the input sequence at a stable speed despite the exponential increase in the number of characters. For these reasons, we adopt MeCab-ko by its high processing speed and stability in character length.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 683, |
|
"end": 691, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 706, |
|
"end": 714, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Why MeCab-ko?", |
|
"sec_num": "2.2" |
|
}, |
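
{

"text": "A minimal sketch of this segmentation and timing step is given below. It assumes that the konlpy package with the MeCab-ko backend is installed; the sample sentence and the scaling loop are illustrative assumptions and are not the exact benchmark script used for Figure 1.\n\n# A minimal sketch, assuming konlpy with the MeCab-ko backend is installed.\n# The sample sentence and the timing loop are illustrative assumptions.\nimport time\nfrom konlpy.tag import Mecab\n\nmecab = Mecab()\nsentence = '\uc9c4\ub2e8 \uc22b\uc790\ub294 \uae09\uc99d\ud588\ub2e4'  # hypothetical example sentence\nprint(mecab.morphs(sentence))  # morpheme-unit segmentation, e.g. ['\uc9c4\ub2e8', '\uc22b\uc790', '\ub294', ...]\n\n# Probe speed stability on a longer input (illustrative only).\ntext = sentence * 1000\nstart = time.time()\nmecab.morphs(text)\nprint(f'{len(text)} characters analyzed in {time.time() - start:.4f} s')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Why MeCab-ko?",

"sec_num": "2.2"

},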
|
{ |
|
"text": "The VC method has been used in several PFAbased models. In MASS (Song et al., 2019) , a 60K vocabulary was extracted by composing the source and target language into a merged bilingual corpus. In mBART (Liu et al., 2020) , the CC25 corpus was composed of a total of 25 languages extracted from CommonCrawl (CC) (Lample and Conneau, 2019; Wenzek et al., 2019) and used for unified vocabulary extraction. When using the VC method in mBART, there is a generalization effect for unseen languages. However, this effect has not been sufficiently discussed for languages that do not share an alphabet, and no quantitative basis for a generalization effect has been proposed. In this study, we conducted probing for this approach through quantitative analysis.", |
|
"cite_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 83, |
|
"text": "(Song et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 202, |
|
"end": 220, |
|
"text": "(Liu et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 311, |
|
"end": 337, |
|
"text": "(Lample and Conneau, 2019;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 358, |
|
"text": "Wenzek et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Vocabulary Communicating Method", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "In practical cases, source and target languages often communicate to each other; source language is contained in target sentences, and vice versa. In the case of our training data, approximately 6.9% of source sentences contains English tokens. For instance, domain specific terms such as \"Host IP\" can not be replaced by Korean token and constitute Korean sentences in its original form.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Vocabulary Communicating Method", |
|
"sec_num": "2.3" |
|
}, |
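
{

"text": "A minimal sketch of how such a ratio could be computed is given below; the regex-based criterion, the function name, and the toy sentences are illustrative assumptions, not necessarily the exact counting rule behind the 6.9% figure.\n\n# A sketch for estimating how many Korean source sentences contain\n# Latin-alphabet tokens such as 'Host IP'. The criterion is an assumption.\nimport re\n\ndef latin_token_ratio(sentences):\n    pattern = re.compile(r'[A-Za-z]')\n    hits = sum(1 for s in sentences if pattern.search(s))\n    return hits / len(sentences)\n\nkorean_sources = ['Host IP\ub97c \ud655\uc778\ud558\uc138\uc694.', '\uc548\ub155\ud558\uc138\uc694.']  # toy examples\nprint(f'{latin_token_ratio(korean_sources):.1%} of source sentences contain English tokens')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Vocabulary Communicating Method",

"sec_num": "2.3"

},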
|
{ |
|
"text": "For the case of VS method, each language only contributes to the processing of corresponding language corpus, and different tokenizers are applied to the source and target sentences. If a vocabulary is extracted according to the VS method, source language dictionary is composed by reflecting only small fraction of the target languages, which is con- Figure 2 : Overall Architecture of NMT training process using ONE-Piece model tained in source sentences. In this case, target language token, which is not contained in source language dictionary but contained in target language dictionary, is treated as unknown.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 352, |
|
"end": 360, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Vocabulary Communicating Method", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The VC method can alleviate this problem. As previously mentioned, the VC method construct a merged corpus and the vocabulary extracted from this merged corpus is identically applied to the source and target sentences. By using VC method, the source and target language can interact within the same vocabulary and are mutually referenceable. Therefore, the source and target language can interact within the same vocabulary and are mutually referenceable. This can lead to full understanding of target language tokens in source sentences and vice verssa.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Vocabulary Communicating Method", |
|
"sec_num": "2.3" |
|
}, |
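
{

"text": "The difference between the two vocabulary methods can be sketched as follows, assuming the sentencepiece Python trainer; the file names and the vocabulary size are illustrative assumptions.\n\n# A sketch contrasting the VS and VC vocabulary methods with sentencepiece.\nimport sentencepiece as spm\n\n# VS: separate tokenizers, each trained only on its own language corpus.\nspm.SentencePieceTrainer.train(input='train.ko', model_prefix='vs_ko', vocab_size=32000)\nspm.SentencePieceTrainer.train(input='train.en', model_prefix='vs_en', vocab_size=32000)\n\n# VC: one tokenizer trained on the merged bilingual corpus and shared by both sides.\nwith open('merged.txt', 'w', encoding='utf-8') as out:\n    for path in ('train.ko', 'train.en'):\n        with open(path, encoding='utf-8') as f:\n            out.write(f.read())\nspm.SentencePieceTrainer.train(input='merged.txt', model_prefix='vc_shared', vocab_size=32000)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Vocabulary Communicating Method",

"sec_num": "2.3"

},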
|
{ |
|
"text": "ONE-Piece is a subword tokenization method that utilizes morphological analysis and the VC method. By applying morphological analysis, characteristics of an agglutinative language, that a single word can comprises multiple morphemes, can be considered. Then by following sentencepiece, applying VC method, can alleviate the out of vocabulary (OOV) problem.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ONE-Piece", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "The ONE-piece can be obtained by following processes. First, from a parallel corpus P , which is consist of source sentences S = {S i } N i=1 and target sentences T = {T i } N i=1 , merged corpus M is created. More specifically, this procedures can be described as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ONE-Piece", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "S i = {s j i } n i j=1 T i = {t j i } m i j=1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ONE-Piece", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "(1) s j i denote j th word of source sentence S i , which is segmented by whitespace, and n i indicate the word length of S i . Similarly, t j i denote j th word, and m i indicate the word length of target sentence T i , which is segmented by whitespace.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ONE-Piece", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "We apply morphological analyzer to agglutinative language. In this paper, source sentences is re-segmented by morpheme-units, through morphological analyer. This can be denoted as equation 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ONE-Piece", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Seg i = M A(S i ) = {seg j i } k i j=1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ONE-Piece", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "(2) M A indicates morphological analyzer for source language. By M A, morpheme-unit-segmented sentence Seg i is generated from source sentence S i . k i denotes morpheme-token length of Seg i . Since a word comprises one or more morphemes, k i is always equal to or greater than j i . Then by combining all the Seg i and T i into one, merged corpus M is generated as equation 3.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ONE-Piece", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "M = [T 1 , . . . , T N , Seg 1 , . . . , Seg N ]", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "ONE-Piece", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "M is composed of both source language and target language. As M is created, we can generate ONE-piece by training sentencepiece model by M . Figure 2 is an overall architecture that describes the process of training NMT model by leveraging ONE-Piece. For Korean sentences in the source part, morphological segmentation is performed with MeCab-ko, and English sentences corresponding to the target side are segmented by whitespace. After combining source sentences and target sentences, we train sentencepiece model by using them. In this process, ONE-Piece model is created. Through ONE-Piece, input sentences are segmented into subwords and fed into the encoder and decoder for training NMT model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 149, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "ONE-Piece", |
|
"sec_num": "2.4" |
|
}, |
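
{

"text": "A minimal end-to-end sketch of this procedure (Equations 1-3 and Figure 2) is given below, assuming konlpy's MeCab-ko wrapper and the sentencepiece Python trainer; the file names, vocabulary size, and helper names are illustrative assumptions rather than the exact training configuration.\n\n# A sketch of the ONE-Piece procedure: MeCab-ko segmentation of the Korean side,\n# merging with the whitespace-segmented English side, and training one shared\n# sentencepiece model on the merged corpus M.\nimport sentencepiece as spm\nfrom konlpy.tag import Mecab\n\nmecab = Mecab()\n\ndef read_lines(path):\n    with open(path, encoding='utf-8') as f:\n        return [line.strip() for line in f]\n\nsrc = read_lines('train.ko')  # Korean source sentences S\ntgt = read_lines('train.en')  # English target sentences T\n\n# Equation 2: re-segment each Korean sentence into morpheme units.\nseg = [' '.join(mecab.morphs(s)) for s in src]\n\n# Equation 3: merge the segmented source and the target sentences into M.\nwith open('merged.txt', 'w', encoding='utf-8') as out:\n    for line in tgt + seg:\n        print(line, file=out)\n\n# Train one shared sentencepiece model on M: this is the ONE-Piece model.\nspm.SentencePieceTrainer.train(input='merged.txt', model_prefix='one_piece', vocab_size=32000)\n\n# Segment both sides into subwords before feeding the NMT encoder and decoder.\nsp = spm.SentencePieceProcessor(model_file='one_piece.model')\nprint(sp.encode(' '.join(mecab.morphs(src[0])), out_type=str))\nprint(sp.encode(tgt[0], out_type=str))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "ONE-Piece",

"sec_num": "2.4"

},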
|
{ |
|
"text": "We utilized Korean-English parallel corpora from 3 different data sources for our dataset: the AI Hub Korean-English parallel corpus 1 , OpenSubtitles 2 , and the IWSLT-17 TED corpus (Cettolo et al., 2017) . We constructed 2.7 M sentence pairs from these data sources. For better NMT performance, we applied parallel corpus filtering to our corpus and construct 2.2 M sentence pairs for training. We applied the same filtering method as Park et al. (2020a) . We randomly selected 5,000 sentence pairs from our training data for validation and used IWSLT-16 and IWSLT-17 test sets, which is consist of 1,143 and 1,429 sentence pairs, for performance evaluation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 183, |
|
"end": 205, |
|
"text": "(Cettolo et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 437, |
|
"end": 456, |
|
"text": "Park et al. (2020a)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset and Experimental Setting", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Since our ultimate purpose is to check whether the performance of the NMT model can be improved only by the subword tokenization method without changing the model, we adopt vanilla transformer as our baseline. The performance evaluation of translation results was conducted based on the BLEU score (Papineni et al., 2002) . To measure the score, we adopted multi-bleu.perl script 3 in Moses.", |
|
"cite_spans": [ |
|
{ |
|
"start": 298, |
|
"end": 321, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset and Experimental Setting", |
|
"sec_num": "3.1" |
|
}, |
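
{

"text": "A minimal sketch of how the reported BLEU scores could be computed with this script is given below; the file paths are illustrative assumptions.\n\n# A sketch of BLEU evaluation with the Moses multi-bleu.perl script,\n# which reads the hypothesis from stdin and takes the reference as an argument.\nimport subprocess\n\nwith open('hypothesis.en') as hyp:\n    result = subprocess.run(\n        ['perl', 'multi-bleu.perl', 'reference.en'],\n        stdin=hyp, capture_output=True, text=True)\nprint(result.stdout)  # e.g. 'BLEU = ..., ... (BP=..., ratio=..., ...)'",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dataset and Experimental Setting",

"sec_num": "3.1"

},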
|
{ |
|
"text": "In this section, we experimentally compare and verify the performance of Korean-English machine translation using VC and VS methods. By applying each method to BPE and sentencepiece, we investigate the impact of the vocabulary method in the performance of NMT. The experimental results are shown in Table 2 . In sentencepiece, the VC method outperforms the VS method by 1.34 BLEU score on the IWSLT-16 test set and 0.99 BLEU score on the IWSLT-17 test set. Conversely for BPE, the VS method outperforms the VC method by 2.78 BLEU score on the IWSLT-16 test set and 2.42 BLEU score on the IWSLT-17 test set. There are some cases where the VS method yields a more superior performance than the VC method, depending on the tokenization algorithm. In other words, the VC method does not show consistently superior performance to the VS method.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 299, |
|
"end": 306, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Verification of the Effectiveness of the VC Method", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Currently, many studies have employed the VC method based tokenizer as a default choice, regardless of the tokenization algorithm. From this experiment, we revealed that the current default option may not be the optimal choice depending on the selection of the tokenization algorithm. We further show that selecting vocabulary method is an important factor that significantly affects machine translation performance. This indicates that the vocabulary method must be considered when adopting a tokenization algorithm to ensure the optimal machine translation performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Verification of the Effectiveness of the VC Method", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "In this section, we verify the impact of the morphological segmentation. We experimented two tokenization methods using MeCab-ko in Korean corpus. The first method is to segment by morpheme units, and the second method is to add sentencepiece after this process, as first suggested by Park et al. (2019) . Whereas Park et al. (2019) used VS method based tokenizers in all of their experiments, we utilized VS method based tokenizers for this experiment. Our results are shown in Table 3 . Applying sentencepiece after morphological segmentation demonstrates better performance in both the IWSLT-16 and IWSLT-17 test sets compared to the MeCab-ko based segmentation without sentencepiece. However, our results show that applying morphological segmentation for tokenizer training yields overall performance degradation in both test sets. This is contrary to the experimental results of Park et al. (2019) , which claim that morphological analysis consistently improves machine translation performance. The main difference between our experiment and Park et al. (2019) is the vocabulary method. From these results, we can infer that the effect of applying morphological segmentation on NMT is relatively different depending on the vocabulary method. This indicates that prior to applying morphological segmentation, the vocabulary method must be considered to get improved NMT performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 285, |
|
"end": 303, |
|
"text": "Park et al. (2019)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 314, |
|
"end": 332, |
|
"text": "Park et al. (2019)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 884, |
|
"end": 902, |
|
"text": "Park et al. (2019)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1047, |
|
"end": 1065, |
|
"text": "Park et al. (2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 479, |
|
"end": 486, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Verification of the Effectiveness of Morphological Segmentation", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "ONE-Piece differs from existing tokenizers in that it utilizes VC method and the morphological segmentation followed by sentencepiece. In this section, we verify the effectiveness of ONE-Piece by comparing NMT performance using various preprocessing strategies based on the VC method. The results are shown in Table 4 . Compared to the VC-based tokenizer, ONE-Piece produces at least 3.32 BLEU score superior translation performance. This result suggests that further improvement can be made by applying ONE-Piece to other existing sentencepiece-based NMT models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 310, |
|
"end": 317, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Verification of the ONE-Piece", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "In sections 3.2.1 and 3.2.2, we revealed that vocabulary method and morphological segmentation significantly affect the NMT performance, but neither of these consistently improve the NMT performance by themselves. However as shown in table 4, by properly combining these two factors, we can derive mutual supplementation effect which lead to a meaningful improvement in the translation performance. This can be viewed as the new criteria for constructing corpus for training tokenizer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Verification of the ONE-Piece", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "We compare the performance of vanilla transformer model applying ONE-Piece with the performance of mBART (Liu et al., 2020) . mBART was trained with 610 M params and 5.6 B tokens from the CC corpus. mBART utilized morpheme based segmentation using MeCab-Ko in the Korean corpus and applied sentencepiece in the English corpus, which is the same tokenization method as VS MeCab-ko in Table 3 . As shown in Table 5 , when the same tokenization method used in mBART was applied to the baseline model, the performance was 7.52 BLEU lower than that of mBART. However, by applying ONE-Piece to the baseline model, the performance difference narrowed to a 2.02 BLEU score. This shows that applying ONE-Piece enables the vanilla transformer model to have similar performance to the SOTA model. Although the baseline model using ONE-Piece did not exceed the performance of mBART, it is a notable result considering that the number of parameters required by the baseline model is 32 M, approximately 5% of the number of parameters compared to mBART.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 123, |
|
"text": "(Liu et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 383, |
|
"end": 390, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 405, |
|
"end": 412, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison with Existing Studies", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "The significance of this experiment is that simply by changing the tokenization method, a model with a small number of parameters can achieve a similar performance to SOTA model, which is trained with a more advanced algorithm and larger number of parameters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison with Existing Studies", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "In this study, we proposed a new tokenization method called ONE-Piece. This can provide the best performance in Korean-English machine translation compared with other tokenization methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our results quantitatively confirmed the effect of the vocabulary method and morphological segmentation on NMT performance. Furthermore, we experimentally proved that the VC method and morphological segmentation cannot consistently improve the performance of NMT by themselves. Our results showed that significant and consistent performance improvement can only be achieved in NMT if they are properly used together. By using ONE-Piece, the vanilla transformer model shows comparable translation performance to the mBART. Accordingly, we expect that companies that have difficulties using the latest PFA-based model, due to an inadequate server environment, will be able to utilize our proposed model to provide sufficiently good performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "https://aihub.or.kr 2 http://opus.nlpl.eu/ OpenSubtitles-v2018.php 3 https://github.com/moses-smt/ mosesdecoder/blob/master/scripts/ generic/multi-bleu.perl", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Meaningless yet meaningful: Morphology grounded subword-level nmt", |
|
"authors": [ |
|
{ |
|
"first": "Tamali", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Second Workshop on Subword/Character LEvel Models", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tamali Banerjee and Pushpak Bhattacharyya. 2018. Meaningless yet meaningful: Morphology grounded subword-level nmt. In Proceedings of the Sec- ond Workshop on Subword/Character LEvel Models, pages 55-60.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Language models are few-shot learners", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Tom B Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Mann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melanie", |
|
"middle": [], |
|
"last": "Ryder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jared", |
|
"middle": [], |
|
"last": "Subbiah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prafulla", |
|
"middle": [], |
|
"last": "Kaplan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Dhariwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Neelakantan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Girish", |
|
"middle": [], |
|
"last": "Shyam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanda", |
|
"middle": [], |
|
"last": "Sastry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Askell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2005.14165" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. arXiv preprint arXiv:2005.14165.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Overview of the iwslt 2017 evaluation campaign", |
|
"authors": [ |
|
{ |
|
"first": "Mauro", |
|
"middle": [], |
|
"last": "Cettolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luisa", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niehues", |
|
"middle": [], |
|
"last": "Jan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "St\u00fcker", |
|
"middle": [], |
|
"last": "Sebastian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sudoh", |
|
"middle": [], |
|
"last": "Katsuitho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshino", |
|
"middle": [], |
|
"last": "Koichiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Federmann", |
|
"middle": [], |
|
"last": "Christian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "In International Workshop on Spoken Language Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2--14", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mauro Cettolo, Marcello Federico, Luisa Bentivogli, Niehues Jan, St\u00fcker Sebastian, Sudoh Katsuitho, Yoshino Koichiro, and Federmann Christian. 2017. Overview of the iwslt 2017 evaluation campaign. In International Workshop on Spoken Language Trans- lation, pages 2-14.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Electra: Pre-training text encoders as discriminators rather than generators", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Quoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2003.10555" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark, Minh-Thang Luong, Quoc V Le, and Christopher D Manning. 2020. Electra: Pre-training text encoders as discriminators rather than genera- tors. arXiv preprint arXiv:2003.10555.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Copied monolingual data improves low-resource neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Currey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Valerio Miceli-Barone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Second Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "148--156", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anna Currey, Antonio Valerio Miceli-Barone, and Ken- neth Heafield. 2017. Copied monolingual data im- proves low-resource neural machine translation. In Proceedings of the Second Conference on Machine Translation, pages 148-156.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Proceedings of the Third Conference on Machine Translation: Shared Task Papers", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huda", |
|
"middle": [], |
|
"last": "Khayrallah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mikel", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Forcada", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "726--739", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Huda Khayrallah, Kenneth Heafield, and Mikel L Forcada. 2018. Findings of the wmt 2018 shared task on parallel corpus filtering. In Pro- ceedings of the Third Conference on Machine Trans- lation: Shared Task Papers, pages 726-739.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Subword regularization: Improving neural network translation models with multiple subword candidates", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.10959" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo. 2018. Subword regularization: Improving neural network translation models with multiple sub- word candidates. arXiv preprint arXiv:1804.10959.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Sentencepiece: A simple and language independent subword tokenizer and detokenizer for neural text processing", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1808.06226" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo and John Richardson. 2018. Sentencepiece: A simple and language independent subword tok- enizer and detokenizer for neural text processing. arXiv preprint arXiv:1808.06226.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Crosslingual language model pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.07291" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample and Alexis Conneau. 2019. Cross- lingual language model pretraining. arXiv preprint arXiv:1901.07291.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Multilingual denoising pre-training for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2001.08210" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, and Luke Zettlemoyer. 2020. Multilingual denoising pre-training for neural machine translation. arXiv preprint arXiv:2001.08210.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "A neural network based approach to automatic post-editing", |
|
"authors": [ |
|
{ |
|
"first": "Santanu", |
|
"middle": [], |
|
"last": "Pal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihaela", |
|
"middle": [], |
|
"last": "Sudip Kumar Naskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Vela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Van Genabith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "281--286", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Santanu Pal, Sudip Kumar Naskar, Mihaela Vela, and Josef van Genabith. 2016. A neural network based approach to automatic post-editing. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 281-286.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th annual meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th annual meeting of the Association for Compu- tational Linguistics, pages 311-318.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Parallel corpus filtering and korean optimized subword tokenization for machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Chanjun", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gyeongmin", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heuiseok", |
|
"middle": [], |
|
"last": "Lim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The 31st Annual Conference on Human Cognitive Language Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "221--224", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chanjun Park, Gyeongmin Kim, and HeuiSeok Lim. 2019. Parallel corpus filtering and korean optimized subword tokenization for machine translation. In The 31st Annual Conference on Human Cognitive Language Technology, pages 221-224.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Quality, not quantity? : Effect of parallel corpus quantity and quality on neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Chanjun", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yeonsu", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chanhee", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heuiseok", |
|
"middle": [], |
|
"last": "Lim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "The 32st Annual Conference on Human Cognitive Language Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "363--368", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chanjun Park, Yeonsu Lee, Chanhee Lee, and Heuiseok Lim. 2020a. Quality, not quantity? : Ef- fect of parallel corpus quantity and quality on neural machine translation. In The 32st Annual Conference on Human Cognitive Language Technology, pages 363-368.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Decoding strategies for improving low-resource machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Chanjun", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yeongwook", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kinam", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heuiseok", |
|
"middle": [], |
|
"last": "Lim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Electronics", |
|
"volume": "9", |
|
"issue": "10", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chanjun Park, Yeongwook Yang, Kinam Park, and Heuiseok Lim. 2020b. Decoding strategies for im- proving low-resource machine translation. Electron- ics, 9(10):1562.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Konlpy: Korean natural language processing in python", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Eunjeong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungzoon", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 26th Annual Conference on Human Cognitive Language Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eunjeong L. Park and Sungzoon Cho. 2014. Konlpy: Korean natural language processing in python. In Proceedings of the 26th Annual Conference on Hu- man Cognitive Language Technology, Chuncheon, Korea.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Language models are unsupervised multitask learners", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rewon", |
|
"middle": [], |
|
"last": "Child", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Amodei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "OpenAI blog", |
|
"volume": "1", |
|
"issue": "8", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Exploring the limits of transfer learning with a unified text-to-text transformer", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharan", |
|
"middle": [], |
|
"last": "Narang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Matena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanqi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter J", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.10683" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2019. Exploring the limits of transfer learning with a unified text-to-text trans- former. arXiv preprint arXiv:1910.10683.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1508.07909" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2015. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Mass: Masked sequence to sequence pre-training for language generation", |
|
"authors": [ |
|
{ |
|
"first": "Kaitao", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tie-Yan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1905.02450" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, and Tie- Yan Liu. 2019. Mass: Masked sequence to sequence pre-training for language generation. arXiv preprint arXiv:1905.02450.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Ccnet: Extracting high quality monolingual datasets from web crawl data", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Wenzek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Anne", |
|
"middle": [], |
|
"last": "Lachaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1911.00359" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Wenzek, Marie-Anne Lachaux, Alexis Con- neau, Vishrav Chaudhary, Francisco Guzm\u00e1n, Ar- mand Joulin, and Edouard Grave. 2019. Ccnet: Ex- tracting high quality monolingual datasets from web crawl data. arXiv preprint arXiv:1911.00359.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Russ", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5753--5763", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural in- formation processing systems, pages 5753-5763.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Transfer learning for lowresource neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Barret", |
|
"middle": [], |
|
"last": "Zoph", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deniz", |
|
"middle": [], |
|
"last": "Yuret", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "May", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1604.02201" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barret Zoph, Deniz Yuret, Jonathan May, and Kevin Knight. 2016. Transfer learning for low- resource neural machine translation. arXiv preprint arXiv:1604.02201.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Inference time of morphological analyzer" |
|
}, |
|
"TABREF0": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Comparison of BPE, sentencepiece and MeCab-ko segmentation results." |
|
}, |
|
"TABREF2": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Korean-English NMT results applying different vocabulary method in BPE and sentencepiece. SP refers to sentencepiece." |
|
}, |
|
"TABREF4": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Korean-English NMT results using MeCab-ko. All experiments are implemented using the VS method. sentencepiece is denoted as SP." |
|
}, |
|
"TABREF6": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Korean-English NMT results of different tokenization algorithms. All the experiments are implemented using the VC method." |
|
}, |
|
"TABREF8": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Comparison of proposed ONE-Piece model with mBART." |
|
} |
|
} |
|
} |
|
} |