{ "paper_id": "2022", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T03:12:24.931352Z" }, "title": "Geographical Distance Is The New Hyperparameter: A Case Study Of Finding The Optimal Pre-trained Language For English-isiZulu Machine Translation", "authors": [ { "first": "Muhammad", "middle": [ "Umair" ], "last": "Nasir", "suffix": "", "affiliation": { "laboratory": "", "institution": "University of the Witwatersrand", "location": { "country": "South Africa" } }, "email": "" }, { "first": "Amos", "middle": [], "last": "Mchechesi", "suffix": "", "affiliation": { "laboratory": "", "institution": "University of the Witwatersrand", "location": { "country": "South Africa" } }, "email": "" }, { "first": "Ominor", "middle": [], "last": "Ai", "suffix": "", "affiliation": { "laboratory": "", "institution": "University of the Witwatersrand", "location": { "country": "South Africa" } }, "email": "" } ], "year": "", "venue": null, "identifiers": {}, "abstract": "Stemming from the limited availability of datasets and textual resources for low-resource languages such as isiZulu, there is a significant need to be able to harness knowledge from pre-trained models to improve low resource machine translation. Moreover, a lack of techniques to handle the complexities of morphologically rich languages has compounded the unequal development of translation models, with many widely spoken African languages being left behind. This study explores the potential benefits of transfer learning in an English-isiZulu translation framework. The results indicate the value of transfer learning from closely related languages to enhance the performance of low-resource translation models, thus providing a key strategy for low-resource translation going forward. We gathered results from 8 different language corpora, including one multilingual corpus, and saw that isiXhosa-isiZulu outperformed all languages, with a BLEU score of 8.56 on the test set which was better from the multilingual corpora pre-trained model by 2.73. We also derived a new coefficient, Nasir's Geographical Distance Coefficient (NGDC) which provides an easy selection of languages for the pre-trained models. NGDC also indicated that isiXhosa should be selected as the language for the pre-trained model.", "pdf_parse": { "paper_id": "2022", "_pdf_hash": "", "abstract": [ { "text": "Stemming from the limited availability of datasets and textual resources for low-resource languages such as isiZulu, there is a significant need to be able to harness knowledge from pre-trained models to improve low resource machine translation. Moreover, a lack of techniques to handle the complexities of morphologically rich languages has compounded the unequal development of translation models, with many widely spoken African languages being left behind. This study explores the potential benefits of transfer learning in an English-isiZulu translation framework. The results indicate the value of transfer learning from closely related languages to enhance the performance of low-resource translation models, thus providing a key strategy for low-resource translation going forward. We gathered results from 8 different language corpora, including one multilingual corpus, and saw that isiXhosa-isiZulu outperformed all languages, with a BLEU score of 8.56 on the test set which was better from the multilingual corpora pre-trained model by 2.73. 
We also derived a new coefficient, Nasir's Geographical Distance Coefficient (NGDC) which provides an easy selection of languages for the pre-trained models. NGDC also indicated that isiXhosa should be selected as the language for the pre-trained model.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Abstract", "sec_num": null } ], "body_text": [ { "text": "Neural machine translation aims to automate the translation of text or speech from one language to another utilising neural networks (Nyoni and Bassett, 2021) . Consequently, the performance of neural machine translation (NMT) models is highly dependent on the availability of large parallel corpora to provide sufficient training data. Low-resource languages which are under-represented in internet sources lack suitable training corpora and therefore suffer from limited development, obtaining poor translation performance. This phenomenon is exacerbated by a lack of content creators, dataset curators and language specialists, resulting in barriers at many stages in the translation process (Lakew et al., 2020; Zoph et al., 2016; Sennrich and Zhang, 2019) .", "cite_spans": [ { "start": 133, "end": 158, "text": "(Nyoni and Bassett, 2021)", "ref_id": "BIBREF22" }, { "start": 695, "end": 715, "text": "(Lakew et al., 2020;", "ref_id": "BIBREF13" }, { "start": 716, "end": 734, "text": "Zoph et al., 2016;", "ref_id": "BIBREF29" }, { "start": 735, "end": 760, "text": "Sennrich and Zhang, 2019)", "ref_id": "BIBREF27" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Therefore, due to the historical focus on dominant languages such as English in the development of neural machine translation (NMT) models, lowresource and morphologically complex languages remain a challenge for current translation systems (Haddow et al., 2021; Koehn and Knowles, 2017) . Due to limited resources in terms of both computational expense and available datasets, it is vital to be able to leverage knowledge from current pretrained models to provide more effective solutions. Therefore, in this investigation, the effects of transfer learning from closely related languages, as well as comparison with high-resourced languages for pre-trained scenario, is explored in the context of English to Zulu translation.", "cite_spans": [ { "start": 241, "end": 262, "text": "(Haddow et al., 2021;", "ref_id": "BIBREF7" }, { "start": 263, "end": 287, "text": "Koehn and Knowles, 2017)", "ref_id": "BIBREF10" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Furthermore, this study derives the Nasir's Geographical Distance coefficient. Geographical Distance (GD) (Holman et al., 2007) has been studied for various scientific research areas (Bei et al., 2021; Krajsa and Fojtova, 2011; Riginos and Nachman, 2001 ) as it provides deep insights in many aspects. We will also use GD as a hyperparameter for an attempt to get a language for a pre-trained model in an effective and with a O(n) complexity. 
Although there are many ways to find GD, we will use literal approximation of distance in kilometers and suggest the techniques in future directions.", "cite_spans": [ { "start": 106, "end": 127, "text": "(Holman et al., 2007)", "ref_id": "BIBREF8" }, { "start": 183, "end": 201, "text": "(Bei et al., 2021;", "ref_id": "BIBREF1" }, { "start": 202, "end": 227, "text": "Krajsa and Fojtova, 2011;", "ref_id": "BIBREF12" }, { "start": 228, "end": 253, "text": "Riginos and Nachman, 2001", "ref_id": "BIBREF25" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Previous studies have indicated poor translation performance for the isiZulu languages due to its morphological complexity and limited available data (Martinus and Abbott, 2019) . The challenging nature of English-isiZulu translation is highlighted in a benchmark of five low-resource African languages by Martinus and Abbott (2019) , where isiZulu obtains a much poorer BLEU score in comparison to other evaluated languages. The study suggests that the collection of higher quality datasets for isiZulu would greatly benefit translation performance.", "cite_spans": [ { "start": 150, "end": 177, "text": "(Martinus and Abbott, 2019)", "ref_id": "BIBREF17" }, { "start": 306, "end": 332, "text": "Martinus and Abbott (2019)", "ref_id": "BIBREF17" } ], "ref_spans": [], "eq_spans": [], "section": "Background", "sec_num": "1.1" }, { "text": "Furthermore, the challenges associated with the morphological complexity of Nguni languages such as isiZulu are tackled in a study by Moeng et al. (2021) . The investigation explores the use of supervised sequence-to-sequence models to tokenize isiZulu, isiXhosa, isiNdebele and siSwati sentences, demonstrating promising results for improved segmentation of morphologically complex Nguni languages.", "cite_spans": [ { "start": 134, "end": 153, "text": "Moeng et al. (2021)", "ref_id": "BIBREF18" } ], "ref_spans": [], "eq_spans": [], "section": "Background", "sec_num": "1.1" }, { "text": "A notable study by Nyoni and Bassett (2021) compares the use of zero-shot learning, transfer learning and multi-lingual learning on three Bantu languages, namely isiZulu, isiXhosa and chiShona. The results indicate that multi-lingual learning where a many-to-many model was trained using three different language pairs, English-isiZulu, English-isiXhosa and isiXhosa-isiZulu led to optimal results on their custom dataset.", "cite_spans": [ { "start": 19, "end": 43, "text": "Nyoni and Bassett (2021)", "ref_id": "BIBREF22" } ], "ref_spans": [], "eq_spans": [], "section": "Background", "sec_num": "1.1" }, { "text": "In addition, the study found that transfer learning from a closely related Bantu language is highly effective for low resource translation models, with statistically significant results being obtained when transfer learning to isiZulu using the pretrained English-to-isiXhosa model (Nyoni and Bassett, 2021) . In contrast, transfer learning from the English-to-Shona model did not yield any statistically significant improvement, indicating the role of morphological similarity in the transfer learning process.", "cite_spans": [ { "start": 282, "end": 307, "text": "(Nyoni and Bassett, 2021)", "ref_id": "BIBREF22" } ], "ref_spans": [], "eq_spans": [], "section": "Background", "sec_num": "1.1" }, { "text": "There has been a lot of work in providing assistance to low-resourced languages for machine translation focus of the area. 
Neubig and Hu (2018) trained multilingual models as seed models and then continued training on low-resourced language. Sennrich et al. (2015) looks into training monolingual data with automatic back-translation (Edunov et al., 2018; Caswell et al., 2019; Edunov et al., 2019) to improve scores through only a monolingual data. Another work that utilizes backtranslation for effecctive NMT training is done by Dou et al. (2020) . Koneru et al. (2022) proposes a cost-effective training procedure to increase the performance of models on NMT tasks, utilizing a small number of annotated sentences and dictionary entries. Park et al. (2020) looked into decoding strategies for low-resourced languages in an attempt to improve training. Nguyen and Chiang (2017) looked into related languages to a target language for low-resourced languages to prove effectiveness of similar languages.", "cite_spans": [ { "start": 123, "end": 143, "text": "Neubig and Hu (2018)", "ref_id": "BIBREF19" }, { "start": 242, "end": 264, "text": "Sennrich et al. (2015)", "ref_id": "BIBREF26" }, { "start": 334, "end": 355, "text": "(Edunov et al., 2018;", "ref_id": "BIBREF5" }, { "start": 356, "end": 377, "text": "Caswell et al., 2019;", "ref_id": null }, { "start": 378, "end": 398, "text": "Edunov et al., 2019)", "ref_id": "BIBREF6" }, { "start": 532, "end": 549, "text": "Dou et al. (2020)", "ref_id": "BIBREF4" }, { "start": 552, "end": 572, "text": "Koneru et al. (2022)", "ref_id": "BIBREF11" }, { "start": 742, "end": 760, "text": "Park et al. (2020)", "ref_id": "BIBREF24" } ], "ref_spans": [], "eq_spans": [], "section": "Background", "sec_num": "1.1" }, { "text": "Similarly, this study aims to investigate whether transfer learning from a morphologically similar language will be effective on the novel, highquality Umsuka English-isiZulu parallel corpus and if so, how does it perform when we use highresourced mono-and multi-lingual corpora. This study will also derive a formula which will ease the way for selecting a language for a pre-trained model.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Background", "sec_num": "1.1" }, { "text": "This investigation evaluates several models pretrained on different language pairs, both low-and high-resourced, on a recently release English-Zulu parallel corpus. The dataset utilized to fine-tune and benchmark the models is discussed below.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Methodology", "sec_num": "2" }, { "text": "The Umsuka English-isiZulu Parallel Corpus (Mabuya et al., 2021) provides a novel, high-quality parallel dataset for machine translation, containing English sentences sampled from both News Crawl datasets which were then translated into isiZulu, and isiZulu sentences from the NCHLT monolingual corpus and UKZN isiZulu National monolingual corpus, which were then translated into English. Each translation was performed twice, by two differing translators, due to the high morphological complexity of the isiZulu language. This also serves the purpose of considering one translation as a reference and the other as target. This can be validated as both have been translated by human annotators and are different from each other. 
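As a minimal illustration of how the double-translated corpus can be consumed, the sketch below builds (source, target, reference) triples from the two independent isiZulu translations; it assumes the corpus has been exported as a CSV file with one English column and two isiZulu columns, and the file name and column names are illustrative assumptions rather than the corpus's actual schema.

import csv

# Hypothetical export of the Umsuka corpus: columns "english", "isizulu_a", "isizulu_b".
# One isiZulu translation is used as the training target, the other as an evaluation reference.
pairs, references = [], []
with open("umsuka_en_zu.csv", newline="", encoding="utf-8") as f:
    for row in csv.DictReader(f):
        pairs.append((row["english"], row["isizulu_a"]))   # (source, target) for fine-tuning
        references.append(row["isizulu_b"])                # second human translation as reference
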
The dataset is publicly available from the Zenodo platform 1 .", "cite_spans": [ { "start": 43, "end": 64, "text": "(Mabuya et al., 2021)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Dataset", "sec_num": "2.1" }, { "text": "The three models tested are based on the MarianMT model (Junczys-Dowmunt et al., 2018) which is constructed using a Transformer architecture. Each model is pretrained on a different set of language pairs from the Helsinki Corpus.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Models", "sec_num": "2.2" }, { "text": "MarianMT (Junczys-Dowmunt et al., 2018) is a toolkit for neural machine translation written in C++ with over 1000 models trained on different language pairs from OPUS 2 , available at the Hug-gingFace Model Hub 3 . Each model is based on a Transformer encoder-decoder structure with 6 layers in each component (Junczys-Dowmunt et al., 2018) . From the available models, 8 pre-trained models were selected 4 , representing pre-training on a closely related language, pre-training on a more distantly related language within the same family and pre-training on multiple unrelated languages, with less and more data, respectively. Since each model was based on the same architecture, this allowed for a controlled comparison of the language pairs used for pre-training, as any discrepancies due to architectural differences were discounted.", "cite_spans": [ { "start": 310, "end": 340, "text": "(Junczys-Dowmunt et al., 2018)", "ref_id": "BIBREF9" } ], "ref_spans": [], "eq_spans": [], "section": "Models", "sec_num": "2.2" }, { "text": "Since isiXhosa and isiZulu are both part of the Nguni branch of Bantu languages, isiXhosa is closely related to isiZulu in the Bantu language family tree (Nyoni and Bassett, 2021) . As well as Shona, or chiShona, is selected as it is also a part of Southern Bantu language group (Nyoni and Bassett, 2021) . Another Bantu language, Kiswahili was explored to determine the effects of transfer learning from another language within the Bantu family which is not as closely related to the target isiZulu language. While isiZulu is classified as a Southern Bantu and Nguni language, Kiswahili is part of the Northeast Bantu and Sabaki languages (Nurse et al., 1993) .", "cite_spans": [ { "start": 154, "end": 179, "text": "(Nyoni and Bassett, 2021)", "ref_id": "BIBREF22" }, { "start": 279, "end": 304, "text": "(Nyoni and Bassett, 2021)", "ref_id": "BIBREF22" }, { "start": 640, "end": 660, "text": "(Nurse et al., 1993)", "ref_id": "BIBREF21" } ], "ref_spans": [], "eq_spans": [], "section": "Models", "sec_num": "2.2" }, { "text": "Twi, or Akan-kasa, is spoken in Ghana, has been selected to have a representation from Western Africa and to explore the effects a dialect of the Akan language on fine-tuning isiZulu. Luganda is selected as a representation from Niger-Congo family of languages and is spoken in East-African Country of Uganda. This will able us to explore the fine-tuning regime in Niger-Congo languages.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Models", "sec_num": "2.2" }, { "text": "Arabic and French are selected as they are morphologically very different and are considered to be high-resourced (Ali et al., 2014; Besacier et al., 2014) . We explore effects of fine-tuning highresourced languages with different morphologies. 
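To make the fine-tuning setup concrete, the following is a minimal sketch of loading one of these pre-trained MarianMT checkpoints with the HuggingFace transformers library and computing a fine-tuning loss on an English-isiZulu pair; the checkpoint name, the example sentence pair and the omitted training loop are illustrative, and the released repository should be consulted for the exact training configuration.

from transformers import MarianMTModel, MarianTokenizer

# Illustrative checkpoint: an OPUS-MT English-isiXhosa model; any of the other
# pre-training language pairs discussed in this section can be substituted.
checkpoint = "Helsinki-NLP/opus-mt-en-xh"
tokenizer = MarianTokenizer.from_pretrained(checkpoint)
model = MarianMTModel.from_pretrained(checkpoint)

# Encode an illustrative English-isiZulu pair; text_target requires a recent transformers version.
batch = tokenizer(
    ["The children are playing outside."],
    text_target=["Izingane zidlala ngaphandle."],
    return_tensors="pt", padding=True, truncation=True,
)
loss = model(**batch).loss  # standard seq2seq cross-entropy; minimise this in the fine-tuning loop
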
As the notion of having more and multi-lingual data will be better for fine-tuning, we select a corpus of Romance languages, which is created by joining 48 Romance languages including French, Italian, Spanish, Walloon, Catalan, Occitan, Romansh etc. We include Romance languages so that we can cover the aspect of big multi-lingual corpora being fine-tuned on low-resourced isiZulu and to prove our hypothesis.", "cite_spans": [ { "start": 114, "end": 132, "text": "(Ali et al., 2014;", "ref_id": "BIBREF0" }, { "start": 133, "end": 155, "text": "Besacier et al., 2014)", "ref_id": "BIBREF2" } ], "ref_spans": [], "eq_spans": [], "section": "Models", "sec_num": "2.2" }, { "text": "3 https://huggingface.co/ 4 https://github.com/umair-nasir14/NGDC", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Models", "sec_num": "2.2" }, { "text": "We believe all experiments must be Reproducible. To achieve this we are open-sourcing our code on GitHub (added in the footnote previously).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Implementation Reproducibility", "sec_num": "2.3" }, { "text": "Each model was benchmarked on the test set using the BLEU (Papineni et al., 2002) score as tabulated in Table 1 below. It can be observed that the optimal model is given by the MarianMT model pre-trained on the English-Xhosa dataset. This confirms our hypothesis that transfer learning from a geographically distant language would result in poor performance. Here GD is in Kilometers (Km) and corpus size is in Number Of Sentences in millions (M).", "cite_spans": [ { "start": 58, "end": 81, "text": "(Papineni et al., 2002)", "ref_id": "BIBREF23" } ], "ref_spans": [ { "start": 104, "end": 111, "text": "Table 1", "ref_id": null } ], "eq_spans": [], "section": "Results", "sec_num": "3" }, { "text": "In Fig. 1 below, we can observe that the Mar-ianMT model pre-trained on the English-Xhosa dataset outperforms all other models by a good margin, obtaining a final BLEU score of 8.56. This result suggests that the morphological similarities between the isiZulu and isiXhosa languages plays a strong role in the benefits attained through finetuning.", "cite_spans": [], "ref_spans": [ { "start": 3, "end": 9, "text": "Fig. 1", "ref_id": null } ], "eq_spans": [], "section": "Results", "sec_num": "3" }, { "text": "Following identification of the optimal model, the MarianMT model pre-trained on the En-Xh dataset was further fine-tuned for 75 epochs on Umsuka dataset, giving a final optimal BLEU score of 17.61 on training set and 13.73 on test.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Results", "sec_num": "3" }, { "text": "We now present an analysis of the results in light of both the underlying theory and previous literature. In order to further understand the effects of pre-training on different languages, the datasets used for pre-training of the MarianMT models were inspected. Notably, although the number of sentences in English-Xhosa dataset is in order of magnitudes less than Romance languages corpus but still performs better. This justifies our hypothesis and opens up a path to effective fine-tuning through the knowladge of morphologies and not by adding multiple languages into a single corpus. 
The Arabic and French corpora contain approximately 5 and 23 times more data, respectively, which further supports the hypothesis above: a closer GD with less data is, in many ways, preferable to larger data at a farther GD.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Analysis", "sec_num": "4" }, { "text": "The other selected Bantu languages, Kiswahili and chiShona, performed almost on par with Arabic and French despite orders of magnitude less data. (Table 1 : BLEU scores, GD and corpora size. Figure 1 : BLEU scores per epoch for the different pre-training languages, showing the high performance of the morphologically similar isiXhosa, which outperforms the model trained on a very large corpus as well as the remaining corpora.)", "cite_spans": [], "ref_spans": [ { "start": 188, "end": 195, "text": "Table 1", "ref_id": null }, { "start": 231, "end": 239, "text": "Figure 1", "ref_id": null } ], "eq_spans": [], "section": "Analysis", "sec_num": "4" }, { "text": "This suggests that even if these languages are not as similar to isiZulu, being spoken very close to where isiZulu is spoken has a strong impact: similar languages are spoken in neighbouring cities and countries, and the results reflect this. Twi and Luganda, which have very little data and a higher GD, give very poor results.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Analysis", "sec_num": "4" }, { "text": "From Table 1 , we also observe that the distance between the target language and the language of the pre-trained model is a very important factor. On its own it can largely serve the purpose of choosing the language of the pre-trained model, but we look one step deeper, since one can argue that the Romance languages corpus, French and Arabic perform relatively well even though their distances are larger. We therefore also consider the size of the corpus (Table 1) , which motivates deriving a relationship that involves both distance and corpus size. This is explained in the upcoming sub-section.", "cite_spans": [], "ref_spans": [ { "start": 5, "end": 12, "text": "Table 1", "ref_id": null }, { "start": 435, "end": 444, "text": "(Table 1)", "ref_id": null } ], "eq_spans": [], "section": "Analysis", "sec_num": "4" }, { "text": "In Figure 2 we can observe a sensible relationship between BLEU scores and distance, and as a rule of thumb there should also be a relationship with corpus size (Lin et al., 2019) . Further analysis shows that neither distance alone nor corpus size alone can be relied upon when selecting a language for the pre-trained model. We therefore derive a formula that takes both distance and corpus size into account. The formula is intended to be used before training, to decide which language corpus to select. ", "cite_spans": [ { "start": 177, "end": 195, "text": "(Lin et al., 2019)", "ref_id": "BIBREF15" } ], "ref_spans": [ { "start": 3, "end": 11, "text": "Figure 2", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "Nasir's Geographical Distance Coefficient", "sec_num": "4.1" }, { "text": "z = \\frac{cD}{(1-c)S}, \\qquad \\delta = \\begin{cases} 1, & \\text{if } D \\geq D_{\\max} \\\\ \\frac{\\exp(z)}{1+\\exp(z)}, & \\text{otherwise} \\end{cases}", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Nasir's Geographical Distance Coefficient", "sec_num": "4.1" }, { "text": "where D is the distance between the language to be fine-tuned and the language of the pre-trained model, S is the size of the corpus, and c is a weight coefficient, set to 0.4, which can act as a hyperparameter. D_max is also a hyperparameter, to be tuned when the coefficient is used for different languages in different parts of the world. \u03b4 is the coefficient we introduce, Nasir's Geographical Distance Coefficient (NGDC). The goal is to minimise NGDC. 
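A minimal sketch of how NGDC can be computed and used to rank candidate pre-training languages follows; the haversine approximation of GD, the function names, the value of D_max, the units assumed for S and the example figures are illustrative assumptions rather than values taken from this paper.

import math

def gd_km(lat1, lon1, lat2, lon2):
    # Great-circle (haversine) distance in kilometres: one literal approximation of GD.
    # Lambert's or Vincenty's formulae could be substituted for higher precision.
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dphi, dlmb = math.radians(lat2 - lat1), math.radians(lon2 - lon1)
    a = math.sin(dphi / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dlmb / 2) ** 2
    return 2 * 6371.0 * math.asin(math.sqrt(a))

def ngdc(distance_km, corpus_size, c=0.4, d_max=5000.0):
    # z = cD / ((1 - c)S); delta = 1 if D >= D_max, else exp(z) / (1 + exp(z)).
    # corpus_size is taken here as a raw sentence count; d_max is an arbitrary example
    # value, since the paper treats it as a hyperparameter to be tuned.
    if distance_km >= d_max:
        return 1.0
    z = (c * distance_km) / ((1.0 - c) * corpus_size)
    if z > 50:  # sigmoid has saturated; also avoids overflow in exp
        return 1.0
    return math.exp(z) / (1.0 + math.exp(z))

# Illustrative ranking with made-up (distance_km, corpus_size) pairs; lower NGDC is better.
candidates = {"isiXhosa": (500.0, 130_000.0), "French": (9_000.0, 3_000_000.0)}
best = min(candidates, key=lambda lang: ngdc(*candidates[lang]))
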
Table 2 and Figures 3 and 4 show the results and the effectiveness of the introduced NGDC. Without imposing the penalty, the Romance languages, Arabic and French appear as desirable pre-trained model languages alongside isiXhosa and Kiswahili, which is expected, since some have more data and others are near the target language; however, we want morphologically closer languages, which yield better results. It is also preferable to leave a smaller carbon footprint and to use fewer training resources. With the penalty, only isiXhosa and Kiswahili remain as desirable choices, which is ultimately better from every perspective.", "cite_spans": [], "ref_spans": [ { "start": 451, "end": 458, "text": "Table 2", "ref_id": "TABREF2" }, { "start": 461, "end": 476, "text": "Figures 3 and 4", "ref_id": "FIGREF1" } ], "eq_spans": [], "section": "Nasir's Geographical Distance Coefficient", "sec_num": "4.1" }, { "text": "The potential impacts of this investigation can be explored in light of the possible contributions, risks and societal impact.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Impact Statement", "sec_num": "5" }, { "text": "The study poses potential benefits to further research into low-resource languages as it motivates careful choice of the pre-trained model used for transfer learning in order to improve performance on low-resource languages. This could provide a vital tool to improve the efficiency and performance of low-resource translation pipelines, especially in resource-constrained environments. In addition, this principle could be applied more broadly to other language groups with morphologically similar languages.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Applications and Benefits", "sec_num": "5.1" }, { "text": "Moreover, effective transfer learning provides the additional advantage of decreased computational expense, since prior knowledge from previously trained networks can be leveraged effectively. This could work to mitigate the substantial detrimental environmental impact stemming from the intensive GPU training required to train neural machine translation models. This is critical to ensure sustainable development of machine translation models by minimising resource waste.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Applications and Benefits", "sec_num": "5.1" }, { "text": "It should be noted that any conclusions drawn from the study are based on the BLEU score as the sole evaluation metric. This may provide a limited view of the true translation performance, as BLEU is based on n-gram similarity and does not necessarily measure whether the meaning of a sentence has been captured. A further improvement could be to conduct a similar study with additional expertise from a linguistic specialist to verify whether the output of the translation models is valid.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Limitations and Drawbacks", "sec_num": "5.2" }, { "text": "Societal impacts of low-resource neural machine translation include furthering accessibility of information to under-represented languages and working to close the digital divide between high-resource and low-resource languages. 
Machine translation is an essential component of applications ranging from voice-assisted smartphone applications that provide healthcare to rural communities to ensuring multi-lingual access to educational materials. It is therefore vital that machine translation technology is accessible and functional for low-resource languages, so that valuable tools with a beneficial societal impact can be built.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Social Impact", "sec_num": "5.3" }, { "text": "English-isiZulu translation has historically obtained poor results on translation benchmarks due to a lack of high-quality training data and appropriate tokenization schemes able to handle the agglutinative structure of isiZulu sentences. In this investigation, the challenges of isiZulu translation in terms of both morphological complexity and a lack of textual resources are explored using the recently released Umsuka English-isiZulu Parallel Corpus. In order to investigate the impact of the pre-trained model selected for transfer learning, several models were fine-tuned and benchmarked on the Umsuka dataset.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion and Future Directions", "sec_num": "6" }, { "text": "We evaluated MarianMT models pre-trained on English-Xhosa, English-Swahili, English-Shona, English-Twi, English-Luganda, English-Arabic, English-French and a multilingual English-Romance corpus, respectively. The study found that the pre-trained English-Xhosa model attained the optimal results by a substantial margin. Thus, the results indicate that transfer learning is particularly effective when the languages are within the same sub-family, while transfer learning is less effective when the model is pre-trained on a more distantly related language, regardless of the size of the data, to an extreme extent. We have also introduced the novel Nasir's Geographical Distance Coefficient, which helps researchers select a language for the pre-trained model effectively and results in using fewer resources.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion and Future Directions", "sec_num": "6" }, { "text": "Therefore, this study motivates careful choice of the pre-trained model used for transfer learning, utilising existing knowledge of language family trees, to promote improved performance of low-resource translation. In addition, we have open-sourced 5 our best model, which was fine-tuned for 75 epochs using the original MarianMT model pre-trained on the English-Xhosa language pair, obtaining a final BLEU score of 17.61 on the training set and 13.73 on the test set. We have also gathered all model cards for the models that were used, for further experimentation.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion and Future Directions", "sec_num": "6" }, { "text": "This study yields promising future directions, as the experiment was conducted on only 8 corpora. We suggest increasing this number and observing how the results vary. We also suggest combining the Bantu languages into one multilingual corpus and observing the result. Since the experiment was performed on the novel Umsuka parallel corpus, the study should be extended to more common benchmarks, and to other low-resourced languages from different continents of the world. 
We have derived a formula that takes into the account just the distance and the size of corpora, a promising research would be to derive a formula that takes morphologies and/or phonologies and fives a distance based on that. With NGDC at hand, it motivates to create a framework where one enters a target language, a D max and a value for weight coefficient c and gets desirable models to train on. There are many precise ways of finding GD, such as Lambert's formula (Lambert, 1942) and Vincenty's formula (Vincenty, 1975) which may enhance NGDC's performance. It also opens up ways to introduce morphology in the formula, which we expect it to improve the overall selection of the models.", "cite_spans": [ { "start": 948, "end": 963, "text": "(Lambert, 1942)", "ref_id": "BIBREF14" }, { "start": 987, "end": 1003, "text": "(Vincenty, 1975)", "ref_id": "BIBREF28" } ], "ref_spans": [], "eq_spans": [], "section": "Conclusion and Future Directions", "sec_num": "6" }, { "text": "https://zenodo.org/record/5035171# .YZvn1fFBy3J2 https://opus.nlpl.eu/", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "https://huggingface.co/MUNasir/ umsuka-en-zu", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null } ], "back_matter": [], "bib_entries": { "BIBREF0": { "ref_id": "b0", "title": "Advances in dialectal arabic speech recognition: A study using twitter to improve egyptian asr", "authors": [ { "first": "Ahmed", "middle": [], "last": "Ali", "suffix": "" }, { "first": "Hamdy", "middle": [], "last": "Mubarak", "suffix": "" }, { "first": "Stephan", "middle": [], "last": "Vogel", "suffix": "" } ], "year": 2014, "venue": "Proceedings of the 11th International Workshop on Spoken Language Translation: Papers", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ahmed Ali, Hamdy Mubarak, and Stephan Vogel. 2014. Advances in dialectal arabic speech recognition: A study using twitter to improve egyptian asr. In Pro- ceedings of the 11th International Workshop on Spo- ken Language Translation: Papers.", "links": null }, "BIBREF1": { "ref_id": "b1", "title": "Motivations and willingness to provide care from a geographical distance, and the impact of distance care on caregivers' mental and physical health: A mixed-method systematic review protocol", "authors": [ { "first": "Eva", "middle": [], "last": "Bei", "suffix": "" }, { "first": "Miko\u0142aj", "middle": [], "last": "Zarzycki", "suffix": "" }, { "first": "Val", "middle": [], "last": "Morrison", "suffix": "" }, { "first": "Noa", "middle": [], "last": "Vilchinsky", "suffix": "" } ], "year": 2021, "venue": "BMJ open", "volume": "11", "issue": "7", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Eva Bei, Miko\u0142aj Zarzycki, Val Morrison, and Noa Vilchinsky. 2021. Motivations and willingness to provide care from a geographical distance, and the impact of distance care on caregivers' mental and physical health: A mixed-method systematic review protocol. 
BMJ open, 11(7):e045660.", "links": null }, "BIBREF2": { "ref_id": "b2", "title": "Introduction to the special issue on processing under-resourced languages", "authors": [ { "first": "Laurent", "middle": [], "last": "Besacier", "suffix": "" }, { "first": "Etienne", "middle": [], "last": "Barnard", "suffix": "" }, { "first": "Alexey", "middle": [], "last": "Karpov", "suffix": "" }, { "first": "Tanja", "middle": [], "last": "Schultz", "suffix": "" } ], "year": 2014, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Laurent Besacier, Etienne Barnard, Alexey Karpov, and Tanja Schultz. 2014. Introduction to the special issue on processing under-resourced languages.", "links": null }, "BIBREF4": { "ref_id": "b4", "title": "Dynamic data selection and weighting for iterative back-translation", "authors": [ { "first": "Zi-Yi", "middle": [], "last": "Dou", "suffix": "" }, { "first": "Antonios", "middle": [], "last": "Anastasopoulos", "suffix": "" }, { "first": "Graham", "middle": [], "last": "Neubig", "suffix": "" } ], "year": 2020, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2004.03672" ] }, "num": null, "urls": [], "raw_text": "Zi-Yi Dou, Antonios Anastasopoulos, and Graham Neubig. 2020. Dynamic data selection and weight- ing for iterative back-translation. arXiv preprint arXiv:2004.03672.", "links": null }, "BIBREF5": { "ref_id": "b5", "title": "Understanding back-translation at scale", "authors": [ { "first": "Sergey", "middle": [], "last": "Edunov", "suffix": "" }, { "first": "Myle", "middle": [], "last": "Ott", "suffix": "" }, { "first": "Michael", "middle": [], "last": "Auli", "suffix": "" }, { "first": "David", "middle": [], "last": "Grangier", "suffix": "" } ], "year": 2018, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1808.09381" ] }, "num": null, "urls": [], "raw_text": "Sergey Edunov, Myle Ott, Michael Auli, and David Grangier. 2018. Understanding back-translation at scale. arXiv preprint arXiv:1808.09381.", "links": null }, "BIBREF6": { "ref_id": "b6", "title": "On the evaluation of machine translation systems trained with back-translation", "authors": [ { "first": "Sergey", "middle": [], "last": "Edunov", "suffix": "" }, { "first": "Myle", "middle": [], "last": "Ott", "suffix": "" }, { "first": "Marc'aurelio", "middle": [], "last": "Ranzato", "suffix": "" }, { "first": "Michael", "middle": [], "last": "Auli", "suffix": "" } ], "year": 2019, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1908.05204" ] }, "num": null, "urls": [], "raw_text": "Sergey Edunov, Myle Ott, Marc'Aurelio Ranzato, and Michael Auli. 2019. On the evaluation of machine translation systems trained with back-translation. 
arXiv preprint arXiv:1908.05204.", "links": null }, "BIBREF7": { "ref_id": "b7", "title": "Survey of low-resource machine translation", "authors": [ { "first": "Barry", "middle": [], "last": "Haddow", "suffix": "" }, { "first": "Rachel", "middle": [], "last": "Bawden", "suffix": "" }, { "first": "Antonio", "middle": [], "last": "Valerio Miceli", "suffix": "" }, { "first": "Jind\u0159ich", "middle": [], "last": "Barone", "suffix": "" }, { "first": "Alexandra", "middle": [], "last": "Helcl", "suffix": "" }, { "first": "", "middle": [], "last": "Birch", "suffix": "" } ], "year": 2021, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2109.00486" ] }, "num": null, "urls": [], "raw_text": "Barry Haddow, Rachel Bawden, Antonio Valerio Miceli Barone, Jind\u0159ich Helcl, and Alexandra Birch. 2021. Survey of low-resource machine translation. arXiv preprint arXiv:2109.00486.", "links": null }, "BIBREF8": { "ref_id": "b8", "title": "On the relation between structural diversity and geographical distance among languages: observations and computer simulations", "authors": [ { "first": "W", "middle": [], "last": "Eric", "suffix": "" }, { "first": "Christian", "middle": [], "last": "Holman", "suffix": "" }, { "first": "Dietrich", "middle": [], "last": "Schulze", "suffix": "" }, { "first": "S\u00f8ren", "middle": [], "last": "Stauffer", "suffix": "" }, { "first": "", "middle": [], "last": "Wichmann", "suffix": "" } ], "year": 2007, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Eric W Holman, Christian Schulze, Dietrich Stauffer, and S\u00f8ren Wichmann. 2007. On the relation between structural diversity and geographical distance among languages: observations and computer simulations.", "links": null }, "BIBREF9": { "ref_id": "b9", "title": "Marian: Fast neural machine translation in c++", "authors": [ { "first": "Marcin", "middle": [], "last": "Junczys-Dowmunt", "suffix": "" }, { "first": "Roman", "middle": [], "last": "Grundkiewicz", "suffix": "" }, { "first": "Tomasz", "middle": [], "last": "Dwojak", "suffix": "" }, { "first": "Hieu", "middle": [], "last": "Hoang", "suffix": "" }, { "first": "Kenneth", "middle": [], "last": "Heafield", "suffix": "" }, { "first": "Tom", "middle": [], "last": "Neckermann", "suffix": "" }, { "first": "Frank", "middle": [], "last": "Seide", "suffix": "" }, { "first": "Ulrich", "middle": [], "last": "Germann", "suffix": "" }, { "first": "Alham", "middle": [], "last": "Fikri Aji", "suffix": "" }, { "first": "Nikolay", "middle": [], "last": "Bogoychev", "suffix": "" } ], "year": 2018, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1804.00344" ] }, "num": null, "urls": [], "raw_text": "Marcin Junczys-Dowmunt, Roman Grundkiewicz, Tomasz Dwojak, Hieu Hoang, Kenneth Heafield, Tom Neckermann, Frank Seide, Ulrich Germann, Al- ham Fikri Aji, Nikolay Bogoychev, et al. 2018. Mar- ian: Fast neural machine translation in c++. arXiv preprint arXiv:1804.00344.", "links": null }, "BIBREF10": { "ref_id": "b10", "title": "Six challenges for neural machine translation", "authors": [ { "first": "Philipp", "middle": [], "last": "Koehn", "suffix": "" }, { "first": "Rebecca", "middle": [], "last": "Knowles", "suffix": "" } ], "year": 2017, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1706.03872" ] }, "num": null, "urls": [], "raw_text": "Philipp Koehn and Rebecca Knowles. 2017. 
Six chal- lenges for neural machine translation. arXiv preprint arXiv:1706.03872.", "links": null }, "BIBREF11": { "ref_id": "b11", "title": "Costeffective training in low-resource neural machine translation", "authors": [ { "first": "Sai", "middle": [], "last": "Koneru", "suffix": "" }, { "first": "Danni", "middle": [], "last": "Liu", "suffix": "" } ], "year": 2022, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2201.05700" ] }, "num": null, "urls": [], "raw_text": "Sai Koneru, Danni Liu, and Jan Niehues. 2022. Cost- effective training in low-resource neural machine translation. arXiv preprint arXiv:2201.05700.", "links": null }, "BIBREF12": { "ref_id": "b12", "title": "Rtt measurement and its dependence on the real geographical distance", "authors": [ { "first": "Ondrej", "middle": [], "last": "Krajsa", "suffix": "" }, { "first": "Lucie", "middle": [], "last": "Fojtova", "suffix": "" } ], "year": 2011, "venue": "2011 34th International Conference on Telecommunications and Signal Processing (TSP)", "volume": "", "issue": "", "pages": "231--234", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ondrej Krajsa and Lucie Fojtova. 2011. Rtt measure- ment and its dependence on the real geographical distance. In 2011 34th International Conference on Telecommunications and Signal Processing (TSP), pages 231-234. IEEE.", "links": null }, "BIBREF13": { "ref_id": "b13", "title": "Low resource neural machine translation: A benchmark for five african languages", "authors": [ { "first": "M", "middle": [], "last": "Surafel", "suffix": "" }, { "first": "Matteo", "middle": [], "last": "Lakew", "suffix": "" }, { "first": "Marco", "middle": [], "last": "Negri", "suffix": "" }, { "first": "", "middle": [], "last": "Turchi", "suffix": "" } ], "year": 2020, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2003.14402" ] }, "num": null, "urls": [], "raw_text": "Surafel M Lakew, Matteo Negri, and Marco Turchi. 2020. Low resource neural machine translation: A benchmark for five african languages. arXiv preprint arXiv:2003.14402.", "links": null }, "BIBREF14": { "ref_id": "b14", "title": "The distance between two widely separated points on the surface of the earth", "authors": [ { "first": "D", "middle": [], "last": "Walter", "suffix": "" }, { "first": "", "middle": [], "last": "Lambert", "suffix": "" } ], "year": 1942, "venue": "Journal of the Washington Academy of Sciences", "volume": "32", "issue": "5", "pages": "125--130", "other_ids": {}, "num": null, "urls": [], "raw_text": "Walter D Lambert. 1942. The distance between two widely separated points on the surface of the earth. 
Journal of the Washington Academy of Sciences, 32(5):125-130.", "links": null }, "BIBREF15": { "ref_id": "b15", "title": "Choosing transfer languages for cross-lingual learning", "authors": [ { "first": "Yu-Hsiang", "middle": [], "last": "Lin", "suffix": "" }, { "first": "Chian-Yu", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Jean", "middle": [], "last": "Lee", "suffix": "" }, { "first": "Zirui", "middle": [], "last": "Li", "suffix": "" }, { "first": "Yuyan", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Mengzhou", "middle": [], "last": "Xia", "suffix": "" }, { "first": "Shruti", "middle": [], "last": "Rijhwani", "suffix": "" }, { "first": "Junxian", "middle": [], "last": "He", "suffix": "" }, { "first": "Zhisong", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Xuezhe", "middle": [], "last": "Ma", "suffix": "" } ], "year": 2019, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1905.12688" ] }, "num": null, "urls": [], "raw_text": "Yu-Hsiang Lin, Chian-Yu Chen, Jean Lee, Zirui Li, Yuyan Zhang, Mengzhou Xia, Shruti Rijhwani, Junx- ian He, Zhisong Zhang, Xuezhe Ma, et al. 2019. Choosing transfer languages for cross-lingual learn- ing. arXiv preprint arXiv:1905.12688.", "links": null }, "BIBREF17": { "ref_id": "b17", "title": "A focus on neural machine translation for african languages", "authors": [ { "first": "Laura", "middle": [], "last": "Martinus", "suffix": "" }, { "first": "Jade", "middle": [ "Z" ], "last": "Abbott", "suffix": "" } ], "year": 2019, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1906.05685" ] }, "num": null, "urls": [], "raw_text": "Laura Martinus and Jade Z Abbott. 2019. A focus on neural machine translation for african languages. arXiv preprint arXiv:1906.05685.", "links": null }, "BIBREF18": { "ref_id": "b18", "title": "Canonical and surface morphological segmentation for nguni languages", "authors": [ { "first": "Tumi", "middle": [], "last": "Moeng", "suffix": "" } ], "year": 2021, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2104.00767" ] }, "num": null, "urls": [], "raw_text": "Tumi Moeng, Sheldon Reay, Aaron Daniels, and Jan Buys. 2021. Canonical and surface morphological segmentation for nguni languages. arXiv preprint arXiv:2104.00767.", "links": null }, "BIBREF19": { "ref_id": "b19", "title": "Rapid adaptation of neural machine translation to new languages", "authors": [ { "first": "Graham", "middle": [], "last": "Neubig", "suffix": "" }, { "first": "Junjie", "middle": [], "last": "Hu", "suffix": "" } ], "year": 2018, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1808.04189" ] }, "num": null, "urls": [], "raw_text": "Graham Neubig and Junjie Hu. 2018. Rapid adapta- tion of neural machine translation to new languages. arXiv preprint arXiv:1808.04189.", "links": null }, "BIBREF20": { "ref_id": "b20", "title": "Transfer learning across low-resource, related languages for neural machine translation", "authors": [ { "first": "Q", "middle": [], "last": "Toan", "suffix": "" }, { "first": "David", "middle": [], "last": "Nguyen", "suffix": "" }, { "first": "", "middle": [], "last": "Chiang", "suffix": "" } ], "year": 2017, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1708.09803" ] }, "num": null, "urls": [], "raw_text": "Toan Q Nguyen and David Chiang. 2017. 
Trans- fer learning across low-resource, related languages for neural machine translation. arXiv preprint arXiv:1708.09803.", "links": null }, "BIBREF21": { "ref_id": "b21", "title": "Swahili and Sabaki: A linguistic history", "authors": [ { "first": "Derek", "middle": [], "last": "Nurse", "suffix": "" }, { "first": "J", "middle": [], "last": "Thomas", "suffix": "" }, { "first": "G\u00e9rard", "middle": [], "last": "Hinnebusch", "suffix": "" }, { "first": "", "middle": [], "last": "Philipson", "suffix": "" } ], "year": 1993, "venue": "", "volume": "121", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Derek Nurse, Thomas J Hinnebusch, and G\u00e9rard Philip- son. 1993. Swahili and Sabaki: A linguistic history, volume 121. Univ of California Press.", "links": null }, "BIBREF22": { "ref_id": "b22", "title": "Lowresource neural machine translation for southern african languages", "authors": [ { "first": "Evander", "middle": [], "last": "Nyoni", "suffix": "" }, { "first": "A", "middle": [], "last": "Bruce", "suffix": "" }, { "first": "", "middle": [], "last": "Bassett", "suffix": "" } ], "year": 2021, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2104.00366" ] }, "num": null, "urls": [], "raw_text": "Evander Nyoni and Bruce A Bassett. 2021. Low- resource neural machine translation for southern african languages. arXiv preprint arXiv:2104.00366.", "links": null }, "BIBREF23": { "ref_id": "b23", "title": "Bleu: a method for automatic evaluation of machine translation", "authors": [ { "first": "Kishore", "middle": [], "last": "Papineni", "suffix": "" }, { "first": "Salim", "middle": [], "last": "Roukos", "suffix": "" }, { "first": "Todd", "middle": [], "last": "Ward", "suffix": "" }, { "first": "Wei-Jing", "middle": [], "last": "Zhu", "suffix": "" } ], "year": 2002, "venue": "Proceedings of the 40th annual meeting of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "311--318", "other_ids": {}, "num": null, "urls": [], "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic evalu- ation of machine translation. In Proceedings of the 40th annual meeting of the Association for Computa- tional Linguistics, pages 311-318.", "links": null }, "BIBREF24": { "ref_id": "b24", "title": "Decoding strategies for improving low-resource machine translation", "authors": [ { "first": "Chanjun", "middle": [], "last": "Park", "suffix": "" }, { "first": "Yeongwook", "middle": [], "last": "Yang", "suffix": "" }, { "first": "Kinam", "middle": [], "last": "Park", "suffix": "" }, { "first": "Heuiseok", "middle": [], "last": "Lim", "suffix": "" } ], "year": 2020, "venue": "Electronics", "volume": "9", "issue": "10", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Chanjun Park, Yeongwook Yang, Kinam Park, and Heuiseok Lim. 2020. Decoding strategies for improv- ing low-resource machine translation. 
Electronics, 9(10):1562.", "links": null }, "BIBREF25": { "ref_id": "b25", "title": "Population subdivision in marine environments: the contributions of biogeography, geographical distance and discontinuous habitat to genetic differentiation in a blennioid fish, axoclinus nigricaudus", "authors": [ { "first": "C", "middle": [], "last": "Riginos", "suffix": "" }, { "first": "", "middle": [], "last": "Nachman", "suffix": "" } ], "year": 2001, "venue": "Molecular ecology", "volume": "10", "issue": "6", "pages": "1439--1453", "other_ids": {}, "num": null, "urls": [], "raw_text": "C Riginos and MW Nachman. 2001. Population subdi- vision in marine environments: the contributions of biogeography, geographical distance and discontin- uous habitat to genetic differentiation in a blennioid fish, axoclinus nigricaudus. Molecular ecology, 10(6):1439-1453.", "links": null }, "BIBREF26": { "ref_id": "b26", "title": "Improving neural machine translation models with monolingual data", "authors": [ { "first": "Rico", "middle": [], "last": "Sennrich", "suffix": "" }, { "first": "Barry", "middle": [], "last": "Haddow", "suffix": "" }, { "first": "Alexandra", "middle": [], "last": "Birch", "suffix": "" } ], "year": 2015, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1511.06709" ] }, "num": null, "urls": [], "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2015. Improving neural machine translation models with monolingual data. arXiv preprint arXiv:1511.06709.", "links": null }, "BIBREF27": { "ref_id": "b27", "title": "Revisiting lowresource neural machine translation: A case study", "authors": [ { "first": "Rico", "middle": [], "last": "Sennrich", "suffix": "" }, { "first": "Biao", "middle": [], "last": "Zhang", "suffix": "" } ], "year": 2019, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1905.11901" ] }, "num": null, "urls": [], "raw_text": "Rico Sennrich and Biao Zhang. 2019. Revisiting low- resource neural machine translation: A case study. arXiv preprint arXiv:1905.11901.", "links": null }, "BIBREF28": { "ref_id": "b28", "title": "Direct and inverse solutions of geodesics on the ellipsoid with application of nested equations", "authors": [ { "first": "Thaddeus", "middle": [], "last": "Vincenty", "suffix": "" } ], "year": 1975, "venue": "Survey review", "volume": "23", "issue": "176", "pages": "88--93", "other_ids": {}, "num": null, "urls": [], "raw_text": "Thaddeus Vincenty. 1975. Direct and inverse solu- tions of geodesics on the ellipsoid with application of nested equations. Survey review, 23(176):88-93.", "links": null }, "BIBREF29": { "ref_id": "b29", "title": "Transfer learning for lowresource neural machine translation", "authors": [ { "first": "Barret", "middle": [], "last": "Zoph", "suffix": "" }, { "first": "Deniz", "middle": [], "last": "Yuret", "suffix": "" }, { "first": "Jonathan", "middle": [], "last": "May", "suffix": "" }, { "first": "Kevin", "middle": [], "last": "Knight", "suffix": "" } ], "year": 2016, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1604.02201" ] }, "num": null, "urls": [], "raw_text": "Barret Zoph, Deniz Yuret, Jonathan May, and Kevin Knight. 2016. Transfer learning for low- resource neural machine translation. 
arXiv preprint arXiv:1604.02201.", "links": null } }, "ref_entries": { "FIGREF0": { "type_str": "figure", "uris": null, "text": "Relationship between BLEU scores and the distance (km) from the places where the pre-training languages are spoken to the place where isiZulu is spoken.", "num": null }, "FIGREF1": { "type_str": "figure", "uris": null, "text": "Figure 3: NGDC with Penalty. Figure 4: NGDC without Penalty.", "num": null }, "TABREF2": { "html": null, "num": null, "text": "NGDC with and without Penalty.", "type_str": "table", "content": "" } } } }