{ "paper_id": "2022", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T13:11:56.430296Z" }, "title": "Fine-tuning pre-trained models for Automatic Speech Recognition: experiments on a fieldwork corpus of Japhug (Trans-Himalayan family)", "authors": [ { "first": "S\u00e9verine", "middle": [], "last": "Guillaume", "suffix": "", "affiliation": { "laboratory": "LACITO", "institution": "Universit\u00e9 Sorbonne Nouvelle -INALCO", "location": { "country": "France (" } }, "email": "severine.guillaume@cnrs.fr" }, { "first": "Guillaume", "middle": [], "last": "Wisniewski", "suffix": "", "affiliation": { "laboratory": "Laboratoire de Linguistique Formelle (LLF)", "institution": "CNRS", "location": { "settlement": "Paris", "country": "France (" } }, "email": "guillaume.wisniewski@u-paris.fr" }, { "first": "C\u00e9cile", "middle": [], "last": "Macaire", "suffix": "", "affiliation": { "laboratory": "LACITO", "institution": "Universit\u00e9 Sorbonne Nouvelle -INALCO", "location": { "country": "France (" } }, "email": "cecile.macaire@univ-grenoble-alpes.fr" }, { "first": "Guillaume", "middle": [], "last": "Jacques", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Alexis", "middle": [], "last": "Michaud", "suffix": "", "affiliation": { "laboratory": "LACITO", "institution": "Universit\u00e9 Sorbonne Nouvelle -INALCO", "location": { "country": "France (" } }, "email": "alexis.michaud@cnrs.fr" }, { "first": "Benjamin", "middle": [], "last": "Galliot", "suffix": "", "affiliation": { "laboratory": "LACITO", "institution": "Universit\u00e9 Sorbonne Nouvelle -INALCO", "location": { "country": "France (" } }, "email": "" }, { "first": "Maximin", "middle": [], "last": "Coavoux", "suffix": "", "affiliation": { "laboratory": "", "institution": "Universit\u00e9 Grenoble Alpes -Grenoble INP -INRIA", "location": { "addrLine": "(4) CRLAO" } }, "email": "maximin.coavoux@univ-grenoble-alpes.fr" }, { "first": "Solange", "middle": [], "last": "Rossato", "suffix": "", "affiliation": { "laboratory": "", "institution": "Universit\u00e9 Grenoble Alpes -Grenoble INP -INRIA", "location": { "addrLine": "(4) CRLAO" } }, "email": "solange.rossato@univ-grenoble-alpes.fr" }, { "first": "Minh-Ch\u00e2u", "middle": [], "last": "Nguy\u00ean", "suffix": "", "affiliation": { "laboratory": "", "institution": "Universit\u00e9 Grenoble Alpes -Grenoble INP -INRIA", "location": { "addrLine": "(4) CRLAO" } }, "email": "minhchau.ntm@gmail.com" }, { "first": "Maxime", "middle": [], "last": "Fily", "suffix": "", "affiliation": { "laboratory": "LACITO", "institution": "Universit\u00e9 Sorbonne Nouvelle -INALCO", "location": { "country": "France (" } }, "email": "maxime.fily@gmail.com" } ], "year": "", "venue": null, "identifiers": {}, "abstract": "This is a report on results obtained in the development of speech recognition tools intended to support linguistic documentation efforts. The test case is an extensive fieldwork corpus of Japhug, an endangered language of the Trans-Himalayan (Sino-Tibetan) family. The goal is to reduce the transcription workload of field linguists. The method used is a deep learning approach based on the language-specific tuning of a generic pre-trained representation model, XLS-R, using a Transformer architecture. We note difficulties in implementation, in terms of learning stability. But this approach brings significant improvements nonetheless. 
The quality of phonemic transcription is improved over earlier experiments; and most significantly, the new approach allows for reaching the stage of automatic word recognition. Subjective evaluation of the tool by the author of the training data confirms the usefulness of this approach.", "pdf_parse": { "paper_id": "2022", "_pdf_hash": "", "abstract": [ { "text": "This is a report on results obtained in the development of speech recognition tools intended to support linguistic documentation efforts. The test case is an extensive fieldwork corpus of Japhug, an endangered language of the Trans-Himalayan (Sino-Tibetan) family. The goal is to reduce the transcription workload of field linguists. The method used is a deep learning approach based on the language-specific tuning of a generic pre-trained representation model, XLS-R, using a Transformer architecture. We note difficulties in implementation, in terms of learning stability. But this approach brings significant improvements nonetheless. The quality of phonemic transcription is improved over earlier experiments; and most significantly, the new approach allows for reaching the stage of automatic word recognition. Subjective evaluation of the tool by the author of the training data confirms the usefulness of this approach.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Abstract", "sec_num": null } ], "body_text": [ { "text": "The use of Transformer-type neural architectures to learn multilingual models of text and speech, coupled with methods for fine-tuning these generic representations, has opened up the possibility of developing tools for the many languages for which there is only a small amount of annotated data available. This approach has special appeal for linguistic documentation tasks: the development of semi-automatic or even automatic transcription and annotation methods based on a small amount of annotated data would reduce the annotation effort of field linguists and language workers, who could then focus their attention on linguistically and relationally meaningful tasks during fieldwork (Thieberger, 2017; Partanen et al., 2020; Prud'hommeaux et al., 2021) . In this multidisciplinary endeavour, it is clear that \"linguists and Natural Language Processing (NLP) scientists may want to adjust their expectations and workflows so that both can achieve optimal results with endangered data\" (Moeller, 2021) .", "cite_spans": [ { "start": 689, "end": 707, "text": "(Thieberger, 2017;", "ref_id": null }, { "start": 708, "end": 730, "text": "Partanen et al., 2020;", "ref_id": "BIBREF23" }, { "start": 731, "end": 758, "text": "Prud'hommeaux et al., 2021)", "ref_id": "BIBREF25" }, { "start": 990, "end": 1005, "text": "(Moeller, 2021)", "ref_id": "BIBREF15" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "The present work reports on our experiments using a pre-trained model of speech, XLS-R (Conneau et al., 2020), to develop a phonemic recognition system for a minority language of China: Japhug (Ethnologue language code: jya, Glottolog code: japh1234; see Jacques 2019, 2021). The transcription of recordings in a newly documented language is a key task for fieldworkers (linguists and language workers). 
It is also an interesting topic for the speech processing community, as it raises several challenges, epistemological as well as practical.", "cite_spans": [ { "start": 255, "end": 267, "text": "Jacques 2019", "ref_id": null }, { "start": 268, "end": 282, "text": "Jacques , 2021", "ref_id": "BIBREF11" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "First of all, the amount of data available for such languages is very small: for instance, of the 197 languages in the Pangloss Collection (Michailovsky et al., 2014) , which hosts audio recordings in various languages of the world (most of them endangered), only 44 corpora contain more than one hour of recordings. There is therefore a need for speech recognition methods that require as little training data as possible. In this respect, Japhug can be considered as an outlier, since there is a 32-hour transcribed corpus, freely available in the Pangloss Collection 1 as well as from Zenodo (Galliot et al., 2021) 2 and as a Huggingface dataset. 3 The size of this corpus is one of the reasons for choosing Japhug as the test case for the present investigations: we wanted to be able to evaluate the amount of data that is necessary to obtain an automatic transcription of good quality -an important criterion here being the linguist's evaluation of the usefulness of the automatically generated transcript, as will be discussed again below.", "cite_spans": [ { "start": 139, "end": 166, "text": "(Michailovsky et al., 2014)", "ref_id": "BIBREF13" }, { "start": 595, "end": 617, "text": "(Galliot et al., 2021)", "ref_id": "BIBREF5" }, { "start": 650, "end": 651, "text": "3", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Research in the field of resource-constrained Automatic Speech Recognition (ASR) has brought out \"the importance of considering language-specific and corpus-specific factors and experimenting with multiple approaches when developing ASR systems for languages with limited training resources\" (Morris et al., 2021, 4354) . To mention two such factors:", "cite_spans": [ { "start": 292, "end": 319, "text": "(Morris et al., 2021, 4354)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "\u2022 Endangered/little-described languages have structural features of their own, which may be widely different from those of the languages routinely taken into account in the work of the speech processing community. (It has even been argued that highly elaborate linguistic structures and typological oddities are more likely to be found in minority languages, for sociolinguistic reasons: Haudricourt 2017 [original publication: 1961]; Trudgill 2011.) For example, Japhug has a degree of morphosyntactic complexity that is particularly impressive, especially in view of its areal context (Jacques, 2021, passim) .", "cite_spans": [ { "start": 587, "end": 610, "text": "(Jacques, 2021, passim)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "\u2022 Speakers of minority languages frequently use words (or multi-word expressions, or even entire sentences) from other languages -typically the majority language of the country, or of the area (Moore, 2018; Aikhenvald, 2020) . 
The presence of various loanwords, as well as cases of code-switching in the recordings, is a challenge for the automatic transcription of linguistic fieldwork data.", "cite_spans": [ { "start": 193, "end": 206, "text": "(Moore, 2018;", "ref_id": "BIBREF16" }, { "start": 207, "end": 224, "text": "Aikhenvald, 2020)", "ref_id": "BIBREF1" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Conversely, there is one aspect in which automatic transcription tends to be easier for fieldwork data than for widely studied languages: namely, their high degree of orthographic transparency. Most endangered languages are languages transmitted through oral tradition, without a widely used writing system, and the transcriptions are usually made by linguists and language workers either in the International Phonetic Alphabet or in an orthography that is very close to the pronunciation. Thanks to this last characteristic one may realistically hope to achieve good quality transcriptions, as the system does not have to learn a complex spelling -unlike in the case of orthographies which have less straightforward correspondences between graphemes and phonemes (e.g. Uralic languages in Cyrillic orthography have a high degree of grapho-phonematic complexity, raising some technical difficulties: Gerstenberger et al., 2016) .", "cite_spans": [ { "start": 814, "end": 841, "text": "Gerstenberger et al., 2016)", "ref_id": "BIBREF6" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "1 https://pangloss.cnrs.fr/corpus/Japhug 2 https://doi.org/10.5281/zenodo.5521111 3 https://huggingface.co/datasets/BenjaminGalliot/pangloss", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "The sections below are organized as follows: we start out, in section 2, by briefly describing the model we have used. Then we move on to presenting, in section 3, the results of a first set of experiments on phonemic transcription, which show that XLS-R does indeed allow us to produce very good quality transcriptions from a small corpus of annotated data, and that these transcriptions meet a need of the linguists conducting language documentation and conservation work. However, a second set of experiments described in section 4 shows that this result is difficult to reproduce, which leads us to qualify our initial optimistic conclusion concerning the technological dimension of the work. 4", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "2 Fine-tuning pre-trained models Principle: The approach implemented in this work is based on the fine-tuning of a multilingual signal representation model, a method introduced in the field of speech recognition by Conneau et al. (2020) to build speech recognition models from little data. This approach is today at the core of many NLP models and is considered by many to be the most promising way to develop NLP and speech systems beyond the thirty or so languages (representing only 0.5 % of the world's linguistic diversity) for which there are large amounts of annotated data (Pires et al., 2019; Muller et al., 2021) .", "cite_spans": [ { "start": 214, "end": 235, "text": "Conneau et al. 
(2020)", "ref_id": null }, { "start": 580, "end": 600, "text": "(Pires et al., 2019;", "ref_id": "BIBREF24" }, { "start": 601, "end": 621, "text": "Muller et al., 2021)", "ref_id": "BIBREF18" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "The proposed approach is composed of two steps. In the first step, XLS-R, 5 a multilingual model trained in an unsupervised way on a corpus of 56,000 hours of recordings in 53 languages, is used to automatically build a language-independent, 'generic' representation of the signal. In a second step, this representation is used as input to a phonemic recognition system, trained on audio data that are time-aligned with a manual transcription provided by the linguist. This second step allows to learn how to match the signal representations with labels: in this case, it is essentially the labels corresponding to the phonemes.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "In our experiments, we used the XLS-R multilingual model 6 and the HuggingFace API (Wolf et al., 2020) to use and fine-tune it. We ran the fine-tuning for 60 epochs (i.e. 60 iterations over the training data) to be assured that the fine-tuning had converged, and we kept the last model.", "cite_spans": [ { "start": 83, "end": 102, "text": "(Wolf et al., 2020)", "ref_id": "BIBREF31" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Using the model for phoneme prediction In order to apply the method described in the previous paragraph to the task of phoneme recognition, we simply defined a set of labels corresponding to the set of characters composing the phonemes. More precisely, the set of labels used for fine-tuning is made of the 44 characters that appear in at least one Japhug phoneme. 7 This technical choice is based on the experiments reported by Wisniewski et al. (2020) showing that the prediction of the characters composing the phonemes (instead of the phonemes as units) allows to obtain good predictions, sidestepping the task of explicitly listing the phonemes of the language (for example to specify that /\u0288\u0282\u02b0/ constitutes a single phoneme, noted by a trigraph: \u0288+\u0282+\u02b0). For the sake of simplicity at an initial exploratory stage, we also removed from the manual transcriptions all the punctuation marks and the other miscellaneous symbols used by linguists in their transcriptions (symbols to note linguistic phenomena of emphasis or focus, for example).", "cite_spans": [ { "start": 429, "end": 453, "text": "Wisniewski et al. (2020)", "ref_id": "BIBREF29" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "To this set of grapho-phonemic labels is added the space, to delimit words, thereby coming a step closer to the development of a true speech recognition system for endangered languages. The addition of a special character marking the word boundaries is a novelty in our work; 8 it aims at allow-ing the system to recognize words directly. This avoids the need for post-processing or for a second system to segment the lattice of phonemes into words, such as the ones developed by Godard et al. (2018) and Okabe et al. (2021) . 
To arrive at bona fide word recognition (and thus at full-fledged Automatic Speech Recognition), use of a language model is clearly the most efficient way to go, and this method has been successfully applied in the context of some minority/endangered languages (Partanen et al., 2020; Prud'hommeaux et al., 2021) , but it should be remembered that there is huge diversity among the data sets available for endangered/low-resource languages, so that, surprising as it may seem, \"no single ASR architecture outperforms all others\" (Morris et al. 2021, 4354 ; see also Macaire et al. 2022 on two Creole languages). The use case addressed here is one in which the amount of text available is no greater than a few tens of thousands of words, i.e. an insufficient amount to train a language model according to standard workflows.", "cite_spans": [ { "start": 480, "end": 500, "text": "Godard et al. (2018)", "ref_id": "BIBREF7" }, { "start": 505, "end": 524, "text": "Okabe et al. (2021)", "ref_id": "BIBREF22" }, { "start": 787, "end": 810, "text": "(Partanen et al., 2020;", "ref_id": "BIBREF23" }, { "start": 811, "end": 838, "text": "Prud'hommeaux et al., 2021)", "ref_id": "BIBREF25" }, { "start": 1055, "end": 1080, "text": "(Morris et al. 2021, 4354", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "In order to facilitate the reproduction of the experiments, the Japhug corpus is made available as a Huggingface dataset 9 which can be used off-the-shelf with the tools described here.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Evaluation on the Japhug language", "sec_num": "3" }, { "text": "The quality of our system is evaluated using two classical metrics: the character error rate (CER), i.e. the edit distance between the reference and the prediction computed at the character level, 10 and the word error rate (WER), a similar metric computed at the word level. Note that what makes the use of the latter metric possible is that the systems we trained are capable of predicting word boundaries (which was not the case in previous work).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experimental results", "sec_num": "3.1" }, { "text": "Using a ten-hour corpus for fine-tuning XLS-R, the system obtains a CER of 7.4 % and a WER of 18.5 %. Figure 1 shows how the performances of a fine-tuned model evolve for training sets whose size is close to the corpora usually collected in fieldwork on endangered/minority languages. It turns out that the CER is already very low (12.5 %) for a training corpus containing two hours of annotated data.", "cite_spans": [], "ref_spans": [ { "start": 102, "end": 110, "text": "Figure 1", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "Experimental results", "sec_num": "3.1" }, { "text": "9 https://huggingface.co/datasets/BenjaminGalliot/pangloss", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experimental results", "sec_num": "3.1" }, { "text": "10 Our system is predicting a stream of characters and not of phonemes (as stated in \u00a72, the label set is made of the characters used to write the phonemes) and the edit operations, at the heart of the CER computation, are defined directly on the characters. Computing a phoneme error rate, in which each phoneme would be considered as an indivisible unit, would weigh errors differently.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experimental results", "sec_num": "3.1" }, { "text": "These two results show that the proposed approach makes it possible to obtain transcriptions of good quality, which reach the threshold at which the framework provided by the computer tool constitutes a useful starting point (preferable to the traditional method: completely manual input). In particular, the performance is improved by 4 points compared to the results of Wisniewski et al. (2020) , which were also based on a neural method of phonemic transcription, but which learned a signal representation only from the training data, without using a pre-trained model. The word-level error is much higher than the character-level error, but the difference is primarily due to the way in which the two evaluation metrics are defined. There are significantly fewer words than characters, so that an error at the character level (which necessarily makes the word containing it wrong) will have a stronger impact on the WER than on the CER. A closer analysis of the results shows that our system makes few errors on word boundaries: nearly 90 % of spaces are correctly predicted.", "cite_spans": [ { "start": 363, "end": 387, "text": "Wisniewski et al. (2020)", "ref_id": "BIBREF29" } ], "ref_spans": [], "eq_spans": [], "section": "Experimental results", "sec_num": "3.1" }, { "text": "To evaluate the usefulness of the system described in the previous section, a specialist in the Japhug language (Guillaume Jacques) corrected the automatic transcription of a recording that he had not yet transcribed. This pilot experiment is not systematized like that of Sperber et al. (2017) or other studies of post-editing processes in machine translation (Nitzke, 2021) , and moreover concerns only 236 words, corresponding to a 2-minute recording of the Japhug language. The evaluation could therefore be dismissed as impressionistic and unreliable from the point of view of NLP tool evaluation. But it cannot be overemphasized that there is a \"need for developers to directly engage with stakeholders when designing and deploying technologies for supporting language documentation\" (Prud'hommeaux et al., 2021, 491) . The point of view of end users is clearly significant and relevant to guide multidisciplinary team work of the type reported here.", "cite_spans": [ { "start": 273, "end": 294, "text": "Sperber et al. (2017)", "ref_id": "BIBREF26" }, { "start": 361, "end": 375, "text": "(Nitzke, 2021)", "ref_id": "BIBREF21" }, { "start": 790, "end": 823, "text": "(Prud'hommeaux et al., 2021, 491)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Quality assessment of transcriptions by the linguist", "sec_num": "3.2" }, { "text": "The evaluation experiment, even though it is conducted in a way that is not standard in NLP evaluation, leads to a clear observation: the number of corrections to be made to obtain a quality transcription is much lower than the CER suggests. The linguist only had to correct 1.9 % of the characters. The figure becomes 4.2 % if punctuation is taken into account: punctuation marks are not predicted by the system -remember that they were removed from the training corpus at the preprocessing stage -and must therefore be added manually by the person taking up the automatic transcription for further processing. The corresponding WER is 5.9 %. 
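For readers unfamiliar with these metrics, the small self-contained sketch below (an illustration, not the evaluation code used in the experiments; the example strings are made up) shows why a single character error weighs so much more heavily at the word level:

```python
# Self-contained illustration of CER vs. WER: both are edit distances,
# computed over characters and over space-delimited words respectively.
# Not the evaluation code used in the experiments; example strings are
# made up.
def edit_distance(ref, hyp):
    """Levenshtein distance by dynamic programming (one-row version)."""
    d = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, 1):
        prev, d[0] = d[0], i
        for j, h in enumerate(hyp, 1):
            prev, d[j] = d[j], min(d[j] + 1,          # deletion
                                   d[j - 1] + 1,      # insertion
                                   prev + (r != h))   # substitution
    return d[-1]

def cer(ref, hyp):
    return edit_distance(list(ref), list(hyp)) / len(ref)

def wer(ref, hyp):
    return edit_distance(ref.split(), hyp.split()) / len(ref.split())

ref = "tendere nu ci pjetu"   # made-up reference, 4 words, 19 characters
hyp = "tendere nu ci pjeti"   # one wrong character in the last word
print(cer(ref, hyp))          # 1/19, about 5 %
print(wer(ref, hyp))          # 1/4 = 25 %
```

A single substituted character thus barely moves the CER while making a whole word count as wrong, which is the pattern observed in the figures above. 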
The difference between the estimated CER (computed on data that have been annotated beforehand) and the number of actual corrections is largely explained by the ambiguity inherent in the task of phonemic transcription: the linguist transcribing the data does not work at an exclusively phonetic-phonological level, but makes many decisions based on high-level information (in short: word identification based on context). Table 1 shows a sample of manual corrections made by the linguist to the output of our system.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Quality assessment of transcriptions by the linguist", "sec_num": "3.2" }, { "text": "The observation of a gap between the metrics and the evaluation by the user is reminiscent of similar findings obtained in the evaluation of machine translation (Wisniewski et al., 2013) . Such observations are of great importance with a view to integrating the tools into workflows for linguistic documentation. It would seem that the actual degree of usefulness (the \"real\" quality) of the systems is higher than the evaluation metrics used so far would suggest. At least in the case of Japhug, the effort required to correct automatic transcriptions is considered \"very low\" by our expert on Japhug.", "cite_spans": [ { "start": 161, "end": 186, "text": "(Wisniewski et al., 2013)", "ref_id": "BIBREF30" } ], "ref_spans": [], "eq_spans": [], "section": "Quality assessment of transcriptions by the linguist", "sec_num": "3.2" }, { "text": "x t\u0255e k\u026f\u0255\u026f\u014bg\u026f t\u0255e i\u0255qha @mingchao(u\u2192\u2423 )\u026fra\u014bg n\u026f t\u0255u pj\u0264\u014bu t\u0255end\u0264re i\u0255qha n\u0264ki @yanguo k\u0264ti r\u025f\u0264lkh\u0264\u03b2 \u0263\u026f n\u026fr\u025f\u0264lpu n\u026f k\u026f, i\u0255qha n\u026f, i\u0255qha n\u026f \u026fftsa n\u026fn\u026f r\u025f\u0264lpu lus\u026fnd\u0264m pj\u0264s\u026fso. t\u0255e n\u026f r\u025f\u0264lpu lus\u026fnd\u0264m pj\u0264s\u026fso t\u0255e, t\u0255end\u0264re n\u0264kin\u026f, s\u0264t\u0255ha ra tos\u0264t\u0282o\u0281lo\u0281n\u026f \u0291o \u0255ti t\u0255e, t\u0255end\u0264re i\u0255qha n\u026f, @shandong n\u026ft\u0255u \u026frmi @zhangxiaobing k\u026frmi ci t\u026fts\u0263e \u026fk\u026f\u03b2zu ci pj\u0264tu, t\u0264t\u0255\u026f. t\u0255end\u0264re \u026fr\u0291a\u03b2 n\u026f \u026fskhr\u026f m\u026f\u0272\u0264\u03b2di \u03c7s\u026fsla ma m\u026fto\u03b2zu ri t\u0255end\u0264re \u026fp\u0255i jo\u026co\u0281nd\u0291i \u0272\u0264ph\u0263ond\u0291i pj\u0264ra mat\u0255i s\u0264t\u0255ha ra pj\u0264k\u0264t\u0282o\u0281lo\u0281ci qhe t\u0255e n\u026fra t\u0255etha k\u026fs\u0264\u0263\u0291i ra p\u026fme ma \u0272\u0264s\u026fsond\u0291i qhe t\u0255e n\u026f joph\u0263ond\u0291i.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Quality assessment of transcriptions by the linguist", "sec_num": "3.2" }, { "text": "y t\u0255e k\u026f\u0255\u026f\u014bg\u026f t\u0255e i\u0255qha, @mingchao \u026fra\u014b n\u026ft\u0255u pj\u0264\u014bu\u2423 t\u0255end\u0264re i\u0255qha, n\u0264ki, @yanguo k\u0264ti r\u025f\u0264lkh\u0264\u03b2 \u0263\u026f, n\u026fr\u025f\u0264lpu n\u026f k\u026f, i\u0255qha n\u026f(.\u2192,) i\u0255qha n\u026f, \u026fftsa n\u026fn\u026f r\u025f\u0264lpu lus\u026fnd\u0264m pj\u0264s\u026fso. t\u0255e n\u026f r\u025f\u0264lpu lus\u026fnd\u0264m pj\u0264s\u026fso t\u0255e, t\u0255end\u0264re, n\u0264kin\u026f, s\u0264t\u0255ha ra tos\u0264t\u0282o\u0281lo\u0281n\u026f \u0291o \u0255ti t\u0255e, t\u0255end\u0264re i\u0255qha n\u026f, @shandong n\u026ft\u0255u, \u026frmi @zhangxiaobing k\u026frmi ci, t\u026fts\u0263e \u026fk\u026f\u03b2zu ci pj\u0264tu, t\u0264t\u0255\u026f. t\u0255end\u0264re \u026fr\u0291a\u03b2 n\u026f \u026fskhr\u026f\u2423 m\u026f\u0272\u0264\u03b2d(er,\u2192i) \u03c7s\u026fsla\u2423 ma m\u026fto\u03b2zu ri, t\u0255end\u0264re \u026fp\u0255i jo\u026co(n\u2192\u0281)nd\u0291i \u0272\u0264ph\u0263ond\u0291i pj\u0264ra mat\u0255i, s\u0264t\u0255ha ra pj\u0264k\u0264t\u0282o\u0281lo\u0281ci qhe t\u0255e n\u026fra t\u0255etha k\u026fs\u0264\u0263\u0291(\u026f\u2192i\u2423 )ra p\u026fme ma \u0272\u0264s\u026fson(\u026f\u2192d\u0291i) qhe t\u0255e n\u026f joph\u0263ond\u0291i. Table 1 : An excerpt from the manual corrections made to automatic transcriptions. System x, corresponding to the setup described in \u00a73, does not predict punctuation, nor does it predict the symbol @ (which indicates Chinese loanwords), whereas system y predicts these two elements.", "cite_spans": [], "ref_spans": [ { "start": 586, "end": 593, "text": "Table 1", "ref_id": null } ], "eq_spans": [], "section": "Quality assessment of transcriptions by the linguist", "sec_num": "3.2" }, { "text": "A linguist's assessment of the amount of effort depends of course on many factors, including the degree of command of the target language. This makes the comparison from one case to another problematic; this is one of the difficulties encountered in interdisciplinary work between computer scientists and linguists. This point will be briefly taken up in the following paragraph.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Quality assessment of transcriptions by the linguist", "sec_num": "3.2" }, { "text": "The results presented in the previous section are, to say the least, highly encouraging. They show that it is possible to achieve very good quality automatic phonemic transcriptions, even for languages for which relatively little annotated data is available (about 2 hours). Not only is the quality of the transcriptions sufficient to serve as a basis for further linguistic documentation work, but approaches based on pre-training of representations open up the possibility of recognition at the word level, a major advance for the intended use cases (documentation of endangered languages in fieldwork). In practice, a phoneme lattice is not the best basis for further work by a field linguist. For a phoneme transcription to be complete, each individual phoneme would have to be recognizable from the audio signal, which would be contrary to all expectations, given the well-documented variability in the phonetic realization of phonemes (Niebuhr and Kohler, 2011) . This variability, which carries a non-negligible part of the information contained in the signal, is particularly extensive in spontaneous speech, the preferred object of study of field linguists (Bouquiaux and Thomas, 1971; Newman and Ratliff, 2001) . 
Thus, the basic unit for the constitution of corpora of rare languages is clearly not the phoneme, but the morpheme (and the higher-level units: word, sentence...).", "cite_spans": [ { "start": 941, "end": 967, "text": "(Niebuhr and Kohler, 2011)", "ref_id": "BIBREF20" }, { "start": 1167, "end": 1195, "text": "(Bouquiaux and Thomas, 1971;", "ref_id": "BIBREF3" }, { "start": 1196, "end": 1221, "text": "Newman and Ratliff, 2001)", "ref_id": "BIBREF19" } ], "ref_spans": [], "eq_spans": [], "section": "Taking a critical look at the process of training statistical models", "sec_num": "4" }, { "text": "Our initial results led us to consider more complex transcription tasks in which the system must also predict punctuation, as well as Chinese loanwords (cases of code-switching with the national language) found in Japhug documents (where they are transcribed according to the romanization conventions of standard Mandarin). The goal is, as before, to reduce the annotation effort of field linguists. Taking punctuation and loanwords into account essentially involves changing the preprocessing performed on the transcriptions before training.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Taking a critical look at the process of training statistical models", "sec_num": "4" }, { "text": "The difficulties which we encountered during the development of this new system led us to study in a systematic way the degree of stability of the learning process. Neural network training is a difficult task in that it involves a very large number of parameters and relies on the optimization of a non-convex objective function. In practice, the optimization methods at the heart of deep learning rely on a very large number of hyper-parameters, 11 the choice of which has a direct impact on the performance of the resulting system. Thus, for the task of fine-tuning the XLS-R model (used in the work reported here), it is possible to change the value of more than twenty parameters, including the initial learning rate, its schedule, the optimization method, the batch size, as well as various parameters for dropout.", "cite_spans": [ { "start": 447, "end": 449, "text": "11", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Taking a critical look at the process of training statistical models", "sec_num": "4" }, { "text": "Figure 2 shows the performances (evaluated by the CER) obtained on the validation set over the course of the various training runs performed during the development of these systems. Note that the systems were fine-tuned on a three-hour corpus (10% of which, making up 18 minutes, was used as a validation set) in order to keep the training times to a reasonable duration. The experiments we conducted with a larger corpus did not lead to improvements in the results obtained. These learning curves were obtained by varying the optimization parameters (learning rate, dropout values, choice of the training set), but also by trying various experimental conditions: in particular, by either taking punctuation into account or not.", "cite_spans": [], "ref_spans": [ { "start": 23, "end": 31, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "Taking a critical look at the process of training statistical models", "sec_num": "4" }, { "text": "Among the 91 training curves shown in Figure 2 , the CERs obtained on the validation set vary between 8.8 % and 28.8 % (M = 14.8, SD = 2.2). 
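For concreteness, the sketch below shows how a handful of these knobs surface in the HuggingFace training API; every value is an illustrative placeholder rather than the setting of any particular run reported here, and the model, processor and dataset variables are assumed to come from a preparation step such as the one sketched in section 2.

```python
# Illustrative placeholders only: none of these values is a setting
# used in the experiments. `model`, `processor`, `train_ds` and
# `dev_ds` are assumed to exist; a CTC data collator (padding audio
# and labels separately) is also needed but omitted here for brevity.
from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="xlsr-japhug",
    num_train_epochs=60,               # fixed in this work
    learning_rate=3e-4,                # initial learning rate
    lr_scheduler_type="linear",        # scheduling of the learning rate
    warmup_steps=500,
    per_device_train_batch_size=8,     # batch size
    gradient_accumulation_steps=2,
    evaluation_strategy="epoch",       # track validation CER per epoch
    save_strategy="epoch",
)
# Dropout-related knobs live on the model configuration rather than in
# TrainingArguments, e.g. model.config.hidden_dropout,
# model.config.attention_dropout, model.config.layerdrop.
trainer = Trainer(model=model, args=args,
                  train_dataset=train_ds, eval_dataset=dev_ds,
                  tokenizer=processor.feature_extractor)
# trainer.train()
```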
Most of the learned systems perform significantly worse than the system described in our first experiments: only 6 systems have a CER at validation that is below 12.0 %, and none of them reaches the performance of the system described in section 3. Although not all of these error rates are directly comparable, these results show not only that performance on the validation set is highly sensitive to the choice of hyper-parameters (as expected), but more importantly, that the optimal value of these parameters varies across corpora, train-test splits and configurations.", "cite_spans": [], "ref_spans": [ { "start": 38, "end": 46, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "Taking a critical look at the process of training statistical models", "sec_num": "4" }, { "text": "However, as the results in Table 2 show, if we apply the different models obtained to the corrected text of section 3.2, the quality of the transcriptions is such that it requires only a small number of corrections. This result is all the more remarkable since these systems were trained on only 3 hours of annotated data, a reasonable amount of data to expect in scenarios of language documentation. Above all, it appears that the performance of the models on the validation set does not seem to be a reliable indicator of their quality in practice. This makes their selection, and more generally their development, very difficult.", "cite_spans": [], "ref_spans": [ { "start": 27, "end": 34, "text": "Table 2", "ref_id": null } ], "eq_spans": [], "section": "Taking a critical look at the process of training statistical models", "sec_num": "4" }, { "text": "
                 x        y        z
CER validation   -        8.8 %    13.9 %
WER              5.9 %    19.5 %   21.6 %
CER              4.2 %    9.1 %    6.7 %
punctuation      1.9 %    6.8 %    4.5 %
Pinyin           0.7 %    2.9 %    4.0 %
Table 2 : Detailed evaluation of the various systems for phonemic transcription: x is the system described in section 3, y and z are two of the systems from our second series of experiments (described in \u00a74): y is the system with the lowest CER on the validation set, and z the system with the lowest CER on the test set. These last two systems predict punctuation and the @ symbol for loanwords.", "cite_spans": [], "ref_spans": [ { "start": 135, "end": 142, "text": "Table 2", "ref_id": null } ], "eq_spans": [], "section": "Taking a critical look at the process of training statistical models", "sec_num": "4" }, { "text": "More qualitatively, we have reported in Table 1 an excerpt of the transcription of this text by the system described in section 3 and by a system predicting the punctuation. It appears that, while the first system is able to achieve a perfect transcription except for Chinese words (romanized into Pinyin) and punctuation marks, the second system presents properties that may be quite interesting for innovative workflows for computational documentation of languages. First of all, it places the utterance boundaries (marked by the full stop) without errors. The division into sentences constitutes a fundamental dimension of the structure of linguistic documents, and an important dimension of the work of curating transcriptions for electronic publication in language archives. Moreover, the model recognizes Chinese borrowings remarkably well, paving the way for their automatic identification. 
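As an indication of how such an identification step could be chained onto the system's output, here is a hedged sketch of greedy CTC decoding followed by a trivial extraction of the @-marked loanwords; the decoding calls are standard HuggingFace wav2vec 2.0 usage, the file name is a placeholder, and the extraction rule is our illustration, not a component of the systems evaluated above.

```python
# Hedged sketch: greedy CTC decoding of one recording with the
# fine-tuned model, then a trivial pass over the decoded string.
# Assumes `model` and `processor` from the earlier sketches;
# "recording.wav" is a placeholder (16 kHz mono audio expected).
import re
import soundfile as sf
import torch

speech, sr = sf.read("recording.wav")
inputs = processor(speech, sampling_rate=sr, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits    # (1, frames, labels)
pred_ids = torch.argmax(logits, dim=-1)           # best label per frame
text = processor.batch_decode(pred_ids)[0]        # collapse the CTC path

# Chinese borrowings are marked "@" in system y's output
# (e.g. "@shandong"), so they can be listed directly:
loanwords = re.findall(r"@(\w+)", text)
# and utterance boundaries can be read off the predicted full stops:
sentences = [s.strip() for s in text.split(".") if s.strip()]
```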
Such additional treatments down the line are key to a workflow that makes the most of a range of NLP tools. The ultimate aim is to arrive at Interlinear Glossed Texts (IGT), with annotation down to the level of the morpheme; in turn, IGT corpora have considerable usefulness in research, including possibilities for automatically inferring linguistic patterns from the glossed corpora (Zamaraeva et al., 2019) .", "cite_spans": [ { "start": 1287, "end": 1311, "text": "(Zamaraeva et al., 2019)", "ref_id": "BIBREF32" } ], "ref_spans": [ { "start": 47, "end": 54, "text": "Table 1", "ref_id": null } ], "eq_spans": [], "section": "Taking a critical look at the process of training statistical models", "sec_num": "4" }, { "text": "In this work, we have described how the fine-tuning of a multilingual model could be used to learn an automatic phonemic transcription system for an endangered language, and thus reduce the annotation effort of field linguists. Despite the large variability of the scores obtained on a validation set, we succeeded in developing systems whose predictions required only a small number of manual corrections by the linguist: a number that is much smaller than that estimated by the Character Error Rate (CER). This work shows the value of this type of approach, and opens up many perspectives. In particular, the approach seems to us to call for an extension of the experiments to other endangered languages (e.g. from other corpora hosted in archives of endangered languages, about which see Berez-Kroeker and Henke 2018), in order to evaluate more widely its usefulness for language documentation. We also wish, in our future work, to improve the quality of predictions at the word level, for example by integrating a language model.", "cite_spans": [], "ref_spans": [ { "start": 102, "end": 110, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "Conclusion", "sec_num": "5" }, { "text": "Figure 2 : CER over the validation set in the course of various optimizations. The curve in red corresponds to the median value for CER at each stage.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion", "sec_num": "5" }, { "text": "4 The models and all the scripts used in our experiments are freely available from https://github.com/CNRS-LACITO/xlsr_for_pangloss. 5 Note that many other pre-trained models are available, such as hubert-large-ls960-ft and wav2vec2-large-100k-voxpopuli.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "6 This model is named wav2vec2-large-xlsr-53 in the Hugging Face API. 7 This list is constructed simply by enumerating all the characters in the transcriptions and is not based on a phoneme inventory or a grapheme-to-phoneme mapping. 8 Note that the use of a special character directly predicted by our model is only novel in the context of a low-resource/language documentation setting: it constitutes common practice in character-level ASR.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "11 Hyper-parameters are special parameters the optimal value of which can only be found by trial-and-error and training a system completely. Tuning hyper-parameters tends to be highly time-consuming and resource-intensive.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null } ], "back_matter": [ { "text": "We wish to express our deepest gratitude to the main Japhug language consultant, Tshendzin. Financial support was given by Agence Nationale de la Recherche as part of grants ANR-10-LABX-0083 (Laboratoire d'excellence \"Empirical Foundations of Linguistics\", 2011-2024) and ANR-19-CE38-0015 (\"Computational Language Documentation by 2025\", 2019-2024). 
Financial support was also contributed by the Institute for Language Diversity and Heritage (ILARA-EPHE). An important part of the linguistic resources used in the present work was collected in the course of the project \"Himalayan Corpora: Parallel corpora in languages of the Greater Himalayan area\" (ANR-12-CORP-0006).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Acknowledgments", "sec_num": null } ], "bib_entries": { "BIBREF0": { "ref_id": "b0", "title": "Evaluating phonemic transcription of low-resource tonal languages for language documentation", "authors": [ { "first": "Oliver", "middle": [], "last": "Adams", "suffix": "" }, { "first": "Trevor", "middle": [], "last": "Cohn", "suffix": "" }, { "first": "Graham", "middle": [], "last": "Neubig", "suffix": "" }, { "first": "Hilaria", "middle": [], "last": "Cruz", "suffix": "" }, { "first": "Steven", "middle": [], "last": "Bird", "suffix": "" }, { "first": "Alexis", "middle": [], "last": "Michaud", "suffix": "" } ], "year": 2018, "venue": "LREC 2018 (Language Resources and Evaluation Conference)", "volume": "", "issue": "", "pages": "3356--3365", "other_ids": {}, "num": null, "urls": [], "raw_text": "Oliver Adams, Trevor Cohn, Graham Neubig, Hilaria Cruz, Steven Bird, and Alexis Michaud. 2018. Evaluating phonemic transcription of low-resource tonal languages for language documentation. In LREC 2018 (Language Resources and Evaluation Conference), pages 3356-3365.", "links": null }, "BIBREF1": { "ref_id": "b1", "title": "Language contact and endangered languages. The Oxford handbook of language contact", "authors": [ { "first": "Alexandra", "middle": [], "last": "Aikhenvald", "suffix": "" } ], "year": 2020, "venue": "", "volume": "", "issue": "", "pages": "241--260", "other_ids": {}, "num": null, "urls": [], "raw_text": "Alexandra Aikhenvald. 2020. Language contact and endangered languages. The Oxford handbook of language contact, pages 241-260.", "links": null }, "BIBREF2": { "ref_id": "b2", "title": "Language archiving", "authors": [ { "first": "Andrea", "middle": [ "L" ], "last": "Berez-Kroeker", "suffix": "" }, { "first": "Ryan", "middle": [ "E" ], "last": "Henke", "suffix": "" } ], "year": 2018, "venue": "The Oxford handbook of endangered languages", "volume": "", "issue": "", "pages": "433--457", "other_ids": {}, "num": null, "urls": [], "raw_text": "Andrea L. Berez-Kroeker and Ryan E. Henke. 2018. Language archiving. In Kenneth L. Rehg and Lyle Campbell, editors, The Oxford handbook of endangered languages, pages 433-457. Oxford University Press, Oxford.", "links": null }, "BIBREF3": { "ref_id": "b3", "title": "Enqu\u00eate et description des langues \u00e0 tradition orale. Volume I : l'enqu\u00eate de terrain et l'analyse grammaticale, 1976 (2e) edition. Soci\u00e9t\u00e9 d'\u00e9tudes linguistiques et anthropologiques de France", "authors": [ { "first": "Luc", "middle": [], "last": "Bouquiaux", "suffix": "" }, { "first": "Jacqueline", "middle": [], "last": "Thomas", "suffix": "" } ], "year": 1971, "venue": "", "volume": "3", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Luc Bouquiaux and Jacqueline Thomas. 1971. Enqu\u00eate et description des langues \u00e0 tradition orale. Volume I : l'enqu\u00eate de terrain et l'analyse grammaticale, 1976 (2e) edition. Soci\u00e9t\u00e9 d'\u00e9tudes linguistiques et anthropologiques de France, Paris. 3 volumes.", "links": null }, "BIBREF4": { "ref_id": "b4", "title": "Unsupervised cross-lingual representation learning for speech recognition", "authors": [ { "first": "Alexis", "middle": [], "last": "Conneau", "suffix": "" }, { "first": "Alexei", "middle": [], "last": "Baevski", "suffix": "" }, { "first": "Ronan", "middle": [], "last": "Collobert", "suffix": "" }, { "first": "Abdelrahman", "middle": [], "last": "Mohamed", "suffix": "" }, { "first": "Michael", "middle": [], "last": "Auli", "suffix": "" } ], "year": 2020, "venue": "CoRR", "volume": "abs/2006.13979", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, and Michael Auli. 2020. Unsupervised cross-lingual representation learning for speech recognition. CoRR, abs/2006.13979.", "links": null }, "BIBREF5": { "ref_id": "b5", "title": "Deux corpus audio transcrits de langues rares (japhug et na) normalis\u00e9s en vue d'exp\u00e9riences en traitement du signal", "authors": [ { "first": "Benjamin", "middle": [], "last": "Galliot", "suffix": "" }, { "first": "Guillaume", "middle": [], "last": "Wisniewski", "suffix": "" }, { "first": "S\u00e9verine", "middle": [], "last": "Guillaume", "suffix": "" }, { "first": "Laurent", "middle": [], "last": "Besacier", "suffix": "" }, { "first": "Guillaume", "middle": [], "last": "Jacques", "suffix": "" }, { "first": "Alexis", "middle": [], "last": "Michaud", "suffix": "" }, { "first": "Solange", "middle": [], "last": "Rossato", "suffix": "" }, { "first": "Minh-Ch\u00e2u", "middle": [], "last": "Nguy\u00ean", "suffix": "" }, { "first": "Maxime", "middle": [], "last": "Fily", "suffix": "" } ], "year": 2021, "venue": "Journ\u00e9es scientifiques du Groupement de recherche", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Benjamin Galliot, Guillaume Wisniewski, S\u00e9verine Guillaume, Laurent Besacier, Guillaume Jacques, Alexis Michaud, Solange Rossato, Minh-Ch\u00e2u Nguy\u00ean, and Maxime Fily. 2021. Deux corpus audio transcrits de langues rares (japhug et na) normalis\u00e9s en vue d'exp\u00e9riences en traitement du signal. In Journ\u00e9es scientifiques du Groupement de recherche \"Linguistique informatique, formelle et de terrain\" (GDR LIFT), Grenoble.", "links": null }, "BIBREF6": { "ref_id": "b6", "title": "Utilizing language technology in the documentation of endangered Uralic languages", "authors": [ { "first": "Ciprian", "middle": [], "last": "Gerstenberger", "suffix": "" }, { "first": "Niko", "middle": [], "last": "Partanen", "suffix": "" }, { "first": "Michael", "middle": [], "last": "Rie\u00dfler", "suffix": "" }, { "first": "Joshua", "middle": [], "last": "Wilbur", "suffix": "" } ], "year": 2016, "venue": "Northern European Journal of Language Technology", "volume": "4", "issue": "", "pages": "29--47", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ciprian Gerstenberger, Niko Partanen, Michael Rie\u00dfler, and Joshua Wilbur. 2016. Utilizing language technology in the documentation of endangered Uralic languages. 
Northern European Journal of Language Technology, 4:29-47.", "links": null }, "BIBREF7": { "ref_id": "b7", "title": "Unsupervised word segmentation from speech with attention", "authors": [ { "first": "Pierre", "middle": [], "last": "Godard", "suffix": "" }, { "first": "Marcely", "middle": [ "Zanon" ], "last": "Boito", "suffix": "" }, { "first": "Lucas", "middle": [], "last": "Ondel", "suffix": "" }, { "first": "Alexandre", "middle": [], "last": "Berard", "suffix": "" }, { "first": "Fran\u00e7ois", "middle": [], "last": "Yvon", "suffix": "" }, { "first": "Aline", "middle": [], "last": "Villavicencio", "suffix": "" }, { "first": "Laurent", "middle": [], "last": "Besacier", "suffix": "" } ], "year": 2018, "venue": "Interspeech 2018", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Pierre Godard, Marcely Zanon Boito, Lucas Ondel, Alexandre Berard, Fran\u00e7ois Yvon, Aline Villavicencio, and Laurent Besacier. 2018. Unsupervised word segmentation from speech with attention. In Interspeech 2018, Hyderabad, India.", "links": null }, "BIBREF8": { "ref_id": "b8", "title": "Number of phonemes and number of speakers", "authors": [ { "first": "Andr\u00e9-Georges", "middle": [], "last": "Haudricourt", "suffix": "" } ], "year": 1961, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Andr\u00e9-Georges Haudricourt. 2017 [original publication: 1961]. Number of phonemes and number of speakers [translation of: Richesse en phon\u00e8mes et richesse en locuteurs].", "links": null }, "BIBREF11": { "ref_id": "b11", "title": "A grammar of Japhug. Number 1 in Comprehensive Grammar Library", "authors": [ { "first": "Guillaume", "middle": [], "last": "Jacques", "suffix": "" } ], "year": 2021, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Guillaume Jacques. 2021. A grammar of Japhug. Number 1 in Comprehensive Grammar Library. Language Science Press, Berlin.", "links": null }, "BIBREF12": { "ref_id": "b12", "title": "Automatic Speech Recognition and query by example for Creole languages documentation", "authors": [ { "first": "C\u00e9cile", "middle": [], "last": "Macaire", "suffix": "" }, { "first": "Didier", "middle": [], "last": "Schwab", "suffix": "" }, { "first": "Benjamin", "middle": [], "last": "Lecouteux", "suffix": "" }, { "first": "Emmanuel", "middle": [], "last": "Schang", "suffix": "" } ], "year": 2022, "venue": "Findings of the Association for Computational Linguistics: ACL 2022", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "C\u00e9cile Macaire, Didier Schwab, Benjamin Lecouteux, and Emmanuel Schang. 2022. Automatic Speech Recognition and query by example for Creole languages documentation. In Findings of the Association for Computational Linguistics: ACL 2022, Dublin, Ireland.", "links": null }, "BIBREF13": { "ref_id": "b13", "title": "Documenting and researching endangered languages: the Pangloss Collection", "authors": [ { "first": "Boyd", "middle": [], "last": "Michailovsky", "suffix": "" }, { "first": "Martine", "middle": [], "last": "Mazaudon", "suffix": "" }, { "first": "Alexis", "middle": [], "last": "Michaud", "suffix": "" }, { "first": "S\u00e9verine", "middle": [], "last": "Guillaume", "suffix": "" } ], "year": 2014, "venue": "Language Documentation and Conservation", "volume": "8", "issue": "", "pages": "119--135", "other_ids": {}, "num": null, "urls": [], "raw_text": "Boyd Michailovsky, Martine Mazaudon, Alexis Michaud, S\u00e9verine Guillaume, Alexandre Fran\u00e7ois, and Evangelia Adamou. 2014. Documenting and researching endangered languages: the Pangloss Collection. Language Documentation and Conservation, 8:119-135.", "links": null }, "BIBREF14": { "ref_id": "b14", "title": "Integrating automatic transcription into the language documentation workflow: experiments with Na data and the Persephone toolkit", "authors": [ { "first": "Alexis", "middle": [], "last": "Michaud", "suffix": "" }, { "first": "Oliver", "middle": [], "last": "Adams", "suffix": "" }, { "first": "Trevor", "middle": [], "last": "Cohn", "suffix": "" }, { "first": "Graham", "middle": [], "last": "Neubig", "suffix": "" }, { "first": "S\u00e9verine", "middle": [], "last": "Guillaume", "suffix": "" } ], "year": 2018, "venue": "Language Documentation and Conservation", "volume": "12", "issue": "", "pages": "393--429", "other_ids": {}, "num": null, "urls": [], "raw_text": "Alexis Michaud, Oliver Adams, Trevor Cohn, Graham Neubig, and S\u00e9verine Guillaume. 2018. Integrating automatic transcription into the language documentation workflow: experiments with Na data and the Persephone toolkit. Language Documentation and Conservation, 12:393-429.", "links": null }, "BIBREF15": { "ref_id": "b15", "title": "Integrating machine learning into language documentation and description", "authors": [ { "first": "Sarah", "middle": [], "last": "Moeller", "suffix": "" } ], "year": 2021, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Sarah Moeller. 2021. Integrating machine learning into language documentation and description. Ph.D. thesis, University of Colorado at Boulder.", "links": null }, "BIBREF16": { "ref_id": "b16", "title": "Re-valuing code-switching: lessons from Kaska narrative performances", "authors": [ { "first": "Patrick", "middle": [], "last": "Moore", "suffix": "" } ], "year": 2018, "venue": "Activating the heart: Storytelling, knowledge sharing, and relationship", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Patrick Moore. 2018. Re-valuing code-switching: lessons from Kaska narrative performances. In Activating the heart: Storytelling, knowledge sharing, and relationship, Waterloo, Canada. Wilfrid Laurier University Press.", "links": null }, "BIBREF17": { "ref_id": "b17", "title": "One size does not fit all in resource-constrained ASR", "authors": [ { "first": "Ethan", "middle": [], "last": "Morris", "suffix": "" }, { "first": "Robbie", "middle": [], "last": "Jimerson", "suffix": "" }, { "first": "Emily", "middle": [], "last": "Prud'hommeaux", "suffix": "" } ], "year": 2021, "venue": "Interspeech 2021", "volume": "", "issue": "", "pages": "4354--4358", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ethan Morris, Robbie Jimerson, and Emily Prud'hommeaux. 2021. One size does not fit all in resource-constrained ASR. In Interspeech 2021, pages 4354-4358. 
ISCA.", "links": null }, "BIBREF18": { "ref_id": "b18", "title": "When being unseen from mBERT is just the beginning: Handling new languages with multilingual language models", "authors": [ { "first": "Benjamin", "middle": [], "last": "Muller", "suffix": "" }, { "first": "Antonios", "middle": [], "last": "Anastasopoulos", "suffix": "" }, { "first": "Beno\u00eet", "middle": [], "last": "Sagot", "suffix": "" }, { "first": "Djam\u00e9", "middle": [], "last": "Seddah", "suffix": "" } ], "year": 2021, "venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", "volume": "", "issue": "", "pages": "448--462", "other_ids": {}, "num": null, "urls": [], "raw_text": "Benjamin Muller, Antonios Anastasopoulos, Beno\u00eet Sagot, and Djam\u00e9 Seddah. 2021. When being un- seen from mBERT is just the beginning: Handling new languages with multilingual language models. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 448-462, Online. Association for Computa- tional Linguistics.", "links": null }, "BIBREF19": { "ref_id": "b19", "title": "Linguistic fieldwork", "authors": [ { "first": "Paul", "middle": [], "last": "Newman", "suffix": "" }, { "first": "Martha", "middle": [], "last": "Ratliff", "suffix": "" } ], "year": 2001, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Paul Newman and Martha Ratliff. 2001. Linguistic fieldwork. Cambridge University Press, Cambridge.", "links": null }, "BIBREF20": { "ref_id": "b20", "title": "Perception of phonetic detail in the identification of highly reduced words", "authors": [ { "first": "Oliver", "middle": [], "last": "Niebuhr", "suffix": "" }, { "first": "Klaus", "middle": [ "J" ], "last": "Kohler", "suffix": "" } ], "year": 2011, "venue": "Journal of Phonetics", "volume": "39", "issue": "3", "pages": "319--329", "other_ids": {}, "num": null, "urls": [], "raw_text": "Oliver Niebuhr and Klaus J. Kohler. 2011. Perception of phonetic detail in the identification of highly reduced words. Journal of Phonetics, 39(3):319-329.", "links": null }, "BIBREF21": { "ref_id": "b21", "title": "A short guide to post-editing. Number 16 in Translation and Multilingual Natural Language Processing", "authors": [ { "first": "Silvia", "middle": [], "last": "Nitzke", "suffix": "" }, { "first": "Jeanand", "middle": [], "last": "Hansen-Schirra", "suffix": "" } ], "year": 2021, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Silvia Nitzke, Jeanand Hansen-Schirra. 2021. A short guide to post-editing. Number 16 in Translation and Multilingual Natural Language Processing. Lan- guage Science Press, Berlin.", "links": null }, "BIBREF22": { "ref_id": "b22", "title": "Segmentation en mots faiblement supervis\u00e9e pour la documentation automatique des langues", "authors": [ { "first": "Shu", "middle": [], "last": "Okabe", "suffix": "" }, { "first": "Fran\u00e7ois", "middle": [], "last": "Yvon", "suffix": "" }, { "first": "Laurent", "middle": [], "last": "Besacier", "suffix": "" } ], "year": 2021, "venue": "Journ\u00e9es scientifiques du Groupement de recherche \"Linguistique informatique, formelle et de terrain\" (GDR LIFT)", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Shu Okabe, Fran\u00e7ois Yvon, and Laurent Besacier. 2021. 
Segmentation en mots faiblement supervis\u00e9e pour la documentation automatique des langues. In Journ\u00e9es scientifiques du Groupement de recherche \"Linguistique informatique, formelle et de terrain\" (GDR LIFT), Grenoble.", "links": null }, "BIBREF23": { "ref_id": "b23", "title": "Speech recognition for endangered and extinct Samoyedic languages", "authors": [ { "first": "Niko", "middle": [], "last": "Partanen", "suffix": "" }, { "first": "Mika", "middle": [], "last": "H\u00e4m\u00e4l\u00e4inen", "suffix": "" }, { "first": "Tiina", "middle": [], "last": "Klooster", "suffix": "" } ], "year": 2020, "venue": "Proceedings of the 34th Pacific Asia Conference on Language, Information and Computation", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Niko Partanen, Mika H\u00e4m\u00e4l\u00e4inen, and Tiina Klooster. 2020. Speech recognition for endangered and extinct Samoyedic languages. In Proceedings of the 34th Pacific Asia Conference on Language, Information and Computation.", "links": null }, "BIBREF24": { "ref_id": "b24", "title": "How multilingual is multilingual BERT?", "authors": [ { "first": "Telmo", "middle": [], "last": "Pires", "suffix": "" }, { "first": "Eva", "middle": [], "last": "Schlinger", "suffix": "" }, { "first": "Dan", "middle": [], "last": "Garrette", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "4996--5001", "other_ids": {}, "num": null, "urls": [], "raw_text": "Telmo Pires, Eva Schlinger, and Dan Garrette. 2019. How multilingual is multilingual BERT? In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4996-5001, Florence, Italy. Association for Computational Linguistics.", "links": null }, "BIBREF25": { "ref_id": "b25", "title": "Automatic speech recognition for supporting endangered language documentation", "authors": [ { "first": "Emily", "middle": [], "last": "Prud'hommeaux", "suffix": "" }, { "first": "Robbie", "middle": [], "last": "Jimerson", "suffix": "" }, { "first": "Richard", "middle": [], "last": "Hatcher", "suffix": "" }, { "first": "Karin", "middle": [], "last": "Michelson", "suffix": "" } ], "year": 2021, "venue": "Language Documentation & Conservation", "volume": "15", "issue": "", "pages": "491--513", "other_ids": {}, "num": null, "urls": [], "raw_text": "Emily Prud'hommeaux, Robbie Jimerson, Richard Hatcher, and Karin Michelson. 2021. Automatic speech recognition for supporting endangered language documentation. Language Documentation & Conservation, 15:491-513.", "links": null }, "BIBREF26": { "ref_id": "b26", "title": "Transcribing against time", "authors": [ { "first": "Matthias", "middle": [], "last": "Sperber", "suffix": "" }, { "first": "Graham", "middle": [], "last": "Neubig", "suffix": "" }, { "first": "Jan", "middle": [], "last": "Niehues", "suffix": "" }, { "first": "Satoshi", "middle": [], "last": "Nakamura", "suffix": "" }, { "first": "Alex", "middle": [], "last": "Waibel", "suffix": "" } ], "year": 2017, "venue": "Speech Communication", "volume": "93", "issue": "", "pages": "20--30", "other_ids": {}, "num": null, "urls": [], "raw_text": "Matthias Sperber, Graham Neubig, Jan Niehues, Satoshi Nakamura, and Alex Waibel. 2017. Transcribing against time. Speech Communication, 93:20-30.", "links": null }, "BIBREF27": { "ref_id": "b27", "title": "LD&C possibilities for the next decade", "authors": [ { "first": "Nick", "middle": [], "last": "Thieberger", "suffix": "" } ], "year": 2017, "venue": "Language Documentation and Conservation", "volume": "11", "issue": "", "pages": "1--4", "other_ids": {}, "num": null, "urls": [], "raw_text": "Nick Thieberger.
2017. LD&C possibilities for the next decade. Language Documentation and Conservation, 11:1-4.", "links": null }, "BIBREF28": { "ref_id": "b28", "title": "Sociolinguistic typology: social determinants of linguistic complexity", "authors": [ { "first": "Peter", "middle": [], "last": "Trudgill", "suffix": "" } ], "year": 2011, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Peter Trudgill. 2011. Sociolinguistic typology: social determinants of linguistic complexity. Oxford University Press, Oxford.", "links": null }, "BIBREF29": { "ref_id": "b29", "title": "Phonemic transcription of low-resource languages: To what extent can preprocessing be automated?", "authors": [ { "first": "Guillaume", "middle": [], "last": "Wisniewski", "suffix": "" }, { "first": "S\u00e9verine", "middle": [], "last": "Guillaume", "suffix": "" }, { "first": "Alexis", "middle": [], "last": "Michaud", "suffix": "" } ], "year": 2020, "venue": "Proceedings of the 1st Joint SLTU (Spoken Language Technologies for Under-resourced languages) and CCURL (Collaboration and Computing for Under-Resourced Languages) Workshop", "volume": "", "issue": "", "pages": "306--315", "other_ids": {}, "num": null, "urls": [], "raw_text": "Guillaume Wisniewski, S\u00e9verine Guillaume, and Alexis Michaud. 2020. Phonemic transcription of low-resource languages: To what extent can preprocessing be automated? In Proceedings of the 1st Joint SLTU (Spoken Language Technologies for Under-resourced languages) and CCURL (Collaboration and Computing for Under-Resourced Languages) Workshop, pages 306-315, Marseille, France. European Language Resources Association (ELRA).", "links": null }, "BIBREF30": { "ref_id": "b30", "title": "Design and analysis of a large corpus of post-edited translations: Quality estimation, failure analysis and the variability of post-edition", "authors": [ { "first": "Guillaume", "middle": [], "last": "Wisniewski", "suffix": "" }, { "first": "Anil", "middle": [ "Kumar" ], "last": "Singh", "suffix": "" }, { "first": "Natalia", "middle": [], "last": "Segal", "suffix": "" }, { "first": "Fran\u00e7ois", "middle": [], "last": "Yvon", "suffix": "" } ], "year": 2013, "venue": "Proceedings of Machine Translation Summit XIV: Papers", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Guillaume Wisniewski, Anil Kumar Singh, Natalia Segal, and Fran\u00e7ois Yvon. 2013. Design and analysis of a large corpus of post-edited translations: Quality estimation, failure analysis and the variability of post-edition.
In Proceedings of Machine Translation Summit XIV: Papers, Nice, France.", "links": null }, "BIBREF31": { "ref_id": "b31", "title": "Transformers: State-of-the-art natural language processing", "authors": [ { "first": "Thomas", "middle": [], "last": "Wolf", "suffix": "" }, { "first": "Lysandre", "middle": [], "last": "Debut", "suffix": "" }, { "first": "Victor", "middle": [], "last": "Sanh", "suffix": "" }, { "first": "Julien", "middle": [], "last": "Chaumond", "suffix": "" }, { "first": "Clement", "middle": [], "last": "Delangue", "suffix": "" }, { "first": "Anthony", "middle": [], "last": "Moi", "suffix": "" }, { "first": "Pierric", "middle": [], "last": "Cistac", "suffix": "" }, { "first": "Tim", "middle": [], "last": "Rault", "suffix": "" }, { "first": "Remi", "middle": [], "last": "Louf", "suffix": "" }, { "first": "Morgan", "middle": [], "last": "Funtowicz", "suffix": "" }, { "first": "Joe", "middle": [], "last": "Davison", "suffix": "" }, { "first": "Sam", "middle": [], "last": "Shleifer", "suffix": "" }, { "first": "Patrick", "middle": [], "last": "von Platen", "suffix": "" }, { "first": "Clara", "middle": [], "last": "Ma", "suffix": "" }, { "first": "Yacine", "middle": [], "last": "Jernite", "suffix": "" }, { "first": "Julien", "middle": [], "last": "Plu", "suffix": "" }, { "first": "Canwen", "middle": [], "last": "Xu", "suffix": "" }, { "first": "Teven", "middle": [ "Le" ], "last": "Scao", "suffix": "" }, { "first": "Sylvain", "middle": [], "last": "Gugger", "suffix": "" }, { "first": "Mariama", "middle": [], "last": "Drame", "suffix": "" }, { "first": "Quentin", "middle": [], "last": "Lhoest", "suffix": "" }, { "first": "Alexander", "middle": [], "last": "Rush", "suffix": "" } ], "year": 2020, "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", "volume": "", "issue": "", "pages": "38--45", "other_ids": {}, "num": null, "urls": [], "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.", "links": null }, "BIBREF32": { "ref_id": "b32", "title": "Handling cross-cutting properties in automatic inference of lexical classes: A case study of Chintang", "authors": [ { "first": "Olga", "middle": [], "last": "Zamaraeva", "suffix": "" }, { "first": "Kristen", "middle": [], "last": "Howell", "suffix": "" }, { "first": "Emily", "middle": [ "M" ], "last": "Bender", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 3rd Workshop on the Use of Computational Methods in the Study of Endangered Languages", "volume": "1", "issue": "", "pages": "28--38", "other_ids": {}, "num": null, "urls": [], "raw_text": "Olga Zamaraeva, Kristen Howell, and Emily M. Bender. 2019. Handling cross-cutting properties in automatic inference of lexical classes: A case study of Chintang. In Proceedings of the 3rd Workshop on the Use of Computational Methods in the Study of Endangered Languages Volume 1 (Papers), pages 28-38, Honolulu.
Association for Computational Linguistics.", "links": null } }, "ref_entries": { "FIGREF0": { "num": null, "text": "Evolution of performance as a function of the size of the training corpus.", "type_str": "figure", "uris": null } } } }