|
{ |
|
"paper_id": "Y15-1016", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:41:53.225350Z" |
|
}, |
|
"title": "Unsupervised and Lightly Supervised Part-of-Speech Tagging Using Recurrent Neural Networks", |
|
"authors": [ |
|
{ |
|
"first": "Othman", |
|
"middle": [], |
|
"last": "Zennaki", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "LIST, Vision and Content Engineering Laboratory", |
|
"location": { |
|
"settlement": "Gif-sur-Yvette", |
|
"country": "France" |
|
} |
|
}, |
|
"email": "othman.zennaki@cea.fr" |
|
}, |
|
{ |
|
"first": "Nasredine", |
|
"middle": [], |
|
"last": "Semmar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "LIST, Vision and Content Engineering Laboratory", |
|
"location": { |
|
"settlement": "Gif-sur-Yvette", |
|
"country": "France" |
|
} |
|
}, |
|
"email": "nasredine.semmar@cea.fr" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Besacier", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "laurent.besacier@imag.fr" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this paper, we propose a novel approach to induce automatically a Part-Of-Speech (POS) tagger for resource-poor languages (languages that have no labeled training data). This approach is based on cross-language projection of linguistic annotations from parallel corpora without the use of word alignment information. Our approach does not assume any knowledge about foreign languages, making it applicable to a wide range of resource-poor languages. We use Recurrent Neural Networks (RNNs) as multilingual analysis tool. Our approach combined with a basic crosslingual projection method (using word alignment information) achieves comparable results to the state-of-the-art. We also use our approach in a weakly supervised context, and it shows an excellent potential for very lowresource settings (less than 1k training utterances). 1 We did not use incremental training (as Duong et al. (2013) did).", |
|
"pdf_parse": { |
|
"paper_id": "Y15-1016", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this paper, we propose a novel approach to induce automatically a Part-Of-Speech (POS) tagger for resource-poor languages (languages that have no labeled training data). This approach is based on cross-language projection of linguistic annotations from parallel corpora without the use of word alignment information. Our approach does not assume any knowledge about foreign languages, making it applicable to a wide range of resource-poor languages. We use Recurrent Neural Networks (RNNs) as multilingual analysis tool. Our approach combined with a basic crosslingual projection method (using word alignment information) achieves comparable results to the state-of-the-art. We also use our approach in a weakly supervised context, and it shows an excellent potential for very lowresource settings (less than 1k training utterances). 1 We did not use incremental training (as Duong et al. (2013) did).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Nowadays, Natural Language Processing (NLP) tools (part-of-speech tagger, sense tagger, syntactic parser, named entity recognizer, semantic role labeler, etc.) with the best performance are those built using supervised learning approaches for resourcerich languages (where manually annotated corpora are available) such as English, French, German, Chinese and Arabic. However, for a large number of resource-poor languages, annotated corpora do not exist. Their manual construction is labor intensive and very expensive, making supervised approaches not feasible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The availability of parallel corpora has recently led to several strands of research work exploring the use of unsupervised approaches based on linguistic annotations projection from the (resourcerich) source language to the (under-resourced) target language. The goal of cross-language projection is, on the one hand, to provide all languages with linguistic annotations, and on the other hand, to automatically induce NLP tools for these languages. Unfortunately, the state-of-the-art in unsupervised methods, is still quite far from supervised learning approaches. For example, Petrov et al. (2012) obtained an average accuracy of 95.2% for 22 resource-rich languages supervised POS taggers, while the state-of-the-art in the unsupervised POS taggers achieved by Das and Petrov (2011) and Duong et al. (2013) with an average accuracy reaches only 83.4% on 8 European languages. Section 2 presents a brief overview of related work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 581, |
|
"end": 601, |
|
"text": "Petrov et al. (2012)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 766, |
|
"end": 787, |
|
"text": "Das and Petrov (2011)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 792, |
|
"end": 811, |
|
"text": "Duong et al. (2013)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we first adapt a similar method than the one of Duong et al. (2013) 1 , to build an unsupervised POS tagger based on a simple cross-lingual projection (Section 3.1). Next, we explore the possibility of using a recurrent neural network (RNN) to induce multilingual NLP tools, without using word alignment information. To show the potential of our approach, we firstly investigate POS tagging.", |
|
"cite_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 82, |
|
"text": "Duong et al. (2013)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 83, |
|
"end": 84, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In our approach, a parallel corpus between a resource-rich language (having a POS tagger) and a lower-resourced language is used to extract a common words representation (cross-lingual words representation) based only on sentence level alignment. This representation is used with the source side of the parallel corpus (tagged corpus) to learn a neural network POS tagger for the source language. No word alignment information is needed in our approach. Based on this common representation of source and target words, this neural network POS tagger can also be used to tag target language text (Section 3.2).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We assume that these two models (baseline crosslingual projection and RNN) are complementary to each other (one relies on word-alignment information while the other does not), and the performance can be further improved by combining them (linear combination presented in Section 3.3). This unsupervised RNN model, obtained without any target language annotated data, can be easily adapted in a weakly supervised manner (if a small amount of annotated target data is available) in order to take into account the target language specificity (Section 4).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To evaluate our approach, we conducted an experiment, which consists of two parts. First, using only parallel corpora, we evaluate our unsupervised approach for 4 languages: French, German, Greek and Spanish. Secondly, the performance of our approach is evaluated for German in a weakly supervised context, using several amounts of target adaptation data (Section 5). Finally, Section 6 concludes our study and presents our future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Several studies have used cross-lingual projection to transfer linguistic annotations from a resourcerich language to a resource-poor language in order to train NLP tools for the target language. The projection approach has been successfully used to transfer several linguistic annotations between languages. Examples include POS (Yarowsky et al., 2001; Das and Petrov, 2011; Duong et al., 2013) , named entity (Kim and Lee, 2012) , syntactic constituent (Jiang et al., 2011) , word senses (Bentivogli et al., 2004; Van der Plas and Apidianaki, 2014) , and semantic role labeling (Pad\u00f3 , 2007; Annesi and Basili, 2010) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 330, |
|
"end": 353, |
|
"text": "(Yarowsky et al., 2001;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 354, |
|
"end": 375, |
|
"text": "Das and Petrov, 2011;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 395, |
|
"text": "Duong et al., 2013)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 411, |
|
"end": 430, |
|
"text": "(Kim and Lee, 2012)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 455, |
|
"end": 475, |
|
"text": "(Jiang et al., 2011)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 515, |
|
"text": "(Bentivogli et al., 2004;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 516, |
|
"end": 550, |
|
"text": "Van der Plas and Apidianaki, 2014)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 580, |
|
"end": 593, |
|
"text": "(Pad\u00f3 , 2007;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 594, |
|
"end": 618, |
|
"text": "Annesi and Basili, 2010)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In these approaches, the source language is tagged, and tags are projected from the source language to the target language through the use of word alignments in parallel corpora. Then, these partial noisy annotations can be used in conjunction with robust learning algorithms to build unsupervised NLP tools. One limitation of these approaches is due to the poor accuracy of word-alignment algo-rithms, and also to the weak or incomplete inherent match between the two sides of a bilingual corpus (the alignment is not only a one-to-one mapping, it can also be one-to-many, many-to-one, many-tomany or some words can remain unaligned). To deal with these limitations, recent studies have proposed to combine projected labels with partially supervised monolingual information in order to filter out invalid label sequences. For example, Li et al. (2012) , T\u00e4ckstr\u00f6m et al. (2013b) and Wisniewski et al. (2014) have proposed to improve projection performance by using a dictionary of valid tags for each word (coming from Wiktionary 2 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 836, |
|
"end": 852, |
|
"text": "Li et al. (2012)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 855, |
|
"end": 879, |
|
"text": "T\u00e4ckstr\u00f6m et al. (2013b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 884, |
|
"end": 908, |
|
"text": "Wisniewski et al. (2014)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In another vein, various studies based on crosslingual representation learning methods have proposed to avoid using such pre-processed and noisy alignments for label projection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "First, these approaches learn language-independent features, across many different languages (Al-Rfou et al., 2013) . Then, the induced representation space is used to train NLP tools by exploiting labeled data from the source language and apply them in the target language. To induce interlingual features, several resources have been used, including bilingual lexicon (Durrett et al., 2012; Gouws and S\u00f8gaard, 2015a) and parallel corpora (T\u00e4ckstr\u00f6m et al., 2013a; Gouws et al., 2015b) . Cross-lingual representation learning have achieved good results in different NLP applications such as cross-language POS tagging and cross-language super sense (SuS) tagging (Gouws and S\u00f8gaard, 2015a) , cross-language named entity recognition (T\u00e4ckstr\u00f6m et al., 2012) , cross-lingual document classification and lexical translation task (Gouws et al., 2015b) , cross language dependency parsing (Durrett et al., 2012; T\u00e4ckstr\u00f6m et al., 2013a; Xiao and Guo, 2014) and cross language semantic role labeling ( Titov and Klementiev, 2012) . Our approach described in next section, is inspired by these works since we also try to learn a common language-independent feature space. Our common (multilingual) representation is based on the occurrence of source and target words in a parallel corpus. Using this representation, we learn a cross-lingual POS tagger (multilingual POS tagger if a multilingual parallel corpus is used) based on a recurrent neural network (RNN) on the source labeled text and apply it to tag target language text. We also show that the architecture proposed is well suited for lightly supervised training (adaptation).", |
|
"cite_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 115, |
|
"text": "(Al-Rfou et al., 2013)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 370, |
|
"end": 392, |
|
"text": "(Durrett et al., 2012;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 393, |
|
"end": 418, |
|
"text": "Gouws and S\u00f8gaard, 2015a)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 440, |
|
"end": 465, |
|
"text": "(T\u00e4ckstr\u00f6m et al., 2013a;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 466, |
|
"end": 486, |
|
"text": "Gouws et al., 2015b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 664, |
|
"end": 690, |
|
"text": "(Gouws and S\u00f8gaard, 2015a)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 733, |
|
"end": 757, |
|
"text": "(T\u00e4ckstr\u00f6m et al., 2012)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 827, |
|
"end": 848, |
|
"text": "(Gouws et al., 2015b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 885, |
|
"end": 907, |
|
"text": "(Durrett et al., 2012;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 908, |
|
"end": 932, |
|
"text": "T\u00e4ckstr\u00f6m et al., 2013a;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 933, |
|
"end": 952, |
|
"text": "Xiao and Guo, 2014)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 995, |
|
"end": 1024, |
|
"text": "( Titov and Klementiev, 2012)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Finally, several works have investigated how to apply neural networks to NLP applications (Bengio et al., 2006; Collobert and Weston, 2008; Collobert et al., 2011; Henderson, 2004; Mikolov et al., 2010; Federici and Pirrelli, 1993) . While Federici and Pirrelli (1993) was one of the earliest attempts to develop a part-of-speech tagger based on a special type of neural network, Bengio et al. (2006) and Mikolov et al. (2010) applied neural networks to build language models. Collobert and Weston (2008) and Collobert et al. (2011) employed a deep learning framework for multi-task learning including part-of-speech tagging, chunking, namedentity recognition, language modelling and semantic role-labeling. Henderson (2004) proposed training methods for learning a statistical parser based on neural network.", |
|
"cite_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 111, |
|
"text": "(Bengio et al., 2006;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 112, |
|
"end": 139, |
|
"text": "Collobert and Weston, 2008;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 140, |
|
"end": 163, |
|
"text": "Collobert et al., 2011;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 164, |
|
"end": 180, |
|
"text": "Henderson, 2004;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 202, |
|
"text": "Mikolov et al., 2010;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 231, |
|
"text": "Federici and Pirrelli, 1993)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 380, |
|
"end": 400, |
|
"text": "Bengio et al. (2006)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 405, |
|
"end": 426, |
|
"text": "Mikolov et al. (2010)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 477, |
|
"end": 504, |
|
"text": "Collobert and Weston (2008)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 509, |
|
"end": 532, |
|
"text": "Collobert et al. (2011)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 708, |
|
"end": 724, |
|
"text": "Henderson (2004)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To avoid projecting label information from deterministic and error-prone word alignments, we propose to represent the bilingual word alignment information intrinsically in a neural network architecture. The idea consists in implementing a neural network as a cross-lingual POS tagger and show that, in combination with a simple cross-lingual projection method, this achieves comparable results to state-ofthe-art unsupervised POS taggers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised Approach Overview", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Our approach is the following: we assume that we have a POS tagger in the source language and a parallel corpus. The key idea is to learn a bilingual neural network POS tagger on the pre-annotated source side of the parallel corpus, and to use it for tagging target text. Before describing our bilingual neural network POS tagger, we present the simple crosslingual projection method, considered as our baseline in this work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised Approach Overview", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Our simple POS tagger (described by Algorithm 1) is close to the approach introduced in Yarowsky et al. 2001. These authors were the first to use automatic word alignments (from a bilingual parallel corpus) to project annotations from a source language to a target language, to build unsupervised POS taggers. The algorithm is shortly recalled below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised POS Tagger Based on a Simple Cross-lingual Projection", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Algorithm 1 : Simple POS Tagger 1: Tag source side of the parallel corpus. 2: Word align the parallel corpus with Giza++ (Och and Ney, 2000) or other word alignment tools. 3: Project tags directly for 1-to-1 alignments. 4: For many-to-one mappings project the tag of the middle word. 5: The unaligned words (target) are tagged with their most frequent associated tag in the corpus. 6: Learn POS tagger on target side of the bi-text with, for instance, TNT tagger (Brants, 2000) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 140, |
|
"text": "(Och and Ney, 2000)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 463, |
|
"end": 477, |
|
"text": "(Brants, 2000)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised POS Tagger Based on a Simple Cross-lingual Projection", |
|
"sec_num": "3.1" |
|
}, |
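
{

"text": "To make steps 3-5 concrete, here is a minimal Python sketch of the projection (our illustration of the algorithm as described above, not the original implementation; 'alignment' is assumed to be a list of (source_index, target_index) pairs produced by a tool such as Giza++):\nfrom collections import Counter, defaultdict\n\ndef project_tags(source_tags, target_len, alignment):\n    links = defaultdict(list)  # target position -> aligned source positions\n    for s, t in alignment:\n        links[t].append(s)\n    projected = [None] * target_len\n    for t, srcs in links.items():\n        if len(srcs) == 1:  # step 3: 1-to-1 links project the tag directly\n            projected[t] = source_tags[srcs[0]]\n        else:  # step 4: many-to-one links take the tag of the middle word\n            projected[t] = source_tags[sorted(srcs)[len(srcs) // 2]]\n    return projected\n\ndef fill_unaligned(corpus_tokens, corpus_tags):\n    # step 5: unaligned target words get their most frequent tag in the corpus\n    freq = defaultdict(Counter)\n    for toks, tags in zip(corpus_tokens, corpus_tags):\n        for w, t in zip(toks, tags):\n            if t is not None:\n                freq[w][t] += 1\n    for toks, tags in zip(corpus_tokens, corpus_tags):\n        for i, (w, t) in enumerate(zip(toks, tags)):\n            if t is None and freq[w]:\n                tags[i] = freq[w].most_common(1)[0][0]\n    return corpus_tags\nThe resulting (noisily) tagged target side is then used to train the TNT tagger (step 6).",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Unsupervised POS Tagger Based on a Simple Cross-lingual Projection",

"sec_num": "3.1"

},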
|
{ |
|
"text": "There are two major architectures of neural networks: Feedforward (Bengio et al., 2006) and Recurrent Neural Networks (RNN) (Mikolov et al., 2010) . Sundermeyer et al. (2013) showed that language models based on recurrent architecture achieve better performance than language models based on feedforward architecture. This is due to the fact that recurrent neural networks do not use a context of limited size. This property led us to use, in our experiments, a simple recurrent architecture (Elman, 1990) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 87, |
|
"text": "(Bengio et al., 2006)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 124, |
|
"end": 146, |
|
"text": "(Mikolov et al., 2010)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 174, |
|
"text": "Sundermeyer et al. (2013)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 492, |
|
"end": 505, |
|
"text": "(Elman, 1990)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised POS Tagger Based on Recurrent Neural Network", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In this section, we describe in detail our method for building an unsupervised POS tagger for a target language based on a recurrent neural network.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised POS Tagger Based on Recurrent Neural Network", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The RNN consists of at least three layers: input layer in time t is x(t), hidden layer h(t) (also called context layer), and output layer is denoted as y(t). All neurons of the input layer are connected to every neuron of hidden layer by weight matrix U and W . The weight matrix V connects all neurons of the hidden layer to every neuron of output layer, as it can be seen in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 377, |
|
"end": 385, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model description", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "In our RNN POS tagger, the input layer is formed by concatenating vector representing current word w, and the copy of the hidden layer at previous time. We start by associating to each word in both the source and the target vocabularies a common vector representation, namely V wi , i = 1, ..., N , where N is the number of parallel sentences (bi-sentences in the parallel corpus). If w appears in i-th bi-sentence of the parallel corpus then V wi = 1. Therefore, all input neurons corresponding to current word w are set to 0 except those that correspond to bi-sentences containing w, which are set to 1. The idea is that, in general, a source word and its target translation appear together in the same bi-sentences and their vector representations are close.We can then use the RNN POS tagger, initially trained on source side, to tag the target side (because of our common vector representation).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model description", |
|
"sec_num": "3.2.1" |
|
}, |
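
{

"text": "As an illustration, the common representation can be built as in the following Python sketch (our minimal rendering of the description above, not the authors' released code; 'bisentences' is assumed to be a list of (source_tokens, target_tokens) pairs):\nimport numpy as np\n\ndef build_occurrence_vectors(bisentences):\n    N = len(bisentences)  # one vector component per bi-sentence\n    vectors = {}\n    for i, (src, tgt) in enumerate(bisentences):\n        for w in set(src) | set(tgt):\n            vectors.setdefault(w, np.zeros(N))[i] = 1.0  # V_{wi} = 1 if w occurs in bi-sentence i\n    return vectors\nA source word and its translation tend to share the same non-zero components, so their vectors are close; a word absent from the parallel corpus gets the all-zero vector, which is the OOV limitation discussed in Section 5.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model description",

"sec_num": "3.2.1"

},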
|
{ |
|
"text": "We also use two hidden layers (our preliminary experiments have shown better performance than one hidden layer), with variable sizes (usually 80-1024 neurons) and sigmoid activation function. These hidden layers inherently capture word alignment information. The output layer of our model contains 12 neurons, this number is determined by the POS tagset size. To deal with the potential mismatch in the POS tagsets of source and target languages, we adopted the Petrov et al. (2012) universal tagset (12 tags common for most languages): NOUN (nouns), VERB (verbs), ADJ (adjectives), ADV (adverbs), PRON (pronouns), DET (determiners and articles), ADP (prepositions and postpositions), NUM (numerals), CONJ (conjunctions), PRT (particles), . (punctuation marks) and X (all other categories, e.g., foreign words, abbreviations).", |
|
"cite_spans": [ |
|
{ |
|
"start": 462, |
|
"end": 482, |
|
"text": "Petrov et al. (2012)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model description", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Therefore, each output neuron corresponds to one POS tag in the tagset. The softmax activation function is used to normalize the values of output neurons to sum up to 1. Finally, the current word w (in input) is tagged with most probable output tag.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model description", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "The first step in our approach is to train the neural network, given a parallel corpus (training corpus), and a validation corpus (different from train data) in the source language. In typical applications, the source language is a resource-rich language (which already has an efficient POS tagger). Before training the model, the following pre-processing steps are performed :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training the model", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Source side of training corpus and validation corpus are annotated (using the available supervised POS tagger).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training the model", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Using a parallel corpus, we build the common vector representations for source and target side words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training the model", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "Then, the neural network is trained through several epochs. Algorithm 2 below describes one training epoch.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training the model", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "Algorithm 2 : Training RNN POS Tagger 1: Initialize weights with Normal distribution. 2: Set time counter t = 0, and initialize state of the neurons in the hidden layer h(t) to 1. 3: Increase time counter t by 1. 4: Push at the input layer w(t) the vector representation of the current (source) word of training corpus. 5: Copy the state of the hidden layer h(t-1) to the input layer. 6: Perform a forward pass to obtain the predicted output y(t). 7: Compute the gradient of the error in the output layer e o (t) = d(t) \u2212 y(t) (difference between the predicted y(t) and the desired output d(t)). 8: Propagate the error back through the network and update weights with stochastic gradient descent using Back-Propagation (BP) and Back-Propagationthrough-time (BPTT) (Rumelhartet al., 1985) . 9: If not all training inputs were processed, go to 3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 764, |
|
"end": 787, |
|
"text": "(Rumelhartet al., 1985)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training the model", |
|
"sec_num": "3.2.2" |
|
}, |
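
{

"text": "For concreteness, one step of this procedure can be sketched in numpy as follows (an illustrative single-hidden-layer version under our reading of Algorithm 2, with plain back-propagation only, whereas the actual model uses two hidden layers and also BPTT):\nimport numpy as np\n\ndef sigmoid(z):\n    return 1.0 / (1.0 + np.exp(-z))\n\ndef softmax(z):\n    e = np.exp(z - z.max())\n    return e / e.sum()\n\ndef train_step(x, d, h_prev, U, W, V, alpha=0.1):\n    # x: occurrence vector of the current word (step 4); d: one-hot desired tag\n    h = sigmoid(U @ x + W @ h_prev)  # steps 5-6: forward pass with recurrent input\n    y = softmax(V @ h)\n    e_o = d - y  # step 7: output-layer error gradient\n    e_h = (V.T @ e_o) * h * (1.0 - h)  # step 8: propagate the error back\n    V += alpha * np.outer(e_o, h)\n    U += alpha * np.outer(e_h, x)\n    W += alpha * np.outer(e_h, h_prev)\n    return h",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training the model",

"sec_num": "3.2.2"

},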
|
{ |
|
"text": "After each epoch, the neural network is used to tag the validation corpus, then the result is compared with the result of the supervised POS tagger, to calculate the per-token accuracy. If the per-token accuracy increases, training continues in the new epoch. Otherwise, the learning rate is halved at the start of the new epoch. After that, if the per-token accuracy does not increase anymore, training is stopped to prevent over-fitting. Generally convergence takes 5-10 epochs, starting with a learning rate \u03b1 = 0.1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training the model", |
|
"sec_num": "3.2.2" |
|
}, |
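
{

"text": "This schedule can be summarized by the following sketch (hypothetical helper names: run_epoch applies Algorithm 2 over the training corpus, and tag_accuracy compares the network's output on the validation corpus against the supervised tagger):\ndef train_until_converged(run_epoch, tag_accuracy, validation_corpus, alpha=0.1):\n    best_accuracy = 0.0\n    halving = False\n    while True:\n        run_epoch(alpha)\n        accuracy = tag_accuracy(validation_corpus)\n        if accuracy <= best_accuracy:\n            if halving:\n                break  # still no improvement: stop to prevent over-fitting\n            halving = True  # first stall: start halving the learning rate\n        best_accuracy = max(best_accuracy, accuracy)\n        if halving:\n            alpha /= 2.0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training the model",

"sec_num": "3.2.2"

},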
|
{ |
|
"text": "After learning the model, step 2 simply consists in using the trained model as a target language POS tagger (using our common vector representation). It is important to note that if we train on a multilingual parallel corpus with N languages (N > 2), the same trained model will be able to tag all the N languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training the model", |
|
"sec_num": "3.2.2" |
|
}, |
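
{

"text": "Tagging then reduces to a forward pass over the sentence with the shared vectors, whatever the language (a sketch under the same assumptions as the training sketch above, not the authors' code):\nimport numpy as np\n\nTAGSET = ['NOUN', 'VERB', 'ADJ', 'ADV', 'PRON', 'DET', 'ADP', 'NUM', 'CONJ', 'PRT', '.', 'X']\n\ndef sigmoid(z):\n    return 1.0 / (1.0 + np.exp(-z))\n\ndef tag_sentence(tokens, vectors, U, W, V):\n    h = np.ones(W.shape[0])  # initial hidden state, as in Algorithm 2\n    tags = []\n    for w in tokens:\n        x = vectors.get(w, np.zeros(U.shape[1]))  # OOV words fall back to the all-zero vector\n        h = sigmoid(U @ x + W @ h)\n        tags.append(TAGSET[int(np.argmax(V @ h))])  # softmax is monotonic, so argmax suffices\n    return tags",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training the model",

"sec_num": "3.2.2"

},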
|
{ |
|
"text": "Hence, our approach assumes that the word order in both source and target languages are similar. In some languages such as English and French, word order for contexts containing nouns could be reversed most of the time. For example, the European Commission would be translated into la Commission europenne. In order to deal with the word order constraints, we combined the RNN model with the cross-lingual projection model, and we also propose Light Supervision (adaptation) of RNN model where a few amount of target data will help to learn the word order (and consequently POS order) in the target language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training the model", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "Since the simple cross-lingual projection model M1 and RNN model M2 use different strategies for POS tagging (TNT is based on Markov models while RNN is a neural network), we assume that these two models are complementary. In addition, model M2 does not implement any out-of-vocabulary (OOV) words processing yet. So, to keep the benefits of each approach, we explore how to combine them with linear interpolation. Formally, the probability to tag a given word w is computed as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Simple Cross-lingual Projection and RNN Models", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P M 12 (t|w) = (\u00b5P M 1 (t|w, C M 1 )+(1\u2212\u00b5)P M 2 (t|w, C M 2 ))", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Combining Simple Cross-lingual Projection and RNN Models", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where, C M 1 and C M 2 are, respectively the context of w considered by M1 and M2. The relative importance of each model is adjusted through the interpolation parameter \u00b5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Simple Cross-lingual Projection and RNN Models", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The word w is tagged with the most probable tag, using the function f described as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Simple Cross-lingual Projection and RNN Models", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "f (w) = arg max t (P M 12 (t|w))", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Combining Simple Cross-lingual Projection and RNN Models", |
|
"sec_num": "3.3" |
|
}, |
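
{

"text": "In code, Equations (1) and (2) amount to a simple mixture (sketch; p_m1 and p_m2 are assumed to hold each model's per-tag probabilities for the word w):\ndef combined_tag(p_m1, p_m2, mu):\n    # Equation (1): P_M12(t|w) = mu * P_M1(t|w, C_M1) + (1 - mu) * P_M2(t|w, C_M2)\n    p = {t: mu * p_m1.get(t, 0.0) + (1.0 - mu) * p_m2.get(t, 0.0)\n         for t in set(p_m1) | set(p_m2)}\n    # Equation (2): f(w) = argmax_t P_M12(t|w)\n    return max(p, key=p.get)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Combining Simple Cross-lingual Projection and RNN Models",

"sec_num": "3.3"

},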
|
{ |
|
"text": "4 Light Supervision (adaptation) of RNN model", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Simple Cross-lingual Projection and RNN Models", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "While the unsupervised RNN model described in the previous section has not seen any annotated data in the target language, we also consider the use of a small amount of adaptation data (manually annotated in target language) in order to capture target language specificity. Such an adaptation is performed on top of the unsupervised RNN model without retraining the full model. The full process is the following (steps 1 and 2 correspond to the unsupervised case):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Simple Cross-lingual Projection and RNN Models", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "1. Each word in the parallel corpus is represented by a binary occurrence vector (same initial common vector representation).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Simple Cross-lingual Projection and RNN Models", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "2. The source side of the parallel corpus (using the available supervised POS tagger) and common vector representation of words are combined to train the RNN (Algorithm 2).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Simple Cross-lingual Projection and RNN Models", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "3. The RNN trained is adapted in a light supervision manner, using a small monolingual target corpus (manually annotated) and the common vector representation of words (extracted from the initial parallel corpus).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Simple Cross-lingual Projection and RNN Models", |
|
"sec_num": "3.3" |
|
}, |
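
{

"text": "Step 3 can be sketched as continued training on the small annotated target corpus, keeping the occurrence vectors from step 1 frozen (illustrative only; train_step is the function assumed in the training sketch of Section 3.2.2):\nimport numpy as np\n\nTAGSET = ['NOUN', 'VERB', 'ADJ', 'ADV', 'PRON', 'DET', 'ADP', 'NUM', 'CONJ', 'PRT', '.', 'X']\n\ndef one_hot(tag):\n    v = np.zeros(len(TAGSET))\n    v[TAGSET.index(tag)] = 1.0\n    return v\n\ndef adapt(train_step, U, W, V, target_corpus, vectors, alpha=0.1):\n    # target_corpus: sentences of (word, tag) pairs, manually annotated in the target language\n    for sentence in target_corpus:\n        h = np.ones(W.shape[0])  # reset the hidden state for each sentence\n        for word, tag in sentence:\n            x = vectors.get(word)  # vectors frozen from step 1: OOV words get no update\n            if x is None:\n                continue\n            h = train_step(x, one_hot(tag), h, U, W, V, alpha)\n    return U, W, V",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Combining Simple Cross-lingual Projection and RNN Models",

"sec_num": "3.3"

},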
|
{ |
|
"text": "Such an approach is particularly suited for an iterative scenario where a user would post-edit (correct) the unsupervised POS-tagger output in order to produce rapidly adaptation data in the training language (light supervision).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Simple Cross-lingual Projection and RNN Models", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Initially, we applied our method to the English-French language pair. French was considered as the target language here. French is certainly not a resource-poor language, but it was used as if no tagger was available (in fact, TreeTagger (Schmid, 1995) , a supervised POS tagger exists for this language and helps us to obtain a ground truth for Das & Petrov (2011) , Duong et al (2013) and Gouws & S\u00f8gaard (2015) . evaluation). To train the RNN POS tagger, we used a training set of 10, 000 parallel sentences extracted from the ARCADE II English-French corpus (Veronis et al., 2008) . Our validation corpus contains 1000 English sentences (these sentences are not in the train set) extracted from the AR-CADE II English corpus. The test corpus is also extracted from the ARCADE II corpus, and it contains 1000 French sentences (which are obviously different from the train set) tagged with the French TreeTagger Toolkit (Schmid, 1995) and manually checked.", |
|
"cite_spans": [ |
|
{ |
|
"start": 238, |
|
"end": 252, |
|
"text": "(Schmid, 1995)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
|
{ |
|
"start": 562, |
|
"end": 584, |
|
"text": "(Veronis et al., 2008)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 922, |
|
"end": 936, |
|
"text": "(Schmid, 1995)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and tools", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Encouraged by the results obtained on the English-French language pair, and in order to confirm our results, we run additional experiments on other languages, we applied our method to build RNN POS taggers for three more target languages -German, Greek and Spanish -with English as the source language, in order to compare our results with those of (Das and Petrov, 2011; Duong et al., 2013; Gouws and S\u00f8gaard, 2015a) . Our training and validation (English) data extracted from the Europarl corpus (Koehn, 2005) are a subset of the training data of (Das and Petrov, 2011; Duong et al., 2013) . The sizes of the data sets are: 65, 000 (train) and 10, 000 (dev) bi-sentences. For testing, we used the same test corpora (from CoNLL shared tasks on dependency parsing (Buchholz and Marsi, 2006) ) as (Das and Petrov, 2011; Duong et al., 2013; Gouws and S\u00f8gaard, 2015a) . The evaluation metric (per-token accuracy) and the Universal Tagset are the same as before. The source sides of the training corpora (ARCADE II and Europarl) and the validation corpora are tagged with the English TreeTagger Toolkit. Using the matching provided by Petrov et al. (2012) we map the TreeTagger and the CoNLL tagsets to a common Universal Tagset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 349, |
|
"end": 371, |
|
"text": "(Das and Petrov, 2011;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 372, |
|
"end": 391, |
|
"text": "Duong et al., 2013;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 392, |
|
"end": 417, |
|
"text": "Gouws and S\u00f8gaard, 2015a)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 498, |
|
"end": 511, |
|
"text": "(Koehn, 2005)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 549, |
|
"end": 571, |
|
"text": "(Das and Petrov, 2011;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 572, |
|
"end": 591, |
|
"text": "Duong et al., 2013)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 764, |
|
"end": 790, |
|
"text": "(Buchholz and Marsi, 2006)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 796, |
|
"end": 818, |
|
"text": "(Das and Petrov, 2011;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 819, |
|
"end": 838, |
|
"text": "Duong et al., 2013;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 839, |
|
"end": 864, |
|
"text": "Gouws and S\u00f8gaard, 2015a)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1131, |
|
"end": 1151, |
|
"text": "Petrov et al. (2012)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and tools", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "In order to build our unsupervised tagger based on a Simple Cross-lingual Projection (Algorithm 1), we tag the target side of the training corpus, with tags projected from English side through wordalignments established by GIZA++. After tags projection we use TNT Tagger to induce a target language POS Tagger (see Algorithm 1 described in Section 3.1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and tools", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Also, our proposed approach implements Algorithm 2 described before. We had to slightly modify the Recurrent Neural Network Language Modeling Toolkit (RNNLM) provided by Mikolov et al. (2011) , to learn our Recurrent Neural Network Based POS Tagger 5 . The modifications include:", |
|
"cite_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 191, |
|
"text": "Mikolov et al. (2011)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and tools", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "(1) building the cross-lingual word representations automatically; and (2) learning and testing models with several hidden layers (common representation as input and universal POS tags as output).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and tools", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "The combined model is built for each considered language using cross-validation on the test corpus. First the test corpus is split into 2 equal parts and on each part, we estimate the interpolation parameter \u00b5 (Equation 1) which maximizes the per-token accuracy score. Then each part of test corpus is tagged using the combined model tuned (Equation 2) on the other part, and vice versa (standard cross-validation procedure).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and tools", |
|
"sec_num": "5.1" |
|
}, |
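
{

"text": "A sketch of this tuning procedure (hypothetical helpers: accuracy(part, mu) scores the combined tagger on one part of the test corpus for a given mu, and tag_with(part, mu) tags that part with the tuned combination):\nimport numpy as np\n\ndef tune_mu(part, accuracy):\n    # grid search for the mu maximizing per-token accuracy (Equation 1)\n    return max(np.linspace(0.0, 1.0, 101), key=lambda mu: accuracy(part, mu))\n\ndef two_fold_tag(part_a, part_b, accuracy, tag_with):\n    mu_for_a = tune_mu(part_b, accuracy)  # tune on the other part ...\n    mu_for_b = tune_mu(part_a, accuracy)\n    return tag_with(part_a, mu_for_a), tag_with(part_b, mu_for_b)  # ... then tag (Equation 2)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data and tools",

"sec_num": "5.1"

},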
|
{ |
|
"text": "Finally, we investigate how the performance of the adapted model changes according to target adaptation corpus size. We choose German as target adaptation language, because we dispose of a large German annotated data set (from CoNLL shared tasks on dependency parsing). Then, we generate German adaptation sets of 7 different sizes (from 100 to 10, 000 utterances). Each adaptation set is used to adapt our unsupervised RNN POS tagger.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and tools", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "As contrastive experiments, we also learn supervised POS Taggers based on RNN, TNT or their linear combination.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and tools", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "In table 1 we report the results obtained for the unsupervised approach. Preliminary RNN experiments used one hidden layer, but we obtained lower performance compared to those with two hidden layers. So we report here RNN accuracy achieved using two hidden layers, containing respectively 640 and 160 neurons (RNN-640-160). As shown in the table, this accuracy is close to that of the simple projection tagger, the difference coming mostly from out-of-vocabulary (OOV) words. As OOV words are not in the training corpus, their vector representations are empty (they contain only 0), therefore the RNN model uses only the context information, which is insufficient to tag correctly the OOV words in the test corpus. We also observe that both methods seem complementary since the best results are achieved using the linearly combined model Projection+RNN-640-160. It achieves comparable results to Das and Petrov (2011) , Duong et al. (2013) (who used the full Europarl corpus while we used only a 65, 000 subset of it) and Gouws and S\u00f8gaard (2015a) (who in addition used Wiktionary and Wikipedia) methods. It is also important to note that a single RNN tagger applies to German, Greek and Spanish; so this is a truly multilingual POS tagger! Therefore, as for several other NLP tasks such as language modelling or machine translation (where standard and NN-based models are combined in a log-linear model), the use of both standard and RNN-based approaches seems necessary to obtain optimal performances.", |
|
"cite_spans": [ |
|
{ |
|
"start": 896, |
|
"end": 917, |
|
"text": "Das and Petrov (2011)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 920, |
|
"end": 939, |
|
"text": "Duong et al. (2013)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised model", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "In order to know in what respect using RNN improves combined model accuracy, and vice versa, we analyzed the French test corpus. In the example provided in table 2, RNN information helps to resolve the French word \"pr\u00e9cise\" tag ambiguity: in the Simple Projection model it is tagged as a verb English a precise breakdown of spending French une r\u00e9partition pr\u00e9cise des d\u00e9penses Simple une/DET r\u00e9partition/NOUN Projection pr\u00e9cise/VERB des/ADP ... Projection une/DET r\u00e9partition/NOUN + RNN pr\u00e9cise/ADJ des/ADP ... (VERB), whereas it is an adjective (ADJ) in this particular context. We hypothesize that the context information is better represented in RNN, because of the recurrent connections.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised model", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "In case of word order divergence, we observed that our model can still handle some divergence, notably for the following cases:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised model", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "\u2022 Obviously if the current tag word is unambiguous (case of ADJ and NOUN order from English to French -see table 3) , then the context (RNN history) information has no effect.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 115, |
|
"text": "French -see table 3)", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Unsupervised model", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "\u2022 When the context is erroneous (due to the fact that word order for the target test corpus is different from the source training corpus), the right word tag can be recovered using the combination (RNN+Cross-lingual projection -see ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised model", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "In target language data annotated (from 100 to 10, 000 utterances). We focus on German target language only. It is compared with two supervised approaches based on TNT or RNN. The supervised approaches are trained on the adaptation data only. For supervised RNN, it is important to mention that the input vector representation has a different dimension for each amount of adaptation data (we recall that the vector representation is V wi , i = 1, ..., N , where N is the number of sentences; and N is growing from 100 to 10, 000). The results show that our adaptation, on top of the unsupervised RNN is efficient in very low resource settings (< 1000 target language utterances). When more data is available (> 1000 utterances), the supervised approaches start to be better (but RNN and TNT are still complementary since their combination improves the tag accuracy). Figure 2 details the behavior of the same methods for OOV words. We clearly see the limitation of the Unsupervised RNN + Adaptation to handle OOV words, since the input vector representation is the same (comes from the initial parallel corpus) and does not evolve as more German adaptation data is available. Better handling OOV words in unsupervised RNN training is our priority for future works.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lightly supervised model", |
|
"sec_num": "5.2.2" |
|
}, |
|
{ |
|
"text": "Finally, these results show that for all training data sizes, RNN brings complementary information on top of a more classical approach such as TNT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lightly supervised model", |
|
"sec_num": "5.2.2" |
|
}, |
|
{ |
|
"text": "In this paper, we have presented a novel approach which uses a language-independent word representation (based only on word occurrence in a parallel corpus) within a recurrent neural network (RNN) to build multilingual POS tagger. Our method induces automatically POS tags from one language to another (or several others) and needs only a parallel corpus and a POS tagger in the source language (without using word alignment information).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We first empirically evaluated the proposed approach on two unsupervised POS taggers based on RNN : (1) English-French cross-lingual POS tagger; and (2) English-German-Greek-Spanish multilingual POS tagger. The performance of the second model is close to state-of-the-art with only a subset (65, 000) of Europarl corpus used.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Additionally, when a small amount of supervised data is available, the experimental results demonstrated the effectiveness of our method in a weakly supervised context (especially for very-lowresourced settings).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Although our initial experiments are positive, we believe they can be improved in a number of ways. In future work, we plan, on the one hand, to better manage OOV representation (for instance using Cross-lingual Word Embeddings), and, on the other hand, to consider more complex tasks such as word senses projection or semantic role labels projection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "http://www.wiktionary.org/ PACLIC 29", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For RNN a single system is used for German, Greek and Spanish", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The modified source code is Available from the following URL https://github.com/othman-zennaki/RNN_ POS_Tagger.git", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Polyglot: Distributed word representations for multilingual nlp", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Al-Rfou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Perozzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Skiena", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Seventeenth Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "183--192", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Al-Rfou, B. Perozzi and S. Skiena. 2013. Poly- glot: Distributed word representations for multilingual nlp, In Proceedings of the Seventeenth Conference on Computational Natural Language Learning:183-192.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Cross-lingual alignment of FrameNet annotations through Hidden Markov Models", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Annesi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Basili", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of CICLing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "12--25", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Annesi and R. Basili. 2010. Cross-lingual alignment of FrameNet annotations through Hidden Markov Models, In Proceedings of CICLing :12-25.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Neural probabilistic language models", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Sen\u00e9cal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Morin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Gau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Innovations in Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "137--186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y. Bengio, H. Schwenk, J. Sen\u00e9cal, F. Morin and J. Gau- vain. 2006. Neural probabilistic language models, In Innovations in Machine Learning:137-186.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Evaluating cross-language annotation transfer in the Multi-SemCor corpus", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Forner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Pianta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 20th international conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "364--370", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L. Bentivogli, P. Forner and E. Pianta. 2004. Evaluat- ing cross-language annotation transfer in the Multi- SemCor corpus, In Proceedings of the 20th interna- tional conference on Computational Linguistics:364- 370. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "CoNLL-X shared task on multilingual dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Buchholz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Marsi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the Tenth Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "149--164", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Buchholz and E. Marsi. 2006. CoNLL-X shared task on multilingual dependency parsing, In Proceedings of the Tenth Conference on Computational Natural Language Learning:149-164. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "TnT: a statistical part-of-speech tagger", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Brants", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the sixth conference on Applied natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "224--231", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Brants. 2000. TnT: a statistical part-of-speech tag- ger, In Proceedings of the sixth conference on Applied natural language processing:224-231.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A unified architecture for natural language processing: Deep neural networks with multitask learning", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Collobert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the International Conference on Machine Learning (ICML)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "160--167", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Collobert and J. Weston. 2008. A unified architecture for natural language processing: Deep neural networks with multitask learning, In Proceedings of the Interna- tional Conference on Machine Learning (ICML):160- 167.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Natural language processing (almost) from scratch", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Collobert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Bottou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Karlen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Kavukcuoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Kuksa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "In Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2493--2537", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Collobert, J. Weston, L. Bottou, M. Karlen, K. Kavukcuoglu, and P. Kuksa. 2011. Natural language processing (almost) from scratch, In Journal of Ma- chine Learning Research (JMLR), volume 12:2493- 2537.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Unsupervised Part-of-Speech Tagging with Bilingual Graph-Based Projections", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "600--609", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Das and S. Petrov. 2011. Unsupervised Part-of- Speech Tagging with Bilingual Graph-Based Projec- tions, In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Hu- man Language Technologies, volume 1:600-609. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Simpler unsupervised POS tagging with bilingual projections", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Duong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Cook", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Pecina", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "634--639", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Duong, P.Cook, S. Bird and P. Pecina. 2013. Simpler unsupervised POS tagging with bilingual projections, In ACL (2) :634-639.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Syntactic transfer using a bilingual lexicon", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Durrett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Pauls", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning:1-11. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Durrett, A. Pauls and D. Klein. 2012. Syntactic trans- fer using a bilingual lexicon, In Proceedings of the 2012 Joint Conference on Empirical Methods in Natu- ral Language Processing and Computational Natural Language Learning:1-11. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Finding structure in time", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Elman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Cognitive science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "179--211", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J.L. Elman. 1990. Finding structure in time, In Cogni- tive science:179-211.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Discriminative training of a neural network statistical parser", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics (ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "95--102", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Henderson. 2004. Discriminative training of a neu- ral network statistical parser, In Proceedings of the Annual Meeting of the Association for Computational Linguistics (ACL):95-102.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Analogical modelling of text tagging, unpublished report, Istituto diLinguistica Computazionale", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Federici", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Pirrelli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Federici and V. Pirrelli. 1993. Analogical modelling of text tagging, unpublished report, Istituto diLin- guistica Computazionale, Pisa, Italy.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Simple task-specific bilingual word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Gouws", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "The 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL", |
|
"volume": "15", |
|
"issue": "", |
|
"pages": "1386--1390", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Gouws and A. S\u00f8gaard 2015. Simple task-specific bilingual word embeddings, In The 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, NAACL'15:1386-1390.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Bil-BOWA: Fast Bilingual Distributed Representations without Word Alignments", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Gouws", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 32nd International Conference on Machine Learning", |
|
"volume": "2015", |
|
"issue": "", |
|
"pages": "748--756", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Gouws, Y. Bengio and G. Corrado 2015. Bil- BOWA: Fast Bilingual Distributed Representations without Word Alignments, In Proceedings of the 32nd International Conference on Machine Learning, ICML 2015:748-756.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Relaxed cross-lingual projection of constituent syntax", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "L\u00fc", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1192--1201", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "W. Jiang, Q. Liu and Y. L\u00fc, 2011 Relaxed cross-lingual projection of constituent syntax, In Proceedings of the Conference on Empirical Methods in Natural Lan- guage Processing:1192-1201. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Multilingual named entity recognition using parallel data and metadata from wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics: Long Papers", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "694--702", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kim, K. Toutanova and H. Yu, 2012 Multilin- gual named entity recognition using parallel data and metadata from wikipedia, In Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics: Long Papers-volume 1:694-702. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Wiki-ly supervised part-of-speech tagging", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Gra\u00e7a", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Taskar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1389--1398", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Li, J.V Gra\u00e7a and B. Taskar. 2012 Wiki-ly super- vised part-of-speech tagging, In Proceedings of the 2012 Joint Conference on Empirical Methods in Nat- ural Language Processing and Computational Natu- ral Language Learning:1389-1398. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Recurrent neural network based language model", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Karafi\u00e1t", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Burget", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Cernock\u00fd", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Khudanpur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1045--1048", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Mikolov, M. Karafi\u00e1t , L. Burget, J. Cernock\u00fd and S. Khudanpur. 2010. Recurrent neural network based language model, In INTERSPEECH:1045-1048.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "RNNLM-Recurrent neural network language modeling toolkit", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kombrink", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Deoras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Burget", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Cernocky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of the 2011 ASRU Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "196--201", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Mikolov, S. Kombrink , A. Deoras, L. Burget and J. Cernocky. 2011. RNNLM-Recurrent neural network language modeling toolkit, In Proc. of the 2011 ASRU Workshop:196-201.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Improved Statistical Alignment Models", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "00", |
|
"issue": "", |
|
"pages": "440--447", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F.Och and H.Ney. 2000. Improved Statistical Alignment Models, In ACL00:440-447.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Cross-Lingual Annotation Projection Models for Role-Semantic Information", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "21", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Pad\u00f3. 2007. Cross-Lingual Annotation Projection Models for Role-Semantic Information, In German Research Center for Artificial Intelligence and Saar- land University, volume 21.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "A Universal Part-of-Speech Tagset", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 8th International Conference on Language Resources and Evaluation (LREC '12", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2089--2096", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Petrov, D. Das and R. McDonald. 2012. A Univer- sal Part-of-Speech Tagset, In Proceedings of the 8th International Conference on Language Resources and Evaluation (LREC '12):2089-2096.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Europarl: A parallel corpus for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "MT summit", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "79--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Koehn 2005. Europarl: A parallel corpus for statistical machine translation, In MT summit, volume 5 :79-86.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Learning internal representations by error propagation", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Rumelhart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Williams", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1985, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Rumelhart, E. Hinton and R.J. Williams . 1985. Learning internal representations by error propagation, In Learning internal representations by error propaga- tion .", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "TreeTagger-a Language Independent Part-of-speech Tagger", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Schmid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Institut f\u00fcr Maschinelle Sprachverarbeitung", |
|
"volume": "43", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Schmid. 1995. TreeTagger-a Language Indepen- dent Part-of-speech Tagger, In Institut f\u00fcr Maschinelle Sprachverarbeitung, Universit\u00e4t Stuttgart, volume 43 :28.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Comparison of feedforward and recurrent neural network language models", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Sundermeyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Oparin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Gauvain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Freiberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Schluter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "In Acoustics, Speech and Signal Processing (ICASSP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8430--8434", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Sundermeyer, I. Oparin, J. Gauvain, B. Freiberg, R. Schluter and H.Ney. 2013. Comparison of feedfor- ward and recurrent neural network language models, In Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference:8430-8434.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Cross-lingual word clusters for direct transfer of linguistic structure", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "T\u00e4ckstr\u00f6m", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "477--487", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "O. T\u00e4ckstr\u00f6m, R. McDonald and J. Uszkoreit. 2012. Cross-lingual word clusters for direct transfer of lin- guistic structure, In Proceedings of the 2012 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies:477-487. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Target language adaptation of discriminative transfer parsers", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "T\u00e4ckstr\u00f6m", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech-nologies (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "O. T\u00e4ckstr\u00f6m, R. McDonald, J. Nivre. 2013. Target lan- guage adaptation of discriminative transfer parsers, In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguis- tics: Human Language Tech-nologies (NAACL).", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Token and type constraints for crosslingual part-of-speech tagging", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "T\u00e4ckstr\u00f6m", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "In Transactions of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "O. T\u00e4ckstr\u00f6m, D. Das, S. Petrov, R. McDonald and J. Nivre. 2013. Token and type constraints for cross- lingual part-of-speech tagging, In Transactions of the Association for Computational Linguistics: volume 1 :1-12. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Crosslingual induction of semantic roles", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Klementiev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics: Long Papers", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "647--656", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "I. Titov and A. Klementiev. 2012. Crosslingual induction of semantic roles, In Proceedings of the 50th Annual Meeting of the Association for Computational Linguis- tics: Long Papers-volume 1:647-656. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Cross-lingual Word Sense Disambiguation for Predicate Labelling of French", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Van Der Plas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Apidianaki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 21st TALN (Traitement Automatique des Langues Naturelles", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "46--55", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L. Van der Plas and M. Apidianaki. 2014. Cross-lingual Word Sense Disambiguation for Predicate Labelling of French, In Proceedings of the 21st TALN (Traitement Automatique des Langues Naturelles) conference :46- 55.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Arcade II Action de recherche concerte sur l'alignement de documents et son valuation", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Veronis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Hamon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Ayache", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Belmouhoub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Kraif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Laurent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"M H" |
|
], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Semmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Stuck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Wajdi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Veronis, O. Hamon, C. Ayache, R. Belmouhoub, O. Kraif, D. Laurent, T.M.H. Nguyen, N. Semmar, F. Stuck and Z. Wajdi. 2008. Arcade II Action de recherche concerte sur l'alignement de documents et son valuation, Chapitre 2, Editions Herm\u00e9s .", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Visualizing data using t-SNE", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Van Der Maaten", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "In Journal of Machine Learning Research (JMLR", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "2579--2605", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L. Van der Maaten and G. Hinton 2008 Visualizing data using t-SNE, In Journal of Machine Learning Research (JMLR), 9:2579-2605.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Cross-Lingual Part-of-Speech Tagging through Ambiguous Learning", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Wisniewski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "P\u00e9cheux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Gahbiche-Braham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Yvon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "EMNLP", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "1779--1785", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Wisniewski, N. P\u00e9cheux, S. Gahbiche-Braham and F. Yvon. 2014. Cross-Lingual Part-of-Speech Tagging through Ambiguous Learning, In EMNLP'14:1779- 1785.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Distributed Word Representation Learning for Cross-Lingual Dependency Parsing", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "CoNLL-2014", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "119--129", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Xiao and Y. Guo. 2014. Distributed Word Represen- tation Learning for Cross-Lingual Dependency Pars- ing, In CoNLL-2014:119-129.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Inducing multilingual text analysis tools via robust projection across aligned corpora", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Yarowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Ngai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Wicentowski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the first international conference on Human language technology research:1-8. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Yarowsky, G. NGAI and R. Wicentowski. 2001. In- ducing multilingual text analysis tools via robust pro- jection across aligned corpora, In Proceedings of the first international conference on Human language technology research:1-8. Association for Computa- tional Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Architecture of the recurrent neural network." |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Accuracy on OOV according to German training corpus size for Unsupervised RNN + DE Adaptation, Supervised RNN DE and Supervised TNT DE." |
|
}, |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "Improved tagged example for french target language.", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"content": "<table><tr><td>EN Supervised</td><td>... two/NUM local/ADJ</td></tr><tr><td>Treetagger</td><td>groups/NOUN ...</td></tr><tr><td colspan=\"2\">FR Unsupervised ... deux/NUM groupes/NOUN</td></tr><tr><td>RNN</td><td>locaux/NOUN ...</td></tr><tr><td>Projection</td><td>... deux/NUM groupes/NOUN</td></tr><tr><td>+ RNN</td><td>locaux/ ADJ ...</td></tr></table>", |
|
"num": null, |
|
"text": "Word order divergence -unambiguous tag word-.", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "Word order divergence -ambiguous tag word-.", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"content": "<table><tr><td>we report the results obtained after</td></tr><tr><td>adaptation with a gradually increasing amount of</td></tr></table>", |
|
"num": null, |
|
"text": "1% 82.1% 87.3% 90.4% 90.7% 91.2% 91.4% 92.4%", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "Lightly supervised model : effect of German adaptation corpus (manually annotated) size on method described in Section 4 (Unsupervised RNN + DE Adaptation trained on EN Europarl and adapted to German). Contrastive experiments with German supervised POS taggers using same data (RNN, TNT and RNN+TNT). 0 means no German corpus used during training.", |
|
"html": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |