|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:19:57.113653Z" |
|
}, |
|
"title": "PhoNLP: A joint multi-task learning model for Vietnamese part-of-speech tagging, named entity recognition and dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Linh", |
|
"middle": [ |
|
"The" |
|
], |
|
"last": "Nguyen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "VinAI Research", |
|
"location": { |
|
"settlement": "Hanoi", |
|
"country": "Vietnam" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Dat", |
|
"middle": [ |
|
"Quoc" |
|
], |
|
"last": "Nguyen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "VinAI Research", |
|
"location": { |
|
"settlement": "Hanoi", |
|
"country": "Vietnam" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We present the first multi-task learning model-named PhoNLP-for joint Vietnamese part-of-speech (POS) tagging, named entity recognition (NER) and dependency parsing. Experiments on Vietnamese benchmark datasets show that PhoNLP produces state-of-the-art results, outperforming a single-task learning approach that fine-tunes the pre-trained Vietnamese language model PhoBERT (Nguyen and Nguyen, 2020) for each task independently. We publicly release PhoNLP as an open-source toolkit under the Apache License 2.0. Although we specify PhoNLP for Vietnamese, our PhoNLP training and evaluation command scripts in fact can directly work for other languages that have a pre-trained BERT-based language model and gold annotated corpora available for the three tasks of POS tagging, NER and dependency parsing. We hope that PhoNLP can serve as a strong baseline and useful toolkit for future NLP research and applications to not only Vietnamese but also the other languages. Our", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We present the first multi-task learning model-named PhoNLP-for joint Vietnamese part-of-speech (POS) tagging, named entity recognition (NER) and dependency parsing. Experiments on Vietnamese benchmark datasets show that PhoNLP produces state-of-the-art results, outperforming a single-task learning approach that fine-tunes the pre-trained Vietnamese language model PhoBERT (Nguyen and Nguyen, 2020) for each task independently. We publicly release PhoNLP as an open-source toolkit under the Apache License 2.0. Although we specify PhoNLP for Vietnamese, our PhoNLP training and evaluation command scripts in fact can directly work for other languages that have a pre-trained BERT-based language model and gold annotated corpora available for the three tasks of POS tagging, NER and dependency parsing. We hope that PhoNLP can serve as a strong baseline and useful toolkit for future NLP research and applications to not only Vietnamese but also the other languages. Our", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Vietnamese NLP research has been significantly explored recently. It has been boosted by the success of the national project on Vietnamese language and speech processing (VLSP) KC01.01/2006-2010 and VLSP workshops that have run shared tasks since 2013. 1 Fundamental tasks of POS tagging, NER and dependency parsing thus play important roles, providing useful features for many downstream application tasks such as machine translation (Tran et al., 2016) , sentiment analysis (Bang and Sornlertlamvanich, 2018) , relation extraction (To and Do, 2020), semantic parsing , open information extraction (Truong et al., 2017) and question answering 1 https://vlsp.org.vn/ (Nguyen et al., 2017; Le-Hong and Bui, 2018) . Thus, there is a need to develop NLP toolkits for linguistic annotations w.r.t. Vietnamese POS tagging, NER and dependency parsing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 435, |
|
"end": 454, |
|
"text": "(Tran et al., 2016)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 476, |
|
"end": 510, |
|
"text": "(Bang and Sornlertlamvanich, 2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 599, |
|
"end": 620, |
|
"text": "(Truong et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 667, |
|
"end": 688, |
|
"text": "(Nguyen et al., 2017;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 689, |
|
"end": 711, |
|
"text": "Le-Hong and Bui, 2018)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "VnCoreNLP (Vu et al., 2018) is the previous public toolkit employing traditional featurebased machine learning models to handle those Vietnamese NLP tasks. However, VnCoreNLP is now no longer considered state-of-the-art because its performance results are significantly outperformed by ones obtained when fine-tuning PhoBERT-the current state-of-the-art monolingual pre-trained language model for Vietnamese . Note that there are no publicly available fine-tuned BERT-based models for the three Vietnamese tasks. Assuming that there would be, a potential drawback might be that an NLP package wrapping such fine-tuned BERTbased models would take a large storage space, i.e. three times larger than the storage space used by a BERT model (Devlin et al., 2019) , thus it would not be suitable for practical applications that require a smaller storage space. Jointly multi-task learning is a promising solution as it might help reduce the storage space. In addition, POS tagging, NER and dependency parsing are related tasks: POS tags are essential input features used for dependency parsing and POS tags are also used as additional features for NER. Jointly multi-task learning thus might also help improve the performance results against the single-task learning (Ruder, 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 27, |
|
"text": "(Vu et al., 2018)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 737, |
|
"end": 758, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1262, |
|
"end": 1275, |
|
"text": "(Ruder, 2019)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we present a new multi-task learning model-named PhoNLP-for joint POS tagging, NER and dependency parsing. In particular, given an input sentence of words to PhoNLP, an encoding layer generates contextualized word embeddings that represent the input words. These contextualized word embeddings are fed into a POS tagging layer that is in fact a linear prediction layer (Devlin et al., 2019) tag is then represented by two \"soft\" embeddings that are later fed into NER and dependency parsing layers separately. More specifically, based on both the contextualized word embeddings and the \"soft\" POS tag embeddings, the NER layer uses a linear-chain CRF predictor (Lafferty et al., 2001) to predict NER labels for the input words, while the dependency parsing layer uses a Biaffine classifier (Dozat and Manning, 2017) to predict dependency arcs between the words and another Biaffine classifier to label the predicted arcs. Our contributions are summarized as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 384, |
|
"end": 405, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 676, |
|
"end": 699, |
|
"text": "(Lafferty et al., 2001)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 805, |
|
"end": 830, |
|
"text": "(Dozat and Manning, 2017)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 To the best of our knowledge, PhoNLP is the first proposed model to jointly learn POS tagging, NER and dependency parsing for Vietnamese.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We discuss a data leakage issue in the Vietnamese benchmark datasets, that has not yet been pointed out before. Experiments show that PhoNLP obtains state-of-the-art performance results, outperforming the PhoBERT-based single task learning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We publicly release PhoNLP as an open-source toolkit that is simple to setup and efficiently run from both the command-line and Python API. We hope that PhoNLP can serve as a strong baseline and useful toolkit for future NLP research and downstream applications.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Figure 1 illustrates our PhoNLP architecture that can be viewed as a mixture of a BERT-based encoding layer and three decoding layers of POS tagging, NER and dependency parsing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model description", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Given an input sentence consisting of n word tokens w 1 , w 2 , ..., w n , the encoding layer employs PhoBERT to generate contextualized latent feature embeddings e i each representing the i th word w i :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoder & Contextualized embeddings", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "e i = PhoBERT base w 1:n , i", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Encoder & Contextualized embeddings", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In particular, the encoding layer employs the PhoBERT base version. Because PhoBERT uses BPE (Sennrich et al., 2016) to segment the input sentence with subword units, the encoding layer in fact represents the i th word w i by using the contextualized embedding of its first subword.", |
|
"cite_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 116, |
|
"text": "(Sennrich et al., 2016)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoder & Contextualized embeddings", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Following a common manner when fine-tuning a pre-trained language model for a sequence labeling task (Devlin et al., 2019) , the POS tagging layer is a linear prediction layer that is appended on top of the encoder. In particular, the POS tagging layer feeds the contextualized word embeddings e i into a feed-forward network (FFNN POS ) followed by a softmax predictor for POS tag prediction:", |
|
"cite_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 122, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "POS tagging", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p i = softmax FFNN POS e i", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "POS tagging", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where the output layer size of FFNN POS is the number of POS tags. Based on probability vectors p i , a cross-entropy objective loss L POS is calculated for POS tagging during training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "POS tagging", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The NER layer creates a sequence of vectors v 1:n in which each v i is resulted in by concatenating the contextualized word embedding e i and a \"soft\" POS tag embedding t", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "(1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "i : v i = e i \u2022 t (1) i (3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where following Hashimoto et al. (2017) , the \"soft\" POS tag embedding t", |
|
"cite_spans": [ |
|
{ |
|
"start": 16, |
|
"end": 39, |
|
"text": "Hashimoto et al. (2017)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "( 1)i is computed by multiplying a label weight matrix W (1) with the corresponding probability vector p i :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "t (1) i = W (1) p i", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The NER layer then passes each vector v i into a FFNN (FFNN NER ):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "h i = FFNN NER v i (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where the output layer size of FFNN NER is the number of BIO-based NER labels. The NER layer feeds the output vectors h i into a linear-chain CRF predictor for NER label prediction (Lafferty et al., 2001) . A cross-entropy loss L NER is calculated for NER during training while the Viterbi algorithm is used for inference.", |
|
"cite_spans": [ |
|
{ |
|
"start": 181, |
|
"end": 204, |
|
"text": "(Lafferty et al., 2001)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The dependency parsing layer creates vectors z 1:n in which each z i is resulted in by concatenating e i and another \"soft\" POS tag embedding t", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency parsing", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "(2) i : Dozat and Manning (2017) , the dependency parsing layer uses FFNNs to split z i into head and dependent representations:", |
|
"cite_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 32, |
|
"text": "Dozat and Manning (2017)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency parsing", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "z i = e i \u2022 t (2) i (5) t (2) i = W (2) p i Following", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency parsing", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "h (A-H) i = FFNN Arc-Head z i (6) h (A-D) i = FFNN Arc-Dep z i (7) h (L-H) i = FFNN Label-Head z i (8) h (L-D) i = FFNN Label-Dep z i (9)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency parsing", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "To predict potential dependency arcs, based on input vectors h", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency parsing", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "(A-H) i and h (A-D) j", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency parsing", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": ", the parsing layer uses a Biaffine classifier's variant (Qi et al., 2018) that additionally takes into account the distance and relative ordering between two words to produce a probability distribution of arc heads for each word. For inference, the Chu-Liu/Edmonds' algorithm is used to find a maximum spanning tree (Chu and Liu, 1965; Edmonds, 1967) . The parsing layer also uses another Biaffine classifier to label the predicted arcs, based on input vectors h", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 74, |
|
"text": "(Qi et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 317, |
|
"end": 336, |
|
"text": "(Chu and Liu, 1965;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 351, |
|
"text": "Edmonds, 1967)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency parsing", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "(L-H) i and h (L-D) j", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency parsing", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": ". An objective loss L DEP is computed by summing a cross entropy loss for unlabeled dependency parsing and another cross entropy loss for dependency label prediction during training based on gold arcs and arc labels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency parsing", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "The final training objective loss L of our model PhoNLP is the weighted sum of the POS tagging loss L POS , the NER loss L NER and the dependency parsing loss L DEP :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint multi-task learning", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "L = \u03bb 1 L POS +\u03bb 2 L NER +(1\u2212\u03bb 1 \u2212\u03bb 2 )L DEP (10)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint multi-task learning", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "Discussion: Our PhoNLP can be viewed as an extension of previous joint POS tagging and dependency parsing models (Hashimoto et al., 2017; Li et al., 2018; Nguyen and Verspoor, 2018; Nguyen, 2019; Kondratyuk and Straka, 2019) , where we additionally incorporate a CRF-based prediction layer for NER. Unlike Hashimoto et al. (2017) , Nguyen and Verspoor (2018) , Li et al. (2018) and Nguyen (2019) that use BiLSTM-based encoders to extract contextualized feature embeddings, we use a BERT-based encoder. Kondratyuk and Straka (2019) also employ a BERT-based encoder. However, different from PhoNLP where we construct a hierarchical architecture over the POS tagging and dependency parsing layers, Kondratyuk ", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 137, |
|
"text": "(Hashimoto et al., 2017;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 154, |
|
"text": "Li et al., 2018;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 181, |
|
"text": "Nguyen and Verspoor, 2018;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 182, |
|
"end": 195, |
|
"text": "Nguyen, 2019;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 196, |
|
"end": 224, |
|
"text": "Kondratyuk and Straka, 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 329, |
|
"text": "Hashimoto et al. (2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 332, |
|
"end": 358, |
|
"text": "Nguyen and Verspoor (2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 361, |
|
"end": 377, |
|
"text": "Li et al. (2018)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 382, |
|
"end": 395, |
|
"text": "Nguyen (2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 502, |
|
"end": 530, |
|
"text": "Kondratyuk and Straka (2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 695, |
|
"end": 705, |
|
"text": "Kondratyuk", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint multi-task learning", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "To conduct experiments, we use the benchmark datasets of the VLSP 2013 POS tagging dataset, 3 the VLSP 2016 NER dataset and the VnDT dependency treebank v1.1 Nguyen et al. (2014) , following the setup used by the Vn-CoreNLP toolkit (Vu et al., 2018) . Here, VnDT is converted from the Vietnamese constituent treebank (Nguyen et al., 2009) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 158, |
|
"end": 178, |
|
"text": "Nguyen et al. (2014)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 232, |
|
"end": 249, |
|
"text": "(Vu et al., 2018)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 317, |
|
"end": 338, |
|
"text": "(Nguyen et al., 2009)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "We further discover an issue of data leakage, that has not yet been pointed out before. That is, all sentences from the VLSP 2016 NER dataset and the VnDT treebank are included in the VLSP 2013 POS tagging dataset. In particular, 90+% of sentences from both validation and test sets for NER and dependency parsing are included in the POS tagging training set, resulting in an unrealistic evaluation scenario where the POS tags are used as input features for NER and dependency parsing. To handle the data leakage issue, we have to re-split the VLSP 2013 POS tagging dataset to avoid the data leakage issue: The POS tagging validation/test set now only contains sentences that appear in the union of the NER and dependency parsing validation/test sets (i.e. the validation/test sentences for NER and dependency parsing only appear in the POS tagging validation/test set). In addition, there are 594 duplicated sentences in the VLSP 2013 POS tagging dataset (here, sentence duplication is not found in the union of the NER and dependency parsing sentences). Thus we have to perform duplication removal on the POS tagging dataset. Table 1 details the statistics of the experimental datasets. 3 https://vlsp.org.vn/vlsp2013/eval", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1128, |
|
"end": 1189, |
|
"text": "Table 1 details the statistics of the experimental datasets.", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data leakage issue:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "PhoNLP is implemented based on PyTorch (Paszke et al., 2019) , employing the PhoBERT encoder implementation available from the transformers library (Wolf et al., 2020) and the Biaffine classifier implementation from Qi et al. (2020) . We set both the label weight matrices W (1) and W (2) to have 100 rows, resulting in 100-dimensional soft POS tag embeddings. In addition, following Qi et al. (2018 Qi et al. ( , 2020 , FFNNs in equations 6-9 use 400dimensional output layers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 60, |
|
"text": "(Paszke et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 148, |
|
"end": 167, |
|
"text": "(Wolf et al., 2020)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 232, |
|
"text": "Qi et al. (2020)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 285, |
|
"end": 288, |
|
"text": "(2)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 384, |
|
"end": 399, |
|
"text": "Qi et al. (2018", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 400, |
|
"end": 418, |
|
"text": "Qi et al. ( , 2020", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "We use the AdamW optimizer (Loshchilov and Hutter, 2019) and a fixed batch size at 32, and train for 40 epochs. The sizes of training sets are different, in which the POS tagging training set is the largest, consisting of 23906 sentences. Thus for each training epoch, we repeatedly sample from the NER and dependency parsing training sets to fill the gaps between the training set sizes. We perform a grid search to select the initial AdamW learning rate, \u03bb 1 and \u03bb 2 . We find the optimal initial AdamW learning rate, \u03bb 1 and \u03bb 2 at 1e-5, 0.4 and 0.2, respectively. Here, we compute the average of the POS tagging accuracy, NER F 1 -score and dependency parsing score LAS after each training epoch on the validation sets. We select the model checkpoint that produces the highest average score over the validation sets to apply to the test sets. Each of our reported scores is an average over 5 runs with different random seeds. Table 2 presents results obtained for our PhoNLP and compares them with those of a baseline approach of single-task training. For the single-task training approach: (i) We follow a common approach to fine-tune a pre-trained language model for POS tagging, appending a linear prediction layer on top of PhoBERT, as briefly described in Section 2.2. (ii) For NER, instead of a linear prediction layer, we append a CRF prediction layer on top of PhoBERT. (iii) For dependency parsing, predicted POS tags are produced by the learned single-task POS tagging model; then POS tags are represented by embeddings that are concatenated with the corresponding PhoBERT-based contextualized word embeddings, resulting in a sequence of input vectors for the Biaffine-based classifiers for dependency parsing (Qi et al., 2018) . Here, the single-task training approach is based on the PhoBERT base version, employing the same hyper- Table 2 : Performance results (in %) on the test sets for POS tagging (i.e. accuracy), NER (i.e. F 1 -score) and dependency parsing (i.e. LAS and UAS scores). \"Leak.\" abbreviates \"leakage\", denoting the results obtained w.r.t. the data leakage issue. \"Re-spl\" denotes the results obtained w.r.t. the data re-split and duplication removal for POS tagging to avoid the data leakage issue. \"Single-task\" refers to as the single-task training approach. \u2020 denotes scores taken from the PhoBERT paper . Note that \"Singletask\" NER is not affected by the data leakage issue. parameter tuning and model selection strategy that we use for PhoNLP. Note that PhoBERT helps produce state-of-theart results for multiple Vietnamese NLP tasks (including but not limited to POS tagging, NER and dependency parsing in a single-task training strategy), and obtains higher performance results than VnCoreNLP. However, in both the PhoBERT and VnCoreNLP papers Vu et al., 2018) , results for POS tagging and dependency parsing are reported w.r.t. the data leakage issue. Our \"Single-task\" results in Table 2 regarding \"Re-spl\" (i.e. the data re-split and duplication removal for POS tagging to avoid the data leakage issue) can be viewed as new PhoBERT results for a proper experimental setup. Table 2 shows that in both setups \"Leak.\" and \"Re-spl\", our joint multi-task training approach PhoNLP performs better than the PhoBERT-based single-task training approach, thus resulting in state-of-theart performances for the three tasks of Vietnamese POS tagging, NER and dependency parsing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 56, |
|
"text": "(Loshchilov and Hutter, 2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1724, |
|
"end": 1741, |
|
"text": "(Qi et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 2787, |
|
"end": 2803, |
|
"text": "Vu et al., 2018)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 930, |
|
"end": 937, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1848, |
|
"end": 1855, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2926, |
|
"end": 2933, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 3120, |
|
"end": 3127, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Implementation", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "We present in this section a basic usage of our PhoNLP toolkit. We make PhoNLP simple to setup, i.e. users can install PhoNLP from either source or pip (e.g. pip3 install phonlp). We also aim to make PhoNLP simple to run from both the command-line and the Python API. For example, annotating a corpus with POS tagging, NER and dependency parsing can be performed by using a simple command as in Figure 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 395, |
|
"end": 403, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "PhoNLP toolkit", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Assume that the input file \"input.txt\" in Figure 2 contains a sentence \"T\u00f4i \u0111ang l\u00e0m_vi\u1ec7c t\u1ea1i python3 run_phonlp.py --save_dir ./pretrained_phonlp --mode annotate --input_file input.txt --output_file output.txt Figure 2 : Minimal command to run PhoNLP. Here \"save_dir\" denote the path to the local machine folder that stores the pre-trained PhoNLP model. CH O 3 punct Table 3 : The output in the output file \"output.txt\" for the sentence \"T\u00f4i \u0111ang l\u00e0m_vi\u1ec7c t\u1ea1i VinAI .\" from the input file \"input.txt\" in Figure 2 . The output is formatted with 6 columns representing word index, word form, POS tag, NER label, head index of the current word and its dependency relation type.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 50, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 219, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 368, |
|
"end": 375, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 505, |
|
"end": 513, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "PhoNLP toolkit", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "VinAI .\" (I T\u00f4i am \u0111ang working l\u00e0m_vi\u1ec7c at t\u1ea1i VinAI). Table 3 shows the annotated output in plain text form for this sentence. Similarly, we also get the same output by using the Python API as simple as in Figure 3 . Furthermore, commands to (re-)train and evaluate PhoNLP using gold annotated corpora are detailed in the PhoNLP GitHub repository. Note that it is absolutely possible to directly employ our PhoNLP (re-)training and evaluation command scripts for other languages that have gold annotated corpora available for the three tasks and a pre-trained BERT-based language model available from the transformers library.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 56, |
|
"end": 63, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 208, |
|
"end": 216, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "PhoNLP toolkit", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We perform a sole CPU-based speed test using a personal computer with Intel Core i5 8265U 1.6GHz & 8GB of memory. For a GPUbased speed test, we employ a machine with a single NVIDIA RTX 2080Ti GPU. For performing the three NLP tasks jointly, PhoNLP obtains a speed at 15 sentences per second for the CPU-based test and 129 sentences per second for the GPU-based test, respectively, with an average of 23 word tokens per sentence and a batch size of 8.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Speed test:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We have presented the first multi-task learning model PhoNLP for joint POS tagging, NER and dependency parsing in Vietnamese. Experiments on Vietnamese benchmark datasets show that PhoNLP outperforms its strong fine-tuned PhoBERT-based import phonlp # Automatically download the pre-trained PhoNLP model # and save it in a local machine folder phonlp.download(save_dir='./pretrained_phonlp') # Load the pre-trained PhoNLP model model = phonlp.load(save_dir='./pretrained_phonlp') # Annotate a corpus model.annotate(input_file='input.txt', output_file='output.txt') # Annotate a sentence model.print_out(model.annotate(text=\"T\u00f4i \u0111ang l\u00e0m_vi\u1ec7c t\u1ea1i VinAI .\")) single-task training baseline, producing state-ofthe-art performance results. We publicly release PhoNLP as an easy-to-use open-source toolkit and hope that PhoNLP can facilitate future NLP research and applications. In future work, we will also apply PhoNLP to other languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and future work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In our preliminary experiments, not feeding the POS tag embeddings into the dependency parsing layer decreases the performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Sentiment classification for hotel booking review based on sentence dependency structure and subopinion analysis", |
|
"authors": [ |
|
{ |
|
"first": "Tran", |
|
"middle": [], |
|
"last": "Sy Bang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Virach", |
|
"middle": [], |
|
"last": "Sornlertlamvanich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IEICE Transactions on Information and Systems", |
|
"volume": "101", |
|
"issue": "4", |
|
"pages": "909--916", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tran Sy Bang and Virach Sornlertlamvanich. 2018. Sentiment classification for hotel booking review based on sentence dependency structure and sub- opinion analysis. IEICE Transactions on Informa- tion and Systems, E101.D(4):909-916.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "On the Shortest Arborescence of a Directed Graph", |
|
"authors": [ |
|
{ |
|
"first": "Yoeng-Jin", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tseng-Hong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1965, |
|
"venue": "Science Sinica", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "1396--1400", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoeng-Jin Chu and Tseng-Hong Liu. 1965. On the Shortest Arborescence of a Directed Graph. Science Sinica, 14:1396-1400.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of NAACL, pages 4171- 4186.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Deep Biaffine Attention for Neural Dependency Parsing", |
|
"authors": [ |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Dozat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Timothy Dozat and Christopher D. Manning. 2017. Deep Biaffine Attention for Neural Dependency Parsing. In Proceedings of ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Optimum Branchings", |
|
"authors": [], |
|
"year": 1967, |
|
"venue": "Journal of Research of the National Bureau of Standards", |
|
"volume": "71", |
|
"issue": "", |
|
"pages": "233--240", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jack Edmonds. 1967. Optimum Branchings. Journal of Research of the National Bureau of Standards, 71:233-240.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A Joint Many-Task Model: Growing a Neural Network for Multiple NLP Tasks", |
|
"authors": [ |
|
{ |
|
"first": "Kazuma", |
|
"middle": [], |
|
"last": "Hashimoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshimasa", |
|
"middle": [], |
|
"last": "Tsuruoka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1923--1933", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kazuma Hashimoto, Caiming Xiong, Yoshimasa Tsu- ruoka, and Richard Socher. 2017. A Joint Many- Task Model: Growing a Neural Network for Multi- ple NLP Tasks. In Proceedings of EMNLP, pages 1923-1933.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "75 Languages, 1 Model: Parsing Universal Dependencies Universally", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Kondratyuk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Milan", |
|
"middle": [], |
|
"last": "Straka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of EMNLP-IJCNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2779--2795", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Kondratyuk and Milan Straka. 2019. 75 Lan- guages, 1 Model: Parsing Universal Dependencies Universally. In Proceedings of EMNLP-IJCNLP, pages 2779-2795.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Conditional Random Fields: Probabilistic Models for Segmenting and Labeling Sequence Data", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [ |
|
"C N" |
|
], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "282--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John D. Lafferty, Andrew McCallum, and Fernando C. N. Pereira. 2001. Conditional Random Fields: Probabilistic Models for Segmenting and Labeling Sequence Data. In Proceedings of ICML, pages 282-289.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A Factoid Question Answering System for Vietnamese", |
|
"authors": [ |
|
{ |
|
"first": "Phuong", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-", |
|
"middle": [], |
|
"last": "Hong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Duc-Thien", |
|
"middle": [], |
|
"last": "Bui", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Companion Proceedings of the The Web Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1049--1055", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Phuong Le-Hong and Duc-Thien Bui. 2018. A Fac- toid Question Answering System for Vietnamese. In Companion Proceedings of the The Web Conference 2018, page 1049-1055.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Joint Learning of POS and Dependencies for Multilingual Universal Dependency Parsing", |
|
"authors": [ |
|
{ |
|
"first": "Zuchao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shexia", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhuosheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the CoNLL 2018 Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--73", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zuchao Li, Shexia He, Zhuosheng Zhang, and Hai Zhao. 2018. Joint Learning of POS and Dependen- cies for Multilingual Universal Dependency Parsing. In Proceedings of the CoNLL 2018 Shared Task, pages 65-73.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Decoupled Weight Decay Regularization", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Loshchilov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Hutter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Loshchilov and Frank Hutter. 2019. Decoupled Weight Decay Regularization. In Proceedings of ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "A Pilot Study of Text-to-SQL Semantic Parsing for Vietnamese", |
|
"authors": [ |
|
{ |
|
"first": "Anh", |
|
"middle": [ |
|
"Tuan" |
|
], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mai", |
|
"middle": [ |
|
"Hoang" |
|
], |
|
"last": "Dao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dat Quoc", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4079--4085", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anh Tuan Nguyen, Mai Hoang Dao, and Dat Quoc Nguyen. 2020. A Pilot Study of Text-to-SQL Se- mantic Parsing for Vietnamese. In Findings of EMNLP 2020, pages 4079-4085.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "A neural joint model for Vietnamese word segmentation, POS tagging and dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dat Quoc Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of ALTA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "28--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dat Quoc Nguyen. 2019. A neural joint model for Viet- namese word segmentation, POS tagging and depen- dency parsing. In Proceedings of ALTA, pages 28- 34.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "PhoBERT: Pre-trained language models for Vietnamese", |
|
"authors": [ |
|
{ |
|
"first": "Anh", |
|
"middle": [ |
|
"Tuan" |
|
], |
|
"last": "Dat Quoc Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1037--1042", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dat Quoc Nguyen and Anh Tuan Nguyen. 2020. PhoBERT: Pre-trained language models for Viet- namese. In Findings of EMNLP 2020, pages 1037- 1042.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Ripple Down Rules for Question Answering", |
|
"authors": [], |
|
"year": 2017, |
|
"venue": "Semantic Web", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "511--532", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dat Quoc Nguyen, Dai Quoc Nguyen, and Son Bao Pham. 2017. Ripple Down Rules for Question An- swering. Semantic Web, 8(4):511-532.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "From Treebank Conversion to Automatic Dependency Parsing for Vietnamese", |
|
"authors": [], |
|
"year": 2014, |
|
"venue": "Proceedings of NLDB", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "196--207", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dat Quoc Nguyen, Dai Quoc Nguyen, Son Bao Pham, Phuong-Thai Nguyen, and Minh Le Nguyen. 2014. From Treebank Conversion to Automatic Depen- dency Parsing for Vietnamese. In Proceedings of NLDB, pages 196-207.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "An improved neural network model for joint POS tagging and dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Karin", |
|
"middle": [], |
|
"last": "Dat Quoc Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Verspoor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the CoNLL 2018 Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "81--91", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dat Quoc Nguyen and Karin Verspoor. 2018. An improved neural network model for joint POS tag- ging and dependency parsing. In Proceedings of the CoNLL 2018 Shared Task, pages 81-91.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "VLSP Shared Task: Named Entity Recognition", |
|
"authors": [ |
|
{ |
|
"first": "Huyen", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quyen", |
|
"middle": [], |
|
"last": "Ngo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luong", |
|
"middle": [], |
|
"last": "Vu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vu", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hien", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Journal of Computer Science and Cybernetics", |
|
"volume": "34", |
|
"issue": "4", |
|
"pages": "283--294", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huyen Nguyen, Quyen Ngo, Luong Vu, Vu Tran, and Hien Nguyen. 2019. VLSP Shared Task: Named Entity Recognition. Journal of Computer Science and Cybernetics, 34(4):283-294.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Building a Large Syntactically-Annotated Corpus of Vietnamese", |
|
"authors": [ |
|
{ |
|
"first": "Phuong-Thai", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuan-Luong", |
|
"middle": [], |
|
"last": "Vu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thi-Minh-Huyen", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong-Phuong", |
|
"middle": [], |
|
"last": "Van-Hiep Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of LAW", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "182--185", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Phuong-Thai Nguyen, Xuan-Luong Vu, Thi-Minh- Huyen Nguyen, Van-Hiep Nguyen, and Hong- Phuong Le. 2009. Building a Large Syntactically- Annotated Corpus of Vietnamese. In Proceedings of LAW, pages 182-185.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "PyTorch: An Imperative Style, High-Performance Deep Learning Library", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Paszke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of NeurIPS 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8024--8035", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Paszke, Sam Gross, et al. 2019. PyTorch: An Imperative Style, High-Performance Deep Learning Library. In Proceedings of NeurIPS 2019, pages 8024-8035.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Universal Dependency parsing from scratch", |
|
"authors": [ |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Dozat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuhao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the CoNLL 2018 Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "160--170", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng Qi, Timothy Dozat, Yuhao Zhang, and Christo- pher D. Manning. 2018. Universal Dependency parsing from scratch. In Proceedings of the CoNLL 2018 Shared Task, pages 160-170.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Stanza: A python natural language processing toolkit for many human languages", |
|
"authors": [ |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuhao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuhui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Bolton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of ACL: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "101--108", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng Qi, Yuhao Zhang, Yuhui Zhang, Jason Bolton, and Christopher D. Manning. 2020. Stanza: A python natural language processing toolkit for many human languages. In Proceedings of ACL: System Demonstrations, pages 101-108.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Neural Transfer Learning for Natural Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Ruder. 2019. Neural Transfer Learning for Natural Language Processing. Ph.D. thesis, Na- tional University of Ireland, Galway.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Neural Machine Translation of Rare Words with Subword Units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1715--1725", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural Machine Translation of Rare Words with Subword Units. In Proceedings of ACL, pages 1715-1725.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Extracting triples from vietnamese text to create knowledge graph", |
|
"authors": [ |
|
{ |
|
"first": "Huong", |
|
"middle": [], |
|
"last": "Duong To", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phuc", |
|
"middle": [], |
|
"last": "Do", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of KSE", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "219--223", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huong Duong To and Phuc Do. 2020. Extracting triples from vietnamese text to create knowledge graph. In Proceedings of KSE, pages 219-223.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "A reordering model for Vietnamese-English statistical machine translation using dependency information", |
|
"authors": [ |
|
{ |
|
"first": "Huyen Thuong", |
|
"middle": [], |
|
"last": "Viet Hong Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thu", |
|
"middle": [ |
|
"Hoai" |
|
], |
|
"last": "Vu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vinh", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh", |
|
"middle": [ |
|
"Le" |
|
], |
|
"last": "Van Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of RIVF", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "125--130", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Viet Hong Tran, Huyen Thuong Vu, Thu Hoai Pham, Vinh Van Nguyen, and Minh Le Nguyen. 2016. A reordering model for Vietnamese-English statistical machine translation using dependency information. In Proceedings of RIVF, pages 125-130.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Vietnamese open information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Diem", |
|
"middle": [], |
|
"last": "Truong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Duc-Thuan", |
|
"middle": [], |
|
"last": "Vo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Uyen Trang", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of SoICT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "135--142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diem Truong, Duc-Thuan Vo, and Uyen Trang Nguyen. 2017. Vietnamese open information extraction. In Proceedings of SoICT, page 135-142.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "VnCoreNLP: A Vietnamese Natural Language Processing Toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Thanh", |
|
"middle": [], |
|
"last": "Vu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dat Quoc Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dai Quoc Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of NAACL: Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "56--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thanh Vu, Dat Quoc Nguyen, Dai Quoc Nguyen, Mark Dras, and Mark Johnson. 2018. VnCoreNLP: A Vietnamese Natural Language Processing Toolkit. In Proceedings of NAACL: Demonstrations, pages 56-60.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Transformers: State-of-the-Art Natural Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of EMNLP 2020: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "38--45", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, et al. 2020. Transform- ers: State-of-the-Art Natural Language Processing. In Proceedings of EMNLP 2020: System Demon- strations, pages 38-45.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "Illustration of our PhoNLP model.", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"text": "A simple and complete example code for using PhoNLP in Python.", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "to predict POS tags for the corresponding input words. Each predicted POS", |
|
"content": "<table><tr><td/><td/><td/><td>sub</td><td/><td>vmod</td></tr><tr><td/><td/><td colspan=\"2\">BIAFFINE</td><td/><td>BIAFFINE</td></tr><tr><td>Dependency</td><td/><td/><td/><td/></tr><tr><td>parsing</td><td>FFNN</td><td/><td/><td>FFNN</td><td>FFNN</td></tr><tr><td>CRF</td><td>O</td><td>O</td><td/><td/><td>B-LOC</td></tr><tr><td>NER</td><td>FFNN</td><td colspan=\"2\">FFNN</td><td/><td>FFNN</td></tr><tr><td/><td>PRON</td><td/><td>VERB</td><td/><td>NOUN</td></tr><tr><td>POS Tagging</td><td>softmax</td><td/><td>softmax</td><td/><td>softmax</td></tr><tr><td/><td>FFNN</td><td/><td>FFNN</td><td/><td>FFNN</td></tr><tr><td>BERT-based encoder</td><td/><td colspan=\"3\">Pre-trained BERT-based LM</td></tr><tr><td/><td>\u0110\u00e2y This</td><td/><td>l\u00e0 is</td><td/><td>H\u00e0_N\u1ed9i Ha_Noi</td></tr><tr><td>ID</td><td>Form POS</td><td/><td>NER</td><td colspan=\"2\">Head DepRel</td></tr><tr><td>1</td><td colspan=\"2\">\u0110\u00e2y This PRON</td><td>O</td><td>2</td><td>sub</td></tr><tr><td>2</td><td colspan=\"2\">l\u00e0 is VERB</td><td>O</td><td>0</td><td>root</td></tr><tr><td>3</td><td colspan=\"4\">H\u00e0_N\u1ed9i Ha_Noi NOUN B-LOC 2</td><td>vmod</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "", |
|
"content": "<table><tr><td>-</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "\u2020 93.69 78.77 \u2020 85.22 \u2020", |
|
"content": "<table><tr><td/><td>Model</td><td>POS NER LAS</td><td>UAS</td></tr><tr><td>Leak.</td><td colspan=\"2\">Single-task 96.7 PhoNLP 96.76 94.41 79.11</td><td>85.47</td></tr><tr><td>Re-spl</td><td colspan=\"2\">Single-task 93.68 93.69 77.89 PhoNLP 93.88 94.51 78.17</td><td>84.78 84.95</td></tr></table>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |