|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T10:38:31.433932Z" |
|
}, |
|
"title": "Validating Label Consistency in NER Data Annotation", |
|
"authors": [ |
|
{ |
|
"first": "Qingkai", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mengxia", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Wenhao", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Tianwen", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "mjiang2@nd.edu" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "\u2021twjiang@ir.hit.edu.cn" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Data annotation plays a crucial role in ensuring your named entity recognition (NER) projects are trained with the correct information to learn from. Producing the most accurate labels is a challenge due to the complexity involved with annotation. Label inconsistency between multiple subsets of data annotation (e.g., training set and test set, or multiple training subsets) is an indicator of label mistakes. In this work, we present an empirical method to explore the relationship between label (in-)consistency and NER model performance. It can be used to validate the label consistency (or catch the inconsistency) in multiple sets of NER data annotation. In experiments, our method identified the label inconsistency of test data in SCIERC and CoNLL03 datasets (with 26.7% and 5.4% label mistakes). It validated the consistency in the corrected version of both datasets.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Data annotation plays a crucial role in ensuring your named entity recognition (NER) projects are trained with the correct information to learn from. Producing the most accurate labels is a challenge due to the complexity involved with annotation. Label inconsistency between multiple subsets of data annotation (e.g., training set and test set, or multiple training subsets) is an indicator of label mistakes. In this work, we present an empirical method to explore the relationship between label (in-)consistency and NER model performance. It can be used to validate the label consistency (or catch the inconsistency) in multiple sets of NER data annotation. In experiments, our method identified the label inconsistency of test data in SCIERC and CoNLL03 datasets (with 26.7% and 5.4% label mistakes). It validated the consistency in the corrected version of both datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Named entity recognition (NER) is one of the foundations of many downstream tasks such as relation extraction, event detection, and knowledge graph construction. NER models require vast amounts of labeled data to learn and identify patterns that humans cannot continuously. It is really about getting accurate data to train the models. When end-to-end neural models achieve excellent performance on NER in various domains (Lample et al., 2016; Liu et al., 2018; Luan et al., 2018; Zeng et al., , 2021 , building useful and challenging NER benchmarks, such as CoNLL03, WNUT16, and SCIERC, contributes significantly to the research community.", |
|
"cite_spans": [ |
|
{ |
|
"start": 422, |
|
"end": 443, |
|
"text": "(Lample et al., 2016;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 444, |
|
"end": 461, |
|
"text": "Liu et al., 2018;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 462, |
|
"end": 480, |
|
"text": "Luan et al., 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 481, |
|
"end": 500, |
|
"text": "Zeng et al., , 2021", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Data annotation plays a crucial role in building benchmarks and ensuring NLP models are trained with the correct information to learn from (Luan et al., 2018; . Producing the necessary annotation from any asset at scale is a challenge, mainly because of the complexity involved with annotation. Getting the most accurate labels demands time and expertise.", |
|
"cite_spans": [ |
|
{ |
|
"start": 139, |
|
"end": 158, |
|
"text": "(Luan et al., 2018;", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Label mistakes can hardly be avoided, especially when the labeling process splits the data into multiple sets for distributed annotation. The mistakes cause label inconsistency between subsets of annotated data (e.g., training set and test set or multiple training subsets). For example, in the CoNLL03 dataset (Sang and De Meulder, 2003) , a standard NER benchmark that has been cited over 2,300 times, label mistakes were found in 5.38% of the test set (Wang et al., 2019) . Note that the stateof-the-art results on CoNLL03 have achieved an F1 score of \u223c .93. So even if the label mistakes make up a tiny part, they cannot be negligible when researchers are trying to improve the results further. In the work of Wang et al., five annotators were recruited to correct the label mistakes. Compared to the original test set results, the corrected test set results are more accurate and stable.", |
|
"cite_spans": [ |
|
{ |
|
"start": 311, |
|
"end": 338, |
|
"text": "(Sang and De Meulder, 2003)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 455, |
|
"end": 474, |
|
"text": "(Wang et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, two critical issues were not resolved in this process: i) How to identify label inconsistency between the subsets of annotated data? ii) How to validate that the label consistency was recovered by the correction?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Another example is SCIERC (Luan et al., 2018 ) (cited \u223c50 times) which is a multi-task (including NER) benchmark in AI domain. It has 1,861 sentences for training, 455 for dev, and 551 for test. When we looked at the false predictions given by SCIIE which was a multi-task model released along with the SCIERC dataset, we found that as many as 147 (26.7% of the test set) sentences were not properly annotated. (We also recruited five annotators and counted a mistake when all the annotators report it.) Three examples are given in Table 1 : two of them have wrong entity types; the third has a wrong span boundary. As shown in the experiments section, after the correction, the NER performance becomes more accurate and stable. Table 1 : Three examples to compare original and corrected annotation in the test set of the SCIERC dataset. If the annotation on the test set consistently followed the \"codebook\" that was used to annotate training data, the entities in the first two examples would be labelled as \"Task\" (not \"Method\") for sure.", |
|
"cite_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 44, |
|
"text": "(Luan et al., 2018", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 532, |
|
"end": 539, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 729, |
|
"end": 736, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Corrected Examples Starting from a DP-based solution to the [traveling salesman problem]Method, we present a novel technique ...", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Original Examples", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Starting from a DP-based solution to the [traveling salesman problem]Task, we present a novel technique ... FERRET utilizes a novel approach to [Q/A]Method known as predictive questioning which attempts to identify ... Test:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Original Examples", |
|
"sec_num": null |
|
}, |
|
|
{ |
|
"text": "Test: \"PureTrain\" \"TestTrain\" Figure 1 : Identifying label inconsistency of test set with training set: We sample three exclusive subsets (of size x) from the training set (orange, green, and blue). We use one subset as the new test set (orange). We apply the SCIIE NER model on the new test set. We build three new training sets: i) \"TrainTest\" (blue-red), ii) \"PureTrain\" (green-blue), iii) \"TestTrain\" (red-blue). Results on SCIERC show that the test set (red) is less predictive of training samples (orange) than the training set itself (blue or green). This was not observed on two other datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 38, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Original Examples", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Besides the significant correction on the SCI-ERC dataset, our contributions in this work are as follows: i) an empirical, visual method to identify the label inconsistency between subsets of annotated data (see Figure 1 ), ii) a method to validate the label consistency of corrected data annotation (see Figure 2 ). Experiments show that they are effective on the CoNLL03 and SCIERC datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 212, |
|
"end": 220, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 305, |
|
"end": 313, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Original Examples", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Suppose the labeling processes on two parts of annotated data were consistent. They are likely to be equivalently predictive of each other. In other words, if we train a model with a set of samples from either part A or part B to predict a different set from part A, the performance should be similar.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A method to identify label inconsistency", |
|
"sec_num": "2.1" |
|
}, |
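            {
                "text": "This equal-predictiveness check can be sketched in a few lines of code. The snippet below is only an illustration of the idea, not the authors' released code: train_ner and evaluate_f1 are hypothetical placeholders for any NER training routine and span-level F1 scorer, and the elements of part_a and part_b are assumed to be annotated sentences. If the two parts were labeled consistently, the returned gap should be close to zero.\n\nimport random\nfrom typing import Callable, Sequence\n\n\ndef consistency_gap(part_a: Sequence, part_b: Sequence, x: int,\n                    train_ner: Callable, evaluate_f1: Callable,\n                    seed: int = 0) -> float:\n    # Train one model on x sentences from part A and another on x sentences\n    # from part B, evaluate both on a held-out subset of part A, and return\n    # the F1 gap between the two models.\n    rng = random.Random(seed)\n    idx = list(range(len(part_a)))\n    rng.shuffle(idx)\n    held_out = [part_a[i] for i in idx[:x]]           # new test set drawn from A\n    train_from_a = [part_a[i] for i in idx[x:2 * x]]  # disjoint from held_out\n    train_from_b = [part_b[i] for i in rng.sample(range(len(part_b)), x)]\n    model_a = train_ner(train_from_a)\n    model_b = train_ner(train_from_b)\n    return evaluate_f1(model_a, held_out) - evaluate_f1(model_b, held_out)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A method to identify label inconsistency",
                "sec_num": "2.1"
            },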
|
{ |
|
"text": "Take SCIERC as an example. We were wondering whether the labels in the test set were consistent with those in the training set. Our method to identify the inconsistency is presented in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 193, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A method to identify label inconsistency", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We sample three exclusive subsets (of size x) from the training set. We set x = 550 according to the size of the original test set. We use one of the subsets as the new test set. Then we train the SCIIE NER model (Luan et al., 2018) to perform on the new test set. We build three new training sets to feed into the model:", |
|
"cite_spans": [ |
|
{ |
|
"start": 213, |
|
"end": 232, |
|
"text": "(Luan et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A method to identify label inconsistency", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "\u2022 \"TrainTest\": first fed with one training subset and then the original test set; \u2022 \"PureTrain\": fed with two training subsets;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A method to identify label inconsistency", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "\u2022 \"TestTrain\": first fed with the original test set and then one of the training subsets. Results show that \"TestTrain\" performed the worst at the early stage because the quality of the We corrected z of y + z sentences in the test set. We sampled three exclusive subsets of size x, y, and w from the training set. We use the first subset (of size x) as the new test set. We build four new training sets as shown in the figure and feed them into the SCIIE model (at the top of the figure) . Results show that the label mistakes (red parts of the curves on the left) do hurt the performance no matter fed at the beginning or later; and the corrected test set performs as well as the training set (on the right).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 481, |
|
"end": 488, |
|
"text": "figure)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A method to identify label inconsistency", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "original test set is not reliable. In \"TrainTest\" the performance no longer improved when the model started being fed with the original test set. \"Pure-Train\" performed the best. All the observations conclude that the original test set is less predictive of training samples than the training set itself. It may be due to the issue of label inconsistency. Moreover, we do not have such observations on two other datasets, WikiGold and WNUT16.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A method to identify label inconsistency", |
|
"sec_num": "2.1" |
|
}, |
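            {
                "text": "As a concrete illustration of the construction described in this subsection, the sketch below samples three disjoint subsets of size x = 550 from the SCIERC training sentences and assembles the three two-stage curricula. It is a minimal sketch, not the released SCIIE training pipeline: load_scierc, train_two_stage, and evaluate_f1 are hypothetical helpers for loading the data, training on a first stage before continuing on a second stage, and scoring.\n\nimport random\n\n\ndef build_curricula(train_sentences, test_sentences, x=550, seed=0):\n    # Sample three disjoint subsets of size x from the training set and build\n    # the three two-stage training curricula of Section 2.1.\n    rng = random.Random(seed)\n    idx = list(range(len(train_sentences)))\n    rng.shuffle(idx)\n    new_test = [train_sentences[i] for i in idx[:x]]           # held-out subset (orange)\n    subset_2 = [train_sentences[i] for i in idx[x:2 * x]]      # training subset (green)\n    subset_3 = [train_sentences[i] for i in idx[2 * x:3 * x]]  # training subset (blue)\n    curricula = {\n        'TrainTest': (subset_3, test_sentences),  # a training subset first, then the original test set\n        'PureTrain': (subset_2, subset_3),        # two training subsets\n        'TestTrain': (test_sentences, subset_3),  # the original test set first, then a training subset\n    }\n    return new_test, curricula\n\n\n# Hypothetical usage:\n# train_sents, test_sents = load_scierc()\n# new_test, curricula = build_curricula(train_sents, test_sents)\n# for name, (stage_1, stage_2) in curricula.items():\n#     model = train_two_stage(stage_1, stage_2)  # feed stage_1 first, then stage_2\n#     print(name, evaluate_f1(model, new_test))",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A method to identify label inconsistency",
                "sec_num": "2.1"
            },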
|
{ |
|
"text": "After we corrected the label mistakes, how could we empirically validate the recovery of label consistency? Again, we use a subset of training data as the new test set. We evaluate the predictability of the original wrong test subset, the corrected test subset, and the rest of the training set. We expect to see that the wrong test subset delivers weaker performance and the other two sets make comparable good predictions. Figure 2 illustrates this idea. Take SCIERC as an example. Suppose we corrected z of y + z sentences in the test set. The original wrong test subset (\"Mistake\") and the corrected test subset (\"Correct\") are both of size z.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 425, |
|
"end": 433, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A method to validate label consistency after correction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Here z = 147 and the original good test subset y = 404 (\"Test\"). We sampled three exclusive subsets of size x, y, and w = 804 from the training set (\"Train\"). We use the first subset (of size x) as the new test set. We build four new training sets and feed into the SCIIE model. Each new training set has y + w + z = 1, 355 sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A method to validate label consistency after correction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 \"TestTrainMistake\"/\"TestTrainCorrect\": the original good test subset, the third sampled training subset, and the original wrong test subset (or the corrected test subset); \u2022 \"PureTrainMistake\"/\"PureTrainCorrect\": the second and third sampled training subsets and the original wrong test subset (or the corrected test subset); \u2022 \"MistakeTestTrain\"/\"CorrectTestTrain\": the original wrong test subset (or the corrected test subset), the original good test subset, and the third sampled training subset; \u2022 \"MistakePureTrain\"/\"CorrectPureTrain\": the original wrong test subset (or the corrected test subset) and the second and third sampled training subsets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A method to validate label consistency after correction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Results show that the label mistakes (i.e., original wrong test subset) hurt the model performance whenever being fed at the beginning or later. The corrected test subset delivers comparable performance with the original good test subset and the training set. This demonstrates the label consistency of the corrected test set with the training set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A method to validate label consistency after correction", |
|
"sec_num": "2.2" |
|
}, |
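            {
                "text": "The eight training configurations listed above are ordered concatenations of four pools of sentences. The sketch below is a hypothetical illustration rather than the authors' scripts: it builds the new training sets from the original good test subset, the two sampled training subsets of size y and w, and the wrong versus corrected test subset; each returned list has y + w + z = 1,355 sentences to be fed to the model in the listed order.\n\ndef build_validation_sets(good_test, train_y, train_w, mistake, correct):\n    # good_test: original good test subset (y = 404 sentences)\n    # train_y, train_w: sampled training subsets of size y = 404 and w = 804\n    # mistake / correct: original wrong vs. corrected test subset (z = 147 each)\n    sets = {}\n    for tag, z_part in (('Mistake', mistake), ('Correct', correct)):\n        sets['TestTrain' + tag] = good_test + train_w + z_part\n        sets['PureTrain' + tag] = train_y + train_w + z_part\n        sets[tag + 'TestTrain'] = z_part + good_test + train_w\n        sets[tag + 'PureTrain'] = z_part + train_y + train_w\n    return sets  # each value holds y + w + z = 1,355 sentences, in feeding order",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A method to validate label consistency after correction",
                "sec_num": "2.2"
            },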
|
{ |
|
"text": "The visual results of the proposed methods have been presented in Section 2. Here we deploy five state-of-the-art NER models to investigate their performance on the corrected SCIERC dataset. The NER models are BiLSTM-CRF (Lample et al., 2016) , LM-BiLSTM-CRF (Liu et al., 2018) , singletask and multi-task SCIIE (Luan et al., 2018) , and multi-task DyGIE (Luan et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 242, |
|
"text": "(Lample et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 277, |
|
"text": "(Liu et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 312, |
|
"end": 331, |
|
"text": "(Luan et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 355, |
|
"end": 374, |
|
"text": "(Luan et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results on SCIERC", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "As shown in Table 2 , all NER models deliver better performance on the corrected SCIERC than the original dataset. So the training set is more consistent with the fixed test set than the original wrong test set. In future work, we will explore more baselines in the leaderboard.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 19, |
|
"text": "Table 2", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results on SCIERC", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Based on the correction contributed by (Wang et al., 2019) , we use the proposed method to justify label inconsistency though the label mistakes take \"only\" 5.38%. It also validates the label consistency after recovery. Figure 3(a) shows that starting with the wrong labels in the original test set makes the performance worse than starting with the training set or the good test subset. After label correction, this issue is fixed in Figure 3(b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 58, |
|
"text": "(Wang et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 231, |
|
"text": "Figure 3(a)", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 435, |
|
"end": 446, |
|
"text": "Figure 3(b)", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results on CoNLL03", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "NER is typically cast as a sequence labeling problem and solved by models integrate LSTMs, CRF, and language models (Lample et al., 2016; Liu et al., 2018; Zeng et al., 2019 . Another idea is to generate span candidates and predict their type. Span-based models have been proposed with multitask learning strategies (Luan et al., 2018 (Luan et al., , 2019 . The multiple tasks include concept recognition, relation extraction, and co-reference resolution.", |
|
"cite_spans": [ |
|
{ |
|
"start": 116, |
|
"end": 137, |
|
"text": "(Lample et al., 2016;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 155, |
|
"text": "Liu et al., 2018;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 173, |
|
"text": "Zeng et al., 2019", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 334, |
|
"text": "(Luan et al., 2018", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 335, |
|
"end": 355, |
|
"text": "(Luan et al., , 2019", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Researchers notice label mistakes in many NLP tasks (Manning, 2011; Wang et al., 2019; Eskin, 2000; Kv\u0207to\u0148 and Oliva, 2002) . For instance, it is reported that the bottleneck of the POS tagging task is the consistency of the annotation result (Manning, 2011). People tried to detect label mistakes automatically and minimize the influence of noise in training. The mistake re-weighting mechanism is effective in the NER task (Wang et al., 2019) . We focus on visually evaluating the label consistency.", |
|
"cite_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 67, |
|
"text": "(Manning, 2011;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 68, |
|
"end": 86, |
|
"text": "Wang et al., 2019;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 87, |
|
"end": 99, |
|
"text": "Eskin, 2000;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 100, |
|
"end": 123, |
|
"text": "Kv\u0207to\u0148 and Oliva, 2002)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 425, |
|
"end": 444, |
|
"text": "(Wang et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We presented an empirical method to explore the relationship between label consistency and NER model performance. It identified the label inconsistency of test data in SCIERC and CoNLL03 datasets (with 26.7% and 5.4% label mistakes). It validated the label consistency in multiple sets of NER data annotation on two benchmarks, CoNLL03 and SCIERC.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work is supported by National Science Foundation IIS-1849816 and CCF-1901059. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Detecting errors within a corpus using anomaly detection", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Eleazar Eskin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the 1st North American chapter of the Association for Computational Linguistics conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "148--153", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eleazar Eskin. 2000. Detecting errors within a corpus using anomaly detection. In Proceedings of the 1st North American chapter of the Association for Com- putational Linguistics conference, pages 148-153. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Biomedical knowledge graphs construction from conditional statements", |
|
"authors": [ |
|
{ |
|
"first": "Tianwen", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qingkai", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{
"first": "Nitesh",
"middle": ["V"],
"last": "Chawla",
"suffix": ""
},
{
"first": "Meng",
"middle": [],
"last": "Jiang",
"suffix": ""
}
|
], |
|
"year": 2020, |
|
"venue": "IEEE/ACM transactions on computational biology and bioinformatics", |
|
"volume": "18", |
|
"issue": "3", |
|
"pages": "823--835", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianwen Jiang, Qingkai Zeng, Tong Zhao, Bing Qin, Ting Liu, Nitesh V Chawla, and Meng Jiang. 2020. Biomedical knowledge graphs construction from conditional statements. IEEE/ACM transac- tions on computational biology and bioinformatics, 18(3):823-835.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "(semi-)automatic detection of errors in PoS-tagged corpora", |
|
"authors": [ |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Kv\u0207to\u0148", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karel", |
|
"middle": [], |
|
"last": "Oliva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "COL-ING 2002: The 19th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pavel Kv\u0207to\u0148 and Karel Oliva. 2002. (semi-)automatic detection of errors in PoS-tagged corpora. In COL- ING 2002: The 19th International Conference on Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Neural architectures for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miguel", |
|
"middle": [], |
|
"last": "Ballesteros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandeep", |
|
"middle": [], |
|
"last": "Subramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuya", |
|
"middle": [], |
|
"last": "Kawakami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1603.01360" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Miguel Ballesteros, Sandeep Sub- ramanian, Kazuya Kawakami, and Chris Dyer. 2016. Neural architectures for named entity recognition. arXiv preprint arXiv:1603.01360.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Empower sequence labeling with task-aware neural language model", |
|
"authors": [ |
|
{ |
|
"first": "Liyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingbo", |
|
"middle": [], |
|
"last": "Shang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [ |
|
"Fangzheng" |
|
], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huan", |
|
"middle": [], |
|
"last": "Gui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liyuan Liu, Jingbo Shang, Xiang Ren, Frank Fangzheng Xu, Huan Gui, Jian Peng, and Jiawei Han. 2018. Empower sequence la- beling with task-aware neural language model. In Thirty-Second AAAI Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Multi-task identification of entities, relations, and coreference for scientific knowledge graph construction", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Luan, Luheng He, Mari Ostendorf, and Hannaneh Hajishirzi. 2018. Multi-task identification of enti- ties, relations, and coreference for scientific knowl- edge graph construction. Proceedings of the 2018 Conference on Empirical Methods in Natural Lan- guage Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A general framework for information extraction using dynamic span graphs", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dave", |
|
"middle": [], |
|
"last": "Wadden", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amy", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Luan, Dave Wadden, Luheng He, Amy Shah, Mari Ostendorf, and Hannaneh Hajishirzi. 2019. A gen- eral framework for information extraction using dy- namic span graphs. Proceedings of the 2019 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Part-of-speech tagging from 97% to 100%: is it time for some linguistics?", |
|
"authors": [ |
|
{
"first": "Christopher",
"middle": ["D"],
"last": "Manning",
"suffix": ""
}
|
], |
|
"year": 2011, |
|
"venue": "International conference on intelligent text processing and computational linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher D Manning. 2011. Part-of-speech tagging from 97% to 100%: is it time for some linguistics? In International conference on intelligent text pro- cessing and computational linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Introduction to the conll-2003 shared task: Languageindependent named entity recognition", |
|
"authors": [ |
|
{
"first": "Erik",
"middle": ["F"],
"last": "Sang",
"suffix": ""
},
{
"first": "Fien",
"middle": [],
"last": "De Meulder",
"suffix": ""
}
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F Sang and Fien De Meulder. 2003. Intro- duction to the conll-2003 shared task: Language- independent named entity recognition. arXiv preprint cs/0306050.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Crossweigh: Training named entity tagger from imperfect annotations", |
|
"authors": [ |
|
{ |
|
"first": "Zihan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingbo", |
|
"middle": [], |
|
"last": "Shang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lihao", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiacheng", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zihan Wang, Jingbo Shang, Liyuan Liu, Lihao Lu, Ji- acheng Liu, and Jiawei Han. 2019. Crossweigh: Training named entity tagger from imperfect anno- tations. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP).", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Identifying referential intention with heterogeneous contexts", |
|
"authors": [ |
|
{ |
|
"first": "Wenhao", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mengxia", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The Web Conference 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenhao Yu, Mengxia Yu, Tong Zhao, and Meng Jiang. 2020. Identifying referential intention with hetero- geneous contexts. In Proceedings of The Web Con- ference 2020.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Enhancing taxonomy completion with concept generation via fusing relational representations", |
|
"authors": [ |
|
{ |
|
"first": "Qingkai", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinfeng", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenhao", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jane", |
|
"middle": [], |
|
"last": "Cleland-Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "ACM SIGKDD International Conference on Knowledge Discovery & Data Mining (KDD)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qingkai Zeng, Jinfeng Lin, Wenhao Yu, Jane Cleland- Huang, and Meng Jiang. 2021. Enhancing taxon- omy completion with concept generation via fus- ing relational representations. In ACM SIGKDD In- ternational Conference on Knowledge Discovery & Data Mining (KDD).", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Faceted hierarchy: A new graph type to organize scientific concepts and a construction method", |
|
"authors": [ |
|
{ |
|
"first": "Qingkai", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mengxia", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenhao", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinjun", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiyu", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Thirteenth Workshop on Graph-Based Methods for Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qingkai Zeng, Mengxia Yu, Wenhao Yu, Jinjun Xiong, Yiyu Shi, and Meng Jiang. 2019. Faceted hierarchy: A new graph type to organize scientific concepts and a construction method. In Proceedings of the Thir- teenth Workshop on Graph-Based Methods for Nat- ural Language Processing (TextGraphs-13).", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Tritrain: Automatic pre-fine tuning between pretraining and fine-tuning for sciner", |
|
"authors": [ |
|
{ |
|
"first": "Qingkai", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenhao", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mengxia", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianwen", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Weninger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qingkai Zeng, Wenhao Yu, Mengxia Yu, Tianwen Jiang, Tim Weninger, and Meng Jiang. 2020. Tri- train: Automatic pre-fine tuning between pre- training and fine-tuning for sciner. In Proceedings of the 2020 Conference on Empirical Methods in Natu- ral Language Processing: Findings.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "utilizes a novel approach to [Q/A]Task known as predictive questioning which attempts to identify ... The goal of this work is the enrichment of [human-machine interactions]Task in a natural language environment. The goal of this work is the [enrichment of human-machine interactions]Task in a natural language environment.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"text": "Validating label consistency in corrected test set:", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"text": "Identifying label inconsistency and validating the consistency in the original & corrected CoNLL03.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"html": null, |
|
"text": "Five NER models perform consistently better on the corrected SCIERC than on the original dataset. 47.95 52.64 56.13 48.07 51.79 LM-BiLSTM-CRF 62.78 58.20 60.40 59.15 57.15 58.13 SCIIE-single 71.20 62.88 66.79 65.77 60.90 63.24 SCIIE-multi 72.66 63.22 67.61 67.66 61.72 64.56 DyGIE-multi 69.64 67.02 68.31 65.09 65.28 65.18", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Method</td><td>Corrected SCIERC Original SCIERC P R F1 P R F1</td></tr><tr><td>BiLSTM-CRF</td><td>58.35</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |