|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:24:22.823335Z" |
|
}, |
|
"title": "FITAnnotator: A Flexible and Intelligent Text Annotation System", |
|
"authors": [ |
|
{ |
|
"first": "Yanzeng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Chinese Academy of Sciences", |
|
"location": {} |
|
}, |
|
"email": "liyanzeng@iie.ac.cn" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Chinese Academy of Sciences", |
|
"location": {} |
|
}, |
|
"email": "yubowen@iie.ac.cn" |
|
}, |
|
{ |
|
"first": "Quangang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Chinese Academy of Sciences", |
|
"location": {} |
|
}, |
|
"email": "liquangang@iie.ac.cn" |
|
}, |
|
{ |
|
"first": "Tingwen", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Chinese Academy of Sciences", |
|
"location": {} |
|
}, |
|
"email": "liutingwen@iie.ac.cn" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this paper, we introduce FITAnnotator, a generic web-based tool for efficient text annotation. Benefiting from the fully modular architecture design, FITAnnotator provides a systematic solution for the annotation of a variety of natural language processing tasks, including classification, sequence tagging and semantic role annotation, regardless of the language. Three kinds of interfaces are developed to annotate instances, evaluate annotation quality and manage the annotation task for annotators, reviewers and managers, respectively. FITAnnotator also gives intelligent annotations by introducing task-specific assistant to support and guide the annotators based on active learning and incremental learning strategies. This assistant is able to effectively update from the annotator feedbacks and easily handle the incremental labeling scenarios. 1", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this paper, we introduce FITAnnotator, a generic web-based tool for efficient text annotation. Benefiting from the fully modular architecture design, FITAnnotator provides a systematic solution for the annotation of a variety of natural language processing tasks, including classification, sequence tagging and semantic role annotation, regardless of the language. Three kinds of interfaces are developed to annotate instances, evaluate annotation quality and manage the annotation task for annotators, reviewers and managers, respectively. FITAnnotator also gives intelligent annotations by introducing task-specific assistant to support and guide the annotators based on active learning and incremental learning strategies. This assistant is able to effectively update from the annotator feedbacks and easily handle the incremental labeling scenarios. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Manually-labeled gold standard annotations are the first prerequisite for the training and evaluation of modern Natural Language Processing (NLP) methods. With the development of deep learning, neural networks have achieved state-of-the-art performance in a variety of NLP fields. These impressive achievements rely on large-scale training data for supervised training. However, building annotation requires a significant amount of human effort and incurs high costs, and can place heavy demands on human annotators for maintaining annotation quality and consistency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To improve annotation productivity and reduce the financial cost of annotation, many text annotation softwares are developed by constraining user actions and providing an effective interface. In the early days, platforms for linguistic annotations such as O' Donnell (2008) , BART (Stenetorp et al., 2012) , WebAnno-13 (Yimam et al., 2013) mainly focused on providing a visual interface for user labeling process, making annotation accessible to non-expert users. Recently, integrating active learning into annotation systems for providing suggestions to user has became mainstream (TextPro (Pianta et al., 2008) , WebAnno-14 (Yimam et al., 2014) , Active DOP (van Cranenburgh, 2018) , IN-CEpTION (Klie et al., 2018) , etc), but most of these works focus on English text and rarely consider the multi-lingual setting, which is necessary due to the growing demand for annotation in other languages. In addition to the interface and efficiency, incremental annotation is also necessary in realworld scenarios since the pre-defined annotation standards and rules cannot handle rapidly emerging novel classes in the real world, while being less addressed in existing annotation tools.", |
|
"cite_spans": [ |
|
{ |
|
"start": 259, |
|
"end": 273, |
|
"text": "Donnell (2008)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 281, |
|
"end": 305, |
|
"text": "(Stenetorp et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 308, |
|
"end": 339, |
|
"text": "WebAnno-13 (Yimam et al., 2013)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 591, |
|
"end": 612, |
|
"text": "(Pianta et al., 2008)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 615, |
|
"end": 646, |
|
"text": "WebAnno-14 (Yimam et al., 2014)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 660, |
|
"end": 683, |
|
"text": "(van Cranenburgh, 2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 686, |
|
"end": 716, |
|
"text": "IN-CEpTION (Klie et al., 2018)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To address the challenges above, we propose FITAnnotator, a generic web-based tool for text annotation, which fulfills the following requirements:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Extremely flexible and configurable: our system architecture is fully modular, even the user interface is a replaceable module. Which means it is model-agnostic and supports annotation on a variety of linguistic tasks, including tagging, classification, parsing, etc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Active learning: learning from small amounts of data, and selecting by itself what data it would like the user to label from an unlabeled dataset. Annotators label these selected instances and add them to the training set. A new model is automatically trained on the updated training set. This process repeats and results in dramatic reductions in the amount of labeling required to train the NLP model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Expansible data provider: the previous annotation tools are compatible with the static corpus for annotation, which is not convenient for annotating from sketch and expansion. FI-TAnnotator sets up an independent data loader and data provider, which can continuously import data to the corpus in bulk. The flexible data provider also brings new problems, such as dynamic labeling schema, which should be solved by incremental learning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Incremental learning: creating a prototype for each category and enabling the prototypes of the novel categories far from the prototypes of the original categories while maintaining features to cluster near the corresponding category prototypes, which makes the tool suitable for annotating with new classes added incrementally.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Collaboration & crowdsourcing: the system is designed for the multi-user scenario, where multiple annotators can work collaboratively at the same time. When multiple users cooperate in annotation, the dismountable crowdsourcing algorithm interface can be used to allocate overlapping data in apiece task packages, for evaluating the annotation quality of each user. Also, the system provides a manual review interface, which can perform sampling inspection and evaluation on various users' annotation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Figure 1 reflects our design philosophy and comprehension of the interaction between the three major elements in our annotation system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In recent years, the NLP community has developed several annotation tools (Neves and \u0160eva, 2019) . Yedda (Yang et al., 2018b) provides an easy-to-use and lightweight GUI software for collaborative text annotation, and provides certain administrator analysis for evaluating multi-annotators. FLAT 2 introduces generalised paradigm and well-defined annotation format defined in folia (van Gompel, 2012), and provides web-based annotation interface. Doccano (Nakayama et al., 2018) is an open-source, web-based text annotation tool that provides collaboration, intelligent recommendation functions, and includes a user-friendly annotation interface. INCEpTION ) is a comprehensive text annotation system, which is also web-based and open-source, integrates active learning algorithms and provides various interfaces for different annotation tasks, and it is developing for more tasks (de Castilho et al., 2018), more convenient (Boullosa et al., 2018) and low-resource scenarios (Klie et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 96, |
|
"text": "(Neves and \u0160eva, 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 105, |
|
"end": 125, |
|
"text": "(Yang et al., 2018b)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 455, |
|
"end": 478, |
|
"text": "(Nakayama et al., 2018)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 976, |
|
"end": 995, |
|
"text": "(Klie et al., 2020)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In addition, commercial annotation tools such as prodigy 3 , tagtog 4 , LightTag 5 also provide powerful active learning support, team-collaboration functions, efficient user interfaces, and provide more related commercial solutions, which have gained appreciable business achievement.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "All of these intelligent text annotation tools have several common features: supporting active learning and a rich variety of tasks. And commercial annotation tools pay more attention to user experience and collaboration.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The architecture of FITAnnotator is influenced by the ideas of functional programming and, in particular, by the desire to combine functional with object-oriented programming. The adherence to the programming principles such as immutability and modularity, FITAnnotator is developed by hybrid programming language Python. An overview of our system is shown in Figure 2 , which has four main modules:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 360, |
|
"end": 368, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "1. core module controls all data flow and provides the gateway for other modules. Tasks and projects are stored in the database of this module, and there are some fields to specify the URI of each related module. The system is based on these URIs to transfer and process data between modules. This module also Figure 2 : The overall architecture of the system provides an administrator control panel for managing the system and database.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 310, |
|
"end": 318, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3" |
|
}, |
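
{

"text": "The following is a minimal sketch (our illustration, not the authors' released code) of how a project record in the core module could store the URIs of its companion modules and how the gateway could forward a JSON payload to one of them; all endpoint names are hypothetical.\n\nimport json\nfrom urllib import request\n\n# Hypothetical project record: the core module keeps the URI of each\n# companion module and routes data between modules through these fields.\nPROJECT = {\n    'project_id': 42,\n    'task_type': 'classification',\n    'data_loader_uri': 'http://localhost:8001/load',\n    'assistant_uri': 'http://localhost:8002/suggest',\n    'interface_uri': 'http://localhost:8003/annotate',\n}\n\ndef forward(uri, payload):\n    # POST a JSON payload to a module URI and return the decoded response.\n    req = request.Request(uri, data=json.dumps(payload).encode('utf-8'),\n                          headers={'Content-Type': 'application/json'})\n    with request.urlopen(req) as resp:\n        return json.loads(resp.read().decode('utf-8'))\n\n# e.g. forward(PROJECT['assistant_uri'], {'texts': ['a review to label']})",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Architecture",

"sec_num": "3"

},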
|
{ |
|
"text": "2. data-loader module contains fundamental tokenizer and data-loader of specific machine learning model. By deploy multifarious data-loader module with different tokenizers, we can adapt this system to different languages and tasks. In addition, we also provide data expansion function in this module. Expanded data would be cleaned in this module and passed to core module.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3" |
|
}, |
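
{

"text": "As a minimal sketch of this design (the interface below is our assumption, not the actual FITAnnotator API), a data-loader module can pair a tokenizer with a cleaning step, so that swapping the tokenizer adapts the system to another language without touching the other modules.\n\nfrom typing import Callable, Iterable, List\n\ndef whitespace_tokenizer(text: str) -> List[str]:\n    return text.split()\n\ndef char_tokenizer(text: str) -> List[str]:\n    # A trivial fallback for languages without whitespace segmentation.\n    return list(text.replace(' ', ''))\n\nclass DataLoader:\n    def __init__(self, tokenize: Callable[[str], List[str]]):\n        self.tokenize = tokenize\n\n    def load(self, raw_texts: Iterable[str]):\n        # Clean, tokenize and hand the instances over to the core module.\n        for text in raw_texts:\n            text = text.strip()\n            if text:\n                yield {'text': text, 'tokens': self.tokenize(text)}\n\nenglish_loader = DataLoader(whitespace_tokenizer)\nprint(list(english_loader.load(['  FITAnnotator supports many languages  '])))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Architecture",

"sec_num": "3"

},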
|
{ |
|
"text": "3. intelligent annotation module acts as the assistant which provides a pre-built machine learning model according to the type of tasks. This model could be simple as FastText (Joulin et al., 2017) or complex as BERT (Devlin et al., 2019) . With such a model, we can obtain automatic labeling results for unlabeled data, and calculate their ranking scores according to the active learning strategy. By reordering the unlabeled data before pushing them to annotators, the annotation speed could be accelerated. Besides, incremental learning is also implemented in this module. We describe the details of this module in Section 4.", |
|
"cite_spans": [ |
|
{ |
|
"start": 176, |
|
"end": 197, |
|
"text": "(Joulin et al., 2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 217, |
|
"end": 238, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3" |
|
}, |
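
{

"text": "As a minimal sketch of the re-ordering step (our simplification: only the uncertainty part of the default strategy is shown here, see Section 4.1 for the fused version), each unlabeled instance is scored by the prediction entropy of any backbone classifier, and the most uncertain instances are pushed to annotators first.\n\nimport math\n\ndef entropy(probs):\n    return -sum(p * math.log(p + 1e-12) for p in probs)\n\ndef rank_unlabeled(instances, predict_proba):\n    # predict_proba: any model callback returning class probabilities.\n    scored = [(entropy(predict_proba(x)), x) for x in instances]\n    scored.sort(key=lambda pair: pair[0], reverse=True)\n    return [x for _, x in scored]\n\n# Toy stand-in for a FastText/BERT classifier (assumption for illustration):\nfake_model = {'great film': [0.95, 0.05], 'it was fine I guess': [0.55, 0.45]}\nranked = rank_unlabeled(list(fake_model), lambda x: fake_model[x])\nprint(ranked)  # the ambiguous review comes first",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Architecture",

"sec_num": "3"

},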
|
{ |
|
"text": "web interfaces: annotator, reviewer and administrator. The annotator interface presents the ranked unlabeled instances based on the recommendation score provided by the active learning module. Upon annotating a new sentence, the annotator is presented with the most probable labels recommended by the active learning model (see Figure 4) . When the anno-tators make a decision for confirming model recommendation or altering the labels, the operations will be fed back to the backend system and update the parameters of the active learning model. In the reviewer interface, the users monitor the progress of the annotation and see statistics such as the number of annotated instances, and the remaining unlabeled data. The reviewers can also review these already annotated instances and introduce corrections if necessary. In the administrator interface (shown in Figure 3 ), the project manager defines the annotation standards and sets all parameters for the annotation process, including the configures of active learning models, the management of annotators and reviewers, the assignment of tasks and so on.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 328, |
|
"end": 337, |
|
"text": "Figure 4)", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 864, |
|
"end": 872, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "interface module contains three separate", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "The system is written with a modular design intended to be easily modifiable. Modules and interfaces (except core module and administrator interface) can be replaced easily for specific requirements. The flexibility it easy to adapt to multiple tasks and languages. FITAnnotator has three built-in annotation templates now: text classification, sequence tagging and semantic structure annotation, which cover most common NLP tasks, including sentence classification, sentence pair matching, named entity recognition and semantic role annotation. Users can also migrate to other tasks through simple modification of the framework.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "interface module contains three separate", |
|
"sec_num": "4." |
|
}, |
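
{

"text": "For illustration only (this is not an official template format and the field names are hypothetical), a project configuration selecting one of the built-in templates might look as follows.\n\n# Hypothetical project configuration choosing a built-in annotation template.\nTASK_TEMPLATE = {\n    'template': 'sequence_tagging',   # or 'classification', 'semantic_structure'\n    'language': 'en',\n    'labels': ['PER', 'ORG', 'LOC', 'MISC'],\n    'assistant': {'backbone': 'fasttext', 'strategy': 'uncertainty+diversity'},\n    'allow_new_labels': True,         # enables the class-incremental workflow\n}\nprint(TASK_TEMPLATE['labels'])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "interface module contains three separate",

"sec_num": "4."

},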
|
{ |
|
"text": "Creating high-quality annotated corpora is a laborious process and requires experts who are highly familiar with the annotation schemes and stan- dards. To accelerate the annotation process, we introduce the intelligent assistant that incorporates task-specific neural networks which actively assist and guide annotators. The cores of intelligent annotation are two adaptive learning mechanisms: active learning and incremental learning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Intelligent Annotation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "A framework where a model learns from small amounts of data, and optimizes the selection of the most informative or diverse sample to annotate in order to maximize training utility value, is referred to as active learning (Gal et al., 2017; Schr\u00f6der and Niekler, 2020) . In particular, we employ a fused active learning method as a default strategy for evaluating, re-ranking and re-sampling data, which considers uncertainty and diversity at the same time (Zhou et al., 2017; Lutnick et al., 2019) . Using such a strategy, the most difficult and diverse instances will be annotated first, which are more valuable for model learning with respect to the rest of the corpus. After the instances have been selected by active learning, the system displays them in the annotator interface with the highlighted suggestion labels. The annotator can then accept or modify the suggestion. The choices are stored and passed to the active learning module as new training data to update the parameters.", |
|
"cite_spans": [ |
|
{ |
|
"start": 222, |
|
"end": 240, |
|
"text": "(Gal et al., 2017;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 241, |
|
"end": 268, |
|
"text": "Schr\u00f6der and Niekler, 2020)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 457, |
|
"end": 476, |
|
"text": "(Zhou et al., 2017;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 477, |
|
"end": 498, |
|
"text": "Lutnick et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Active Learning", |
|
"sec_num": "4.1" |
|
}, |
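
{

"text": "The exact fusion is not spelled out above, so the following is only a sketch of one plausible instantiation: a greedy selection mixing predictive uncertainty with a farthest-point diversity term over instance embeddings (the weight alpha and the normalization are our assumptions, not the implemented algorithm).\n\nimport numpy as np\n\ndef fused_select(embeddings, uncertainties, k, alpha=0.5):\n    # embeddings: (n, d) instance representations from the backbone\n    # uncertainties: (n,) scores, e.g. predictive entropy\n    selected, candidates = [], list(range(len(embeddings)))\n    for _ in range(k):\n        if selected:\n            chosen = embeddings[selected]\n            # diversity = distance to the closest already-selected instance\n            diversity = np.min(np.linalg.norm(\n                embeddings[candidates, None] - chosen[None], axis=-1), axis=1)\n            diversity = diversity / (diversity.max() + 1e-12)\n        else:\n            diversity = np.ones(len(candidates))\n        score = alpha * uncertainties[candidates] + (1 - alpha) * diversity\n        best = candidates[int(np.argmax(score))]\n        selected.append(best)\n        candidates.remove(best)\n    return selected\n\nrng = np.random.default_rng(0)\nemb, unc = rng.normal(size=(100, 16)), rng.uniform(size=100)\nprint(fused_select(emb, unc, k=5))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Active Learning",

"sec_num": "4.1"

},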
|
{ |
|
"text": "For analyzing the effectiveness of active learning strategies in FITAnnotator, we conduct a simple but representative comparative experiment based on the IMDb movie reviews sentiment classification task (Maas et al., 2011) . In this experiment, we respectively explore the effectiveness of the uncertainty sampling and the diversity sampling in active learning (Fu et al., 2013) , and employ a random sampling strategy as the baseline method. Two kinds of popular text classification models (Fast-Text (Joulin et al., 2017) and BERT (Devlin et al., 2019) ) are respectively implemented as the backbone of active learning. We use accuracy+ as the indicator to measure the performance (Lu et al., 2019) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 203, |
|
"end": 222, |
|
"text": "(Maas et al., 2011)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 361, |
|
"end": 378, |
|
"text": "(Fu et al., 2013)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 502, |
|
"end": 523, |
|
"text": "(Joulin et al., 2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 533, |
|
"end": 554, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 683, |
|
"end": 700, |
|
"text": "(Lu et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Active Learning", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "accuracy+ = T P H + T N H + T P M + T N M N", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Active Learning", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "where N is the size of dataset, H and M represent the human-annotated labels and the modelpredicted labels respectively. The evaluation is continuously carried out with the annotation process of the IMDb training set. Every 100 new annotation samples are generated, the performance of the backbone is evaluated on the standard test set. The results are shown in Figure 6 . Apparently, the BERT-based active learning method outperforms the FastText-based method. In terms of training convergence speed, the sampling strategy based on the uncertainty criterion is similar to the diversity criterion, but both of them are obviously faster than the random sampling baseline. After plenty of samples are labeled, the accuracy of those sampling methods tends to be approximate. This observation demonstrates that our system is able to accelerate the training process of the models by introducing active learning algorithms, so as to provide users with label recommendations more quickly and accurately.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 362, |
|
"end": 370, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Active Learning", |
|
"sec_num": "4.1" |
|
}, |
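
{

"text": "The snippet below is a direct transcription of the accuracy+ formula above (the split into a human-annotated subset H and a model-predicted subset M follows the text, but is our reading of the metric; for binary classification, TP + TN simply counts the correctly labeled instances of each subset).\n\ndef accuracy_plus(gold, human_labels, model_preds, human_mask):\n    # For instances annotated by humans we score the human label; for the\n    # rest we score the backbone's prediction.\n    n = len(gold)\n    correct = 0\n    for g, h_lab, m_lab, is_h in zip(gold, human_labels, model_preds, human_mask):\n        correct += int((h_lab if is_h else m_lab) == g)\n    return correct / n\n\nprint(accuracy_plus(gold=[1, 0, 1, 1],\n                    human_labels=[1, 0, 1, 1],\n                    model_preds=[1, 1, 0, 1],\n                    human_mask=[True, True, False, False]))  # -> 0.75",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Active Learning",

"sec_num": "4.1"

},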
|
{ |
|
"text": "Existing annotation tools focus on labeling instances based on a fixed annotation scheme. However, the pre-defined standards may not cover all the cases met in the annotation process, especially for the classification task with constantly updated source data. Take the case of aspect category classification (ACC). In E-commerce platforms, online reviews are valuable resources for providers to get feedback for their services. ACC aims to identify all the aspects discussed in a given review. Yet in the real world, new reviews and products are rapidly emerging, and it is impossible to annotate reviews with a pre-defined set of aspect categories once to cover all aspects (Toh and Su, 2015; Wu et al., 2018) . Considering the enormous cost of re-labeling the entire corpus, in an ideal annotation system, the new classes should be integrated into the existing labeled instances, sharing the previously learned parameters of active learning. To this end, we introduce an incremental learning mechanism into our annotation system. As Figure 5 shown, by creating a prototype for each category, the classification problem is converted into a problem of matching the samples to the prototypes (Yang et al., 2018a). During the training process, the loss function is designed to minimize the distance between the sample and the prototype (m in Figure 5 is the minimal margin between prototypes) and maximize the distance between prototypes. Thus the space of representation is sparse and clear outside of prototype clusters, a new prototype of the category can be added easily (Rebuffi et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 675, |
|
"end": 693, |
|
"text": "(Toh and Su, 2015;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 694, |
|
"end": 710, |
|
"text": "Wu et al., 2018)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1191, |
|
"end": 1204, |
|
"text": "(Yang et al.,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1573, |
|
"end": 1595, |
|
"text": "(Rebuffi et al., 2017)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1035, |
|
"end": 1043, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 1340, |
|
"end": 1348, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Incremental Learning", |
|
"sec_num": "4.2" |
|
}, |
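
{

"text": "Below is a 2-D toy sketch of the prototype mechanism described above (the actual loss used in FITAnnotator may differ; the squared-distance pull term and the hinge on the inter-prototype margin m are illustrative). A sample is classified by its nearest prototype, and adding a new class only requires adding one more prototype.\n\nimport numpy as np\n\ndef prototype_loss(feature, label, prototypes, m=1.0):\n    # Pull the sample towards its own prototype, push prototypes apart by\n    # at least margin m (hinge on pairwise prototype distances).\n    pull = np.sum((feature - prototypes[label]) ** 2)\n    push = 0.0\n    for i in range(len(prototypes)):\n        for j in range(i + 1, len(prototypes)):\n            gap = np.linalg.norm(prototypes[i] - prototypes[j])\n            push += max(0.0, m - gap) ** 2\n    return pull + push\n\ndef classify(feature, prototypes):\n    return int(np.argmin(np.linalg.norm(prototypes - feature, axis=1)))\n\nprotos = np.array([[0.0, 0.0], [3.0, 0.0]])       # two original classes\nprotos = np.vstack([protos, [[0.0, 3.0]]])        # a new class is just a new row\nprint(classify(np.array([0.2, 2.5]), protos))     # -> 2 (the new class)\nprint(prototype_loss(np.array([0.2, 2.5]), 2, protos))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Incremental Learning",

"sec_num": "4.2"

},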
|
{ |
|
"text": "To verify the effectiveness of FITAnnotator combined with incremental learning, we conduct experiments on the AG News dataset 6 , which is collected from the news corpus with four classes. In order to simulate the real-world scenario, we first use samples belonging to three of the four categories for annotation. After labeling 1000 samples, we import the data of the fourth category, and use the class-incremental function provided by FITAnnotator to change the annotation schema. For evaluation, we construct a word-level LSTM + CNN representation model with glove word embedding (Pennington et al., 2014) as the encoder, and compare our prototype-based method with the classic softmax-based classifier. The micro-F1 score is chosen as the evaluation metric. Figure 7 illustrates the experimental results. In the ordinary text classification task, the performance of the softmax-based classifier and the prototype-based classifier is relatively approximate. After introducing the fourth class (new class), the performance of the softmax-based classifier occurs a catastrophic recession. On the contrary, the prototype-based method shows impressive results in the class-incremental scenario, and the negative effect of the newly introduced class is negligible.", |
|
"cite_spans": [ |
|
{ |
|
"start": 583, |
|
"end": 608, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 762, |
|
"end": 770, |
|
"text": "Figure 7", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Incremental Learning", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "http://github.com/proycon/flat 3 https://prodi.gy/ 4 http://www.tagtog.net 5 https://www.lighttag.io/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{

"text": "6 http://groups.di.unipi.it/~gulli/AG_corpus_of_news_articles.html",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "",

"sec_num": null

},

{

"text": "In this paper, we present FITAnnotator, a web-based system for interactive NLP annotation. In order to reduce the workload of annotators, we integrate an active learning strategy into the recommendation component of our system, and introduce an incremental learning strategy to facilitate the rapid annotation of incessantly emerging novel categories. It supports a range of annotation types, as well as analyzing, assessing and managing the annotations. In future work, FITAnnotator will integrate more advanced incremental learning and active learning algorithms, and will be enhanced with more task templates.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Conclusion",

"sec_num": "5"

}
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work is supported by the Strategic Priority Research Program of Chinese Academy of Sciences, Grant No. XDC02040400.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgement", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Integrating knowledge-supported search into the inception annotation platform", |
|
"authors": [ |
|
{ |
|
"first": "Beto", |
|
"middle": [], |
|
"last": "Boullosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Eckart De Castilho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naveen", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan-Christoph", |
|
"middle": [], |
|
"last": "Klie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "127--132", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Beto Boullosa, Richard Eckart de Castilho, Naveen Ku- mar, Jan-Christoph Klie, and Iryna Gurevych. 2018. Integrating knowledge-supported search into the in- ception annotation platform. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, volume Demo Papers, pages 127-132.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Linking text and knowledge using the inception annotation platform", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Eckart De Castilho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan-Christoph", |
|
"middle": [], |
|
"last": "Klie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naveen", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beto", |
|
"middle": [], |
|
"last": "Boullosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 14th eScience IEEE International Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "327--328", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Eckart de Castilho, Jan-Christoph Klie, Naveen Kumar, Beto Boullosa, and Iryna Gurevych. 2018. Linking text and knowledge using the inception anno- tation platform. In Proceedings of the 14th eScience IEEE International Conference, pages 327-328.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NAACL-HLT 2019: Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing. In NAACL-HLT 2019: Annual Conference of the North American Chapter of the Association for Computational Linguistics, pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A survey on instance selection for active learning", |
|
"authors": [ |
|
{ |
|
"first": "Yifan", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xingquan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Knowledge and information systems", |
|
"volume": "35", |
|
"issue": "2", |
|
"pages": "249--283", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yifan Fu, Xingquan Zhu, and Bin Li. 2013. A survey on instance selection for active learning. Knowledge and information systems, 35(2):249-283.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Deep bayesian active learning with image data", |
|
"authors": [ |
|
{ |
|
"first": "Yarin", |
|
"middle": [], |
|
"last": "Gal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Riashat", |
|
"middle": [], |
|
"last": "Islam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zoubin", |
|
"middle": [], |
|
"last": "Ghahramani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1183--1192", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yarin Gal, Riashat Islam, and Zoubin Ghahramani. 2017. Deep bayesian active learning with image data. In In- ternational Conference on Machine Learning, pages 1183-1192.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Bag of tricks for efficient text classification", |
|
"authors": [ |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "427--431", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov. 2017. Bag of tricks for efficient text classification. In Proceedings of the 15th Confer- ence of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Papers, volume 2, pages 427-431.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The inception platform: Machine-assisted and knowledge-oriented interactive annotation", |
|
"authors": [ |
|
{ |
|
"first": "Jan-Christoph", |
|
"middle": [], |
|
"last": "Klie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Bugert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beto", |
|
"middle": [], |
|
"last": "Boullosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Eckart De Castilho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jan-Christoph Klie, Michael Bugert, Beto Boullosa, Richard Eckart de Castilho, and Iryna Gurevych. 2018. The inception platform: Machine-assisted and knowledge-oriented interactive annotation. In Pro- ceedings of the 27th International Conference on Computational Linguistics: System Demonstrations, pages 5-9. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "From Zero to Hero: Human-In-The-Loop Entity Linking in Low Resource Domains", |
|
"authors": [ |
|
{ |
|
"first": "Jan-Christoph", |
|
"middle": [], |
|
"last": "Klie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Eckart De Castilho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6982--6993", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.624" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jan-Christoph Klie, Richard Eckart de Castilho, and Iryna Gurevych. 2020. From Zero to Hero: Human- In-The-Loop Entity Linking in Low Resource Do- mains. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 6982-6993, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Investigating the effectiveness of representations based on word-embeddings in active learning for labelling text datasets. arXiv", |
|
"authors": [ |
|
{ |
|
"first": "Jinghui", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maeve", |
|
"middle": [], |
|
"last": "Henchion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [ |
|
"Mac" |
|
], |
|
"last": "Namee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinghui Lu, Maeve Henchion, and Brian Mac Namee. 2019. Investigating the effectiveness of representa- tions based on word-embeddings in active learning for labelling text datasets. arXiv, pages arXiv-1910.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "An integrated iterative annotation technique for easing neural network training in medical image analysis", |
|
"authors": [ |
|
{ |
|
"first": "Brendon", |
|
"middle": [], |
|
"last": "Lutnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brandon", |
|
"middle": [], |
|
"last": "Ginley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Darshana", |
|
"middle": [], |
|
"last": "Govind", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sean", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Mcgarry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Laviolette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rabi", |
|
"middle": [], |
|
"last": "Yacoub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjay", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Tomaszewski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kuang-Yu", |
|
"middle": [], |
|
"last": "Jen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pinaki", |
|
"middle": [], |
|
"last": "Sarder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Nature Machine Intelligence", |
|
"volume": "1", |
|
"issue": "2", |
|
"pages": "112--119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brendon Lutnick, Brandon Ginley, Darshana Govind, Sean D. McGarry, Peter S. LaViolette, Rabi Yacoub, Sanjay Jain, John E. Tomaszewski, Kuang-Yu Jen, and Pinaki Sarder. 2019. An integrated iterative an- notation technique for easing neural network training in medical image analysis. Nature Machine Intelli- gence, 1(2):112-119.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Learning word vectors for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Maas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Daly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "142--150", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng, and Christopher Potts. 2011. Learning word vectors for sentiment analysis. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 142-150, Portland, Oregon, USA. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "doccano: Text annotation tool for human", |
|
"authors": [ |
|
{ |
|
"first": "Hiroki", |
|
"middle": [], |
|
"last": "Nakayama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takahiro", |
|
"middle": [], |
|
"last": "Kubo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junya", |
|
"middle": [], |
|
"last": "Kamura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yasufumi", |
|
"middle": [], |
|
"last": "Taniguchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hiroki Nakayama, Takahiro Kubo, Junya Kamura, Yasu- fumi Taniguchi, and Xu Liang. 2018. doccano: Text annotation tool for human. Software available from https://github.com/doccano/doccano.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "An extensive review of tools for manual annotation of documents", |
|
"authors": [ |
|
{ |
|
"first": "Mariana", |
|
"middle": [], |
|
"last": "Neves", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jurica", |
|
"middle": [], |
|
"last": "\u0160eva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Briefings in Bioinformatics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mariana Neves and Jurica \u0160eva. 2019. An extensive review of tools for manual annotation of documents. Briefings in Bioinformatics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Demonstration of the uam corpustool for text and image annotation", |
|
"authors": [ |
|
{ |
|
"first": "O'", |
|
"middle": [], |
|
"last": "Mick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Donnell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the ACL-08: HLT Demo Session", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "13--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mick O'Donnell. 2008. Demonstration of the uam corpustool for text and image annotation. In Pro- ceedings of the ACL-08: HLT Demo Session, pages 13-16.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. Glove: Global vectors for word representation. In Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "The textpro tool suite", |
|
"authors": [ |
|
{ |
|
"first": "Emanuele", |
|
"middle": [], |
|
"last": "Pianta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Girardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Zanoli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emanuele Pianta, Christian Girardi, and Roberto Zanoli. 2008. The textpro tool suite. In LREC.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "icarl: Incremental classifier and representation learning", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Sylvestre-Alvise Rebuffi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georg", |
|
"middle": [], |
|
"last": "Kolesnikov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christoph", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Sperl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lampert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2001--2010", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sylvestre-Alvise Rebuffi, Alexander Kolesnikov, Georg Sperl, and Christoph H Lampert. 2017. icarl: In- cremental classifier and representation learning. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 2001-2010.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A survey of active learning for text classification using deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Schr\u00f6der", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Niekler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2008.07267" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher Schr\u00f6der and Andreas Niekler. 2020. A survey of active learning for text classifica- tion using deep neural networks. arXiv preprint arXiv:2008.07267.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "brat: a web-based tool for nlp-assisted text annotation", |
|
"authors": [ |
|
{ |
|
"first": "Pontus", |
|
"middle": [], |
|
"last": "Stenetorp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Topi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoko", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sophia", |
|
"middle": [], |
|
"last": "Ananiadou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun'ichi", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Demonstrations at the 13th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "102--107", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pontus Stenetorp, Sampo Pyysalo, Goran Topi\u0107, Tomoko Ohta, Sophia Ananiadou, and Jun'ichi Tsu- jii. 2012. brat: a web-based tool for nlp-assisted text annotation. In Proceedings of the Demonstrations at the 13th Conference of the European Chapter of the Association for Computational Linguistics, pages 102-107.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Nlangp: Supervised machine learning system for aspect category classification and opinion target extraction", |
|
"authors": [ |
|
{ |
|
"first": "Zhiqiang", |
|
"middle": [], |
|
"last": "Toh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 9th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "496--501", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiqiang Toh and Jian Su. 2015. Nlangp: Supervised machine learning system for aspect category classifi- cation and opinion target extraction. In Proceedings of the 9th International Workshop on Semantic Eval- uation (SemEval 2015), pages 496-501.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Active dop: A constituency treebank annotation tool with online learning", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Van Cranenburgh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andreas van Cranenburgh. 2018. Active dop: A con- stituency treebank annotation tool with online learn- ing. In Proceedings of the 27th International Confer- ence on Computational Linguistics: System Demon- strations, page 38.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Folia: Format for linguistic annotation", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Maarten Van Gompel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "22", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maarten van Gompel. 2012. Folia: Format for linguistic annotation. CLIN22, Tilburg.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "A hybrid unsupervised method for aspect term and opinion target extraction. Knowledge-Based Systems", |
|
"authors": [ |
|
{ |
|
"first": "Chuhan", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fangzhao", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sixing", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhigang", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongfeng", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "148", |
|
"issue": "", |
|
"pages": "66--73", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chuhan Wu, Fangzhao Wu, Sixing Wu, Zhigang Yuan, and Yongfeng Huang. 2018. A hybrid unsupervised method for aspect term and opinion target extraction. Knowledge-Based Systems, 148:66-73.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Robust classification with convolutional prototype learning", |
|
"authors": [ |
|
{ |
|
"first": "Hong-Ming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu-Yao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cheng-Lin", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3474--3482", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hong-Ming Yang, Xu-Yao Zhang, Fei Yin, and Cheng- Lin Liu. 2018a. Robust classification with convo- lutional prototype learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3474-3482.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Yedda: A lightweight collaborative text span annotation tool", |
|
"authors": [ |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xingxuan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jie Yang, Yue Zhang, Linwei Li, and Xingxuan Li. 2018b. Yedda: A lightweight collaborative text span annotation tool.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Automatic annotation suggestions and custom annotation layers in webanno", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Seid Muhie Yimam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Biemann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Eckart De Castilho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of 52nd Annual Meeting of the Association for Computational Linguistics: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "91--96", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seid Muhie Yimam, Chris Biemann, Richard Eckart de Castilho, and Iryna Gurevych. 2014. Automatic annotation suggestions and custom annotation layers in webanno. In Proceedings of 52nd Annual Meet- ing of the Association for Computational Linguistics: System Demonstrations, pages 91-96.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Webanno: A flexible, web-based and visually supported system for distributed annotations", |
|
"authors": [ |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Seid Muhie Yimam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Eckart De Castilho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Biemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seid Muhie Yimam, Iryna Gurevych, Richard Eckart de Castilho, and Chris Biemann. 2013. Webanno: A flexible, web-based and visually supported system for distributed annotations. In Proceedings of the 51st", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Annual Meeting of the Association for Computational Linguistics: System Demonstrations", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pages 1-6.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Fine-tuning convolutional neural networks for biomedical image analysis: Actively and incrementally", |
|
"authors": [ |
|
{ |
|
"first": "Zongwei", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jae", |
|
"middle": [], |
|
"last": "Shin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suryakanth", |
|
"middle": [], |
|
"last": "Gurudu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Gotway", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianming", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "2017", |
|
"issue": "", |
|
"pages": "4761--4772", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zongwei Zhou, Jae Shin, Lei Zhang, Suryakanth Gurudu, Michael Gotway, and Jianming Liang. 2017. Fine-tuning convolutional neural networks for biomedical image analysis: Actively and incre- mentally. In 2017 IEEE Conference on Computer Vi- sion and Pattern Recognition (CVPR), volume 2017, pages 4761-4772.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "The interaction of the three major elements of the intelligent annotation system", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Screenshot of administrator interface (a) Text classification. (b) Named entity recognition. (c) Sequence tagging.", |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Screenshots of annotator interface for different tasks.", |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": ": 2-dim sketch of prototype-based incremental learning", |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Results of different active learning strategies and models over imdb dataset. Curves start from 10 at along x-axis.", |
|
"uris": null |
|
}, |
|
"FIGREF5": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Results of different classifier (softmax-based and prototype-based) in class-incremental scenario.", |
|
"uris": null |
|
} |
|
} |
|
} |
|
} |