|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:06:14.100117Z" |
|
}, |
|
"title": "Assertion Detection in Clinical Notes: Medical Language Models to the Rescue?", |
|
"authors": [ |
|
{ |
|
"first": "Betty", |
|
"middle": [], |
|
"last": "Van Aken", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Beuth University of Applied Sciences Berlin", |
|
"location": {} |
|
}, |
|
"email": "bvanaken@beuth-hochschule.de" |
|
}, |
|
{ |
|
"first": "Ivana", |
|
"middle": [], |
|
"last": "Trajanovska", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Beuth University of Applied Sciences Berlin", |
|
"location": {} |
|
}, |
|
"email": "ivtrajanovska@gmail.com" |
|
}, |
|
{ |
|
"first": "Amy", |
|
"middle": [], |
|
"last": "Siu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Beuth University of Applied Sciences Berlin", |
|
"location": {} |
|
}, |
|
"email": "siu@beuth-hochschule.de" |
|
}, |
|
{ |
|
"first": "Manuel", |
|
"middle": [], |
|
"last": "Mayrdorfer", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Charit\u00e9 Universit\u00e4tsmedizin Berlin", |
|
"location": {} |
|
}, |
|
"email": "manuel.mayrdorfer@charite.de" |
|
}, |
|
{ |
|
"first": "Klemens", |
|
"middle": [], |
|
"last": "Budde", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Charit\u00e9 Universit\u00e4tsmedizin Berlin", |
|
"location": {} |
|
}, |
|
"email": "klemens.budde@charite.de" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "L\u00f6ser", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Beuth University of Applied Sciences Berlin", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In order to provide high-quality care, health professionals must efficiently identify the presence, possibility, or absence of symptoms, treatments and other relevant entities in freetext clinical notes. Such is the task of assertion detection-to identify the assertion class (present, possible, absent) of an entity based on textual cues in unstructured text. We evaluate state-of-the-art medical language models on the task and show that they outperform the baselines in all three classes. As transferability is especially important in the medical domain we further study how the best performing model behaves on unseen data from two other medical datasets. For this purpose we introduce a newly annotated set of 5,000 assertions for the publicly available MIMIC-III dataset. We conclude with an error analysis that reveals situations in which the models still go wrong and points towards future research directions.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In order to provide high-quality care, health professionals must efficiently identify the presence, possibility, or absence of symptoms, treatments and other relevant entities in freetext clinical notes. Such is the task of assertion detection-to identify the assertion class (present, possible, absent) of an entity based on textual cues in unstructured text. We evaluate state-of-the-art medical language models on the task and show that they outperform the baselines in all three classes. As transferability is especially important in the medical domain we further study how the best performing model behaves on unseen data from two other medical datasets. For this purpose we introduce a newly annotated set of 5,000 assertions for the publicly available MIMIC-III dataset. We conclude with an error analysis that reveals situations in which the models still go wrong and points towards future research directions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The clinical information buried in narrative reports is difficult for humans to access for clinical, teaching, or research purposes (Perera et al., 2013) . To provide high-quality patient care, health professionals need to have better and faster access to crucial information in a summarized and interpretable format. In this paper, we focus on English discharge summaries and the task of assertion detection, which is the classification of clinical information as demonstrated in Figure 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 132, |
|
"end": 153, |
|
"text": "(Perera et al., 2013)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 481, |
|
"end": 489, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Given a piece of text, we need to identify two pieces of information -a medical entity and textual cues indicating the presence or absence of that entity. Medical entity extraction has been studied extensively (Lewis et al., 2020), we thus focus our work on the task of predicting the present / possible / absent class over a medical entity, addressing an important information need of health professionals. This setting is reflected in the dataset released by the 2010 i2b2 Challenge Assertions Task (de Bruijn et al., 2011a) , on which we base our main evaluation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 501, |
|
"end": 526, |
|
"text": "(de Bruijn et al., 2011a)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Clinical assertion detection is known to be a difficult task (Chen, 2019) due to the free-text format of considered clinical notes. Detecting possible assertions is particularly challenging, because they are often vaguely expressed, and they occur far less frequently than present and absent assertions. Language models pre-trained on medical data have shown to create useful representations for a multitude of tasks in the domain (Peng et al., 2019) . We apply them to our setup of assertion detection to evaluate whether they can increase performance (especially on the minority class) and where they still need improvement.", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 73, |
|
"text": "(Chen, 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 431, |
|
"end": 450, |
|
"text": "(Peng et al., 2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We argue that clinical assertion detection models must be transferable to data that differs from the training data, e.g. due to different writing styles of health professionals from other clinics or from other medical fields. As existing datasets do not represent such diversity, we manually annotate 5,000 assertions in clinical notes from several fields in the publicly available MIMIC-III dataset. We then use these annotated notes as an additional evaluation set to test the transferability of the best performing model. Task discharge summaries 21,064 1,418 6,144 BioScope scientific publications -3,474 2,161 MIMIC-III Clinical Database (New) discharge summaries 2,610 250 980 physician letters 204 34 66 nurse letters 293 14 59 radiology reports 249 40 130 Table 1 : Distribution of text types and classes in the three employed datasets. Note that possible is a minority class across datasets as well as text types. In the i2b2 dataset, for instance, only 5% of all labels are possible.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 525, |
|
"end": 800, |
|
"text": "Task discharge summaries 21,064 1,418 6,144 BioScope scientific publications -3,474 2,161 MIMIC-III Clinical Database (New) discharge summaries 2,610 250 980 physician letters 204 34 66 nurse letters 293 14 59 radiology reports 249 40 130 Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our contributions are summarized as follows: 1) We evaluate medical language models on assertion detection in clinical notes and show that they clearly outperform previous baselines. We further study the transferability of such models to clinical text from other medical areas.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "present possible absent 2010 i2b2 Challenge Assertion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "2) We manually annotate 5,000 assertions for the MIMIC-III Clinical Database (Johnson et al., 2016) . We release the annotations to the research community 1 to tackle the problem of label sparsity and the lack of diversity in existing assertion data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 49, |
|
"end": 99, |
|
"text": "MIMIC-III Clinical Database (Johnson et al., 2016)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "present possible absent 2010 i2b2 Challenge Assertion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "3) We conduct an error analysis to understand the capabilities of the best performing model on the task and to reveal directions for improvement. We make our system publicly available as a web application to allow further analyses 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "present possible absent 2010 i2b2 Challenge Assertion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "One of the earliest approaches to assertion detection is NegEx (Chapman et al., 2001) , where hand-crafted word patterns are used to extract the absent category of assertions in discharge summaries. In 2010, the i2b2 Challenge Assertions Task (de Bruijn et al., 2011a) was introduced, and an accompanying corpus was released.", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 85, |
|
"text": "NegEx (Chapman et al., 2001)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
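
{

"text": "In the spirit of NegEx (and greatly simplified relative to its original rule set), such pattern-based detection of the absent class can be sketched as follows; the trigger list and helper function are illustrative assumptions, not the published algorithm:\n\nimport re\n\n# toy subset of negation triggers; the real NegEx lexicon is much larger\nNEG_TRIGGERS = re.compile(r'\\b(no|denies|without|negative for)\\b', re.IGNORECASE)\n\ndef is_absent(sentence, entity):\n    # flag the entity as absent if a negation trigger precedes it in the sentence\n    before_entity = sentence.lower().split(entity.lower())[0]\n    return bool(NEG_TRIGGERS.search(before_entity))\n\nprint(is_absent('The patient denies chest pain.', 'chest pain'))  # True",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Related Work",

"sec_num": "2"

},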
|
{ |
|
"text": "There is a variety of prior work focused on scope resolution for assertions, which differs from our setting in that it does not consider medical concepts but scopes of a certain assertion cue. Representative current approaches for this task setup include a CNN-based (Convolutional Neural Network) one by Qian et al. (2016) , reaching an F1 of 0.858 on the more challenging possible category. Sergeeva et al. (2019) propose a LSTM-based (Long Short-Term Memory) approach to detect only absent 1 Annotated data available at: https://github.com/bvanaken/ clinical-assertion-data 2 Demo application: https://ehr-assertion-detection.demo. datexis.com scopes. When \"gold negation cues\" are made available to the model and synthetic features are applied, an F1 of 0.926 is reached. NegBert (Khandelwal and Sawant, 2020) is another approach to detect absent scopes. As its name suggests, it is BERT-based and reaches an F1 of 0.957 on BioScope abstracts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 305, |
|
"end": 323, |
|
"text": "Qian et al. (2016)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 393, |
|
"end": 415, |
|
"text": "Sergeeva et al. (2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 784, |
|
"end": 813, |
|
"text": "(Khandelwal and Sawant, 2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In contrast to these approaches we focus our work on entity-specific assertion detection, the results of which are of more practical help for supporting health professionals. Bhatia et al. (2019) explored extracting entities and negations in a joint setting, whereas the work of Harkema et al. (2009) , Chen (2019) and de Bruijn et al. 2011ais the closest to our task setup, i.e. labelling entities with an assertion class. Harkema et al. (2009) extended the NexEx algorithm with contextual properties. de Bruijn et al. (2011a) use a simple SVM classifier and Chen (2019) apply a bidirectional LSTM model with attention to the task and evaluate it on the i2b2 corpus. While these models reach F1-scores above 0.9 on the majority classes, the challenging possible class does not surpass 0.65. We show that medical language models outperform these scores especially regarding the minority class.", |
|
"cite_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 195, |
|
"text": "Bhatia et al. (2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 300, |
|
"text": "Harkema et al. (2009)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 424, |
|
"end": 445, |
|
"text": "Harkema et al. (2009)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Furthermore, Wu et al. (2014) compared then state-of-the-art approaches for negation detection and found a lack of generalisation to arbitrary clinical text. We thus want to examine the transfer capabilities of recent language models to understand whether they can mitigate the phenomenon.", |
|
"cite_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 29, |
|
"text": "Wu et al. (2014)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We want to understand the abilities of medical language models on the task of assertion detection. We hence fine-tune various (medical) language models on the i2b2 corpus described below. We further apply the best performing model to the BioScope dataset and our newly introduced MIMIC-III assertion dataset without further fine-tuning to test their performance on unseen medical data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "F1 for present possible absent Earlier approaches SVM Classifier (de Bruijn et al., 2011b) 0.959 0.643 0.939 Conditional Softmax Shared Decoder (Bhatia et al., 2019) --0.905 Bi-directional LSTM with Attention (Chen, 2019) 0.950 0.637 0.927 Language models under evaluation BERT Base (Devlin et al., 2019) 0.968 0.704 0.943 BioBERT Base (Lee et al., 2020) 0.976 0.759 0.963 Bio+Clinical BERT (Alsentzer et al., 2019) 0.977 0.775 0.966 Bio+Discharge Summary BERT (Alsentzer et al., 2019) 0 ", |
|
"cite_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 90, |
|
"text": "(de Bruijn et al., 2011b)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 144, |
|
"end": 165, |
|
"text": "(Bhatia et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 221, |
|
"text": "(Chen, 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 283, |
|
"end": 304, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 336, |
|
"end": 354, |
|
"text": "(Lee et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 391, |
|
"end": 415, |
|
"text": "(Alsentzer et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 461, |
|
"end": 485, |
|
"text": "(Alsentzer et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The 2010 i2b2 Assertion Task (de Bruijn et al., 2011a) provides a corpus of assertions in clinical discharge summaries. The task is split into six classes, namely present, possible, absent, hypothetical, conditional and associated with someone else. However, the distribution is highly skewed, such that only 6% of the assertions belong to the latter three classes. Hence we only use the present, possible, and absent assertions for our evaluation as they present the most important information for doctors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "BioScope (Vincze et al., 2008 ) is a corpus of assertions in biomedical publications. It was specifically curated for the study of negation and speculation (or absent and possible in this paper) scope and does not contain present annotations. As mentioned before, the BioScope dataset does not completely match the information need of health professionals and the i2b2 corpus lacks varied medical text types. We thus introduce a new set of labelled assertions to complement existing data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 29, |
|
"text": "(Vincze et al., 2008", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The MIMIC-III Clinical Database (Johnson et al., 2016) provides texts from discharge summaries as well as other clinical notes (physician letters, nurse letters, and radiology reports) representing a promising source of varied medical text. Therefore, two annotators followed the annotation guidelines from the i2b2 challenge, and labelled 5,000 assertions, i.e. word spans of entities and their corresponding present / possible / absent class. The inner-annotator agreement as Cohen's kappa coefficient is 0.847, which indicates a strong level of agreement. The annotations were further veri-fied by a medical doctor, who provided feedback to correct a small number of labels, and confirmed that the end results were satisfactory.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.1" |
|
}, |
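
{

"text": "Cohen's kappa can be computed directly from the two annotators' label sequences. A minimal sketch with made-up labels (the reported 0.847 is of course computed over all 5,000 annotated assertions):\n\nfrom sklearn.metrics import cohen_kappa_score\n\nannotator_a = ['present', 'absent', 'possible', 'present', 'absent']\nannotator_b = ['present', 'absent', 'present', 'present', 'absent']\n\n# chance-corrected agreement; values above ~0.8 are commonly read as strong agreement\nprint(cohen_kappa_score(annotator_a, annotator_b))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Datasets",

"sec_num": "3.1"

},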
|
{ |
|
"text": "It is important to note that even though the newly annotated data from MIMIC-III adds variation to the existing corpora, the dataset has its own limitations. The clinical notes are collected from a single institution (with a mostly White patient population) and from Intensive Care Unit patients only. We therefore argue that progress in assertion detection requires further initiatives for releasing more diverse sets of clinical notes. Table 1 summarizes the assertion distribution in the introduced datasets and shows the unbalanced nature of the data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 438, |
|
"end": 445, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We make predictions about assertions on a perentity level. However, we want our models to consider the context of an entity. We therefore pass the whole sentence to the models and surround the entity tokens with special indicator tokens [entity] whose embeddings are randomly initialised. A sample input sequence thus looks as follows: [CLS] We apply the same pre-processing to all three datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 237, |
|
"end": 245, |
|
"text": "[entity]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 336, |
|
"end": 341, |
|
"text": "[CLS]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Preprocessing", |
|
"sec_num": "3.2" |
|
}, |
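
{

"text": "A minimal sketch of this input construction (the example sentence, span indices, and helper function are illustrative assumptions, not our exact implementation):\n\nfrom transformers import AutoTokenizer\n\ntokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')\n# register the indicator token so it is kept as a single unit\ntokenizer.add_special_tokens({'additional_special_tokens': ['[entity]']})\n\ndef mark_entity(words, start, end):\n    # surround the entity span with [entity] indicator tokens\n    return words[:start] + ['[entity]'] + words[start:end] + ['[entity]'] + words[end:]\n\nwords = 'the patient denies chest pain or nausea'.split()\nmarked = ' '.join(mark_entity(words, 3, 5))  # entity span: 'chest pain'\nencoding = tokenizer(marked, return_tensors='pt')  # prepends [CLS], appends [SEP]\n\nBecause [entity] is a new token, the model's embedding matrix is resized accordingly (model.resize_token_embeddings(len(tokenizer))), so that the randomly initialised [entity] embedding can be learned during fine-tuning.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data Preprocessing",

"sec_num": "3.2"

},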
|
{ |
|
"text": "There are various pre-trained (bio-)medical and clinical language models available to evaluate on the assertion detection task. We select the most prevalent ones and describe them in short below: Table 3 : Experimental results (in F1) for the best performing Bio+Discharge Summary BERT model on two further assertion datasets and their different text types. Both datasets were not seen during training. Note that the number of evaluation samples is very low for some text types (i.e. possible class in nurse letters), which impairs the expressiveness of these results.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 196, |
|
"end": 203, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Fine-tuning Medical Language Models", |
|
"sec_num": "3.3" |
|
}, |
|
|
{ |
|
"text": "BERT (Devlin et al., 2019) was pre-trained on non-medical data and serves as a baseline for Transformer-base pre-trained language models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 26, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fine-tuning Medical Language Models", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "BioBERT (Lee et al., 2020 ) is a standard model for medical NLP tasks and is pre-trained on biomedical publications. Bio+Clinical BERT and Bio+Discharge Summary BERT (Alsentzer et al., 2019) are built upon BioBERT with additional pretraining on clinical notes / discharge summaries. The CORe model (van Aken et al., 2021) uses BioBERT and adds a specialized clinical outcome pre-training. Biomed RoBERTA (Gururangan et al., 2020) is based on the RoBERTA model (Liu et al., 2019) and pre-trained on bio-medical publications. After an initial grid search we fix our hyperparameters to a learning rate of 1e-5, batch size of 32, and 2 epochs of training.", |
|
"cite_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 25, |
|
"text": "(Lee et al., 2020", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 166, |
|
"end": 190, |
|
"text": "(Alsentzer et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 460, |
|
"end": 478, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fine-tuning Medical Language Models", |
|
"sec_num": "3.3" |
|
}, |
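
{

"text": "A minimal sketch of the fine-tuning setup with these hyperparameters (the model identifier and the training dataset are assumptions; this is not our released training code):\n\nfrom torch.utils.data import DataLoader\nfrom torch.optim import AdamW\nfrom transformers import AutoModelForSequenceClassification\n\n# three assertion classes: present / possible / absent\nmodel = AutoModelForSequenceClassification.from_pretrained(\n    'emilyalsentzer/Bio_Discharge_Summary_BERT', num_labels=3)\noptimizer = AdamW(model.parameters(), lr=1e-5)\n\ntrain_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)  # train_dataset assumed\nmodel.train()\nfor epoch in range(2):  # 2 epochs of training\n    for batch in train_loader:\n        outputs = model(input_ids=batch['input_ids'],\n                        attention_mask=batch['attention_mask'],\n                        labels=batch['labels'])\n        outputs.loss.backward()\n        optimizer.step()\n        optimizer.zero_grad()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Fine-tuning Medical Language Models",

"sec_num": "3.3"

},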
|
{ |
|
"text": "We start by evaluating the mentioned models on the i2b2 corpus. We use training and test data as defined by in the i2b2 challenge and compare our results to previous state-of-the-art approaches in Table 2 . Next, we apply the best performing Bio+Discharge Summary BERT to the BioScope and MIMIC-III corpora without additional finetuning (Table 3) . This way we can see the model's performance on medical text from unseen sources.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 197, |
|
"end": 204, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 346, |
|
"text": "(Table 3)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation and Discussion", |
|
"sec_num": "4" |
|
}, |
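
{

"text": "The per-class F1-scores reported in Tables 2 and 3 correspond to standard per-label F1; a sketch with placeholder predictions:\n\nfrom sklearn.metrics import f1_score\n\nclasses = ['present', 'possible', 'absent']\ny_true = ['present', 'absent', 'possible', 'present', 'absent']\ny_pred = ['present', 'absent', 'present', 'present', 'absent']\n\n# average=None yields one F1 value per class, in the order given by `classes`\nprint(f1_score(y_true, y_pred, labels=classes, average=None))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation and Discussion",

"sec_num": "4"

},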
|
{ |
|
"text": "Language models outperform baselines. Table 2 shows that all evaluated medical language models are able to increase F1-scores on all three classes. On the most challenging possible class the improvement is the clearest with up to \u223c15pp, which shows that the models are better in handling sparse occurrences coupled with vague expressions.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 38, |
|
"end": 45, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Medical pre-training is important. The vanilla BERT baseline is the weakest of our evaluated models, which shows that models specialized on the medical domain are not only effective for more complex medical tasks but also for assertion detection, which is in line with the claim by Gururangan et al. (2020) that domain-specific pre-training is almost always of use. Bio+Discharge Summary BERT is the best model -probably because it was trained on text very similar to the i2b2 corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Text style matters. Table 3 shows the ability of the Bio+Discharge Summary BERT language model to transfer to other text styles. The assertions in the BioScope corpus are difficult to identify by the model as they clearly differ from the ones used by doctors in clinical notes. The text style in MIMIC-III data is more similar to the originally learned data which is reflected in the results. 3 However, physician letters appear to contain more specialized expressions and therefore evoke more errors. This points towards a lack of generalization possibly caused by the limited variety of assertion cues in the training data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 27, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We analyse all errors made by the best model to identify main sources of errors and to point towards future research directions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Inconsistent data in pre-existing datasets account for roughly 45% of errors. This includes obvious labelling mistakes, but also disagreements among annotators. For example, phrases such as \"appeared to be,\" \"concerning for\" and \"consistent with\" are labeled differently, as present or as possible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Long range dependencies account for roughly 20% of all errors, in which entities and their cues have dependencies longer than a few tokens apart. While the model's attention mechanism could easily detect distant tokens, the model might have learned to only consider close assertion cues. The following is an example of a distant cue indicating the absent class which was missed by the model:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "His rash on the right hand was examined further and is now resolved.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Lists of assertions are found in 8% of error samples. Here the assertion is not directly coupled to an entity but must be inferred by the way it is listed. Such somewhat ambiguous cases are usually easily understood by humans, but difficult for our models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "No hydrocephalus, subarachnoid hemorrhage, no fracture.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Misspellings account for 5% of all observed errors, but they reveal a critical yet surprising limitation. For instance, the cues \"appeas\" and \"probalbe\" that indicate possible instances, are missed. While Transformer-based models are generally capable of dealing with misspellings due to subword tokenization, the missing variety of expressions in the data appears to let the models focus on a specific set of textual cues without generalizing to new phrases or even misspellings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.2" |
|
}, |
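
{

"text": "The subword behaviour can be inspected directly; a quick sketch (the exact splits depend on the tokenizer vocabulary):\n\nfrom transformers import AutoTokenizer\n\ntokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')\n\n# a misspelled cue is still representable as subword pieces,\n# so the failure is one of learned generalization rather than input coverage\nprint(tokenizer.tokenize('probable'))\nprint(tokenizer.tokenize('probalbe'))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Error Analysis",

"sec_num": "4.2"

},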
|
{ |
|
"text": "In this work, we present an evaluation on medical language models to detect assertions in clinical texts and experimental results which show that they outperform baseline approaches. We further provided a new corpus of assertion annotations on the MIMIC-III dataset that will augment existing data collections and shows the model's capability to be transferred to other sources -if the text styles do not strongly differ. We suggest future work to investigate generalization to unseen data and expressions. We further encourage work on multi-task learning of entity extraction and assertions to support health professionals with systems that learn jointly in an end-to-end fashion.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Note that the model's pre-training is based on MIMIC-III and it was thus to an extent exposed to the test data. Due to the difference of the target task and the amount of total pre-training data, this influence should be negligible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Our work is funded by the German Federal Ministry for Economic Affairs and Energy (BMWi) under grant agreement 01MD19003B (PLASS) and 01MK2008MD (Servicemeister).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Publicly Available Clinical BERT Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Alsentzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Boag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Hung", |
|
"middle": [], |
|
"last": "Weng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jindi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tristan", |
|
"middle": [], |
|
"last": "Naumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Mcdermott", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2nd Clinical Natural Language Processing Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "72--78", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Alsentzer, John Murphy, William Boag, Wei- Hung Weng, Di Jindi, Tristan Naumann, and Matthew McDermott. 2019. Publicly Available Clinical BERT Embeddings. In Proceedings of the 2nd Clinical Natural Language Processing Work- shop, pages 72-78.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Joint Entity Extraction and Assertion Detection for Clinical Text", |
|
"authors": [ |
|
{ |
|
"first": "Parminder", |
|
"middle": [], |
|
"last": "Bhatia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Busra", |
|
"middle": [], |
|
"last": "Celikkaya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammed", |
|
"middle": [], |
|
"last": "Khalilia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "954--959", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Parminder Bhatia, Busra Celikkaya, and Mohammed Khalilia. 2019. Joint Entity Extraction and Asser- tion Detection for Clinical Text. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 954-959.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "A Simple Algorithm for Identifying Negated Findings and Diseases in Discharge Summaries", |
|
"authors": [ |
|
{ "first": "Wendy", "middle": ["W"], "last": "Chapman", "suffix": "" },

{ "first": "Will", "middle": [], "last": "Bridewell", "suffix": "" },

{ "first": "Paul", "middle": [], "last": "Hanbury", "suffix": "" },

{ "first": "Gregory", "middle": ["F"], "last": "Cooper", "suffix": "" },

{ "first": "Bruce", "middle": ["G"], "last": "Buchanan", "suffix": "" }
|
], |
|
"year": 2001, |
|
"venue": "Journal of Biomedical Informatics", |
|
"volume": "34", |
|
"issue": "5", |
|
"pages": "301--310", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wendy W Chapman, Will Bridewell, Paul Hanbury, Gregory F Cooper, and Bruce G Buchanan. 2001. A Simple Algorithm for Identifying Negated Findings and Diseases in Discharge Summaries. Journal of Biomedical Informatics, 34(5):301-310.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Attention-based Deep Learning System for Negation and Assertion Detection in Clinical Notes", |
|
"authors": [ |
|
{ |
|
"first": "Long", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Journal of Artificial Intelligence and Applications", |
|
"volume": "10", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Long Chen. 2019. Attention-based Deep Learning Sys- tem for Negation and Assertion Detection in Clini- cal Notes. International Journal of Artificial Intelli- gence and Applications, 10(1).", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Machinelearned Solutions for Three Stages of Clinical Information Extraction: The State of the Art at i2b2 2010", |
|
"authors": [ |
|
{ "first": "Berry", "middle": [], "last": "de Bruijn", "suffix": "" },

{ "first": "Colin", "middle": [], "last": "Cherry", "suffix": "" },

{ "first": "Svetlana", "middle": [], "last": "Kiritchenko", "suffix": "" },

{ "first": "Joel", "middle": [], "last": "Martin", "suffix": "" },

{ "first": "Xiaodan", "middle": [], "last": "Zhu", "suffix": "" }
|
], |
|
"year": 2011, |
|
"venue": "Journal of the American Medical Informatics Association", |
|
"volume": "18", |
|
"issue": "5", |
|
"pages": "557--562", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Berry de Bruijn, Colin Cherry, Svetlana Kiritchenko, Joel Martin, and Xiaodan Zhu. 2011a. Machine- learned Solutions for Three Stages of Clinical Infor- mation Extraction: The State of the Art at i2b2 2010. Journal of the American Medical Informatics Asso- ciation, 18(5):557-562.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Machinelearned solutions for three stages of clinical information extraction: the state of the art at i2b2 2010", |
|
"authors": [ |
|
{ "first": "Berry", "middle": [], "last": "de Bruijn", "suffix": "" },

{ "first": "Colin", "middle": [], "last": "Cherry", "suffix": "" },

{ "first": "Svetlana", "middle": [], "last": "Kiritchenko", "suffix": "" },

{ "first": "Joel", "middle": ["D"], "last": "Martin", "suffix": "" },

{ "first": "Xiaodan", "middle": [], "last": "Zhu", "suffix": "" }
|
], |
|
"year": 2011, |
|
"venue": "J. Am. Medical Informatics Assoc", |
|
"volume": "18", |
|
"issue": "5", |
|
"pages": "557--562", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Berry de Bruijn, Colin Cherry, Svetlana Kiritchenko, Joel D. Martin, and Xiaodan Zhu. 2011b. Machine- learned solutions for three stages of clinical informa- tion extraction: the state of the art at i2b2 2010. J. Am. Medical Informatics Assoc., 18(5):557-562.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "BERT: pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Volume 1 (Long and Short Papers), pages 4171-4186. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Don't stop pretraining: Adapt language models to domains and tasks", |
|
"authors": [ |
|
{ "first": "Suchin", "middle": [], "last": "Gururangan", "suffix": "" },

{ "first": "Ana", "middle": [], "last": "Marasovi\u0107", "suffix": "" },

{ "first": "Swabha", "middle": [], "last": "Swayamdipta", "suffix": "" },

{ "first": "Kyle", "middle": [], "last": "Lo", "suffix": "" },

{ "first": "Iz", "middle": [], "last": "Beltagy", "suffix": "" },

{ "first": "Doug", "middle": [], "last": "Downey", "suffix": "" },

{ "first": "Noah", "middle": ["A"], "last": "Smith", "suffix": "" }
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suchin Gururangan, Ana Marasovi\u0107, Swabha Swayamdipta, Kyle Lo, Iz Beltagy, Doug Downey, and Noah A. Smith. 2020. Don't stop pretraining: Adapt language models to domains and tasks. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Context: An algorithm for determining negation, experiencer, and temporal status from clinical reports", |
|
"authors": [ |
|
{ |
|
"first": "Henk", |
|
"middle": [], |
|
"last": "Harkema", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Dowling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tyler", |
|
"middle": [], |
|
"last": "Thornblade", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wendy", |
|
"middle": [ |
|
"Webber" |
|
], |
|
"last": "Chapman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "J. Biomed. Informatics", |
|
"volume": "42", |
|
"issue": "5", |
|
"pages": "839--851", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Henk Harkema, John N. Dowling, Tyler Thornblade, and Wendy Webber Chapman. 2009. Context: An al- gorithm for determining negation, experiencer, and temporal status from clinical reports. J. Biomed. In- formatics, 42(5):839-851.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "MIMIC-III, a Freely Accessible Critical Care Database", |
|
"authors": [ |
|
{ "first": "Alistair", "middle": ["E", "W"], "last": "Johnson", "suffix": "" },

{ "first": "Tom", "middle": ["J"], "last": "Pollard", "suffix": "" },

{ "first": "Lu", "middle": [], "last": "Shen", "suffix": "" },

{ "first": "H", "middle": ["Lehman"], "last": "Li-Wei", "suffix": "" },

{ "first": "Mengling", "middle": [], "last": "Feng", "suffix": "" },

{ "first": "Mohammad", "middle": [], "last": "Ghassemi", "suffix": "" },

{ "first": "Benjamin", "middle": [], "last": "Moody", "suffix": "" },

{ "first": "Peter", "middle": [], "last": "Szolovits", "suffix": "" },

{ "first": "Leo", "middle": ["Anthony"], "last": "Celi", "suffix": "" },

{ "first": "Roger", "middle": ["G"], "last": "Mark", "suffix": "" }
|
], |
|
"year": 2016, |
|
"venue": "Scientific Data", |
|
"volume": "3", |
|
"issue": "1", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alistair EW Johnson, Tom J Pollard, Lu Shen, H Lehman Li-Wei, Mengling Feng, Moham- mad Ghassemi, Benjamin Moody, Peter Szolovits, Leo Anthony Celi, and Roger G Mark. 2016. MIMIC-III, a Freely Accessible Critical Care Database. Scientific Data, 3(1):1-9.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Neg-BERT: A Transfer Learning Approach for Negation Detection and Scope Resolution", |
|
"authors": [ |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suraj", |
|
"middle": [], |
|
"last": "Sawant", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5739--5748", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aditya Khandelwal and Suraj Sawant. 2020. Neg- BERT: A Transfer Learning Approach for Negation Detection and Scope Resolution. In Proceedings of The 12th Language Resources and Evaluation Con- ference, pages 5739-5748.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Biobert: a pre-trained biomedical language representation model for biomedical text mining", |
|
"authors": [ |
|
{ |
|
"first": "Jinhyuk", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wonjin", |
|
"middle": [], |
|
"last": "Yoon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungdong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donghyeon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunkyu", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chan", |
|
"middle": [], |
|
"last": "Ho So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaewoo", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Bioinform", |
|
"volume": "36", |
|
"issue": "4", |
|
"pages": "1234--1240", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2020. Biobert: a pre-trained biomedical language representation model for biomedical text mining. Bioinform., 36(4):1234- 1240.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Pretrained language models for biomedical and clinical tasks: Understanding and extending the state-of-the-art", |
|
"authors": [ |
|
{ "first": "Patrick", "middle": ["S", "H"], "last": "Lewis", "suffix": "" },

{ "first": "Myle", "middle": [], "last": "Ott", "suffix": "" },

{ "first": "Jingfei", "middle": [], "last": "Du", "suffix": "" },

{ "first": "Veselin", "middle": [], "last": "Stoyanov", "suffix": "" }
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 3rd Clinical Natural Language Processing Workshop", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "146--157", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Patrick S. H. Lewis, Myle Ott, Jingfei Du, and Veselin Stoyanov. 2020. Pretrained language models for biomedical and clinical tasks: Understanding and extending the state-of-the-art. In Proceedings of the 3rd Clinical Natural Language Processing Work- shop, ClinicalNLP@EMNLP 2020, Online, Novem- ber 19, 2020, pages 146-157. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Roberta: A robustly optimized BERT pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized BERT pretraining ap- proach. CoRR, abs/1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Transfer learning in biomedical natural language processing: An evaluation of BERT and elmo on ten benchmarking datasets", |
|
"authors": [ |
|
{ |
|
"first": "Yifan", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shankai", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 18th BioNLP Workshop and Shared Task, BioNLP@ACL 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "58--65", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yifan Peng, Shankai Yan, and Zhiyong Lu. 2019. Transfer learning in biomedical natural language processing: An evaluation of BERT and elmo on ten benchmarking datasets. In Proceedings of the 18th BioNLP Workshop and Shared Task, BioNLP@ACL 2019, Florence, Italy, August 1, 2019, pages 58-65. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Challenges in Understanding Clinical Notes: Why NLP Engines Fall Short and Where Background Knowledge Can Help", |
|
"authors": [ |
|
{ |
|
"first": "Sujan", |
|
"middle": [], |
|
"last": "Perera", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Sheth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Krishnaprasad", |
|
"middle": [], |
|
"last": "Thirunarayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suhas", |
|
"middle": [], |
|
"last": "Nair", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Neil", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 International Workshop on Data Management & Analytics for Healthcare", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "21--26", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sujan Perera, Amit Sheth, Krishnaprasad Thirunarayan, Suhas Nair, and Neil Shah. 2013. Challenges in Understanding Clinical Notes: Why NLP Engines Fall Short and Where Background Knowledge Can Help. In Proceedings of the 2013 International Workshop on Data Management & Analytics for Healthcare, page 21-26.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Speculation and Negation Scope Detection via Convolutional Neural Networks", |
|
"authors": [ |
|
{ "first": "Zhong", "middle": [], "last": "Qian", "suffix": "" },

{ "first": "Peifeng", "middle": [], "last": "Li", "suffix": "" },

{ "first": "Qiaoming", "middle": [], "last": "Zhu", "suffix": "" },

{ "first": "Guodong", "middle": [], "last": "Zhou", "suffix": "" },

{ "first": "Zhunchen", "middle": [], "last": "Luo", "suffix": "" },

{ "first": "Wei", "middle": [], "last": "Luo", "suffix": "" }
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "815--825", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhong Qian, Peifeng Li, Qiaoming Zhu, Guodong Zhou, Zhunchen Luo, and Wei Luo. 2016. Specu- lation and Negation Scope Detection via Convolu- tional Neural Networks. In Proceedings of the 2016 Conference on Empirical Methods in Natural Lan- guage Processing, pages 815-825.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Negation Scope Detection in Clinical Notes and Scientific Abstracts: A Featureenriched LSTM-based Approach", |
|
"authors": [ |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Sergeeva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Henghui", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Prinsen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Tahmasebi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "AMIA Summits on Translational Science Proceedings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elena Sergeeva, Henghui Zhu, Peter Prinsen, and Amir Tahmasebi. 2019. Negation Scope Detection in Clinical Notes and Scientific Abstracts: A Feature- enriched LSTM-based Approach. AMIA Summits on Translational Science Proceedings, 2019:212.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Clinical outcome prediction from admission notes using self-supervised knowledge integration", |
|
"authors": [ |
|
{ "first": "Betty", "middle": [], "last": "van Aken", "suffix": "" },

{ "first": "Jens-Michalis", "middle": [], "last": "Papaioannou", "suffix": "" },

{ "first": "Manuel", "middle": [], "last": "Mayrdorfer", "suffix": "" },

{ "first": "Klemens", "middle": [], "last": "Budde", "suffix": "" },

{ "first": "Felix", "middle": ["A"], "last": "Gers", "suffix": "" },

{ "first": "Alexander", "middle": [], "last": "L\u00f6ser", "suffix": "" }
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics, EACL 2021", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Betty van Aken, Jens-Michalis Papaioannou, Manuel Mayrdorfer, Klemens Budde, Felix A. Gers, and Alexander L\u00f6ser. 2021. Clinical outcome prediction from admission notes using self-supervised knowl- edge integration. In Proceedings of the 16th Confer- ence of the European Chapter of the Association for Computational Linguistics, EACL 2021. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "The Bio-Scope Corpus: Biomedical Texts Annotated for Uncertainty, Negation and Their Scopes", |
|
"authors": [ |
|
{ |
|
"first": "Veronika", |
|
"middle": [], |
|
"last": "Vincze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gy\u00f6rgy", |
|
"middle": [], |
|
"last": "Szarvas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich\u00e1rd", |
|
"middle": [], |
|
"last": "Farkas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gy\u00f6rgy", |
|
"middle": [], |
|
"last": "M\u00f3ra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00e1nos", |
|
"middle": [], |
|
"last": "Csirik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "BMC bioinformatics", |
|
"volume": "9", |
|
"issue": "11", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Veronika Vincze, Gy\u00f6rgy Szarvas, Rich\u00e1rd Farkas, Gy\u00f6rgy M\u00f3ra, and J\u00e1nos Csirik. 2008. The Bio- Scope Corpus: Biomedical Texts Annotated for Un- certainty, Negation and Their Scopes. BMC bioin- formatics, 9(11):1-9.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Negation's not solved: generalizability versus optimizability in clinical natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Masanz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Coarr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Scott", |
|
"middle": [], |
|
"last": "Halgrim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Carrell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cheryl", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "PLoS One", |
|
"volume": "", |
|
"issue": "9", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Wu, Timothy Miller, James Masanz, Matt Coarr, Scott Halgrim, David Carrell, and Cheryl Clark. 2014. Negation's not solved: generalizabil- ity versus optimizability in clinical natural language processing. PLoS One, 11(9).", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"num": null, |
|
"text": "Sample output of our demo system. Detected entities are highlighted in red, yellow, and green to indicate present, possible, and absent.", |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "Results of baseline approaches and (medical) language models on the i2b2 Assertions Task. Pre-trained medical language models outperform all earlier approaches -with a large margin on the possible class. Note thatBhatia et al. (2019) only evaluated their model on negation detection.", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |