|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T10:38:35.698797Z" |
|
}, |
|
"title": "ARTEMIS: A Novel Annotation Methodology for Indicative Single Document Summarization", |
|
"authors": [ |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Jha", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Keping", |
|
"middle": [], |
|
"last": "Bi", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{
"first": "",
"middle": [],
"last": "Pakdaman",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Asli",
"middle": [],
"last": "Celikyilmaz",
"suffix": "",
"affiliation": {},
"email": ""
},
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Zhiboedov", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kieran", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We describe ARTEMIS (Annotation methodology for Rich, Tractable, Extractive, Multidomain, Indicative Summarization), a novel hierarchical annotation process that produces indicative summaries for documents from multiple domains. Current summarization evaluation datasets are single-domain and focused on a few domains for which naturally occurring summaries can be easily found, such as news and scientific articles. These are not sufficient for training and evaluation of summarization models for use in document management and information retrieval systems, which need to deal with documents from multiple domains. Compared to other annotation methods such as Relative Utility and Pyramid, ARTEMIS is more tractable because judges don't need to look at all the sentences in a document when making an importance judgment for one of the sentences, while providing similarly rich sentence importance annotations. We describe the annotation process in detail and compare it with other similar evaluation systems. We also present analysis and experimental results over a sample set of 532 annotated documents. \u2020 Work done while an intern at Microsoft. \u2021 Work done while an employee of Microsoft. Original Document (1) This content should be viewed as reference documentation only, to inform IT business decisions. .. (2) Microsoft employees need to stay aware of new company products, services, processes, and personnel-related developments in an organization that provides them. .. (3) The SMSG Readiness team at Microsoft developed a suite of applications that delivers training and information to Microsoft employees according to employee roles. .. (4) Microsoft Information Technology (Microsoft IT) is responsible for managing one of the largest Information Technology (IT) infrastructure environments in the world. (5) It consists of 95,000 employees working in 107 countries worldwide. (6) The Sales, Marketing, and Services Group (SMSG) at Microsoft is responsible for servicing the needs of Microsoft customers and partners. (7) It is essential that these 45,000 employees remain informed about products and services within their areas of expertise and, in turn, to educate and inform. .. (8) The SMSG Readiness (SMSGR) team at Microsoft is responsible for ensuring that SMSG employees have all of the tools and knowledge they require to deliver. .. (.. . document truncated) Summary 1 (2) Microsoft employees need to stay aware of new company products, services, processes, and. .. (3) The SMSG Readiness team at Microsoft developed a suite of applications that delivers training and. .. (4) Microsoft Information Technology (Microsoft IT) is responsible for managing one of the largest. .. Summary 2 (3) The SMSG Readiness team at Microsoft developed a suite of applications that delivers training and. .. (6) The Sales, Marketing, and Services Group (SMSG) at Microsoft is responsible for servicing the needs of. .. (8) The SMSG Readiness (SMSGR) team at Microsoft is responsible for ensuring that SMSG employees have. .. Summary 3 (2) Microsoft employees need to stay aware of new company products, services, processes, and. .. (4) Microsoft Information Technology (Microsoft IT) is responsible for managing one of the largest. .. (8) The SMSG Readiness (SMSGR) team at Microsoft is responsible for ensuring that SMSG employees have. . .", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We describe ARTEMIS (Annotation methodology for Rich, Tractable, Extractive, Multidomain, Indicative Summarization), a novel hierarchical annotation process that produces indicative summaries for documents from multiple domains. Current summarization evaluation datasets are single-domain and focused on a few domains for which naturally occurring summaries can be easily found, such as news and scientific articles. These are not sufficient for training and evaluation of summarization models for use in document management and information retrieval systems, which need to deal with documents from multiple domains. Compared to other annotation methods such as Relative Utility and Pyramid, ARTEMIS is more tractable because judges don't need to look at all the sentences in a document when making an importance judgment for one of the sentences, while providing similarly rich sentence importance annotations. We describe the annotation process in detail and compare it with other similar evaluation systems. We also present analysis and experimental results over a sample set of 532 annotated documents. \u2020 Work done while an intern at Microsoft. \u2021 Work done while an employee of Microsoft. Original Document (1) This content should be viewed as reference documentation only, to inform IT business decisions. .. (2) Microsoft employees need to stay aware of new company products, services, processes, and personnel-related developments in an organization that provides them. .. (3) The SMSG Readiness team at Microsoft developed a suite of applications that delivers training and information to Microsoft employees according to employee roles. .. (4) Microsoft Information Technology (Microsoft IT) is responsible for managing one of the largest Information Technology (IT) infrastructure environments in the world. (5) It consists of 95,000 employees working in 107 countries worldwide. (6) The Sales, Marketing, and Services Group (SMSG) at Microsoft is responsible for servicing the needs of Microsoft customers and partners. (7) It is essential that these 45,000 employees remain informed about products and services within their areas of expertise and, in turn, to educate and inform. .. (8) The SMSG Readiness (SMSGR) team at Microsoft is responsible for ensuring that SMSG employees have all of the tools and knowledge they require to deliver. .. (.. . document truncated) Summary 1 (2) Microsoft employees need to stay aware of new company products, services, processes, and. .. (3) The SMSG Readiness team at Microsoft developed a suite of applications that delivers training and. .. (4) Microsoft Information Technology (Microsoft IT) is responsible for managing one of the largest. .. Summary 2 (3) The SMSG Readiness team at Microsoft developed a suite of applications that delivers training and. .. (6) The Sales, Marketing, and Services Group (SMSG) at Microsoft is responsible for servicing the needs of. .. (8) The SMSG Readiness (SMSGR) team at Microsoft is responsible for ensuring that SMSG employees have. .. Summary 3 (2) Microsoft employees need to stay aware of new company products, services, processes, and. .. (4) Microsoft Information Technology (Microsoft IT) is responsible for managing one of the largest. .. (8) The SMSG Readiness (SMSGR) team at Microsoft is responsible for ensuring that SMSG employees have. . .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Given an input source document, summarization systems produce a condensed summary which can be either informative or indicative. Informative summaries try to convey all the important points of the document (Kan et al., 2002 (Kan et al., , 2001b , while indicative summaries hint at the topics of the document, pointing to information alerting the reader about the document content (Saggion and Lapalme, 2002) . An informative summary aims to replace the source document, so that the user does not need to read the full document (Edmundson, 1969) . An indicative summary, on the other hand, aims to Figure 1 : One of the documents from our web-crawled sample annotated dataset along with indicative summaries annotated by three different judges. The sentence numbers in round brackets are not in the original document but are added here for readability. Summary sentences are truncated for readability as well.", |
|
"cite_spans": [ |
|
{ |
|
"start": 206, |
|
"end": 223, |
|
"text": "(Kan et al., 2002", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 224, |
|
"end": 244, |
|
"text": "(Kan et al., , 2001b", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 381, |
|
"end": 408, |
|
"text": "(Saggion and Lapalme, 2002)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 528, |
|
"end": 545, |
|
"text": "(Edmundson, 1969)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 598, |
|
"end": 606, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "help the user decide whether they should consider reading the full document (Kan et al., 2002) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 94, |
|
"text": "(Kan et al., 2002)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The content of indicative summaries can be composed in several ways. For example, it can contain sentences extracted from the source document which relate to its main topic (Barzilay and Elhadad, 1997; Kupiec et al., 1995) , generated text describing how a document is different from other documents (Kan et al., 2001a) , topic keywords (Hovy and Lin, 1997; Saggion and Lapalme, 2002) as well as metadata such as length and writing style (Nenkova and McKeown, 2011) . Document management systems such as Google Docs, Microsoft OneDrive and SharePoint and Dropbox can use indicative summaries to help their users decide whether a given document is relevant for them before opening the full document. Indicative summaries can also be used in information retrieval systems as previews for documents returned in search results. Document summarization systems deployed in these real-world systems need to be able to summarize documents from a wide variety of domains.", |
|
"cite_spans": [ |
|
{ |
|
"start": 173, |
|
"end": 201, |
|
"text": "(Barzilay and Elhadad, 1997;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 202, |
|
"end": 222, |
|
"text": "Kupiec et al., 1995)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 319, |
|
"text": "(Kan et al., 2001a)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 357, |
|
"text": "(Hovy and Lin, 1997;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 358, |
|
"end": 384, |
|
"text": "Saggion and Lapalme, 2002)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 438, |
|
"end": 465, |
|
"text": "(Nenkova and McKeown, 2011)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, existing summarization datasets are highly domain-specific, with a majority of them focusing on news summarization (Nallapati et al., 2016; Grusky et al., 2018; Sandhaus, 2008; Graff et al., 2003) . One of the reasons for this bias towards news summarization is the availability of naturally occurring summaries for news, which makes it easier to create large-scale summarization datasets automatically by scraping online sources. Apart from the domain bias, they are also susceptible to noise which can affect upto 5.92% of the data (Kryscinski et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 148, |
|
"text": "(Nallapati et al., 2016;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 169, |
|
"text": "Grusky et al., 2018;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 170, |
|
"end": 185, |
|
"text": "Sandhaus, 2008;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 186, |
|
"end": 205, |
|
"text": "Graff et al., 2003)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 543, |
|
"end": 568, |
|
"text": "(Kryscinski et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In order to train and evaluate multi-domain summarization models for use in document management systems, we need to build representative datasets geared towards this use case. Towards this goal, we present ARTEMIS (Annotation methodology for Rich, Tractable, Extractive, Multi-domain, Indicative Summarization), a hierarchical annotation process for indicative summarization of multidomain documents. Figure 1 shows a sample document crawled from the web with three annotated summaries obtained using ARTEMIS.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 401, |
|
"end": 409, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "ARTEMIS's hierarchical annotation process allows judges to create indicative summaries for long documents through divide-and-conquer. Judges successively summarize larger and larger chunks of a document in multiple stages, at each stage reusing sentences selected previously. The hierarchical process means that judges only look at a small set of sentences at each stage.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Compared to previous annotation methods, where judges need to consider all the document sentences together when building a summary (Tam et al., 2007) or create expensive semantic annotations (Nenkova and Passonneau, 2004) , ARTEMIS is a low-cost annotation approach that produces rich sentence importance annotations. Judges are able to use ARTEMIS to annotate documents averaging 1322 words (77 sentences) in 4.17 minutes on average, based on an initial sample of annotation tasks. This is almost twice the length of documents in summarization datasets such as CNN/Dailymail at 766 words (Nallapati et al., 2016) and NEWS-ROOM at 659 words (Grusky et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 131, |
|
"end": 149, |
|
"text": "(Tam et al., 2007)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 191, |
|
"end": 221, |
|
"text": "(Nenkova and Passonneau, 2004)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 589, |
|
"end": 613, |
|
"text": "(Nallapati et al., 2016)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 641, |
|
"end": 662, |
|
"text": "(Grusky et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "ARTEMIS's annotation process aims at selecting a set of sentences that contain relevant information about the main topics of a document rather than conveying all the relevant information in a document. Given this, summaries annotated by ARTEMIS are indicative in nature and suited for document management and information retrieval systems, where they can be used as part of document preview to help a user decide whether a document is relevant for them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of this paper is organized as follows. Section 2 describes the annotation process in detail and Section 3 relates our method to previous annotation methods for summarization. Section 4 presents a number of analyses characterizing the ARTEMIS annotation process in terms of label distribution and judge agreement by using a sample annotated document set. Section 5 presents evaluation results for a set of baseline summarization models on the sample annotated document set. Finally, Section 6 presents some concluding remarks and points to future work. Figure 2 shows a high-level diagram representing the annotation process for ARTEMIS. Given a document as input, the preprocessing step consists of first dividing the document into sections, each of which is further divided into paragraphs. The section and paragraph boundaries are computed based on a set of heuristics that depend on signals like explicit section headers as well as constraints on the number of sentences shown at each screen.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 561, |
|
"end": 569, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
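The segmentation step just described can be sketched as follows; this is a hypothetical illustration, not the authors' implementation: looks_like_header and MAX_SENTENCES_PER_SCREEN stand in for the unspecified header signals and per-screen sentence constraint.

```python
from typing import List

MAX_SENTENCES_PER_SCREEN = 8  # illustrative cap; the paper does not state the actual value


def looks_like_header(sentence: str) -> bool:
    """Crude stand-in for the paper's explicit-header signal: short line, no terminal punctuation."""
    return len(sentence.split()) <= 6 and not sentence.rstrip().endswith((".", "!", "?"))


def segment(sentences: List[str]) -> List[List[List[str]]]:
    """Split a flat sentence list into sections, each of which is a list of paragraphs."""
    sections: List[List[List[str]]] = []
    current_section: List[List[str]] = []
    current_paragraph: List[str] = []

    for sent in sentences:
        if looks_like_header(sent) and (current_paragraph or current_section):
            # Header signal: close the running paragraph/section and start a new section.
            if current_paragraph:
                current_section.append(current_paragraph)
            sections.append(current_section)
            current_section, current_paragraph = [], [sent]
            continue
        current_paragraph.append(sent)
        if len(current_paragraph) >= MAX_SENTENCES_PER_SCREEN:
            # Screen-size constraint: cap how many sentences a judge sees at once.
            current_section.append(current_paragraph)
            current_paragraph = []

    if current_paragraph:
        current_section.append(current_paragraph)
    if current_section:
        sections.append(current_section)
    return sections
```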
|
{ |
|
"text": "The hypothetical document in Figure 2 is di- Figure 2 : A schematic of ARTEMIS annotation process. A document is divided into sections and paragraphs. The judges summarize paragraphs, sections and the document hierarchically, at each step using sentences selected at the previous step.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 37, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 45, |
|
"end": 53, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Annotation Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "vided into two sections with two paragraphs each. The first section contains sentences {1 . . 6}, with two paragraphs containing sentences {1 . . 3} and {4 . . 6} respectively. The second section contains sentences {7 . . 12}, again with two paragraphs containing sentences {7 . . 9} and {10 . . 12}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "A salient sentence is defined as a sentence that includes a main concept or idea for summarizing the text, or a fact or an argument emphasized by the author * . Several example sentences are provided in the judge guidelines to help them distinguish salient sentences from non-salient sentences. At a high-level, the judges are trained to select sentences that allow a reader to decide whether to read the full document or not.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To summarize the document, judges proceed in a bottom-up manner starting from paragraphs (left-toright in Figure 2 ). A judge is first asked to summarize each paragraph in a section by selecting a few salient sentences. A minimum number of sentences are required for each paragraph-summary \u2020 . Once a paragraph has been summarized, the annotation continues to the next paragraph till paragraph-level summaries are created for all the paragraphs in a section. For the document in Figure 2 , the judge selected sentences {2, 3} for the first paragraph and sentences {4, 6} for the second paragraph.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 114, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 487, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Annotation Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Once all the paragraphs in a section are summarized, the judge is asked to create a summary for the entire section. However, the judge doesn't have to look at all the sentences in the section to build the section-level summary. Instead, they only select from the set of sentences previously selected to summarize the paragraphs of the section. For example, for summarizing the first section in Figure 2, the judge only needs to select from the set of sentences {2, 3, 4, 6}, instead of the entire set of sentences {1 . . 6} that comprise the section. In the example, the judge decided to use the sentences {3, 4, 6} for summarizing the first section.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 394, |
|
"end": 400, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Annotation Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Once a section is summarized, the annotation proceeds to the next section in a similar manner. Once all the sections of a document are summarized, the judge is asked to build the document summary by selecting from sentences that they had previously selected to build the section-level summaries. In Figure 2 , the judge selected sentences {3, 4, 7, 11} for the document level summary. Finally, the judge is asked to build a short summary for the document by selecting three most salient sentences from their document-level summary.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 299, |
|
"end": 307, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Annotation Methodology", |
|
"sec_num": "2" |
|
}, |
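As a concrete rendering of the paragraph-to-section-to-document cascade described above, the sketch below treats the judge as a callback; the data layout and the select_salient interface are illustrative assumptions, not released tooling.

```python
from typing import Callable, List, Sequence

# A document is a list of sections, a section is a list of paragraphs, and a
# paragraph is a list of sentence ids into the full document.
Document = List[List[List[int]]]
# A judge maps (candidate sentence ids, annotation level) to the ids they keep.
Judge = Callable[[Sequence[int], int], List[int]]


def annotate(document: Document, select_salient: Judge) -> dict:
    """Run the paragraph -> section -> document -> short summary cascade."""
    paragraph_summaries: List[List[int]] = []
    section_summaries: List[List[int]] = []

    for section in document:
        # 1) Summarize each paragraph of the section independently.
        per_paragraph = [select_salient(paragraph, 1) for paragraph in section]
        paragraph_summaries.extend(per_paragraph)
        # 2) The section summary is chosen only from paragraph-level selections.
        candidates = [s for para in per_paragraph for s in para]
        section_summaries.append(select_salient(candidates, 2))

    # 3) The document summary reuses only section-level selections.
    doc_candidates = [s for sec in section_summaries for s in sec]
    document_summary = select_salient(doc_candidates, 3)
    # 4) The short summary keeps at most three sentences from the document summary.
    short_summary = select_salient(document_summary, 4)[:3]

    return {
        "paragraph": paragraph_summaries,
        "section": section_summaries,
        "document": document_summary,
        "short": short_summary,
    }
```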
|
{ |
|
"text": "ARTEMIS's hierarchical annotation process considerably reduces the cognitive load on the judges. By reusing judgements made at previous steps, judges are able to successively summarize long documents by divide-and-conquer. For creating Document Sentences #Para #Sec #Doc #Short (1) This content should be viewed as reference documentation only, to inform IT . . . 0 0 0 0 (2) Microsoft employees need to stay aware of new company products, services, processes, and personnel-related developments in an organization that provides . . .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "(3) The SMSG Readiness team at Microsoft developed a suite of applications that delivers training and information to Microsoft employees according to employee . . .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "(4) Microsoft Information Technology (Microsoft IT) is responsible for managing one of the largest Information Technology (IT) infrastructure environments in the world.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "3", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "(5) It consists of 95,000 employees working in 107 countries worldwide. 0 0 0 0 (6) The Sales, Marketing, and Services Group (SMSG) at Microsoft is responsible for servicing the needs of Microsoft customers and partners.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4 2 2", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "3 1 1 1 (7)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4 2 2", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "It is essential that these 45,000 employees remain informed about products and services within their areas of expertise and, in turn, to educate and inform . . .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4 2 2", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "1 0 0 0 (8)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4 2 2", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The SMSG Readiness (SMSGR) team at Microsoft is responsible for ensuring that SMSG employees have all of the tools and knowledge they require to deliver . . .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4 2 2", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Table 1: Detailed view of the annotation for the web-crawled document shown in Figure 1 . Against each sentence, we show the number of judges that selected the sentence at paragraph, section, document and short summary stage.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 87, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "3 2 2", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "the document-level summary in the hypothetical example in Figure 2 , the judge only needs to look at the 6 sentences {3, 4, 6, 7, 8, 11} selected for the two section-level summaries, instead of having to go over the entire set of 12 sentences. Table 1 shows a more detailed view of the annotation for an actual document annotated through ARTEMIS with five judges (This is the same document that was used in Figure 1 ). For each of the first eight sentences in the document, it shows the number of judges that selected the sentence at paragraph, section, document and short summary stage. This table gives an insight into the kind of information available from the annotation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 66, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 244, |
|
"end": 251, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 407, |
|
"end": 415, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "3 2 2", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Sentences (1) and (5) were deemed by every judge as not salient. Sentence (3) was selected by four judges as salient up to document-summary level, but one of the judges dropped it at shortsummary level. Similarly, sentence (4) was selected at paragraph-summary level by five judges, but only two judges kept it till the document and short-summary level. In Section 4, we present statistics on a sample annotated document set that characterize the annotation process in more detail.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "3 2 2", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We now compare ARTEMIS with existing summarization evaluation methods. We start with discussing Relative Utility, which is most related to our methodology, and describe how ARTEMIS obtains similar judgments, but with a light-weight process where judges don't need to look at the entire input document when annotating a sentence. Following this, we discuss DUC evaluations, ROUGE and the Pyramid method. Finally, we discuss some of the recent trends in summarization evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Tam et al. 2007introduce Relative Utility (RU) as an evaluation metric to account for Summary Sentence Substitutability (SSS) problem in coselection metrics. Co-selection metrics are evaluation metrics for extractive summarization that depend on text unit overlap with ideal reference summaries created by judges. The SSS problem arises because the judges only provide information about the sentences that they selected for a fixed-length summary. However, other sentences in the document might be equally good candidates for the summary. Human judges often disagree about which are the top n% of the sentences in a document (Mani, 2001 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 625, |
|
"end": 636, |
|
"text": "(Mani, 2001", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relative Utility", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To address the SSS problem, in RU evaluation judges are asked to assign a utility score to each sentence in a document on a scale of 0 to 10. Given these utility scores, the score for any arbitrary extractive summary can be computed based on the utility of the sentences in the summary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relative Utility", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In RU, to assign the utility score to a sentence in the document, a judge needs to compare the sentence with every other sentence in the document. This can be difficult for long documents. ARTEMIS is a light-weight process that achieves an approximation of this. By assigning graded importance scores to paragraph, section, document and short summary level labels, we can obtain an approximate utility score for each sentence. For example scores {1, 2, 3, 4} could be assigned to sentences selected at paragraph, section, document and short summary level and a score of 0 could be assigned to sentences not selected at any level.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relative Utility", |
|
"sec_num": "3.1" |
|
}, |
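A minimal sketch of this approximation, assuming each judge's annotation is stored as the highest level at which a sentence was kept; the {0..4} scale and the averaging across judges follow the example in the text but are otherwise illustrative choices.

```python
# Highest annotation level at which each sentence was kept by a judge.
# 0 = never selected, 1 = paragraph, 2 = section, 3 = document, 4 = short summary.
LEVEL_SCORE = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}


def utility_scores(levels_per_judge: list[dict[int, int]], num_sentences: int) -> list[float]:
    """Average graded scores across judges to get an approximate per-sentence utility."""
    scores = []
    for sent_id in range(num_sentences):
        per_judge = [LEVEL_SCORE[judge.get(sent_id, 0)] for judge in levels_per_judge]
        scores.append(sum(per_judge) / len(levels_per_judge))
    return scores


def summary_utility(summary: list[int], scores: list[float]) -> float:
    """Relative-Utility-style score: utility of the chosen sentences over the best achievable."""
    best = sum(sorted(scores, reverse=True)[: len(summary)])
    return sum(scores[i] for i in summary) / best if best > 0 else 0.0
```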
|
{ |
|
"text": "3.2 DUC evaluations and ROUGE DUC (Document Understanding Conferences) were a series of conferences run to further progress in summarization. DUC 2001-2004 focused on single and multi-document summarization (Dang, 2005) . In DUC evaluation for summary content, first a single human judge creates a model summary for each document. The model summary is split automatically into content units. For evaluating a system generated summary, a human judge compares the sentences in the system summary with model content units and estimates the fact overlap.", |
|
"cite_spans": [ |
|
{ |
|
"start": 207, |
|
"end": 219, |
|
"text": "(Dang, 2005)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relative Utility", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The use of a single model summary in DUC evaluations raised concerns in the research community and led to the proposal of Pyramid evaluation, which we describe in Section 3.3. Lin (2004a) concluded that given enough samples, the use of single model summaries was valid, but using multiple model summaries increased correlation with human judgments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 176, |
|
"end": 187, |
|
"text": "Lin (2004a)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relative Utility", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In later years, DUC experimented with ROUGE (Lin, 2004b) , an automatic metric for summary evaluation that uses n-gram co-occurrence statistics for scoring system generated summaries against the model summaries. ROUGE is the standard automatic evaluation method used in recent summarization evaluations, which we describe in Section 3.4.", |
|
"cite_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 56, |
|
"text": "(Lin, 2004b)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relative Utility", |
|
"sec_num": "3.1" |
|
}, |
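For reference, a simplified unigram-overlap (ROUGE-1 style) F-score against multiple model summaries; this is a sketch of the idea, not the official ROUGE toolkit, and averaging over references is an illustrative choice.

```python
from collections import Counter


def rouge1_f(candidate: list[str], reference: list[str]) -> float:
    """Unigram F1 between a candidate and a single reference, both given as token lists."""
    cand, ref = Counter(candidate), Counter(reference)
    overlap = sum((cand & ref).values())  # clipped unigram overlap
    if overlap == 0:
        return 0.0
    precision = overlap / sum(cand.values())
    recall = overlap / sum(ref.values())
    return 2 * precision * recall / (precision + recall)


def rouge1_f_multi(candidate: list[str], references: list[list[str]]) -> float:
    """Score against several model summaries; averaging over references is one simple choice."""
    return sum(rouge1_f(candidate, ref) for ref in references) / len(references)
```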
|
{ |
|
"text": "In ARTEMIS, the sentences selected by judges for document or short-level summary can be used as model summaries for ROUGE evaluation, as we demonstrate in Section 5. In addition, the labels for sentences at different summary levels could be used to train a pair-wise sentence ranking system such as LambdaMart (Burges, 2010) or come up with more refined evaluation metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relative Utility", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Nenkova and Passonneau 2004introduced Pyramid method as a more reliable method for summary evaluation by incorporating the idea that no single best model summary exists. Given a set of humangenerated model summaries for a document, the Pyramid method starts by manually identifying Summary Content Units (SCUs) in the model summaries. A SCU represents a single unit of information (e.g. \"Two men were indicted\") which can have different surface realizations in different summaries (e.g. \"Court indicted two men\", \"Two men have been indicted\").", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pyramid evaluation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The weight of an SCU is the number of model summaries it appears in. Thus, an SCU appear-ing in five model summaries has a higher weight than an SCU appearing in three model summaries. Given the SCU inventory over all model summaries, the Pyramid score of a system generated summary is obtained based on the number and weights of the SCUs in the summary. Nenkova and Passonneau (2004) observe that the number of SCUs grows as the number of model summaries increases, confirming a similar observation by van Halteren and Teufel (2003) , supporting the claim that different judges deem different facts as important.", |
|
"cite_spans": [ |
|
{ |
|
"start": 355, |
|
"end": 384, |
|
"text": "Nenkova and Passonneau (2004)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 507, |
|
"end": 533, |
|
"text": "Halteren and Teufel (2003)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pyramid evaluation", |
|
"sec_num": "3.3" |
|
}, |
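A small sketch of the Pyramid scoring idea, assuming the expensive manual step of identifying and matching SCUs has already produced string identifiers; the scoring follows the observed-over-maximum formulation described above.

```python
from collections import Counter


def scu_weights(model_summary_scus: list[set[str]]) -> Counter:
    """Weight of an SCU = number of model summaries it appears in."""
    weights: Counter = Counter()
    for scus in model_summary_scus:
        weights.update(scus)
    return weights


def pyramid_score(system_scus: set[str], weights: Counter) -> float:
    """Observed SCU weight divided by the best weight achievable with the same number of SCUs."""
    observed = sum(weights[scu] for scu in system_scus)
    max_possible = sum(sorted(weights.values(), reverse=True)[: len(system_scus)])
    return observed / max_possible if max_possible else 0.0
```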
|
{ |
|
"text": "Finding SCUs in model summaries and then matching them to system summaries is an expensive semantic judgment task. Once created, the SCU inventory can be used to assign an importance weight to any sentence in a system generated extractive summary based on the weights of SCUs in it. Our methodology provides a cheaper method for assigning importance weight for each sentence in a document. In ARTEMIS, multiple judges select each sentence for multiple summaries at paragraph, section, document, and short-summary levels. These judgments provide a low-cost way of obtaining an importance weight for a sentence, without expensive SCU annotation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pyramid evaluation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Recent summarization evaluations are done using large scale datasets collected automatically from the web. Most of these datasets are from the news domain, including CNN/DailyMail (Nallapati et al., 2016) , NEWSROOM (Grusky et al., 2018) , New York Times (Sandhaus, 2008) and Gigaword (Rush et al., 2015) . Some of the other domains investigated are scientific articles (Cohan et al., 2018) , patents (Sharma et al., 2019) , and Reddit stories (Kim et al., 2019) . Datasets built from naturally occurring summaries found online tend to focus on domains for which manually written summaries are easily available such as news and scientific articles. These datasets are not sufficient for building a multidomain document summarization application. Additionally, given the nature of data collection, often only a single summary is available for each document. This makes error analysis of individual examples difficult because different judges might deem different information as summary-worthy (Louis and Nenkova, 2013) Summaries collected from online sources are also prone to noise. Kryscinski et al. (2019) manually inspected CNN/DailyMail and NEWSROOM datasets and found that the problem of noisy data affects upto 5.92% of the summaries in different splits. Examples of noise they found include links to other articles and news sources, placeholder texts, unparsed HTML code, and non-informative passages in the reference summaries. In ARTEMIS, such noisy text is excluded from annotation by explicit labeling of defective sentences. Hardy et al. (2019) proposed a new summarization evaluation approach called HIGHRES, which uses multiple judges to highlight salient information in original documents. Once the highlights are obtained, a system summary can be evaluated manually by asking judges to compare the system summary against highlights, or by a modified ROUGE evaluation that weighs n-grams by the number of times they were highlighted. HIGHRES is complementary to our hierarchical annotation approach and both the methods can be used together for obtaining rich summary annotations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 204, |
|
"text": "(Nallapati et al., 2016)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 237, |
|
"text": "(Grusky et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 255, |
|
"end": 271, |
|
"text": "(Sandhaus, 2008)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 285, |
|
"end": 304, |
|
"text": "(Rush et al., 2015)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 370, |
|
"end": 390, |
|
"text": "(Cohan et al., 2018)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 401, |
|
"end": 422, |
|
"text": "(Sharma et al., 2019)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 444, |
|
"end": 462, |
|
"text": "(Kim et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 992, |
|
"end": 1017, |
|
"text": "(Louis and Nenkova, 2013)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1083, |
|
"end": 1107, |
|
"text": "Kryscinski et al. (2019)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1537, |
|
"end": 1556, |
|
"text": "Hardy et al. (2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recent Trends in Summarization Evaluation", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "We present analysis on a sample dataset of 532 Microsoft Word documents crawled from the web with no domain restrictions, thus creating an opendomain dataset. We extracted the text from the Word documents for our annotation. The annotation framework does not rely on Word document format and can be used to annotate any document for which the raw text can be extracted.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotated Data Analysis", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The data was annotated by a set of managed judges who were trained extensively for ARTEMIS annotation process using detailed guidelines and illustrative examples. For additional quality control, we used a set of gold documents annotated by the development team for initial qualification tests for the judges as well as their ongoing evaluation. We divided the sample dataset into train, dev and test partitions, as shown in Table 3 : Average number of paragraphs and sections per document and the average number of sentences in each, along with the 95% confidence interval.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 424, |
|
"end": 431, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Annotated Data Analysis", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "stated, the statistics presented are computed over the dev partition. Table 3 shows how the sentences of a document are divided across paragraphs and sections for the annotations. On an average, there are about 7 sections and 15 paragraphs in each document. The number of sentences in each section averages about 12, while the number of sentences in each paragraph averages about 5. Note that when summarizing a section, a judge has to look at much smaller number of sentences than 12, thanks to the hierarchical annotation process.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 77, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Annotated Data Analysis", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "To understand where the salient sentences lie for the documents, we divide each document into 10 equally sized bins and plot what fraction of sentences selected for the doc-level summaries lie in each bin. Each bin on an average contains 8.34 \u00b1 0.38 sentences. Figure 3 shows the distribution of sentences selected for the doc-level summaries across the bins. More than 50% of the selected sentences lie in the first bin and more than 90% of the sentences lie in the first five bins. This shows that there is a bias for the summary sentences to be towards the first half of a document. However, the annotators don't form summaries by just selecting the first few sentences, as shown by the poor ROUGE-F1 scores obtained by the Lead-3 baseline in Section 5.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 261, |
|
"end": 269, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Distribution Statistics", |
|
"sec_num": "4.1" |
|
}, |
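The positional analysis behind Figure 3 amounts to decile binning of selected sentence positions; a sketch under the assumption that 0-based sentence indices and document lengths are available:

```python
def decile_distribution(selected_positions: list[int], doc_length: int, num_bins: int = 10) -> list[float]:
    """Fraction of selected sentences that fall into each equally sized position bin."""
    counts = [0] * num_bins
    for pos in selected_positions:  # 0-based sentence index within the document
        counts[min(pos * num_bins // doc_length, num_bins - 1)] += 1
    total = sum(counts)
    return [c / total if total else 0.0 for c in counts]
```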
|
{ |
|
"text": "Another characterization of the annotation system can be done based on what fraction of salient sentences selected at each stage make it to the next stage. Table 4 shows this for all the stages of annotation. Looking at the diagonal first, we see that 82.44% of the sentences selected as salient for paragraph-level summaries are also selected for section-level summaries, but only 69.57% of the sentences selected for section-level summaries are selected for document-level summaries. From document-level summaries to short summary level, again 84.57% of the salient sentences are kept. This shows that a larger number of sentences get filtered between the section and document level. Overall, only 48.51% of the sentences selected for paragraph-level summaries are used for the final three-sentence short summaries.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 156, |
|
"end": 163, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Distribution Statistics", |
|
"sec_num": "4.1" |
|
}, |
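The stage-to-stage retention fractions in Table 4 reduce to simple set ratios; a sketch, assuming each judge's selections are stored as sets of sentence ids per level:

```python
def retention(selected_lower: set[int], selected_higher: set[int]) -> float:
    """Fraction of sentences selected at a lower level that are kept at a higher level."""
    if not selected_lower:
        return 0.0
    return len(selected_lower & selected_higher) / len(selected_lower)


# Aggregating retention(paragraph, section), retention(section, document) and
# retention(document, short) over all judges and documents yields Table 4-style numbers.
```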
|
{ |
|
"text": "We compute Krippendorff's alpha over the entire annotated document set by treating each of paragraph, section, document and short summary level judgements as ordinal ratings. Across the set of all judges, the Krippendorff's alpha is 0.46. This is consistent with previous findings that summary content selection is a subjective task with moderate agreement (Mani, 2001 ). Nenkova and Passonneau (2004) report a Krippendorff's alpha of 0.81 for their annotations. However, they measure agreement on the task of assigning SCU's to words, which is a less subjective task than assigning importance to a content unit. They also use a distance metric for computing Krippendorff's alpha that takes into account SCU size, which is not described in detail in their paper. For additional agreement evaluation, we had 10 documents evaluated by two sets of judges. The first set of judges was comprised of 4 developers involved in the design of ARTEMIS and its guide-# Paragraph Section Document Short 1", |
|
"cite_spans": [ |
|
{ |
|
"start": 357, |
|
"end": 368, |
|
"text": "(Mani, 2001", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 372, |
|
"end": 401, |
|
"text": "Nenkova and Passonneau (2004)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Agreement Statistics", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "11.2 \u00b1 1.0 9.4 \u00b1 0.8 6.5 \u00b1 0.4 5.4 \u00b1 0.3 2 5.1 \u00b1 0.6 4.1 \u00b1 0.5 2.8 \u00b1 0.3 2.4 \u00b1 0.2 3 2.5 \u00b1 0.4 2.0 \u00b1 0.3 1.5 \u00b1 0.2 1.3 \u00b1 0.2 Table 5 : Average number of salient sentences at each stage corresponding to the minimum number of judges needed to mark a sentence as salient (out of a total of five judges) along with 95% confidence intervals.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 125, |
|
"end": 132, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Agreement Statistics", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "lines. The second set of judges was comprised of 5 managed judges trained for doing the annotations. For each set of judges, a sentence was considered to be selected for document-level summary if at least 2 judges selected it. Given these judgements, the Kappa score between the two sets of judges was 0.43, which is considered moderate agreement (Landis and Koch, 1977) . Table 5 shows the average number of sentences selected at the different annotation levels if we use a minimum of 1, 2, or 3 judges to mark a sentence as salient out of the 5 total judges that annotate each document. We see that with 2 judges, there is agreement for 2.4 sentences for the final short summary, which is restricted to 3 sentences per judge. Even with 3 judges, there is agreement on 1.3 sentences for the final short-summary level.", |
|
"cite_spans": [ |
|
{ |
|
"start": 347, |
|
"end": 370, |
|
"text": "(Landis and Koch, 1977)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 373, |
|
"end": 380, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Agreement Statistics", |
|
"sec_num": "4.2" |
|
}, |
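The agreement check between the two sets of judges can be computed with Cohen's kappa over the binary per-sentence decision ("selected for the document-level summary by at least 2 judges"); this generic sketch is not the authors' evaluation script.

```python
def cohens_kappa(labels_a: list[int], labels_b: list[int]) -> float:
    """Cohen's kappa for two equal-length binary label sequences (1 = selected, 0 = not)."""
    assert len(labels_a) == len(labels_b) and labels_a
    n = len(labels_a)
    observed = sum(a == b for a, b in zip(labels_a, labels_b)) / n
    p_a, p_b = sum(labels_a) / n, sum(labels_b) / n
    expected = p_a * p_b + (1 - p_a) * (1 - p_b)
    return 1.0 if expected == 1 else (observed - expected) / (1 - expected)
```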
|
{ |
|
"text": "We evaluate a number of baseline methods on the sample annotated document set, partitioned into train, dev and test as described in Table 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 132, |
|
"end": 139, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For these experiments, the document-level summary created by each judge for a document is treated as an independent reference summary and we evaluate the candidate summary against all the reference summaries using ROUGE-F scores.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 Lead-3 baseline selects first three sentences of a document as the summary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 Oracle scores are obtained using a jackknifed procedure. Reference summary from each judge is considered a predicted summary and evaluated against all the other reference summaries for the document. The Oracle ROUGE score is computed by averaging the scores for all judge summaries.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
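The Oracle computation described above is a leave-one-out loop over judge summaries; a sketch, where rouge_f is any multi-reference ROUGE-F implementation (for example, the unigram sketch given earlier):

```python
def oracle_score(judge_summaries: list[list[str]], rouge_f) -> float:
    """Jackknifed oracle: score each judge's summary against the remaining judges, then average.

    Assumes at least two judge summaries; rouge_f(candidate_tokens, list_of_reference_token_lists)
    can be any ROUGE-F implementation.
    """
    scores = []
    for i, predicted in enumerate(judge_summaries):
        references = judge_summaries[:i] + judge_summaries[i + 1:]
        scores.append(rouge_f(predicted, references))
    return sum(scores) / len(scores)
```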
|
{ |
|
"text": "\u2022 Cheng&Lapata (Cheng and Lapata, 2016) is an encoder-decoder summarization model where each sentence is first encoded using a CNN (Convolutional Neural Network). These sentence level encodings are then passed through an RNN (Recurrent Neural Network) to create contextual encodings for each sentence. The encoding for the final sentence of the document is fed into the decoder, which uses another RNN with attention over input sentence encodings to predict the label for each sentence. At each decoding step, the decoder state also depends on the probability of the previous sentence being part of summary.", |
|
"cite_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 39, |
|
"text": "(Cheng and Lapata, 2016)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 SummaRunner (Nallapati et al., 2017 ) uses a hierarchical RNN to compute contextual encodings for each sentence in the input. These encodings are average pooled and passed through a non-linear transformation to create an encoding for the document. In a second pass, a logistic layer makes a binary decision for each sentence based on the sentence encodings, the document representation as well as factors modeling previously selected summary sentences and sentence position.", |
|
"cite_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 37, |
|
"text": "(Nallapati et al., 2017", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 Seq2SeqRNN is a method introduced in Kedzie et al. (2018) that uses an RNN to encode the input sentences. A separate RNN based decoder is used to transform each sentence into a query vector which attends to the encoder output. The attention weighted encoder output and the decoder GRU output are used together to predict the output label.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We used the code released by Kedzie et al. (2018) for reproducing Cheng&Lapata, SummaRunner and Seq2SeqRNN systems. The ROUGE-F score for each system on the test data is shown in Table 6 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 49, |
|
"text": "Kedzie et al. (2018)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 186, |
|
"text": "Table 6", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The Lead baseline achieves a ROUGE-1 score of 44.94, which is significantly lower than the other systems as well as the Oracle. This shows that compared to news summarization, selecting the first few sentences is a much weaker baseline for open-domain summarization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The SummaRunner system does better than Cheng&Lapata, potentially due to its incorporating multiple signals for content, salience, novelty and position. Seq2SeqRNN performs the best, which is consistent with the results reported in Kedzie et al. (2018) . There is still a gap between these systems and the Oracle method, which achieves a ROUGE-1 score of 73.28.", |
|
"cite_spans": [ |
|
{ |
|
"start": 232, |
|
"end": 252, |
|
"text": "Kedzie et al. (2018)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper, we described ARTEMIS, a novel hierarchical annotation methodology for indicative, extractive summarization. We described the annotation process in detail and compared it with Relative Utility, DUC evaluation methodology, the Pyramid method as well as other recent methods for summary content evaluation. We also presented analysis over a sample annotated dataset to characterize various properties of annotation process such as distribution of salient sentences and judge agreement. Finally, we showed experimental results for a set of baseline summarization systems using the annotated dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concluding Remarks", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Indicative summaries are useful in a number of scenarios involving information triage such as document management and information retrieval systems. However, summarization models for such systems need to be able to summarize documents from multiple domains. Most existing summarization datasets are single-domain and focused towards news, and hence are not sufficient for training and evaluating models for these applications. ARTEMIS provides a low-cost methodology for annotating multi-domain indicative summaries compared to systems such as Pyramid and Relative Utility while producing similarly rich annotations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concluding Remarks", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "ARTEMIS summary annotations contain sentences that provide information about important topics in the document. The summaries are indicative because they do not aim to convey all the important points for a given information need, but instead, give a sense of what topics are covered in the document. The set of annotations in ARTEMIS can be seen as a coarse partitioning between important and non-important sentences in an input document. Thus, models trained on these annotations can also be used as an importance signal in a larger pipeline for creating informative summaries.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concluding Remarks", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "* Authors can emphasize sentences either through formatting or discourse cues.\u2020 The judge can also mark incomplete and grammatically incorrect sentences as defective, which are not counted when computing the minimum threshold for paragraph-summary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Using lexical chains for text summarization", |
|
"authors": [ |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Elhadad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proceedings of the ACL Workshop on Intelligent Scalable Text Summarization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "10--17", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Regina Barzilay and Michael Elhadad. 1997. Using lexical chains for text summarization. In In Proceed- ings of the ACL Workshop on Intelligent Scalable Text Summarization, pages 10-17.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "From ranknet to lambdarank to lambdamart: An overview", |
|
"authors": [ |
|
{
"first": "Chris",
"middle": [
"J",
"C"
],
"last": "Burges",
"suffix": ""
}
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris J.C. Burges. 2010. From ranknet to lambdarank to lambdamart: An overview. Technical Report MSR-TR-2010-82.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Neural summarization by extracting sentences and words", |
|
"authors": [ |
|
{ |
|
"first": "Jianpeng", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "484--494", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1046" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianpeng Cheng and Mirella Lapata. 2016. Neural sum- marization by extracting sentences and words. In Proceedings of the 54th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 484-494, Berlin, Germany. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A discourse-aware attention model for abstractive summarization of long documents", |
|
"authors": [ |
|
{
"first": "Arman",
"middle": [],
"last": "Cohan",
"suffix": ""
},
{
"first": "Franck",
"middle": [],
"last": "Dernoncourt",
"suffix": ""
},
{
"first": "Doo Soon",
"middle": [],
"last": "Kim",
"suffix": ""
},
{
"first": "Trung",
"middle": [],
"last": "Bui",
"suffix": ""
},
{
"first": "Seokhwan",
"middle": [],
"last": "Kim",
"suffix": ""
},
{
"first": "Walter",
"middle": [],
"last": "Chang",
"suffix": ""
},
{
"first": "Nazli",
"middle": [],
"last": "Goharian",
"suffix": ""
}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "615--621", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2097" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arman Cohan, Franck Dernoncourt, Doo Soon Kim, Trung Bui, Seokhwan Kim, Walter Chang, and Na- zli Goharian. 2018. A discourse-aware attention model for abstractive summarization of long docu- ments. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 2 (Short Papers), pages 615-621, New Orleans, Louisiana. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Overview of duc 2005", |
|
"authors": [ |
|
{ |
|
"first": "Hoa", |
|
"middle": [ |
|
"Trang" |
|
], |
|
"last": "Dang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the Document Understanding Conf. Wksp. 2005 (DUC 2005) at the Human Language Technology Conf./Conf. on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hoa Trang Dang. 2005. Overview of duc 2005. In In Proceedings of the Document Understanding Conf. Wksp. 2005 (DUC 2005) at the Human Language Technology Conf./Conf. on Empirical Methods in Natural Language Processing (HLT/EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "New methods in automatic extracting", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Edmundson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1969, |
|
"venue": "J. ACM", |
|
"volume": "16", |
|
"issue": "2", |
|
"pages": "264--285", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/321510.321519" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. P. Edmundson. 1969. New methods in automatic extracting. J. ACM, 16(2):264-285.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "English gigaword. Linguistic Data Consortium", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Graff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junbo", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ke", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuaki", |
|
"middle": [], |
|
"last": "Maeda", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Graff, Junbo Kong, Ke Chen, and Kazuaki Maeda. 2003. English gigaword. Linguistic Data Consortium, Philadelphia, 4(1):34.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Newsroom: A dataset of 1.3 million summaries with diverse extractive strategies", |
|
"authors": [ |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Grusky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mor", |
|
"middle": [], |
|
"last": "Naaman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "708--719", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Max Grusky, Mor Naaman, and Yoav Artzi. 2018. Newsroom: A dataset of 1.3 million summaries with diverse extractive strategies. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, pages 708-719, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Examining the consensus between human summaries: initial experiments with factoid analysis", |
|
"authors": [ |
|
{ |
|
"first": "Simone", |
|
"middle": [], |
|
"last": "Hans Van Halteren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Teufel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the HLT-NAACL 03 Text Summarization Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "57--64", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hans van Halteren and Simone Teufel. 2003. Examin- ing the consensus between human summaries: initial experiments with factoid analysis. In Proceedings of the HLT-NAACL 03 Text Summarization Workshop, pages 57-64.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "HighRES: Highlight-based reference-less evaluation of summarization", |
|
"authors": [ |
|
{ |
|
"first": "Hardy", |
|
"middle": [], |
|
"last": "Hardy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Vlachos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3381--3392", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1330" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hardy Hardy, Shashi Narayan, and Andreas Vlachos. 2019. HighRES: Highlight-based reference-less evaluation of summarization. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 3381-3392, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Automated text summarization in summarist", |
|
"authors": [ |
|
{ |
|
"first": "Eduard", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eduard H. Hovy and Chin-Yew Lin. 1997. Automated text summarization in summarist. In ACL 1997.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Using the annotated bibliography as a resource for indicative summarization", |
|
"authors": [ |
|
{ |
|
"first": "Min-Yen", |
|
"middle": [], |
|
"last": "Kan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Judith", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Klavans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Mckeown", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of LREC 2002", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Min-Yen Kan, Judith L. Klavans, and Kathleen R. Mck- eown. 2002. Using the annotated bibliography as a resource for indicative summarization. In In Pro- ceedings of LREC 2002, Las.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Applying natural language generation to indicative summarization", |
|
"authors": [ |
|
{ |
|
"first": "Min-Yen", |
|
"middle": [], |
|
"last": "Kan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Mckeown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Judith", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Klavans", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proc. of the EACL Workshop on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Min-Yen Kan, Kathleen R. McKeown, and Judith L. Klavans. 2001a. Applying natural language gener- ation to indicative summarization. In In Proc. of the EACL Workshop on Natural Language Generation, pages 1-9.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Domain-specific informative and indicative summarization for information retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Min-Yen", |
|
"middle": [], |
|
"last": "Kan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Mckeown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Judith", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Klavans", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Workshop on text summarization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1629--1636", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Min-Yen Kan, Kathleen R. McKeown, and Judith L. Klavans. 2001b. Domain-specific informative and indicative summarization for information retrieval. In In: Workshop on text summarization (DUC 2001, pages 1629-1636.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Content selection in deep learning models of summarization", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Kedzie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [], |
|
"last": "Mckeown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iii", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1818--1828", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1208" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Kedzie, Kathleen McKeown, and Hal Daum\u00e9 III. 2018. Content selection in deep learning models of summarization. In Proceedings of the 2018 Con- ference on Empirical Methods in Natural Language Processing, pages 1818-1828, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Abstractive summarization of Reddit posts with multi-level memory networks", |
|
"authors": [ |
|
{ |
|
"first": "Byeongchang", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hyunwoo", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gunhee", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2519--2531", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1260" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Byeongchang Kim, Hyunwoo Kim, and Gunhee Kim. 2019. Abstractive summarization of Reddit posts with multi-level memory networks. In Proceed- ings of the 2019 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 2519-2531, Min- neapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Neural text summarization: A critical evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Kryscinski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Shirish Keskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Mc-Cann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wojciech Kryscinski, Nitish Shirish Keskar, Bryan Mc- Cann, Caiming Xiong, and Richard Socher. 2019. Neural text summarization: A critical evaluation. ArXiv, abs/1908.08960.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A trainable document summarizer", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Kupiec", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Pedersen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francine", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Proceedings of the 18th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR 95", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/215206.215333" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julian Kupiec, Jan Pedersen, and Francine Chen. 1995. A trainable document summarizer. In Proceedings of the 18th Annual International ACM SIGIR Con- ference on Research and Development in Informa- tion Retrieval, SIGIR 95, page 6873, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "The measurement of observer agreement for categorical data", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Landis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Koch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1977, |
|
"venue": "Biometrics", |
|
"volume": "33", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.2307/2529310" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "JR Landis and GG Koch. 1977. The measurement of observer agreement for categorical data. Biometrics, 33(1):159174.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Looking for a few good metrics: Automatic summarization evaluation -how many samples are enough?", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin. 2004a. Looking for a few good metrics: Automatic summarization evaluation -how many samples are enough?", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "ROUGE: A package for automatic evaluation of summaries", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Text Summarization Branches Out", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "74--81", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin. 2004b. ROUGE: A package for auto- matic evaluation of summaries. In Text Summariza- tion Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Automatically assessing machine summary content without a gold standard", |
|
"authors": [ |
|
{ |
|
"first": "Annie", |
|
"middle": [], |
|
"last": "Louis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Computational Linguistics", |
|
"volume": "39", |
|
"issue": "2", |
|
"pages": "267--300", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/COLI_a_00123" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annie Louis and Ani Nenkova. 2013. Automatically assessing machine summary content without a gold standard. Computational Linguistics, 39(2):267- 300.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Summarization evaluation: An overview", |
|
"authors": [ |
|
{ |
|
"first": "Inderjeet", |
|
"middle": [], |
|
"last": "Mani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Inderjeet Mani. 2001. Summarization evaluation: An overview.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Summarunner: A recurrent neural network based sequence model for extractive summarization of documents", |
|
"authors": [ |
|
{ |
|
"first": "Ramesh", |
|
"middle": [], |
|
"last": "Nallapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feifei", |
|
"middle": [], |
|
"last": "Zhai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Thirty-First AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramesh Nallapati, Feifei Zhai, and Bowen Zhou. 2017. Summarunner: A recurrent neural network based se- quence model for extractive summarization of docu- ments. In Thirty-First AAAI Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Abstractive text summarization using sequence-to-sequence RNNs and beyond", |
|
"authors": [ |
|
{ |
|
"first": "Ramesh", |
|
"middle": [], |
|
"last": "Nallapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cicero", |
|
"middle": [], |
|
"last": "dos Santos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c7aglar", |
|
"middle": [], |
|
"last": "Gul\u00e7ehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Xiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "280--290", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K16-1028" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramesh Nallapati, Bowen Zhou, Cicero dos Santos, \u00c7 aglar Gul\u00e7ehre, and Bing Xiang. 2016. Abstrac- tive text summarization using sequence-to-sequence RNNs and beyond. In Proceedings of The 20th SIGNLL Conference on Computational Natural Lan- guage Learning, pages 280-290, Berlin, Germany. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Automatic summarization", |
|
"authors": [ |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [], |
|
"last": "Mckeown", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ani Nenkova and Kathleen McKeown. 2011. Auto- matic summarization.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Evaluating content selection in summarization: The pyramid method", |
|
"authors": [ |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Passonneau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics: HLT-NAACL 2004", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "145--152", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ani Nenkova and Rebecca Passonneau. 2004. Evaluat- ing content selection in summarization: The pyra- mid method. In Proceedings of the Human Lan- guage Technology Conference of the North Ameri- can Chapter of the Association for Computational Linguistics: HLT-NAACL 2004, pages 145-152, Boston, Massachusetts, USA. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "A neural attention model for abstractive sentence summarization", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumit", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "379--389", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D15-1044" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander M. Rush, Sumit Chopra, and Jason Weston. 2015. A neural attention model for abstractive sen- tence summarization. In Proceedings of the 2015 Conference on Empirical Methods in Natural Lan- guage Processing, pages 379-389, Lisbon, Portugal. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Generating indicative-informative summaries with sumum", |
|
"authors": [ |
|
{ |
|
"first": "Horacio", |
|
"middle": [], |
|
"last": "Saggion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guy", |
|
"middle": [], |
|
"last": "Lapalme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Computational Linguistics", |
|
"volume": "28", |
|
"issue": "4", |
|
"pages": "497--526", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/089120102762671963" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Horacio Saggion and Guy Lapalme. 2002. Generat- ing indicative-informative summaries with sumum. Computational Linguistics, 28(4):497-526.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "he new york times annotated corpus ldc2008t19", |
|
"authors": [ |
|
{ |
|
"first": "Evan", |
|
"middle": [], |
|
"last": "Sandhaus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Evan Sandhaus. 2008. he new york times annotated corpus ldc2008t19.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "BIG-PATENT: A large-scale dataset for abstractive and coherent summarization", |
|
"authors": [ |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2204--2213", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1212" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eva Sharma, Chen Li, and Lu Wang. 2019. BIG- PATENT: A large-scale dataset for abstractive and coherent summarization. In Proceedings of the 57th Annual Meeting of the Association for Computa- tional Linguistics, pages 2204-2213, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Single-document and multi-document summary evaluation using relative utility", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Tam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dragomir", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Radev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gunes", |
|
"middle": [], |
|
"last": "Erkan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Tam, Dragomir R. Radev, and Gunes Erkan. 2007. Single-document and multi-document sum- mary evaluation using relative utility. Technical Re- port CSE-TR-538-07, University of Michigan. De- partment of Electrical Engineering and Computer Science.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Distribution of sentences selected for doclevel summaries across 10 equally sized bins for each document.", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"text": "Sample dataset used for the data analysis in this paper.", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>rich summary annotations for open-domain docu-</td></tr><tr><td>ments with multiple judges.</td></tr></table>" |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"text": "Unless otherwise", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Partition</td><td>Average Count</td><td>Average number</td></tr><tr><td/><td>Per Document</td><td>of sentences</td></tr><tr><td>Section</td><td>6.92 \u00b1 0.91</td><td>12.04 \u00b1 0.72</td></tr><tr><td colspan=\"2\">Paragraph 15.25 \u00b1 2.17</td><td>5.46 \u00b1 0.06</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"text": "", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>: Filtration ratios for salient sentences between</td></tr><tr><td>different stages. For example, the first row (Paragraph)</td></tr><tr><td>shows what percentage of sentences selected at para-</td></tr><tr><td>graph level survive till section, document and short-</td></tr><tr><td>summary level. Table cells corresponding to filtration</td></tr><tr><td>between same or out-of-order stages in the pipeline are</td></tr><tr><td>colored gray.</td></tr></table>" |
|
}, |
|
"TABREF6": { |
|
"html": null, |
|
"text": "Results for different baselines on the test data.", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |