|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:35:22.159663Z" |
|
}, |
|
"title": "Towards Human-Centered Summarization: A Case Study on Financial News", |
|
"authors": [ |
|
{ |
|
"first": "Tatiana", |
|
"middle": [], |
|
"last": "Passali", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Aristotle University of Thessaloniki", |
|
"location": {} |
|
}, |
|
"email": "scpassali@csd.auth.gr" |
|
}, |
|
{ |
|
"first": "Alexios", |
|
"middle": [], |
|
"last": "Gidiotis", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Aristotle University of Thessaloniki", |
|
"location": {} |
|
}, |
|
"email": "gidiotis@csd.auth.gr" |
|
}, |
|
{ |
|
"first": "Efstathios", |
|
"middle": [], |
|
"last": "Chatzikyriakidis", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "stathis.chatzikyriakidis@medoid.ai" |
|
}, |
|
{ |
|
"first": "Grigorios", |
|
"middle": [], |
|
"last": "Tsoumakas", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Aristotle University of Thessaloniki", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Recent Deep Learning (DL) summarization models greatly outperform traditional summarization methodologies, generating highquality summaries. Despite their success, there are still important open issues, such as the limited engagement and trust of users in the whole process. In order to overcome these issues, we reconsider the task of summarization from a human-centered perspective. We propose to integrate a user interface with an underlying DL model, instead of tackling summarization as an isolated task from the end user. We present a novel system, where the user can actively participate in the whole summarization process. We also enable the user to gather insights into the causative factors that drive the model's behavior, exploiting the self-attention mechanism. We focus on the financial domain, in order to demonstrate the efficiency of generic DL models for domain-specific applications. Our work takes a first step towards a modelinterface co-design approach, where DL models evolve along user needs, paving the way towards human-computer text summarization interfaces.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Recent Deep Learning (DL) summarization models greatly outperform traditional summarization methodologies, generating highquality summaries. Despite their success, there are still important open issues, such as the limited engagement and trust of users in the whole process. In order to overcome these issues, we reconsider the task of summarization from a human-centered perspective. We propose to integrate a user interface with an underlying DL model, instead of tackling summarization as an isolated task from the end user. We present a novel system, where the user can actively participate in the whole summarization process. We also enable the user to gather insights into the causative factors that drive the model's behavior, exploiting the self-attention mechanism. We focus on the financial domain, in order to demonstrate the efficiency of generic DL models for domain-specific applications. Our work takes a first step towards a modelinterface co-design approach, where DL models evolve along user needs, paving the way towards human-computer text summarization interfaces.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The ever increasing amount of online text documents, such as blog posts, newswire articles and academic publications, during the last decades, has created the urgent need for appropriate natural language understanding tools. Summarization, i.e., shortening an initial text document by keeping only the most important information, plays a key role in addressing this information overload.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A lot of sophisticated summarization models have been proposed in the past, with a recent focus on Deep Learning (DL) architectures. DL models (See et al., 2017; Kry\u015bci\u0144ski et al., 2018; Celikyilmaz et al., 2018; Chen and Bansal, 2018; Liu and Lapata, 2019; Song et al., 2019; Zhang et al., 2020) achieve great results in the task of summarization, outperforming most of the previously used methods. Typical DL models involve sequence to sequence architectures with RNNs (Nallapati et al., 2016; See et al., 2017) often combined with attention mechanisms (Luong et al., 2015; Bahdanau et al., 2015) , as well as Transformers (Vaswani et al., 2017; Lewis et al., 2020; Raffel et al., 2020a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 161, |
|
"text": "(See et al., 2017;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 162, |
|
"end": 186, |
|
"text": "Kry\u015bci\u0144ski et al., 2018;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 187, |
|
"end": 212, |
|
"text": "Celikyilmaz et al., 2018;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 213, |
|
"end": 235, |
|
"text": "Chen and Bansal, 2018;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 236, |
|
"end": 257, |
|
"text": "Liu and Lapata, 2019;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 258, |
|
"end": 276, |
|
"text": "Song et al., 2019;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 277, |
|
"end": 296, |
|
"text": "Zhang et al., 2020)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 471, |
|
"end": 495, |
|
"text": "(Nallapati et al., 2016;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 496, |
|
"end": 513, |
|
"text": "See et al., 2017)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 555, |
|
"end": 575, |
|
"text": "(Luong et al., 2015;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 576, |
|
"end": 598, |
|
"text": "Bahdanau et al., 2015)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 625, |
|
"end": 647, |
|
"text": "(Vaswani et al., 2017;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 648, |
|
"end": 667, |
|
"text": "Lewis et al., 2020;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 668, |
|
"end": 689, |
|
"text": "Raffel et al., 2020a)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Despite the success of DL models, some significant challenges remain. The low interpretability of these models (Brunner et al., 2020; Vig and Belinkov, 2019; Serrano and Smith, 2019; Vashishth et al., 2019 ) is a major drawback that limits significantly the trust of users in the whole process.", |
|
"cite_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 133, |
|
"text": "(Brunner et al., 2020;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 134, |
|
"end": 157, |
|
"text": "Vig and Belinkov, 2019;", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 158, |
|
"end": 182, |
|
"text": "Serrano and Smith, 2019;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 183, |
|
"end": 205, |
|
"text": "Vashishth et al., 2019", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In addition, existing pipelines do not adequately engage the human in the summarization process (Trivedi et al., 2018; Shapira et al., 2017) , providing isolated and static predictions. The engagement of users and their feedback in the whole process can be a key factor in creating high-quality models and improving the quality of existing models (Stiennon et al., 2020; Ghandeharioun et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 118, |
|
"text": "(Trivedi et al., 2018;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 119, |
|
"end": 140, |
|
"text": "Shapira et al., 2017)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 347, |
|
"end": 370, |
|
"text": "(Stiennon et al., 2020;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 371, |
|
"end": 398, |
|
"text": "Ghandeharioun et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To overcome the above limitations, we revisit the task of neural summarization from a humancentered perspective and with a unifying view of user interfaces and underlying summarization models. More specifically, we present a system that allows the active involvement of the user, setting the basis for human-computer text summarization interfaces. Our system allows users to choose over different decoding strategies and control the number of alternative summaries that are generated. Users can give their feedback by combining parts of the different generated summaries as a target summary for the corresponding input. These summaries are recorded, and can then be used as additional training examples, which in turn will improve the performance of the model and customize it to the preferences of the users.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In addition, our system provides useful insights about the inner workings of the model, based on the self-attention mechanism of Transformers. Knowing which parts of the source document are most important for the generation of the final summary, can build up the trust between users and the machine.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We present a case study of the proposed system on the challenging, domain-specific task of financial articles summarization, to demonstrate the ability of the suggested approach to successfully employ generic DL models for domain-specific applications that often have different requirements. Indeed, domain-focused summarization models (Kan et al., 2001; Reeve et al., 2007) are generally more challenging, as they require deeper knowledge of the specific domain intricacies in order to generate salient summaries with logical entailment. To this end, we compiled a novel financial-focused dataset, which consists exclusively of financial articles from Bloomberg 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 336, |
|
"end": 354, |
|
"text": "(Kan et al., 2001;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 355, |
|
"end": 374, |
|
"text": "Reeve et al., 2007)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of this paper is structured as follows. The main features of the proposed human-centered system are detailed in Section 2. The case study on financial news summarization is presented in Section 3. Finally, conclusions and interesting future research directions are discussed in Section 4.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this section we will introduce the main features of our human-centered summarization system. We first present the approach used for interpreting the summaries generated by the model. Then we present the different decoding strategies we employ during inference. Finally, we explain how users can interact with our system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "HCI meets Summarization", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our interface assumes the existence of a Transformer-based model with self-attention (Vaswani et al., 2017) , which are the backbone of most modern summarization approaches. To provide insights into the produced summaries, we exploit the fact that the self-attention mechanism offers an implicit explanation about the factors that drive the behavior of the model. In particular, it helps the model identify input-output text dependencies by focusing on different parts of the input in order to generate the final sequence representation. This mechanism is typically combined with multiple attention heads. The attention weights of each head are concatenated with each other to compute the final weights.", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 107, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Peeking into the Black Box", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Extracting the weights of each encoder layer separately, gives us useful insights about the model's behavior. In particular, we observe that different layers give us different types of insights regarding the way that the model perceives natural language. The first layers tend to focus on named entities and phrases taking a whole picture of the text, while the last layers attend additionally prepositions and articles in order to learn the language structure. In order to provide an overview of the model, we average all the self-attention layers along with all their attention heads, giving the user an overall picture regarding the model's learning process.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Peeking into the Black Box", |
|
"sec_num": "2.1" |
|
}, |
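A minimal sketch of this extraction-and-averaging step, assuming the HuggingFace transformers library and the public google/pegasus-xsum checkpoint (the paper's own fine-tuned financial model is not bundled with this parse); the per-token salience follows the description above, while the normalization is our own illustrative choice:

```python
# Sketch: extract PEGASUS encoder self-attention and average it over all
# layers and heads, as described above. Assumes the HuggingFace
# `transformers` library and the public "google/pegasus-xsum" checkpoint;
# this is not the authors' exact implementation.
import torch
from transformers import PegasusForConditionalGeneration, PegasusTokenizer

name = "google/pegasus-xsum"
tokenizer = PegasusTokenizer.from_pretrained(name)
model = PegasusForConditionalGeneration.from_pretrained(name)

text = "Shares of the company rose after it reported record quarterly revenue."
inputs = tokenizer(text, return_tensors="pt", truncation=True)

with torch.no_grad():
    encoder_out = model.model.encoder(**inputs, output_attentions=True)

# encoder_out.attentions: one (batch, heads, seq, seq) tensor per layer.
attn = torch.stack(encoder_out.attentions)   # (layers, batch, heads, seq, seq)
attn = attn.mean(dim=(0, 2)).squeeze(0)      # average layers and heads -> (seq, seq)

# A token is salient if many other tokens attend to it: sum the attention
# it receives over all query positions, then normalize for highlighting.
salience = attn.sum(dim=0)
salience = salience / salience.max()

tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
for token, weight in zip(tokens, salience.tolist()):
    print(f"{token}\t{weight:.2f}")
```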
|
{ |
|
"text": "Assuming that a word which is attended by many words is more salient for the final decision of the model, we highlight the words according to their self-attention weights. Thus, high-weight words are strongly highlighted, while lower-weight words are faintly highlighted. This allows users to get a glimpse of where the model focuses on to generate the final summary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Peeking into the Black Box", |
|
"sec_num": "2.1" |
|
}, |
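The highlighting itself can then be a direct mapping from normalized weight to background opacity. A small self-contained sketch (the tokens and weights are toy values standing in for the output of the previous snippet, and the HTML styling is our own choice):

```python
# Sketch: render tokens as HTML, highlighting each one with an intensity
# proportional to its normalized attention weight (illustrative styling).
def highlight(tokens, weights, rgb="255, 200, 0"):
    spans = []
    for token, weight in zip(tokens, weights):
        # SentencePiece marks word starts with "\u2581"; render it as a space.
        text = token.replace("\u2581", " ")
        spans.append(
            f'<span style="background: rgba({rgb}, {weight:.2f})">{text}</span>'
        )
    return "".join(spans)

tokens = ["\u2581Shares", "\u2581rose", "\u2581after", "\u2581the", "\u2581report"]
weights = [0.9, 0.7, 0.2, 0.1, 0.8]  # toy normalized weights in [0, 1]

with open("attention.html", "w", encoding="utf-8") as f:
    f.write(highlight(tokens, weights))
```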
|
{ |
|
"text": "The selection of the right decoding strategy during inference can play a critical role in the whole process as it greatly affects the quality of a model's predictions (Holtzman et al., 2020) , with different decoding strategies exhibiting different behaviors (Ippolito et al., 2019) . Some decoding strategies, such as greedy search, suffer from redundancy issues (Shao et al., 2017) , while others, such as beam search, might generate almost identical hypotheses among the different generated beams (Gimpel et al., 2013) . Beam search is widely used in generative models, but there are also attempts that utilize other decoding mechanisms, such as top-k sampling (Fan et al., 2018b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 167, |
|
"end": 190, |
|
"text": "(Holtzman et al., 2020)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 282, |
|
"text": "(Ippolito et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 364, |
|
"end": 383, |
|
"text": "(Shao et al., 2017)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 500, |
|
"end": 521, |
|
"text": "(Gimpel et al., 2013)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 664, |
|
"end": 683, |
|
"text": "(Fan et al., 2018b)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoding Strategies", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Our system allows for the active involvement of users into the underlying summarization process, by offering them the opportunity to select among the following decoding strategies:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoding Strategies", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Random sampling selects randomly a token out of the word probability distribution. Often combined with a temperature parameter to control the entropy of the distribution (Ficler and Goldberg, 2017; Fan et al., 2018b; Caccia et al., 2020 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 199, |
|
"text": "(Ficler and Goldberg, 2017;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 218, |
|
"text": "Fan et al., 2018b;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 219, |
|
"end": 238, |
|
"text": "Caccia et al., 2020", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoding Strategies", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Top-k sampling limits the space of possible next tokens to the top-k higher-ranked tokens of the distribution (Fan et al., 2018b ) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 130, |
|
"text": "(Fan et al., 2018b", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoding Strategies", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Top-p or nucleus sampling selects the next token from a subset of tokens with cumulative probability up from a predefined threshold p (Holtzman et al., 2020) . It can also be combined with top-k sampling.", |
|
"cite_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 159, |
|
"text": "(Holtzman et al., 2020)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoding Strategies", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Greedy search selects the token with the highest probability at each time step.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoding Strategies", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Beam search selects not only the token with the highest probability at each time step, but also a number of tokens with the highest probability according to the beam width. The number of the final generated beams is equal to the beam width. Beam search with beam width set to 1 degenerates to greedy search.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoding Strategies", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Diverse beam search follows the beam search algorithm, but also adds a diversity penalty to enhance the diversity between the top most probable generated beams (Vijayakumar et al., 2016).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoding Strategies", |
|
"sec_num": "2.2" |
|
}, |
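A sketch of how these strategies map onto a generation API, assuming the HuggingFace transformers generate interface and the public google/pegasus-xsum checkpoint; all hyperparameter values below are illustrative defaults, not the system's configuration:

```python
# Sketch: the decoding strategies listed above, expressed as HuggingFace
# `generate` calls ("google/pegasus-xsum" and the hyperparameter values
# are illustrative assumptions, not the authors' settings).
from transformers import PegasusForConditionalGeneration, PegasusTokenizer

name = "google/pegasus-xsum"
tokenizer = PegasusTokenizer.from_pretrained(name)
model = PegasusForConditionalGeneration.from_pretrained(name)

article = "The central bank kept rates unchanged but signalled cuts later this year."
batch = tokenizer(article, return_tensors="pt", truncation=True)

strategies = {
    "random sampling": dict(do_sample=True, top_k=0, temperature=0.8),
    "top-k sampling": dict(do_sample=True, top_k=50),
    "top-p (nucleus) sampling": dict(do_sample=True, top_p=0.9, top_k=0),
    "greedy search": dict(do_sample=False, num_beams=1),
    "beam search": dict(num_beams=4, num_return_sequences=4),
    "diverse beam search": dict(
        num_beams=4, num_beam_groups=4, diversity_penalty=1.0,
        num_return_sequences=4,
    ),
}

for strategy, kwargs in strategies.items():
    output_ids = model.generate(**batch, max_length=64, **kwargs)
    print(strategy)
    for summary in tokenizer.batch_decode(output_ids, skip_special_tokens=True):
        print(" -", summary)
```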
|
{ |
|
"text": "The interaction of a user with our system consists of the following steps. It starts with the user entering the source text into a text box. Then users have the option to view the visualization of the attention weights, as well as choose a particular visualization color. Next users can select among the available decoding strategies, which also gives them the opportunity to change the default hyperparameters of each decoding strategy. Finally, they can click on a button to obtain the summaries. It is also possible for users to mix and match sentences from the alternative produced summaries, as well as enter their own text, in order to create a personalized summary. This summary can then be saved, and later be used for further fine-tuning of the model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User Interaction", |
|
"sec_num": "2.3" |
|
}, |
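For the last step, recording the user-composed summary can be as simple as appending (document, summary) pairs to a file; the file name and schema below are our own assumptions:

```python
# Sketch: store a user-edited summary as a future fine-tuning example
# (the JSONL path and field names are illustrative assumptions).
import json

def save_feedback(document: str, summary: str, path: str = "feedback.jsonl") -> None:
    record = {"document": document, "summary": summary}
    with open(path, "a", encoding="utf-8") as f:
        f.write(json.dumps(record, ensure_ascii=False) + "\n")

save_feedback("Full source article ...", "The user's personalized two-sentence summary.")
```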
|
{ |
|
"text": "In this section, we detail our experiments with the case study of financial summarization. We first describe the data collection process and the preprocessing steps we followed. Then we discuss the models that we constructed and their evaluation. Finally we discuss concrete examples of the user experience. The code and instructions for this case study of our system is publicly available 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study: Financial Summarization", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "2 https://bit.ly/human-centered-summarization-notebook ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study: Financial Summarization", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We compiled a novel collection of financial news articles along with human-written summaries using the Bloomberg Market and Financial News API by RapidAPI 3 . The articles concern different financial and business categories, such as stocks, markets, currency, rates, cryptocurrencies and industries. We removed outlier documents, i.e., relatively small (up to 70 tokens) and very large (over 3,000 tokens) ones. As most of the summaries consist of two sentences, we also removed single-sentence summaries to maintain a consistent target structure. Table 1 presents some basic statistics about our dataset before and after this simple pre-processing pipeline.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 548, |
|
"end": 555, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3.1" |
|
}, |
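The pre-processing above amounts to two simple filters; a sketch under the assumption that each example carries "document" and "summary" strings (whitespace token counts and a naive sentence splitter are our own stand-ins for details the paper does not specify):

```python
# Sketch of the pre-processing filters described above: drop articles with
# up to 70 or over 3,000 tokens, and drop single-sentence summaries.
import re

def keep(document: str, summary: str,
         min_tokens: int = 70, max_tokens: int = 3000) -> bool:
    n_tokens = len(document.split())
    sentences = [s for s in re.split(r"(?<=[.!?])\s+", summary.strip()) if s]
    return min_tokens < n_tokens <= max_tokens and len(sentences) >= 2

examples = [
    {"document": "word " * 500, "summary": "Profits rose sharply. Shares jumped."},
    {"document": "word " * 50, "summary": "Too short, and only one sentence."},
]
filtered = [ex for ex in examples if keep(ex["document"], ex["summary"])]
print(len(filtered))  # 1
```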
|
{ |
|
"text": "We use the recently proposed PEGASUS model (Zhang et al., 2020) , which is based on the transformer encoder-decoder architecture. It features 16 layers for both the encoder and the decoder, each of them with 16 attention heads. PEGASUS is already pre-trained on two large corpora, C4 (Raffel et al., 2020b) and HugeNews, and fine-tuned on 12 different downstream datasets. The model uses SentencePiece, a subword tokenizer (Kudo and Richardson, 2018) , which divides rare tokens into known subword units allowing for the efficient handling of unknown words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 63, |
|
"text": "(Zhang et al., 2020)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 450, |
|
"text": "(Kudo and Richardson, 2018)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3.2" |
|
}, |
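To illustrate the sub-word behavior, the tokenizer can be inspected directly (using the public google/pegasus-xsum vocabulary; the exact splits depend on the learned vocabulary):

```python
# Sketch: inspect how the SentencePiece tokenizer splits rare words into
# known sub-word units (splits depend on the learned vocabulary of the
# public "google/pegasus-xsum" checkpoint).
from transformers import PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
for word in ["markets", "cryptocurrencies", "Thessaloniki"]:
    print(word, "->", tokenizer.tokenize(word))
```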
|
{ |
|
"text": "We experimented with two models fine-tuned on two different newswire datasets respectively, namely Extreme Summarization (XSum) (Narayan et al., 2018) and CNN/Daily Mail (Hermann et al., 2015) . We used the open-sourced weights of these models to initialize our summarizers, and then further fine-tuned them on the collected financial dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 150, |
|
"text": "(Narayan et al., 2018)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 192, |
|
"text": "CNN/Daily Mail (Hermann et al., 2015)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3.2" |
|
}, |
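A minimal fine-tuning sketch along these lines, using a plain PyTorch loop over (document, summary) pairs; the checkpoint names are the public releases, and the hyperparameters are illustrative since the paper does not detail the training setup:

```python
# Sketch: further fine-tune a pre-trained PEGASUS summarizer on financial
# (document, summary) pairs. Batch size, learning rate and epoch count
# are illustrative assumptions.
import torch
from torch.utils.data import DataLoader
from transformers import PegasusForConditionalGeneration, PegasusTokenizer

name = "google/pegasus-xsum"  # or "google/pegasus-cnn_dailymail"
tokenizer = PegasusTokenizer.from_pretrained(name)
model = PegasusForConditionalGeneration.from_pretrained(name)

pairs = [
    ("Full financial article text ...", "Two-sentence target summary ..."),
]

def collate(batch):
    documents, summaries = zip(*batch)
    enc = tokenizer(list(documents), truncation=True, padding=True,
                    return_tensors="pt")
    labels = tokenizer(list(summaries), truncation=True, padding=True,
                       return_tensors="pt").input_ids
    labels[labels == tokenizer.pad_token_id] = -100  # ignore padding in the loss
    enc["labels"] = labels
    return enc

loader = DataLoader(pairs, batch_size=2, shuffle=True, collate_fn=collate)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

model.train()
for epoch in range(3):  # "only a few training epochs"
    for batch in loader:
        loss = model(**batch).loss
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
```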
|
{ |
|
"text": "We observed that both model variants quickly adapted to the new dataset, and after only a few training epochs they were capable of generating salient, non-redundant financially-focused summaries, which target explicit economic and business issues. Examples of the generated summaries before and after fine-tuning are shown in Figure 1 . Fine-tuning on our dataset, leads to an improvement in performance by approximately 10 ROUGE-1 (F1 score) points (Lin, 2004) for the XSum model, which is eventually used in our system. The evaluation results are shown in Table 2 . ", |
|
"cite_spans": [ |
|
{ |
|
"start": 451, |
|
"end": 462, |
|
"text": "(Lin, 2004)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 326, |
|
"end": 335, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 559, |
|
"end": 566, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3.2" |
|
}, |
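The reported gains can be reproduced in spirit with the rouge-score package (our choice of implementation; the paper follows the ROUGE definitions of Lin, 2004):

```python
# Sketch: compute ROUGE F1 scores for a generated summary against a
# reference, using the `rouge-score` package (our implementation choice).
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rouge1", "rouge2", "rougeL"],
                                  use_stemmer=True)

reference = "The company raised its full-year outlook. Shares climbed in early trading."
prediction = "The company lifted its outlook and its shares rose."

for metric, score in scorer.score(reference, prediction).items():
    print(f"{metric}: F1 = {score.fmeasure:.3f}")
```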
|
{ |
|
"text": "Fine-tuning R-1 R-2 R-L R-S R-1 R-2 R-L R-S", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "An example of the visualized self-attention weights is shown in Figure 2 . The model focuses on basic named entities of the source text, which are indeed important for the final generation. We also observe that different layer depths provide different insights regarding the model's learning process as shown in Figure 3 . For example, the first layers attempt to focus on every word of the input document in Figure 2 : Visualization of the encoder self-attention weights. The underscore before a token indicates the start of the token according to the subword tokenizer.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 72, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 312, |
|
"end": 320, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 417, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Samples from the User Experience", |
|
"sec_num": "3.3" |
|
}, |
|
|
{ |
|
"text": "Layer 16 order to capture phrases and sentences, while the last layers pay close attention to prepositions and articles attempting to learn language structure. An example of the output differentiation between different decoding strategies for the same input text is shown in Figure 4 . The different summaries that are generated by the model, demonstrate the value of selecting an appropriate decoding strategy for the final generation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 275, |
|
"end": 283, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Samples from the User Experience", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We presented a novel system for human-centered summarization that actively engages the user into the whole process of summarization, enabling personalized summaries and models. The users interact with the model, by entering a source text, selecting different decoding strategies, viewing a visualization of the model's attention and synthesizing a final summary from parts of multiple summaries, which can be used for further fine-tuning. We also presented a case study of our work, along with a novel dataset, on summarizing financial news. We observed that pre-trained PEGASUS models adapt quickly to our dataset, generating salient financially-focused summaries. Our work aims to inspire future research in human-centered techniques for neural summarization systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In future work, human involvement in the summarization process could be enhanced by using approaches that allow users to control different aspects of the generated summaries, such as length (Kikuchi et al., 2016; Liu et al., 2018; Takase and Okazaki, 2019; Fan et al., 2018a) , style (Fan et al., 2018a) or generation based on a specific entity of the text (He et al., 2020; Fan et al., 2018a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 212, |
|
"text": "(Kikuchi et al., 2016;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 213, |
|
"end": 230, |
|
"text": "Liu et al., 2018;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 256, |
|
"text": "Takase and Okazaki, 2019;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 257, |
|
"end": 275, |
|
"text": "Fan et al., 2018a)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 284, |
|
"end": 303, |
|
"text": "(Fan et al., 2018a)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 374, |
|
"text": "(He et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 375, |
|
"end": 393, |
|
"text": "Fan et al., 2018a)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The interface we designed can be also further extended, allowing the user to evaluate the generated summaries, assessing different aspects of the text, such as salience, readability and coherence. Finally, more advanced approaches can be explored for leveraging the user submitted feedback in order to further improve the underlying model (Lertvittayakumjorn et al., 2020; Li et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 339, |
|
"end": 372, |
|
"text": "(Lertvittayakumjorn et al., 2020;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 389, |
|
"text": "Li et al., 2016)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "https://www.bloomberg.com", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://rapidapi.com/marketplace", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyung", |
|
"middle": [ |
|
"Hyun" |
|
], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyung Hyun Cho, and Yoshua Bengio. 2015. Neural machine translation by jointly learning to align and translate. In Proceedings of the 2015 International Conference on Learning Rep- resentations.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "On identifiability in transformers", |
|
"authors": [ |
|
{ |
|
"first": "Gino", |
|
"middle": [], |
|
"last": "Brunner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Damian", |
|
"middle": [], |
|
"last": "Pascual", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oliver", |
|
"middle": [], |
|
"last": "Richter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Massimiliano", |
|
"middle": [], |
|
"last": "Ciaramita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "Wattenhofer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "8th International Conference on Learning Representations", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gino Brunner, Yang Liu, Damian Pascual, Oliver Richter, Massimiliano Ciaramita, and Roger Watten- hofer. 2020. On identifiability in transformers. In 8th International Conference on Learning Represen- tations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Language gans falling short", |
|
"authors": [ |
|
{ |
|
"first": "Massimo", |
|
"middle": [], |
|
"last": "Caccia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucas", |
|
"middle": [], |
|
"last": "Caccia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Fedus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hugo", |
|
"middle": [], |
|
"last": "Larochelle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joelle", |
|
"middle": [], |
|
"last": "Pineau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Charlin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Massimo Caccia, Lucas Caccia, William Fedus, Hugo Larochelle, Joelle Pineau, and Laurent Charlin. 2020. Language gans falling short. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Deep Communicating Agents for Abstractive Summarization", |
|
"authors": [ |
|
{ |
|
"first": "Asli", |
|
"middle": [], |
|
"last": "Celikyilmaz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bosselut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1662--1675", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1150" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Asli Celikyilmaz, Antoine Bosselut, Xiaodong He, and Yejin Choi. 2018. Deep Communicating Agents for Abstractive Summarization. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 1 (Long Pa- pers), pages 1662-1675, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Fast abstractive summarization with reinforce-selected sentence rewriting", |
|
"authors": [ |
|
{ |
|
"first": "Yen", |
|
"middle": [], |
|
"last": "Chun Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ACL 2018 -56th Annual Meeting of the Association for Computational Linguistics, Proceedings of the Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/p18-1063" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yen Chun Chen and Mohit Bansal. 2018. Fast abstrac- tive summarization with reinforce-selected sentence rewriting. In ACL 2018 -56th Annual Meeting of the Association for Computational Linguistics, Pro- ceedings of the Conference (Long Papers).", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Controllable abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2nd Workshop on Neural Machine Translation and Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "45--54", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-2706" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Angela Fan, David Grangier, and Michael Auli. 2018a. Controllable abstractive summarization. In Proceed- ings of the 2nd Workshop on Neural Machine Trans- lation and Generation, pages 45-54, Melbourne, Australia. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Hierarchical neural story generation", |
|
"authors": [ |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Dauphin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "889--898", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1082" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Angela Fan, Mike Lewis, and Yann Dauphin. 2018b. Hierarchical neural story generation. In Proceed- ings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), pages 889-898, Melbourne, Australia. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Controlling linguistic style aspects in neural language generation", |
|
"authors": [ |
|
{ |
|
"first": "Jessica", |
|
"middle": [], |
|
"last": "Ficler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Workshop on Stylistic Variation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "94--104", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-4912" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jessica Ficler and Yoav Goldberg. 2017. Controlling linguistic style aspects in neural language genera- tion. In Proceedings of the Workshop on Stylis- tic Variation, pages 94-104, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Approximating interactive human evaluation with self-play for open-domain dialog systems", |
|
"authors": [ |
|
{ |
|
"first": "Asma", |
|
"middle": [], |
|
"last": "Ghandeharioun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Judy", |
|
"middle": [ |
|
"Hanwen" |
|
], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Natasha", |
|
"middle": [], |
|
"last": "Jaques", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Craig", |
|
"middle": [], |
|
"last": "Ferguson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c0gata", |
|
"middle": [], |
|
"last": "Lapedriza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rosalind", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Picard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "13658--13669", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Asma Ghandeharioun, Judy Hanwen Shen, Natasha Jaques, Craig Ferguson, Noah Jones,\u00c0gata Lapedriza, and Rosalind W. Picard. 2019. Approxi- mating interactive human evaluation with self-play for open-domain dialog systems. In Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Sys- tems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pages 13658-13669.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "A systematic exploration of diversity in machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [], |
|
"last": "Shakhnarovich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1100--1111", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Gimpel, Dhruv Batra, Chris Dyer, and Gregory Shakhnarovich. 2013. A systematic exploration of diversity in machine translation. In Proceedings of the 2013 Conference on Empirical Methods in Natu- ral Language Processing, pages 1100-1111.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Nazneen Rajani, and Caiming Xiong. 2020. Ctrlsum: Towards generic controllable text summarization", |
|
"authors": [ |
|
{ |
|
"first": "Junxian", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Kry\u015bci\u0144ski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Mccann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2012.04281" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junxian He, Wojciech Kry\u015bci\u0144ski, Bryan McCann, Nazneen Rajani, and Caiming Xiong. 2020. Ctrl- sum: Towards generic controllable text summariza- tion. arXiv preprint arXiv:2012.04281.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Teaching machines to read and comprehend", |
|
"authors": [ |
|
{ |
|
"first": "Karl", |
|
"middle": [], |
|
"last": "Moritz Hermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Kocisky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Grefenstette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lasse", |
|
"middle": [], |
|
"last": "Espeholt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Kay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mustafa", |
|
"middle": [], |
|
"last": "Suleyman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "1693--1701", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karl Moritz Hermann, Tomas Kocisky, Edward Grefen- stette, Lasse Espeholt, Will Kay, Mustafa Suleyman, and Phil Blunsom. 2015. Teaching machines to read and comprehend. Advances in neural information processing systems, 28:1693-1701.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "The curious case of neural text degeneration", |
|
"authors": [ |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Holtzman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Buys", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxwell", |
|
"middle": [], |
|
"last": "Forbes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "8th International Conference on Learning Representations", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. 2020. The curious case of neural text degeneration. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Comparison of diverse decoding methods from conditional language models", |
|
"authors": [ |
|
{ |
|
"first": "Daphne", |
|
"middle": [], |
|
"last": "Ippolito", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Reno", |
|
"middle": [], |
|
"last": "Kriz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jo\u00e3o", |
|
"middle": [], |
|
"last": "Sedoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Kustikova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3752--3762", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1365" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daphne Ippolito, Reno Kriz, Jo\u00e3o Sedoc, Maria Kustikova, and Chris Callison-Burch. 2019. Com- parison of diverse decoding methods from condi- tional language models. In Proceedings of the 57th Annual Meeting of the Association for Computa- tional Linguistics, pages 3752-3762, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Domain-specific informative and indicative summarization for information retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Min-Yen", |
|
"middle": [], |
|
"last": "Kan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Mckeown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Judith", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Klavans", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Workshop on text summarization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Min-Yen Kan, Kathleen R McKeown, and Judith L Kla- vans. 2001. Domain-specific informative and indica- tive summarization for information retrieval. In In: Workshop on text summarization (DUC 2001. Cite- seer.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Controlling output length in neural encoder-decoders", |
|
"authors": [ |
|
{ |
|
"first": "Yuta", |
|
"middle": [], |
|
"last": "Kikuchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryohei", |
|
"middle": [], |
|
"last": "Sasano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroya", |
|
"middle": [], |
|
"last": "Takamura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manabu", |
|
"middle": [], |
|
"last": "Okumura", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1328--1338", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1140" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuta Kikuchi, Graham Neubig, Ryohei Sasano, Hiroya Takamura, and Manabu Okumura. 2016. Control- ling output length in neural encoder-decoders. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 1328-1338, Austin, Texas. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Improving abstraction in text summarization", |
|
"authors": [ |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Kry\u015bci\u0144ski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Romain", |
|
"middle": [], |
|
"last": "Paulus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1207" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wojciech Kry\u015bci\u0144ski, Romain Paulus, Caiming Xiong, and Richard Socher. 2018. Improving abstraction in text summarization. In Proceedings of the 2018", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Conference on Empirical Methods in Natural Language Processing", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1808--1817", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Conference on Empirical Methods in Natural Lan- guage Processing, pages 1808-1817, Brussels, Bel- gium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "SentencePiece: A simple and language independent subword tokenizer and detokenizer for neural text processing", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "66--71", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-2012" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo and John Richardson. 2018. SentencePiece: A simple and language independent subword tok- enizer and detokenizer for neural text processing. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 66-71, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "FIND: human-in-the-loop debugging deep text classifiers", |
|
"authors": [ |
|
{ |
|
"first": "Piyawat", |
|
"middle": [], |
|
"last": "Lertvittayakumjorn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francesca", |
|
"middle": [], |
|
"last": "Toni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "332--348", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.24" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piyawat Lertvittayakumjorn, Lucia Specia, and Francesca Toni. 2020. FIND: human-in-the-loop debugging deep text classifiers. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, EMNLP 2020, Online, November 16-20, 2020, pages 332-348. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal ; Abdelrahman Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7871--7880", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.703" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Mar- jan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre- training for natural language generation, translation, and comprehension. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Dialogue learning with human-in-the-loop", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumit", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc'aurelio", |
|
"middle": [], |
|
"last": "Ranzato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Alexander H. Miller, Sumit Chopra, Marc'Aurelio Ranzato, and Jason Weston. 2016. Di- alogue learning with human-in-the-loop. CoRR, abs/1611.09823.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Rouge: A package for automatic evaluation of summaries", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Text summarization branches out", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "74--81", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out, pages 74-81.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Text Summarization with Pretrained Encoders", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3721--3731", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/d19-1387" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Liu and Mirella Lapata. 2019. Text Summariza- tion with Pretrained Encoders. In Proceedings of the 2019 Conference on Empirical Methods in Nat- ural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3721-3731.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Controlling length in abstractive summarization using a convolutional neural network", |
|
"authors": [ |
|
{ |
|
"first": "Yizhu", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyi", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenny", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4110--4119", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1444" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yizhu Liu, Zhiyi Luo, and Kenny Zhu. 2018. Con- trolling length in abstractive summarization using a convolutional neural network. In Proceedings of the 2018 Conference on Empirical Methods in Nat- ural Language Processing, pages 4110-4119, Brus- sels, Belgium. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Effective approaches to attention-based neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1412--1421", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D15-1166" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang Luong, Hieu Pham, and Christopher D. Man- ning. 2015. Effective approaches to attention-based neural machine translation. In Proceedings of the 2015 Conference on Empirical Methods in Natu- ral Language Processing, pages 1412-1421, Lis- bon, Portugal. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Abstractive Text Summarization using Sequence-tosequence RNNs and Beyond", |
|
"authors": [ |
|
{ |
|
"first": "Ramesh", |
|
"middle": [], |
|
"last": "Nallapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cicero", |
|
"middle": [], |
|
"last": "dos Santos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caglar", |
|
"middle": [], |
|
"last": "G\u00fcl\u00e7ehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Xiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "280--290", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K16-1028" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramesh Nallapati, Bowen Zhou, Cicero dos Santos, Caglar G\u00fcl\u00e7ehre, and Bing Xiang. 2016. Ab- stractive Text Summarization using Sequence-to- sequence RNNs and Beyond. In Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning, pages 280-290, Stroudsburg, PA, USA. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Don't give me the details, just the summary! topic-aware convolutional neural networks for extreme summarization", |
|
"authors": [ |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shay", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1808.08745" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shashi Narayan, Shay B Cohen, and Mirella Lap- ata. 2018. Don't give me the details, just the summary! topic-aware convolutional neural net- works for extreme summarization. arXiv preprint arXiv:1808.08745.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Exploring the limits of transfer learning with a unified text-to-text transformer", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharan", |
|
"middle": [], |
|
"last": "Narang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Matena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanqi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "J. Mach. Learn. Res", |
|
"volume": "21", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020a. Exploring the limits of transfer learning with a unified text-to-text trans- former. J. Mach. Learn. Res., 21:140:1-140:67.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Exploring the limits of transfer learning with a unified text-totext transformer", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharan", |
|
"middle": [], |
|
"last": "Narang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Matena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanqi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "21", |
|
"issue": "140", |
|
"pages": "1--67", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Kather- ine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020b. Exploring the limits of transfer learning with a unified text-to- text transformer. Journal of Machine Learning Re- search, 21(140):1-67.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "The use of domain-specific concepts in biomedical text summarization. Information Processing & Management", |
|
"authors": [ |
|
{ |
|
"first": "Lawrence", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Reeve", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hyoil", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ari", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Brooks", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "43", |
|
"issue": "", |
|
"pages": "1765--1776", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lawrence H Reeve, Hyoil Han, and Ari D Brooks. 2007. The use of domain-specific concepts in biomedical text summarization. Information Pro- cessing & Management, 43(6):1765-1776.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Get To The Point: Summarization with Pointer-Generator Networks", |
|
"authors": [ |
|
{ |
|
"first": "Abigail", |
|
"middle": [], |
|
"last": "See", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1073--1083", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1099" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abigail See, Peter J Liu, and Christopher D Man- ning. 2017. Get To The Point: Summarization with Pointer-Generator Networks. In Proceedings of the 2017 Annual Meeting of the Association for Compu- tational Linguistics, pages 1073-1083.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Is attention interpretable?", |
|
"authors": [ |
|
{ |
|
"first": "Sofia", |
|
"middle": [], |
|
"last": "Serrano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2931--2951", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1282" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sofia Serrano and Noah A. Smith. 2019. Is attention interpretable? In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 2931-2951, Florence, Italy. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Generating high-quality and informative conversation responses with sequence-to-sequence models", |
|
"authors": [ |
|
{ |
|
"first": "Yuanlong", |
|
"middle": [], |
|
"last": "Shao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Gouws", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Denny", |
|
"middle": [], |
|
"last": "Britz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Goldie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Strope", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ray", |
|
"middle": [], |
|
"last": "Kurzweil", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2210--2219", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1235" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuanlong Shao, Stephan Gouws, Denny Britz, Anna Goldie, Brian Strope, and Ray Kurzweil. 2017. Gen- erating high-quality and informative conversation re- sponses with sequence-to-sequence models. In Pro- ceedings of the 2017 Conference on Empirical Meth- ods in Natural Language Processing, pages 2210- 2219, Copenhagen, Denmark. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Interactive abstractive summarization for event news tweets", |
|
"authors": [ |
|
{ |
|
"first": "Ori", |
|
"middle": [], |
|
"last": "Shapira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hadar", |
|
"middle": [], |
|
"last": "Ronen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meni", |
|
"middle": [], |
|
"last": "Adler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yael", |
|
"middle": [], |
|
"last": "Amsterdamer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Judit", |
|
"middle": [], |
|
"last": "Bar-Ilan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "109--114", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-2019" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ori Shapira, Hadar Ronen, Meni Adler, Yael Amster- damer, Judit Bar-Ilan, and Ido Dagan. 2017. Interac- tive abstractive summarization for event news tweets. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 109-114, Copenhagen, Den- mark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Abstractive text summarization using lstmcnn based deep learning", |
|
"authors": [ |
|
{ |
|
"first": "Shengli", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haitao", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tongxiao", |
|
"middle": [], |
|
"last": "Ruan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Multimedia Tools and Applications", |
|
"volume": "78", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s11042-018-5749-3" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shengli Song, Haitao Huang, and Tongxiao Ruan. 2019. Abstractive text summarization using lstm- cnn based deep learning. Multimedia Tools and Ap- plications, 78.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Learning to summarize from human feedback", |
|
"authors": [ |
|
{ |
|
"first": "Nisan", |
|
"middle": [], |
|
"last": "Stiennon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Long", |
|
"middle": [], |
|
"last": "Ouyang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Ziegler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Lowe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chelsea", |
|
"middle": [], |
|
"last": "Voss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Amodei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Christiano", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2009.01325" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nisan Stiennon, Long Ouyang, Jeff Wu, Daniel M Ziegler, Ryan Lowe, Chelsea Voss, Alec Radford, Dario Amodei, and Paul Christiano. 2020. Learning to summarize from human feedback. arXiv preprint arXiv:2009.01325.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Positional encoding to control output sequence length", |
|
"authors": [ |
|
{ |
|
"first": "Sho", |
|
"middle": [], |
|
"last": "Takase", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naoaki", |
|
"middle": [], |
|
"last": "Okazaki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3999--4004", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1401" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sho Takase and Naoaki Okazaki. 2019. Positional en- coding to control output sequence length. In Pro- ceedings of the 2019 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 1 (Long and Short Papers), pages 3999-4004, Minneapolis, Minnesota. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Nlpreviz: an interactive tool for natural language processing on clinical text", |
|
"authors": [ |
|
{ |
|
"first": "Gaurav", |
|
"middle": [], |
|
"last": "Trivedi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phuong", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wendy", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Chapman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Hwa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Janyce", |
|
"middle": [], |
|
"last": "Wiebe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harry", |
|
"middle": [], |
|
"last": "Hochheiser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "J. Am. Medical Informatics Assoc", |
|
"volume": "25", |
|
"issue": "1", |
|
"pages": "81--87", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1093/jamia/ocx070" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gaurav Trivedi, Phuong Pham, Wendy W. Chapman, Rebecca Hwa, Janyce Wiebe, and Harry Hochheiser. 2018. Nlpreviz: an interactive tool for natural lan- guage processing on clinical text. J. Am. Medical Informatics Assoc., 25(1):81-87.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Attention interpretability across nlp tasks. arXiv", |
|
"authors": [ |
|
{ |
|
"first": "Shikhar", |
|
"middle": [], |
|
"last": "Vashishth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shyam", |
|
"middle": [], |
|
"last": "Upadhyay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gaurav", |
|
"middle": [], |
|
"last": "Singh Tomar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manaal", |
|
"middle": [], |
|
"last": "Faruqui", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shikhar Vashishth, Shyam Upadhyay, Gaurav Singh Tomar, and Manaal Faruqui. 2019. Attention inter- pretability across nlp tasks. arXiv.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Analyzing the structure of attention in a transformer language model", |
|
"authors": [ |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Vig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--76", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-4808" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jesse Vig and Yonatan Belinkov. 2019. Analyzing the structure of attention in a transformer language model. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 63-76, Florence, Italy. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Diverse beam search: Decoding diverse solutions from neural sequence models", |
|
"authors": [ |
|
{ |
|
"first": "Ashwin", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Vijayakumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Cogswell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramprasath", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Selvaraju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qing", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Crandall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1610.02424" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashwin K Vijayakumar, Michael Cogswell, Ram- prasath R Selvaraju, Qing Sun, Stefan Lee, David Crandall, and Dhruv Batra. 2016. Diverse beam search: Decoding diverse solutions from neural se- quence models. arXiv preprint arXiv:1610.02424.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Pegasus: Pre-training with extracted gap-sentences for abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Jingqing", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yao", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Saleh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11328--11339", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingqing Zhang, Yao Zhao, Mohammad Saleh, and Pe- ter Liu. 2020. Pegasus: Pre-training with extracted gap-sentences for abstractive summarization. In In- ternational Conference on Machine Learning, pages 11328-11339. PMLR.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "Examples of generated summaries before and after fine-tuning.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "Self-attention weights of layers 1, 6 and 16.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"text": "Output for different decoding strategies.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"text": "Dataset Statistics", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>Initial Preprocessed</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"text": "Evaluation Results. We measure the F1 scores for ROUGE-1, ROUGE-2, ROUGE-L and ROUGE-S.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>CNN/Daily Mail model</td><td>XSum model</td></tr></table>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |