|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:25:39.006614Z" |
|
}, |
|
"title": "DiSCoL: Toward Engaging Dialogue Systems through Conversational Line Guided Response Generation", |
|
"authors": [ |
|
{ |
|
"first": "Sarik", |
|
"middle": [], |
|
"last": "Ghazarian", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Zixi", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern", |
|
"location": {} |
|
}, |
|
"email": "zixiliu@isi.edu" |
|
}, |
|
{ |
|
"first": "Tuhin", |
|
"middle": [], |
|
"last": "Chakrabarty", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Columbia University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Xuezhe", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern", |
|
"location": {} |
|
}, |
|
"email": "xuezhema@isi.edu" |
|
}, |
|
{ |
|
"first": "Aram", |
|
"middle": [], |
|
"last": "Galstyan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern", |
|
"location": {} |
|
}, |
|
"email": "galstyan@isi.edu" |
|
}, |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Having engaging and informative conversations with users is the utmost goal for opendomain conversational systems. Recent advances in transformer-based language models and their applications to dialogue systems have succeeded in generating fluent and human-like responses. However, those systems still lack control over the generation process toward producing contentful responses and achieving engaging conversations. To address this, we present DiSCoL (Dialogue Systems through Coversational Line guided response generation). DiSCoL is an open-domain dialogue system that leverages conversational lines (briefly convlines) as controllable and informative content-planning elements to guide the generation model in producing engaging and informative responses. Two primary modules in DiSCoL's pipeline are conditional generators trained for 1) predicting relevant and informative convlines for dialogue contexts and 2) generating high-quality responses conditioned on the predicted convlines. Users can also change the returned convlines to control the direction of the conversations toward topics that are more interesting for them. Through automatic and human evaluations, we demonstrate the efficiency of the convlines in producing engaging conversations.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Having engaging and informative conversations with users is the utmost goal for opendomain conversational systems. Recent advances in transformer-based language models and their applications to dialogue systems have succeeded in generating fluent and human-like responses. However, those systems still lack control over the generation process toward producing contentful responses and achieving engaging conversations. To address this, we present DiSCoL (Dialogue Systems through Coversational Line guided response generation). DiSCoL is an open-domain dialogue system that leverages conversational lines (briefly convlines) as controllable and informative content-planning elements to guide the generation model in producing engaging and informative responses. Two primary modules in DiSCoL's pipeline are conditional generators trained for 1) predicting relevant and informative convlines for dialogue contexts and 2) generating high-quality responses conditioned on the predicted convlines. Users can also change the returned convlines to control the direction of the conversations toward topics that are more interesting for them. Through automatic and human evaluations, we demonstrate the efficiency of the convlines in producing engaging conversations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Over the past decade, users have actively engaged with dialogue systems to fulfill a wide range of requirements. Task-oriented dialogue systems have assisted users in accomplishing specific tasks such as finding apartments (Gustafson et al., 2000) and restaurants (Gruenstein and Seneff, 2007) or even booking movie tickets (Li et al., 2017) . While, Open-domain dialogue systems have been extensively leveraged for psychotherapy counseling, entertainment, and even teaching foreign languages to users (Zhou et al., 2020; Oh et al., Figure 1: A dialogue context and its three responses generated based on DialoGPT and our proposed DiS-CoL system using originally inferred and manipulated convlines, respectively. DiSCoL leverages convlines (depicted in colored boxes) to guide the generation model to encapsulate those informative contents. Our demo enables the user to edit or remove the inferred convlines (shown in blue for edits and red for removal) to guide the conversation towards its desired directions. et al., 2020) . In this work, we focus on the second group.", |
|
"cite_spans": [ |
|
{ |
|
"start": 223, |
|
"end": 247, |
|
"text": "(Gustafson et al., 2000)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 293, |
|
"text": "(Gruenstein and Seneff, 2007)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 324, |
|
"end": 341, |
|
"text": "(Li et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 502, |
|
"end": 521, |
|
"text": "(Zhou et al., 2020;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{

"start": 522,

"end": 538,

"text": "Oh et al., 2017)",

"ref_id": "BIBREF16"

}
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In the context of open-domain dialogue systems, neural-network-based generative models have outperformed retrieval-based systems by generating diverse and novel responses. More recently, largescale language models with transformer-based architectures, such as GPT-2 (Radford et al., 2019) and BART , have advanced the state of the art in Natural Language Generation and Dialogue Systems. Such models can be further enhanced by fine-tuning them on task-specific data, as it is the case of DialoGPT (dialogue generative pre-trained transformer) (Zhang et al., 2019) , a neural conversational response generation model, trained on 147M conversation-like exchanges extracted from Reddit. Although responses generated by such models are fluent and locally coherent, they usually suffer from content poverty (e.g., generating non-informative content), which can negatively impact user engagement. Furthermore, these models do not allow the users to exert control on the generation process and guide the conversation to- To alleviate this issue, here we propose DiSCoL, an open-domain dialogue system, which leverages convlines as primary elements to add control for generating informative and content-rich responses. Convlines are abstract representations of utterances in the dialogues that can be used as content planning elements to form high-level content of an utterance and guide the generator to incorporate these informative units in the generation (See colored boxes in Figure 1 ). Content planning has been shown to be beneficial in the story generation task. These abstract representations known as storylines or story plots have been successful to guide the language models produce more coherent and fluent stories (Yao et al., 2019; Goldfarb-Tarrant et al., 2019; Fan et al., 2019; Goldfarb-Tarrant et al., 2020; Rashkin et al., 2020; Brahman et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 266, |
|
"end": 288, |
|
"text": "(Radford et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 543, |
|
"end": 563, |
|
"text": "(Zhang et al., 2019)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1721, |
|
"end": 1739, |
|
"text": "(Yao et al., 2019;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1740, |
|
"end": 1770, |
|
"text": "Goldfarb-Tarrant et al., 2019;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1771, |
|
"end": 1788, |
|
"text": "Fan et al., 2019;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1789, |
|
"end": 1819, |
|
"text": "Goldfarb-Tarrant et al., 2020;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1820, |
|
"end": 1841, |
|
"text": "Rashkin et al., 2020;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1842, |
|
"end": 1863, |
|
"text": "Brahman et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1473, |
|
"end": 1481, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "DiSCoL is composed of four main neuralnetwork-based modules (See Figure 3) . The first two modules are designed to extract entities and topics of the dialogue context. The third module is a fine-tuned conditional generator that learns to take the dialogue context and previously extracted information and predict convlines that would be leveraged in the response generator module. Similar to convline generator, response generator is a conditional auto-regressive language model that generates response conditioned on the dialogue context and its convlines, entities, and topics ex-tracted from previous modules. The middle block of Figure 1 exhibits the generated response for the inferred convlines shown in green boxes. In the interactive setting of our devised demo from which a snapshot is shown in Figure 2 , we provide the users with the facility to manipulate the predicted convlines to direct the conversation toward its topics of interest. The last block in Figure 1 depicts the removed and edited convlines (red and blue boxes) that led the generator to generate a slightly different response by taking into account the applied adjustments.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 74, |
|
"text": "Figure 3)", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 633, |
|
"end": 641, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 804, |
|
"end": 812, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 968, |
|
"end": 976, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We validate DiSCoL on the Topical chat dataset (Gopalakrishnan et al., 2019) using both human and automatic evaluations. Our results demonstrate the superiority of DiSCoL over DialoGPT in terms of generating higher quality responses, thus indicating the usefulness of convlines as dialogue control mechanisms for generating more engaging responses. We release the source code and trained models to facilitate the future dialogue research. 1", |
|
"cite_spans": [ |
|
{ |
|
"start": 47, |
|
"end": 76, |
|
"text": "(Gopalakrishnan et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The architecture of our proposed DiSCoL demo system and its modules are depicted in Figure 3 . A user converses with the system by writing an utterance as an input. This utterance passes through all the modules and in each module some new information such as its extracted entities, topics, and convlines are augmented. The last module, response generator, incorporates all this information to generate a response as the output of the system. In this section, we explain each module in detail.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 84, |
|
"end": 92, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "system Architecture", |
|
"sec_num": "2" |
|
}, |
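
{

"text": "The following minimal sketch (our illustration, not the system's released code; all function names are placeholders) summarizes how the four modules compose into this pipeline:\n\ndef discol_respond(utterance):\n    entities = extract_entities(utterance)  # entity extractor (Section 2.1)\n    topics = predict_topics(utterance)  # topic classifier (Section 2.2)\n    convlines = generate_convlines(utterance, entities, topics)  # Section 2.3\n    # In the demo, the user may edit or remove convlines at this point.\n    return generate_response(utterance, topics, convlines)  # Section 2.4",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "System Architecture",

"sec_num": "2"

},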
|
{ |
|
"text": "One of the principal components in the conversational systems is the set of entities that both interlocutors are interested to converse about. It is crucial that the system can identify the main entities from the dialogue context and try to continue the conversation by providing more relevant information or even expressing its opinions and impressions regarding them. Therefore, in DiSCoL we take the user's utterance as the dialogue context and extract its entities. This task is known as a named entity recognition (NER) task, where each token in the text is classified into one of the predefined classes such as a person, organization, location or other. Toward this goal, we leverage the BERT model (Devlin et al., 2019) fine-tuned on CoNLL-2003 dataset (Sang and De Meulder, 2003) , which is a well-known corpus for NER task. 2 We detokenize the output of the fine-tuned BERT model to get the original version of entities' tokens and disregard the predefined classes of entities since in our case they do not augment additional benefits. As shown in Figure 3 , all entities with labels other than O are returned from the entity extractor module.", |
|
"cite_spans": [ |
|
{ |
|
"start": 705, |
|
"end": 726, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 741, |
|
"end": 769, |
|
"text": "CoNLL-2003 dataset (Sang and", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 770, |
|
"end": 787, |
|
"text": "De Meulder, 2003)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 833, |
|
"end": 834, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1057, |
|
"end": 1065, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Entity Extractor", |
|
"sec_num": "2.1" |
|
}, |
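
{

"text": "As a concrete illustration of this module, the following is a minimal sketch (our code, not the authors' exact implementation; the checkpoint below is one of the public Huggingface BERT models fine-tuned on CoNLL-2003) of querying such an NER model:\n\nfrom transformers import pipeline\n\nner = pipeline(\"ner\", model=\"dbmdz/bert-large-cased-finetuned-conll03-english\", aggregation_strategy=\"simple\")\n\nutterance = \"Do you know Tom Brady?\"\n# Keep the surface form of every non-O span; the predicted class is discarded.\nentities = [span[\"word\"] for span in ner(utterance)]\nprint(entities)  # e.g. ['Tom Brady']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Entity Extractor",

"sec_num": "2.1"

},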
|
{ |
|
"text": "Knowing the topic that the user is enthusiastic to discuss is essential for the dialogue system to generate utterances about that specific topic. The blue box in Figure 3 represents the topic classifier that takes the user's utterance and predicts the most relevant topics from a predefined set. These topics are later used for predicting convlines and consequently generating responses.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 162, |
|
"end": 170, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Topic Classifier", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Due to the proven effectiveness of the BERT model (Devlin et al., 2019) and its wide applicability in many classification tasks, we incorporate it into the topic classifier module of DiSCoL. We finetune BERT model on pairs of utterances and their aligned topics with the main goal of minimizing the cross-entropy loss.", |
|
"cite_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 71, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topic Classifier", |
|
"sec_num": "2.2" |
|
}, |
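
{

"text": "A minimal sketch of this fine-tuning step, assuming utterances have already been paired with integer topic ids (the label count follows the nine topics of Section 3.1; the example label is illustrative):\n\nimport torch\nfrom transformers import BertTokenizer, BertForSequenceClassification\n\ntokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\n# 9 labels: the 8 Topical-Chat topics plus the added General topic.\nmodel = BertForSequenceClassification.from_pretrained(\"bert-base-uncased\", num_labels=9)\n\nbatch = tokenizer([\"Do you know Tom Brady?\"], return_tensors=\"pt\", padding=True)\nlabels = torch.tensor([1])  # illustrative id for the Sports topic\nloss = model(**batch, labels=labels).loss  # cross-entropy over topics\nloss.backward()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Topic Classifier",

"sec_num": "2.2"

},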
|
{ |
|
"text": "DiSCoL's main contribution is in the convline generator module that is depicted as the purple box in Figure 3 . Convlines are abstract representations or content plans of utterances throughout the conversation. These representations, which are also known as storylines or story plots in the context of story generation, have recently posited their efficiency in generating higher quality stories (Yao et al., 2019; Fan et al., 2019; Goldfarb-Tarrant et al., 2020; Rashkin et al., 2020) . Story generation models leverage plan-and-write framework that is successful in generating fluent and informative stories by the intervention of storylines as an intermediate step. In this work, we follow the same idea but in the context of conversational systems. In particular, we aim to show that the controlled generation of high-quality utterances by planning in advance and leveraging useful abstract-level convlines can be beneficial for dialogue systems as well.", |
|
"cite_spans": [ |
|
{ |
|
"start": 396, |
|
"end": 414, |
|
"text": "(Yao et al., 2019;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 415, |
|
"end": 432, |
|
"text": "Fan et al., 2019;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 433, |
|
"end": 463, |
|
"text": "Goldfarb-Tarrant et al., 2020;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 464, |
|
"end": 485, |
|
"text": "Rashkin et al., 2020)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 109, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Convline Generator", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "To compose the convlines as the main component in the convline generator module, we extract sequences of important words in each utterance from existing human-human conversational data. We use the YAKE (Campos et al., 2018) method that relies on the text's statistical features to extract the most important keywords of an utterance, as it has shown its superiority over other state-of-theart unsupervised approaches such as TF-IDF and RAKE (Rose et al., 2010) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 223, |
|
"text": "(Campos et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 441, |
|
"end": 460, |
|
"text": "(Rose et al., 2010)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convline Generator", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "To train the convline generator, we extract pairs of (u i , r i ) as a set of consecutive pairs of dialogue context utterances and their corresponding groundtruth responses in the human-human conversational data. For each dialogue context utterance (u i ), we extract its entities (e i ) and topics (t i ) using the entity extractor and topic classifier modules. Each response (r i ) is replaced by its convlines (c i ) obtained by the YAKE algorithm. The constructed input data are in (u i , e i , t i , c i ) format.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convline Generator", |
|
"sec_num": "2.3" |
|
}, |
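
{

"text": "A minimal sketch of this data construction (our code; extract_entities, predict_topics, and yake_convlines are placeholders for the modules of Sections 2.1, 2.2, and 3.2):\n\ndef build_training_examples(dialogue):\n    # dialogue: ordered list of utterances; consecutive pairs form (u_i, r_i).\n    examples = []\n    for u, r in zip(dialogue, dialogue[1:]):\n        e = extract_entities(u)  # entity extractor\n        t = predict_topics(u)  # topic classifier\n        c = yake_convlines(r)  # the response is replaced by its convlines\n        examples.append((u, e, t, c))\n    return examples",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Convline Generator",

"sec_num": "2.3"

},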
|
{ |
|
"text": "The convline generator is a conditional model that generates the most probable convlines given the provided dialogue context utterance together with its entities and topics. To this end, we apply BART , which is a state-ofthe-art pre-trained sequence-to-sequence generative model. It combines a bidirectional encoder as that of BERT (Devlin et al., 2019) to encode the input and a GPT like (Radford et al., 2018) autoregressive decoder model to generate convlines as the output. The top block in Figure 4 encapsulates the training process of the convlines module. We fine-tune BART on the constructed training data with the objective of minimizing the negative log likelihood shown in Equation 1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 333, |
|
"end": 354, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 390, |
|
"end": 412, |
|
"text": "(Radford et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 496, |
|
"end": 504, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Convline Generator", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L line_gen = \u2212log n i=1 P (c i |u i , t i , e i )", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Convline Generator", |
|
"sec_num": "2.3" |
|
}, |
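
{

"text": "A sketch of this objective in code (the authors fine-tune with fairseq, per footnote 3; this Huggingface-based illustration uses an assumed serialization of the utterance, topics, and entities into a single source sequence):\n\nfrom transformers import BartTokenizer, BartForConditionalGeneration\n\ntokenizer = BartTokenizer.from_pretrained(\"facebook/bart-large\")\nmodel = BartForConditionalGeneration.from_pretrained(\"facebook/bart-large\")\n\n# Source: utterance augmented with topics and entities; target: convlines.\nsource = \"Do you know Tom Brady? <topic> Sports <entity> Tom Brady\"\ntarget = \"greatest quarterback ; super bowl rings\"\n\nbatch = tokenizer(source, return_tensors=\"pt\")\nlabels = tokenizer(target, return_tensors=\"pt\").input_ids\nloss = model(**batch, labels=labels).loss  # negative log-likelihood of Eq. (1)\nloss.backward()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Convline Generator",

"sec_num": "2.3"

},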
|
{ |
|
"text": "During inference, the fine-tuned BART model takes the user's utterance augmented with its inferred entities and topics to predict the most probable convlines, as depicted in the bottom block of Figure 4 . We use top-k sampling (Fan et al., 2019) with k = 5 and a temperature of 0.7 for the generation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 228, |
|
"end": 246, |
|
"text": "(Fan et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 194, |
|
"end": 203, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Convline Generator", |
|
"sec_num": "2.3" |
|
}, |
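
{

"text": "Continuing the sketch above, these decoding settings map directly onto the generate call (an illustration, not the released code; the length budget is an assumption):\n\nout = model.generate(\n    **tokenizer(source, return_tensors=\"pt\"),\n    do_sample=True,\n    top_k=5,  # top-k sampling with k = 5\n    temperature=0.7,\n    max_length=64,\n)\nprint(tokenizer.decode(out[0], skip_special_tokens=True))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Convline Generator",

"sec_num": "2.3"

},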
|
{ |
|
"text": "The last module in DiSCoL system's pipeline is the response generator that is identical to convline generator except for the type of inputs and outputs. The response generator takes the dialogue context utterance, its convlines and topics as inputs and generates response conditioned on those data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Response Generator", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "L resp_gen = \u2212log n i=1 P (r i |u i , t i , c i ) (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Response Generator", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "During training, we provide utterances, their topics and convlines extracted from YAKE to the BART model and fine-tune this pre-trained conditional generator. As it is shown in Equation 2, the training objective is to maximize the probability of generating ground-truth responses given their context utterances, topics, and the convlines.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Response Generator", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "During inference, the generator attempts to produce the most probable responses that include convlines returned by the convline generator module.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Response Generator", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "We test our system on Topical-Chat dataset (Gopalakrishnan et al., 2019 ) that includes knowledge-grounded human-human conversations covering a set of 8 different topics. This dataset has been collected by employing Amazon Mechanical Turk (AMT) workers who have been provided with specific entities and some external knowledge (Wikipedia lead sections, Washington Post articles, or some Reddit fun facts) to chat about. Therefore, each utterance in the conversation is either based on provided knowledge sources or the user's personal knowledge. Overall, 261 popular entities spanning 8 various topics (Fashion, Sports, Books, Politics, General Entertainment, Music, Science & Technology and Movies) have been selected for the dataset collection. We add General topic for utterances (e.g. greetings) that do not include any specific contents such as \"hi, how are you today?\".", |
|
"cite_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 71, |
|
"text": "(Gopalakrishnan et al., 2019", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Implementation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Although each utterance in the Topical-Chat dataset comes from either provided external knowledge or interlocutor's personal knowledge about some specified entities, it lacks determined topic labels, which are necessary for DiSCoL modules. To infer topics, we first manually match all 261 entities in the external knowledge to one of the topics in the predefined set (Fashion, Sports, Books, Politics, and etc.). Next, we label all utterances talking about those entities to their corresponding topics. This simple labeling scheme produces topics for about 78% of the 188,378 (easy_set) total utterances. As an example, the utterance \"Do you know Tom Brady\" is about \"Tom Brady\" entity that is an indication of the \"Sports\" topic. Therefore, we label this utterance with the \"Sports\" topic.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topic Classification Data", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The remaining challenging utterances are mainly the continuation of the dialogue history without directly containing any entities. Take \"I guess they live up to their name then!\" as an example of such utterances with no mentioned entities. We pursue the following context-based heuristics to label such challenging_set utterances with their relevant topics. If the utterance's neighbors (utterances right before or after the current utterance) are from easy_set and both share the same entity, we assign that entity's topic to the current utterance, while in the case of neighbors containing different entities, we label the given utterance with both utterances' topics. If the previous rules do not apply to an utterance in the challenging_set, we use the most frequent topic in the dialog as its topic. In parallel to the above heuristics and in order to improve the quality of assigned topics, we also apply a keyword-based classifier that classifies challenging_set utterances with appropriate topics. The keyword-based classifier retrieves the most similar entity from the overall 261 entities to each utterance's keywords using their BERT embeddings. Then, the manually matched topics for the retrieved entity are assigned to the utterance. We only consider 5323 challenging_set utterances that their adapted labels based on both approaches: 1) context-based heuristics and 2) keyword-based classifier are the same (See statistics in Table 1 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1440, |
|
"end": 1447, |
|
"text": "Table 1", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Topic Classification Data", |
|
"sec_num": "3.1" |
|
}, |
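
{

"text": "A sketch of the context-based neighbor heuristic (our code; it assumes each utterance record carries an optional entity field, set to None for challenging_set utterances, and that entity_topics maps each of the 261 entities to its manually matched topics):\n\nfrom collections import Counter\n\ndef label_by_neighbors(i, utts, entity_topics):\n    prev, nxt = utts[i - 1], utts[i + 1]\n    if prev[\"entity\"] and nxt[\"entity\"]:\n        if prev[\"entity\"] == nxt[\"entity\"]:\n            return entity_topics[prev[\"entity\"]]  # shared entity: inherit its topics\n        return entity_topics[prev[\"entity\"]] | entity_topics[nxt[\"entity\"]]\n    # fallback: the most frequent topic in the dialogue\n    counts = Counter(t for u in utts if u[\"entity\"] for t in entity_topics[u[\"entity\"]])\n    return {counts.most_common(1)[0][0]}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Topic Classification Data",

"sec_num": "3.1"

},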
|
{ |
|
"text": "The remaining utterances shown in the last column of Table 1 are mainly general utterances for starting or ending conversations without any specific content such as \"Good Morning! How are you today?\" or \"It was nice chatting with you!\". We fine-tune the BERT model as the topic classifier for 10 epochs and get an accuracy of 85.55 on the validation set.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 60, |
|
"text": "Table 1", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Topic Classification Data", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Convlines are the central components in the training of the DiSCoL system. We leverage YAKE (Campos et al., 2018) importance score to tokens in a text by following an unsupervised approach that builds upon features extracted from the text (Campos et al., 2018) . In this model, a set of features are computed for each term in the text. Subsequently, a list of candidates (n-grams of tokens) is created. Next, the Levenshtein distance is used to remove duplicate keywords. Finally, the aggregation of token scores in each keyword is used to represent the keyword's score. Keywords with lower scores are returned as the text's salient convlines. We use YAKE to generate a contiguous sequence of 1, 2, and 3-grams candidate convlines. We extract 3-grams convlines, followed by extracting 2-grams and 1-gram that are not included in the previously returned keywords.", |
|
"cite_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 113, |
|
"text": "(Campos et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convline Generator Data", |
|
"sec_num": "3.2" |
|
}, |
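
{

"text": "A minimal sketch of this extraction with the public yake package (our code; the number of keywords kept per n-gram size is an assumption):\n\nimport yake\n\ndef convlines(utterance, per_n=3):\n    picked = []\n    for n in (3, 2, 1):  # longest n-grams first\n        extractor = yake.KeywordExtractor(n=n, top=per_n)\n        for keyword, score in extractor.extract_keywords(utterance):\n            # skip keywords already covered by a longer selected one\n            if not any(keyword.lower() in longer.lower() for longer in picked):\n                picked.append(keyword)\n    return picked",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Convline Generator Data",

"sec_num": "3.2"

},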
|
{ |
|
"text": "We fine-tune BART-large for both convlines and response generator models for 3 epochs and checkpoint the best epoch based on validation perplexity. 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convline Generator Data", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We evaluate the performance of DiSCoL system against DialoGPT, which is one of the strongest recent baselines that has shown its efficiency in generating consistent and relevant responses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "To explore the efficiency of our proposed controlled response generation, we apply both automatic and human evaluations. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Due to the multi-faceted nature of dialogue quality, it is necessary to do the evaluation from different aspects (See et al., 2019; Mehri and Eskenazi, 2020) . To this end, we compare the quality of DiSCoL and DialoGPT generated responses through computing different metrics. We conduct automatic evaluations and compute evaluation metrics on 23,530 consecutive utterance pairs (dialogue context utterances and their ground-truth responses) of the Topical chat test set. The measured metrics are averaged over all utterance pairs within the test set. We compute BLEU-3 (Papineni et al., 2002) to evaluate the similarity of generated responses to ground-truth responses based on the 3-grams overlaps. Due to the one-to-many essence of opendomain dialogue systems and the imperfection of such word-overlap metrics (Liu et al., 2016; Ghazarian et al., 2019; Mehri and Eskenazi, 2020) , we also focus on three main aspects: diversity, relevancy, and engagingness as better indications of systems performances.", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 131, |
|
"text": "(See et al., 2019;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 157, |
|
"text": "Mehri and Eskenazi, 2020)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 569, |
|
"end": 592, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 812, |
|
"end": 830, |
|
"text": "(Liu et al., 2016;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 831, |
|
"end": 854, |
|
"text": "Ghazarian et al., 2019;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 855, |
|
"end": 880, |
|
"text": "Mehri and Eskenazi, 2020)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic Evaluations", |
|
"sec_num": "4.2" |
|
}, |
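
{

"text": "For reference, BLEU-3 as used here can be computed with NLTK's sentence-level BLEU with uniform weights over 1- to 3-grams (a sketch; the paper does not specify its exact BLEU implementation, and the smoothing choice is ours):\n\nfrom nltk.translate.bleu_score import sentence_bleu, SmoothingFunction\n\nreference = \"he is the greatest quarterback of all time\".split()\nhypothesis = \"he is one of the greatest quarterbacks\".split()\n\nbleu3 = sentence_bleu([reference], hypothesis, weights=(1 / 3, 1 / 3, 1 / 3), smoothing_function=SmoothingFunction().method1)\nprint(round(bleu3, 4))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Automatic Evaluations",

"sec_num": "4.2"

},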
|
{ |
|
"text": "Diversity measures the percentage of distinct generated tokens by each model. Li et al. (2015) proposed distinct-2 that computes distinct bi-grams divided by the total number of generated words. Relevancy utilizes both dialogue context utterance and the generated response to deliberate how much it is relevant to the given utterance (Tao et al., 2018; Ghazarian et al., 2019) . We use the contextualized Ruber metric for this purpose (Ghazarian et al., 2019) . At the end, since in open-domain dialogue systems, it is necessary to have both relevant and interesting responses to make the user feel satisfied (Ghazarian et al., 2020) , we further validate systems based on the engagingness of responses. We compute engagingness as the probability score of the engaging class predicted by Ghazarian et al. (2020) 's proposed engagement classifier. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 94, |
|
"text": "Li et al. (2015)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 352, |
|
"text": "(Tao et al., 2018;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 353, |
|
"end": 376, |
|
"text": "Ghazarian et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 435, |
|
"end": 459, |
|
"text": "(Ghazarian et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 609, |
|
"end": 633, |
|
"text": "(Ghazarian et al., 2020)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 788, |
|
"end": 811, |
|
"text": "Ghazarian et al. (2020)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic Evaluations", |
|
"sec_num": "4.2" |
|
}, |
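
{

"text": "A minimal sketch of the distinct-2 computation, following the definition of Li et al. (2015) (our code):\n\ndef distinct_2(responses):\n    bigrams, total = set(), 0\n    for response in responses:\n        tokens = response.split()\n        total += len(tokens)\n        bigrams.update(zip(tokens, tokens[1:]))\n    # number of distinct bi-grams over the total number of generated words\n    return len(bigrams) / max(total, 1)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Automatic Evaluations",

"sec_num": "4.2"

},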
|
{ |
|
"text": "We extend our evaluations by running AMT experiments to report human judgments on the quality of system-generated responses. We randomly select 100 dialogue context utterances from the Topical chat test set. For each given dialogue context utterance, we ask three AMT workers to rate DiSCoL and DialoGPT's generated responses by keeping these systems anonymous. Participants rate the relevancy, engagingness, and overall quality of each response on a 5-point Likert scale (1 indicating irrelevant/not engaging and low-quality response). The statistics of the AMT experiment are shown in Table 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 587, |
|
"end": 594, |
|
"text": "Table 2", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Human Evaluations", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Automatic Evaluation. Figure 5 depicts the average scores of diversity, BLEU, relevancy, and engagingness resulted from automatic evaluation metrics for all the generated responses of DiSCoL and DialoGPT systems. The strength of DiSCoL is noticeable from its higher BLEU score and more diverse, relevant, and engaging responses. Overall, the diversity is low due to the limited distinct topics considered in the Topical chat dataset. The BLEU metric is low for both systems which shows its inadequacy in the open-domain evaluations; where a response can be super appropriate and at the same time not similar to the ground-truth response.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 30, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Human Evaluation. The bars in Figure 6 demonstrate the average of human annotations for different qualities of generated utterances. Each response's score is the mean aggregation of three annotators' ratings. According to Figure 6 , annotators appraise responses generated by DiSCoL with higher scores in terms of relevancy, engagingness, and overall quality. This could be an evidence for the positive impact of incorporating convlines to guide the dialogue system towards generating controllable, relevant, and contentful responses that infuse the user to converse for a longer time.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 38, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 222, |
|
"end": 230, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "We have introduced DiSCoL, an open domain dialogue system that leverages convline as an intermediate step toward generating more informative and controllable responses in dialogues. The convlines are predicted and subsequently leveraged in the response generation process. Additionally, DiS-CoL allows users to manipulate convlines towards their favorite conversational direction. Our findings show that in contrast to other transformer-based dialogue systems that do not incorporate content planning, DiSCoL takes the advantage of such a principled structure to generate better and more engaging conversations with users.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In the future, we imagine an open path of possible research in the controllable conversations which would guide the dialogue toward having pleasant features such as empathy and bias-free or even personalized convlines to generate dialogues with such aspects. It is also expecting to train dialogue models to converse by following specific styles such as generating formal conversations by predicting more formal convlines.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Through the entire phases of the conducted research and developed DiSCoL system, all coauthors were agreed and adhered to ACM Code of Ethics. Our effort was to ensure we stuck to the conscience of the profession and considered the Code principles. We certify that this system and all the presented evaluations are compatible with the provided code. In the following, we discuss two main spots in the development and evaluation of our system that could be targeted for encompassing abusive and improper conversations and having biased evaluations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ethics", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "DiSCoL System's Development The main contribution of our proposed DiSCoL system is to augment controllable response generation with the intervention of convlines that leads the generation towards producing more relevant and interesting responses. Indeed, DiSCoL provides an opportunity for users to manipulate the convlines and guide the system to continue the conversation in the user's favorite direction. All DiSCoL's modules leverage pre-trained large language models such as BART and fine-tune them on recently proposed Topical chat dataset (Gopalakrishnan et al., 2019) . One potential harm that DiSCoL could cause is its feasibility to generate improper responses conditioned on the inferred convlines with abusive contents. Since the convline and response generators are BART models finetuned on human-human conversations that do not encompass profanity and inappropriate content ((Gopalakrishnan et al., 2019)), hence the convlines that indeed are important informative units of the utterances would be free of bias and obscene content. However, there still is a possibility of dual-usage attacks by augmenting conversations with offensive languages to fine-tune the generators and teach them to generate such inappropriate content. The identification of such attacks that could occur in almost all learnable models and the way to overcome them by itself is a distinct and huge research area that is out of this paper's scope.", |
|
"cite_spans": [ |
|
{ |
|
"start": 546, |
|
"end": 575, |
|
"text": "(Gopalakrishnan et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ethics", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "DiSCoL System's Evaluation Alongside the automatic evaluation for demonstrating the efficiency of controllable generations using convlines, we further collected human annotations by conducting Amazon Mechanical Turk (AMT) experiments. We provided different systems responses for given utterances while keeping systems anonymous and asked users to rate responses by considering different aspects that had been explained in the AMT surveys. We estimated the average time users would spend on each survey and fairly compensated them according to the hourly wage.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ethics", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We kept the privacy of all AMT turkers who participated in the experiments. Our experiments did not have the requisite to know the user's personal information, therefore their personal information including their genre, ethnicity, and etc. are not revealed. This fades the necessity for IRB approvals.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ethics", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "At the end, we want to note that our system's target is NLP open-domain conversational AI community with the main goal of achieve engaging conversations with the incorporation of convlines and increasing the user's ability to control the generation process. Likewise other proposed dialogue systems, we anticipate specific failure modes specifically for novel conversations on new topics. Lifelong learning in dialogue systems which is not the focus of this work is a research area that attempts to enhance conversation systems' ability to deal with such novel scenarios.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ethics", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We leverage fine-tuned BERT model provided by Huggingface (https://github.com/huggingface/ transformers).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We fine-tune BART model using https://github. com/pytorch/fairseq", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work is supported by the CwC program under the Contract W911NF-15-1-0543 with the US Defense Advanced Research Projects Agency (DARPA) and is the result of a hackathon in PLUSlab from USC/UCLA. We would like to thank all members of PLUSlab from USC/UCLA, specifically Johnny Wei, and Zhubo Deng for their constructive help. We also want to appreciate the anonymous reviewers for their helpful comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgment", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Cue me in: Content-inducing approaches to interactive story generation", |
|
"authors": [ |
|
{ |
|
"first": "Faeze", |
|
"middle": [], |
|
"last": "Brahman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandru", |
|
"middle": [], |
|
"last": "Petrusca", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Snigdha", |
|
"middle": [], |
|
"last": "Chaturvedi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Asia-Pacific Chapter of the Association for Computational Linguistics (AACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Faeze Brahman, Alexandru Petrusca, and Snigdha Chaturvedi. 2020. Cue me in: Content-inducing ap- proaches to interactive story generation. In Asia- Pacific Chapter of the Association for Computa- tional Linguistics (AACL).", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Yake! collection-independent automatic keyword extractor", |
|
"authors": [ |
|
{ |
|
"first": "Ricardo", |
|
"middle": [], |
|
"last": "Campos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V\u00edtor", |
|
"middle": [], |
|
"last": "Mangaravite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arian", |
|
"middle": [], |
|
"last": "Pasquali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Al\u00edpio", |
|
"middle": [], |
|
"last": "M\u00e1rio Jorge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C\u00e9lia", |
|
"middle": [], |
|
"last": "Nunes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Jatowt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "European Conference on Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "806--810", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ricardo Campos, V\u00edtor Mangaravite, Arian Pasquali, Al\u00edpio M\u00e1rio Jorge, C\u00e9lia Nunes, and Adam Jatowt. 2018. Yake! collection-independent automatic key- word extractor. In European Conference on Informa- tion Retrieval, pages 806-810. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "North American Chapter of the Association for Computational Linguistics (NAACL-HLT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing. In North American Chapter of the Association for Computational Linguistics (NAACL-HLT).", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Strategies for structuring story generation", |
|
"authors": [ |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Dauphin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Angela Fan, Mike Lewis, and Yann Dauphin. 2019. Strategies for structuring story generation. In Asso- ciation for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Better automatic evaluation of open-domain dialogue systems with contextualized embeddings", |
|
"authors": [ |
|
{

"first": "Sarik",

"middle": [],

"last": "Ghazarian",

"suffix": ""

},

{

"first": "Johnny",

"middle": ["Tian-Zheng"],

"last": "Wei",

"suffix": ""

},

{

"first": "Aram",

"middle": [],

"last": "Galstyan",

"suffix": ""

},

{

"first": "Nanyun",

"middle": [],

"last": "Peng",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Methods for Optimizing and Evaluating Neural Language Generation (NeuralGen workshop of NAACL-HLT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarik Ghazarian, Johnny Tian-Zheng Wei, Aram Gal- styan, and Nanyun Peng. 2019. Better automatic evaluation of open-domain dialogue systems with contextualized embeddings. In Proceedings of the Methods for Optimizing and Evaluating Neural Lan- guage Generation (NeuralGen workshop of NAACL- HLT).", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Predictive engagement: An efficient metric for automatic evaluation of opendomain dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "Sarik", |
|
"middle": [], |
|
"last": "Ghazarian", |
|
"suffix": "" |
|
}, |
|
{

"first": "Ralph",

"middle": ["M"],

"last": "Weischedel",

"suffix": ""

},

{

"first": "Aram",

"middle": [],

"last": "Galstyan",

"suffix": ""

},

{

"first": "Nanyun",

"middle": [],

"last": "Peng",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7789--7796", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarik Ghazarian, Ralph M Weischedel, Aram Galstyan, and Nanyun Peng. 2020. Predictive engagement: An efficient metric for automatic evaluation of open- domain dialogue systems. In AAAI, pages 7789- 7796.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Content planning for neural story generation with aristotelian rescoring", |
|
"authors": [ |
|
{ |
|
"first": "Seraphina", |
|
"middle": [], |
|
"last": "Goldfarb-Tarrant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tuhin", |
|
"middle": [], |
|
"last": "Chakrabarty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Weischedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seraphina Goldfarb-Tarrant, Tuhin Chakrabarty, Ralph Weischedel, and Nanyun Peng. 2020. Content plan- ning for neural story generation with aristotelian rescoring. In Empirical Methods in Natural Lan- guage Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Plan, write, and revise: an interactive system for open-domain story generation", |
|
"authors": [ |
|
{ |
|
"first": "Seraphina", |
|
"middle": [], |
|
"last": "Goldfarb-Tarrant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haining", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "2019 Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL-HLT 2019), Demonstrations Track", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "89--97", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seraphina Goldfarb-Tarrant, Haining Feng, and Nanyun Peng. 2019. Plan, write, and revise: an interactive system for open-domain story generation. In 2019 Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL-HLT 2019), Demonstrations Track, volume 4, pages 89-97.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Topical-chat: Towards knowledge-grounded open-domain conversations", |
|
"authors": [ |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Gopalakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Behnam", |
|
"middle": [], |
|
"last": "Hedayatnia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qinglang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Gottardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjeev", |
|
"middle": [], |
|
"last": "Kwatra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anu", |
|
"middle": [], |
|
"last": "Venkatesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raefer", |
|
"middle": [], |
|
"last": "Gabriel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karthik Gopalakrishnan, Behnam Hedayatnia, Qinglang Chen, Anna Gottardi, Sanjeev Kwatra, Anu Venkatesh, Raefer Gabriel, Dilek Hakkani-T\u00fcr, and Amazon Alexa AI. 2019. Topical-chat: Towards knowledge-grounded open-domain conversations. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Releasing a multimodal dialogue system into the wild: User support mechanisms", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Gruenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [], |
|
"last": "Seneff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 8th SIGdial Workshop on Discourse and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "111--119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander Gruenstein and Stephanie Seneff. 2007. Re- leasing a multimodal dialogue system into the wild: User support mechanisms. In Proceedings of the 8th SIGdial Workshop on Discourse and Dialogue, pages 111-119.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Adapt-a multimodal conversational dialogue system in an apartment domain", |
|
"authors": [ |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Gustafson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linda", |
|
"middle": [], |
|
"last": "Bell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonas", |
|
"middle": [], |
|
"last": "Beskow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Boye", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rolf", |
|
"middle": [], |
|
"last": "Carlson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Edlund", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [], |
|
"last": "Granstr\u00f6m", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "House", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mats", |
|
"middle": [], |
|
"last": "Wir\u00e9n", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "The Sixth International Conference on Spoken Language Processing (ICSLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "134--137", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joakim Gustafson, Linda Bell, Jonas Beskow, Johan Boye, Rolf Carlson, Jens Edlund, Bj\u00f6rn Granstr\u00f6m, David House, and Mats Wir\u00e9n. 2000. Adapt-a mul- timodal conversational dialogue system in an apart- ment domain. In The Sixth International Conference on Spoken Language Processing (ICSLP), Beijing, China, pages 134-137.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{

"first": "Naman",

"middle": [],

"last": "Goyal",

"suffix": ""

},

{

"first": "Marjan",

"middle": [],

"last": "Ghazvininejad",

"suffix": ""

},

{

"first": "Abdelrahman",

"middle": [],

"last": "Mohamed",

"suffix": ""

},
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ves", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Mar- jan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov, and Luke Zettlemoyer. 2019. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "A diversity-promoting objective function for neural conversation models", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Brockett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "North American Chapter of the Association for Computational Linguistics (NAACL-HLT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2015. A diversity-promoting ob- jective function for neural conversation models. In North American Chapter of the Association for Com- putational Linguistics (NAACL-HLT).", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "End-to-end taskcompletion neural dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "Xiujun", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yun-Nung", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lihong", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asli", |
|
"middle": [], |
|
"last": "Celikyilmaz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Joint Conference on Natural Language Processing (IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiujun Li, Yun-Nung Chen, Lihong Li, Jianfeng Gao, and Asli Celikyilmaz. 2017. End-to-end task- completion neural dialogue systems. In Interna- tional Joint Conference on Natural Language Pro- cessing (IJCNLP).", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "How not to evaluate your dialogue system: An empirical study of unsupervised evaluation metrics for dialogue response generation", |
|
"authors": [ |
|
{ |
|
"first": "Chia-Wei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Lowe", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Iulian", |

"middle": [ |

"V" |

], |

"last": "Serban", |

"suffix": "" |

}, |

{ |

"first": "Michael", |

"middle": [], |

"last": "Noseworthy", |

"suffix": "" |

}, |

{ |

"first": "Laurent", |

"middle": [], |

"last": "Charlin", |

"suffix": "" |

}, |

{ |

"first": "Joelle", |

"middle": [], |

"last": "Pineau", |

"suffix": "" |

} |
|
], |
|
"year": 2016, |
|
"venue": "Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chia-Wei Liu, Ryan Lowe, Iulian V Serban, Michael Noseworthy, Laurent Charlin, and Joelle Pineau. 2016. How not to evaluate your dialogue system: An empirical study of unsupervised evaluation met- rics for dialogue response generation. In Empirical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Unsupervised evaluation of interactive dialog with dialogpt", |
|
"authors": [ |
|
{ |
|
"first": "Shikib", |
|
"middle": [], |
|
"last": "Mehri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxine", |
|
"middle": [], |
|
"last": "Eskenazi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 21th Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shikib Mehri and Maxine Eskenazi. 2020. Unsuper- vised evaluation of interactive dialog with dialogpt. In Proceedings of the 21th Annual Meeting of the Special Interest Group on Discourse and Dialogue.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "A chatbot for psychiatric counseling in mental healthcare service based on emotional dialogue analysis and sentence generation", |
|
"authors": [ |
|
{ |
|
"first": "Kyo-Joong", |
|
"middle": [], |
|
"last": "Oh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongkun", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byungsoo", |
|
"middle": [], |
|
"last": "Ko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ho-Jin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 18th IEEE International Conference on Mobile Data Management (MDM)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "371--375", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyo-Joong Oh, Dongkun Lee, Byungsoo Ko, and Ho- Jin Choi. 2017. A chatbot for psychiatric counsel- ing in mental healthcare service based on emotional dialogue analysis and sentence generation. In 2017 18th IEEE International Conference on Mobile Data Management (MDM), pages 371-375. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th annual meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th annual meeting of the Association for Compu- tational Linguistics, pages 311-318.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Improving language understanding by generative pre-training", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Narasimhan", |
|
"suffix": "" |
|
}, |

{ |

"first": "Tim", |

"middle": [], |

"last": "Salimans", |

"suffix": "" |

}, |

{ |

"first": "Ilya", |

"middle": [], |

"last": "Sutskever", |

"suffix": "" |

} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language under- standing by generative pre-training.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Language models are unsupervised multitask learners", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rewon", |
|
"middle": [], |
|
"last": "Child", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Amodei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "OpenAI blog", |
|
"volume": "1", |
|
"issue": "8", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "PlotMachines: Outlineconditioned generation with dynamic plot state tracking", |
|
"authors": [ |
|
{ |

"first": "Hannah", |

"middle": [], |

"last": "Rashkin", |

"suffix": "" |

}, |

{ |

"first": "Asli", |

"middle": [], |

"last": "Celikyilmaz", |

"suffix": "" |

}, |

{ |

"first": "Yejin", |

"middle": [], |

"last": "Choi", |

"suffix": "" |

}, |

{ |

"first": "Jianfeng", |

"middle": [], |

"last": "Gao", |

"suffix": "" |

} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hannah Rashkin, Asli Celikyilmaz, Yejin Choi, and Jianfeng Gao. 2020. PlotMachines: Outline- conditioned generation with dynamic plot state tracking. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Automatic keyword extraction from individual documents", |
|
"authors": [ |
|
{ |
|
"first": "Stuart", |
|
"middle": [], |
|
"last": "Rose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dave", |
|
"middle": [], |
|
"last": "Engel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Cramer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wendy", |
|
"middle": [], |
|
"last": "Cowley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Text mining: applications and theory", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stuart Rose, Dave Engel, Nick Cramer, and Wendy Cowley. 2010. Automatic keyword extraction from individual documents. Text mining: applications and theory, 1:1-20.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Introduction to the conll-2003 shared task: Languageindependent named entity recognition", |
|
"authors": [ |
|
{ |

"first": "Erik", |

"middle": [ |

"F" |

], |

"last": "Sang", |

"suffix": "" |

}, |

{ |

"first": "Fien", |

"middle": [], |

"last": "De Meulder", |

"suffix": "" |

} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F Sang and Fien De Meulder. 2003. Intro- duction to the conll-2003 shared task: Language- independent named entity recognition. arXiv preprint cs/0306050.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Developing a social media-based chatbot for english learning", |
|
"authors": [ |
|
{ |

"first": "M", |

"middle": [], |

"last": "Sarosa", |

"suffix": "" |

}, |

{ |

"first": "M", |

"middle": [], |

"last": "Kusumawardani", |

"suffix": "" |

}, |

{ |

"first": "A", |

"middle": [], |

"last": "Suyono", |

"suffix": "" |

}, |

{ |

"first": "M", |

"middle": [ |

"H" |

], |

"last": "Wijaya", |

"suffix": "" |

} |
|
], |
|
"year": 2020, |
|
"venue": "IOP Conference Series: Materials Science and Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M Sarosa, M Kusumawardani, A Suyono, and MH Wi- jaya. 2020. Developing a social media-based chat- bot for english learning. In IOP Conference Series: Materials Science and Engineering, page 012074. IOP Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "What makes a good conversation? how controllable attributes affect human judgments", |
|
"authors": [ |
|
{ |
|
"first": "Abigail", |
|
"middle": [], |
|
"last": "See", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Roller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "North American Chapter of the Association for Computational Linguistics (NAACL-HLT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abigail See, Stephen Roller, Douwe Kiela, and Jason Weston. 2019. What makes a good conversation? how controllable attributes affect human judgments. In North American Chapter of the Association for Computational Linguistics (NAACL-HLT).", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Ruber: An unsupervised method for automatic evaluation of open-domain dialog systems", |
|
"authors": [ |
|
{ |
|
"first": "Chongyang", |
|
"middle": [], |
|
"last": "Tao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lili", |
|
"middle": [], |
|
"last": "Mou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongyan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chongyang Tao, Lili Mou, Dongyan Zhao, and Rui Yan. 2018. Ruber: An unsupervised method for au- tomatic evaluation of open-domain dialog systems. In Proceedings of the AAAI Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Planand-write: Towards better automatic storytelling", |
|
"authors": [ |
|
{ |
|
"first": "Lili", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weischedel", |
|
"middle": [], |
|
"last": "Ralph", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongyan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The Thirty-Third AAAI Conference on Artificial Intelligence (AAAI-19)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lili Yao, Nanyun Peng, Weischedel Ralph, Kevin Knight, Dongyan Zhao, and Rui Yan. 2019. Plan- and-write: Towards better automatic storytelling. In The Thirty-Third AAAI Conference on Artificial In- telligence (AAAI-19).", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Dialogpt: Large-scale generative pre-training for conversational response generation", |
|
"authors": [ |
|
{ |
|
"first": "Yizhe", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siqi", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yen-Chun", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Brockett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, and Bill Dolan. 2019. Dialogpt: Large-scale generative pre-training for conversational response generation. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "The design and implementation of xiaoice, an empathetic social chatbot", |
|
"authors": [ |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heung-Yeung", |
|
"middle": [], |
|
"last": "Shum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Computational Linguistics", |
|
"volume": "46", |
|
"issue": "1", |
|
"pages": "53--93", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Li Zhou, Jianfeng Gao, Di Li, and Heung-Yeung Shum. 2020. The design and implementation of xiaoice, an empathetic social chatbot. Computational Linguis- tics, 46(1):53-93.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "A snapshot of the proposed DiSCoL system ward users' desired direction. The first block in Figure 1 depicts an example of a generated response by DialoGPT.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"text": "Architecture of DiSCoL system", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"text": "Automatic evaluations on responses generated by DiSCoL and DialoGPT systems", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"text": "Human evaluations on responses generated by DiSCoL and DialoGPT systems", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "2017; Sarosa Dialogue Context: what do you think about Game of Thrones? Game of Thrones is my favorite show. I like many genres of TV, but I think I like the Adventure genre the most. How about you? Do you have a favorite show or do you like the adventure genre? I love the game of thrones! My favorite show lol! Game of lions is also my favorite show, who is your favorite character? I like Game of Thrones. I have not seen the latest season. I have seen the first two seasons.", |
|
"content": "<table><tr><td/><td/><td>DialoGPT</td><td/><td/><td/></tr><tr><td/><td/><td>DiSCoL</td><td/><td/><td/></tr><tr><td>game of thrones</td><td>game of lions</td><td>love the game</td><td>favorite show</td><td>favorite character</td><td>show lol</td></tr><tr><td>game of thrones</td><td>game of lions</td><td>adventure genre</td><td>favorite show</td><td>many genres</td><td>show lol</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "", |
|
"content": "<table><tr><td/><td/><td/><td/><td>E n c o d e r I n p u t</td></tr><tr><td/><td/><td>!</td><td>YAKE</td><td>bears fan # chicago popularized many phrases !</td><td>O u t p u t D e c o d e r</td></tr><tr><td/><td/><td/><td/><td>BART</td></tr><tr><td/><td/><td/><td/><td>!</td></tr><tr><td/><td/><td/><td/><td>watch the nfl # nfl games # favorite sport</td></tr><tr><td/><td/><td/><td/><td>watch the simpsons</td></tr><tr><td/><td colspan=\"4\">Figure 4: Architecture of the convline generator during training and inference time</td></tr><tr><td>Uttr.</td><td colspan=\"3\">Easy_set Challenging_set General_uttr.</td></tr><tr><td>188,378</td><td>146,370</td><td>5,323</td><td>5,966</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "", |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "for retrieving discourse keywords representing convlines. YAKE assigns an", |
|
"content": "<table><tr><td colspan=\"4\">Dialogue Context Annotators Kappa Pearson</td></tr><tr><td>100</td><td>33</td><td>0.44</td><td>0.5</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Statistics and inter-annotator agreements of AMT evaluations on DiSCoL and DialoGPT performances.", |
|
"content": "<table/>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |