|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:35:26.759328Z" |
|
}, |
|
"title": "Challenges in Designing Natural Language Interfaces for Complex Visual Models", |
|
"authors": [ |
|
{ |
|
"first": "Henrik", |
|
"middle": [], |
|
"last": "Voigt", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Jena", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Monique", |
|
"middle": [], |
|
"last": "Meuschke", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Jena", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Lawonn", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Jena", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sina", |
|
"middle": [], |
|
"last": "Zarrie\u00df", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Bielefeld", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Intuitive interaction with visual models becomes an increasingly important task in the field of Visualization (VIS) and verbal interaction represents a significant aspect of it. Vice versa, modeling verbal interaction in visual environments is a major trend in ongoing research in NLP. To date, research on Language & Vision, however, mostly happens at the intersection of NLP and Computer Vision (CV), and much less at the intersection of NLP and Visualization, which is an important area in Human-Computer Interaction (HCI). This paper presents a brief survey of recent work on interactive tasks and setups in NLP and Visualization. We discuss the respective methods, show interesting gaps and conclude by suggesting neural, visually grounded dialogue modeling as a promising potential for NLIs for visual models.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Intuitive interaction with visual models becomes an increasingly important task in the field of Visualization (VIS) and verbal interaction represents a significant aspect of it. Vice versa, modeling verbal interaction in visual environments is a major trend in ongoing research in NLP. To date, research on Language & Vision, however, mostly happens at the intersection of NLP and Computer Vision (CV), and much less at the intersection of NLP and Visualization, which is an important area in Human-Computer Interaction (HCI). This paper presents a brief survey of recent work on interactive tasks and setups in NLP and Visualization. We discuss the respective methods, show interesting gaps and conclude by suggesting neural, visually grounded dialogue modeling as a promising potential for NLIs for visual models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In recent years, research in NLP has become more and more interested in data sets, tasks and models that pair Language and Vision, cf. work on image Captioning (Vinyals et al., 2015; Herdade et al., 2019; He et al., 2020) , Visual Question Answering (Antol et al., 2015; Goyal et al., 2017; Kazemi and Elqursh, 2017) , or Instruction Following and -Generation in visual domains (Fried et al., 2017 (Fried et al., , 2018 . This new area is generally called Vision & Language (Mogadala et al., 2019) , but it is actually based mostly on combining methods from NLP (like e.g. language models) and Computer Vision, e.g. visual analysis and recognition models for encoding visual input like images. Methods and models from the research area of Visualization -which investigates solutions for modelling, exploring, analyzing and communicating data by using visual technologies and can be seen as the field of visual synthesis -are, to the best of our knowledge, less well known in the NLP community.", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 182, |
|
"text": "(Vinyals et al., 2015;", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 183, |
|
"end": 204, |
|
"text": "Herdade et al., 2019;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 221, |
|
"text": "He et al., 2020)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 250, |
|
"end": 270, |
|
"text": "(Antol et al., 2015;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 290, |
|
"text": "Goyal et al., 2017;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 291, |
|
"end": 316, |
|
"text": "Kazemi and Elqursh, 2017)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 378, |
|
"end": 397, |
|
"text": "(Fried et al., 2017", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 398, |
|
"end": 419, |
|
"text": "(Fried et al., , 2018", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 474, |
|
"end": 497, |
|
"text": "(Mogadala et al., 2019)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In the VIS community, interaction with visual models plays an important role and natural language interaction represents a big part of it (Bacci et al., 2020; . Natural Language Interfaces (NLIs) that support interactive visualizations based on language queries have found increasing interest in recent research (Narechania et al., 2020; Yu and Silva, 2020; . However, from an NLP point of view, the methods applied in these recent interfaces, mostly rely on established methods for implementing semantic parsers that map natural language instructions to symbolic data base queries, which are consecutively visualized by a visualization pipeline. In this paper, we argue that there is space for further support of intuitive interaction with visual models using state-of-the-art NLP methods that would also pose novel and interesting challenges for both domains.", |
|
"cite_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 158, |
|
"text": "(Bacci et al., 2020;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 312, |
|
"end": 337, |
|
"text": "(Narechania et al., 2020;", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 357, |
|
"text": "Yu and Silva, 2020;", |
|
"ref_id": "BIBREF58" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We focus this brief overview on a selection of methods for modeling interaction in the fields of NLP and VIS based on recent submissions to the top conferences ACL, EACL, VIS and EuroVIS. First, we briefly describe how interaction is understood in the respective fields (Section 2). We provide a short overview of recent, state-of-the-art systems related to interaction with visual models or in visual environments (Section 3). Finally, we discuss potential research gaps and challenges that could be addressed in future work on modelling interaction with visual models (Section 4). As interaction is a major research topic in both NLP and VIS, we do not aim for a complete survey, but we hope to make readers from both communities aware that there could be fruitful directions for collaboration.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We refer to interaction as the \"mutual determination of behaviour\" between different entities, like humans, digital agents or interfaces, following Hornbaek and Oulasvirta (2017) . Work on interaction in NLP typically investigates verbal communication between human dialogue partners and models dialogue systems that interact with users via natural language, but also recognizes the fact that verbal communication typically happens in combination with other modalities, like touch, movements and gestures in embodied dialogue or gaze and visual stimuli in visual dialogue (Cuay\u00e1huitl et al., 2015) . In HCI and VIS, interaction via multiple modalities plays a very prominent role, i.e. involves gestures, movements, different input controllers, screens, gazes, modalities and more. The interaction between a user and a visual model is a key aspect of many VIS tasks and applications and impacts on the user evaluation of a visual model to a significant degree (Yi et al., 2007; Tominski, 2015; Figueiras, 2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 148, |
|
"end": 178, |
|
"text": "Hornbaek and Oulasvirta (2017)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 572, |
|
"end": 597, |
|
"text": "(Cuay\u00e1huitl et al., 2015)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 960, |
|
"end": 977, |
|
"text": "(Yi et al., 2007;", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 978, |
|
"end": 993, |
|
"text": "Tominski, 2015;", |
|
"ref_id": "BIBREF51" |
|
}, |
|
{ |
|
"start": 994, |
|
"end": 1010, |
|
"text": "Figueiras, 2015)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interaction in NLP and VIS", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Generally speaking, the field of VIS is interested in the development of techniques for creating visual models (Brehmer and Munzner, 2013; Liu et al., 2014; Amar et al., 2005) . A visual model is data that is mapped into a visually perceivable space by representing concepts in the data through visual concepts to make them easily perceivable and understandable by humans. This supports research and education in many aspects as well as data exploration and understanding of big data sets. Research on interaction in VIS often addresses the design of appropriate human-computer interfaces and the abilities they need to offer for interacting with a visual model. Natural Language Interfaces (NLIs), in this context, can be seen as one possible solution of enabling interaction with a visualization. Dimara and Perin (2019) provide a comprehensive study on how interaction is seen in VIS by defining it as \"the interplay between a person and a data interface involving a data-related intent, at least one action from the person and an interface reaction that is perceived as such\". The authors deliberately distinguish their view from the HCI definition of interaction as stated in Hornbaek and Oulasvirta (2017) , by making the importance of the data related intent of the user the focus in VIS. As a conclusion, the authors observe that approaches towards interaction in VIS currently lack two points, i.e. flexibility and a better understanding of the user goal. The lack of these currently leads to interfaces that are too predictable, unsatisfying in their capacities to act or risk misdirected interactions with visual models. Despite that, Hornbaek and Oulasvirta (2017) and Dimara and Perin (2019) both argue that interaction foremost represents a form of dialogue which the authors evaluate in terms of its \"naturalness\" and its mutual \"strong sense of understanding\".", |
|
"cite_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 138, |
|
"text": "(Brehmer and Munzner, 2013;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 139, |
|
"end": 156, |
|
"text": "Liu et al., 2014;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 175, |
|
"text": "Amar et al., 2005)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1181, |
|
"end": 1211, |
|
"text": "Hornbaek and Oulasvirta (2017)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1646, |
|
"end": 1676, |
|
"text": "Hornbaek and Oulasvirta (2017)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1681, |
|
"end": 1704, |
|
"text": "Dimara and Perin (2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interaction in VIS", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "This highlights the point that interaction with a visual model is fundamentally conceived as a multi-modal process that leverages various different interface modalities for communication and information exchange. As discussed below, from an NLP perspective, interactions with systems in VIS can be seen as multi-modal dialogues between a system and a user having data-related goals.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interaction in VIS", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Work in NLP often aims at understanding and modeling how dialogue partners collaborate and achieve common ground by exchanging verbal utterances (potentially in combination with different modalities, like e.g. vision). This typically involves language understanding, dialogue management (reasoning over latent user goals) and language generation (Young et al., 2010 (Young et al., , 2013 . Recent work on dialogue has turned more and more to so-called neural end-to-end-dialogue systems that do not separate processes of understanding, reasoning and generation, and aim for more flexibility and adaptiveness (Santhanam and Shaikh, 2019) . Santhanam and Shaikh (2019) distinguish between goal-driven and open dialogue systems as they address fundamentally different interaction and evaluation set-ups. Goal-or task-oriented systems are typically designed towards helping the user to achieve a very specific goal in a given context. For instance, in instruction-following and -generation (Fried et al., 2017 (Fried et al., , 2018 , a user or system needs to reach a specific position in an environment by following navigation instructions. Here, the interaction is often asymmetric in the sense that the modalities to be used by the partners are very restricted (the instruction follower acts, the giver speaks). Open-domain dialogue systems, like Li et al. (2017) ; Adiwardana et al. (2020) are not bound to a goal and therefore require a high awareness of context, personality and variety of the dialogue system as Santhanam and Shaikh (2019) point out. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 346, |
|
"end": 365, |
|
"text": "(Young et al., 2010", |
|
"ref_id": "BIBREF56" |
|
}, |
|
{ |
|
"start": 366, |
|
"end": 387, |
|
"text": "(Young et al., , 2013", |
|
"ref_id": "BIBREF57" |
|
}, |
|
{ |
|
"start": 608, |
|
"end": 636, |
|
"text": "(Santhanam and Shaikh, 2019)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 639, |
|
"end": 666, |
|
"text": "Santhanam and Shaikh (2019)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 986, |
|
"end": 1005, |
|
"text": "(Fried et al., 2017", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1006, |
|
"end": 1027, |
|
"text": "(Fried et al., , 2018", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1346, |
|
"end": 1362, |
|
"text": "Li et al. (2017)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 1365, |
|
"end": 1389, |
|
"text": "Adiwardana et al. (2020)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interaction in NLP", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "A range of recent papers have looked into integrating NLP in VIS systems, by implementing NLIs that translate a natural language query to a visualization command in some programming language. This allows users to \"talk to some dataset\", as illustrated in Figure 1 . Existing NLIs are applied in systems that create and manipulate, e.g., chart visualizations (Shao and Nakashole, 2020) . Similar interfaces are proposed in Narechania et al. 2016These existing NLIs are mostly applied in the field of visual analytics. Here, the user has a concrete goal in mind, i.e. some manipulation of the underlying data table (e.g. aggregation, filtering). Dimara and Perin (2019) point out that the exact understanding of the users' goal is important in these interfaces and one current approach for improving the inference of the users' intent is to predict it based on activity logs. Setlur and Kumar (2020)'s work suggests that the handling of vague subjective modifiers in utterances can be improved by using sentiment analysis techniques.", |
|
"cite_spans": [ |
|
{ |
|
"start": 358, |
|
"end": 384, |
|
"text": "(Shao and Nakashole, 2020)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 644, |
|
"end": 667, |
|
"text": "Dimara and Perin (2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 255, |
|
"end": 263, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Natural Language Interfaces in VIS", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In terms of NLP methods, these interfaces are mostly based on manually engineered grammars that parse user input to queries and then generate an appropriate visualization output, as e.g. in (Yu and Silva, 2020) . These grammars are relatively easy to set-up even for non-experts (of NLP) and do not require large amounts of training data, as most state-of-the-art dialogue systems developed in NLP. An important limitation of this approach, however, is that such semantic grammars are designed to translate directly between a given user query in natural language and some underlying data query language like e.g. SQL. This means that, in longer multi-turn interactions between a user and a system, users are not able to formulate short, intuitive queries that implicitly refer to the context (e.g. \"now, make this a bit bigger\" where \"this\" refers to an aspect of the visual model discussed in the preceding context) or multi-modal queries (e.g. \"increase the volume of this particle here\" while user points to a region on the screen). Finally, and most importantly, they assume that the user can precisely formulate or describe the action or manipulation that is needed to obtain a certain visualization or information from the visual model, as shown for instance in Figure 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 210, |
|
"text": "(Yu and Silva, 2020)", |
|
"ref_id": "BIBREF58" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1268, |
|
"end": 1276, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Natural Language Interfaces in VIS", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Beyond NLIs for visual analytics (Narechania et al., 2020; Yu and Silva, 2020) , we see further potential for other NLP methods in visualization tasks that require more than plots of data for a specific, precisely formulated goal. In visual exploration, the goal is typically rather vague and developed during the exploration process itself, in an iterative fashion while interacting with the system. Moreover, applications in augmented and virtual reality entail new possibilities of immersive experiences and interaction for supporting performance as in (Butcher et al., 2020) or to enhance retention , which clearly includes multiple modalities. We argue that users of visual models in such exploratory setups could greatly benefit from natural language interaction, if the NLI would allow for more context-sensitive and situated querying of the model. Moreover, beyond querying, we expect that users would highly appreciate verbal system feedback or suggestions and explanations (see Figure 2 ). Ideally, this back-and-forth between the user and the system should support the user not only in realizing his goal, but also in establishing his goal or refining his initially vague goal.", |
|
"cite_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 58, |
|
"text": "(Narechania et al., 2020;", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 59, |
|
"end": 78, |
|
"text": "Yu and Silva, 2020)", |
|
"ref_id": "BIBREF58" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 988, |
|
"end": 997, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Natural Language Interfaces in VIS", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Thus, we currently see a lot of interest in systems that enable interaction with an underlying data set via natural language, but the query-based approach used in many NLIs still seems to lack flexibility.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Natural Language Interfaces in VIS", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A lot of recent work in NLP tackles dialogue modelling (Shuster et al., 2020; Qin et al., 2020; Ham et al., 2020; Rameshkumar and Bailey, 2020) or question answering (Baheti et al., 2020; . Goal-based dialogue covers navigation Zhu et al. (2020) , manipulation (Jayannavar et al., 2020) or classical information presentation tasks (Andreas et al., 2020) . A central problem in these mod- els is the fact that at each point in an interaction, there is uncertainty with respect to the understanding of the user goal. The predominant approach to handle reasoning under uncertainty is (Deep) Reinforcement Learning (RL) where an agent learns a dialogue policy (Jaques et al., 2020; Li et al., , 2016 . RL optimizes the utterance understanding and generation in the system with respect to a certain reward function in the given environment.", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 77, |
|
"text": "(Shuster et al., 2020;", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 78, |
|
"end": 95, |
|
"text": "Qin et al., 2020;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 96, |
|
"end": 113, |
|
"text": "Ham et al., 2020;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 114, |
|
"end": 143, |
|
"text": "Rameshkumar and Bailey, 2020)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 166, |
|
"end": 187, |
|
"text": "(Baheti et al., 2020;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 228, |
|
"end": 245, |
|
"text": "Zhu et al. (2020)", |
|
"ref_id": "BIBREF59" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 286, |
|
"text": "(Jayannavar et al., 2020)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 331, |
|
"end": 353, |
|
"text": "(Andreas et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 656, |
|
"end": 677, |
|
"text": "(Jaques et al., 2020;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 678, |
|
"end": 695, |
|
"text": "Li et al., , 2016", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Language & Vision", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Next to these improvements on the level of dialogue modeling, recent developments in Language & Vision focus on grounding verbal utterances in visual inputs as, for instance, in visual question answering (Huang et al., 2020; Khademi, 2020) . Visual dialogue (Das et al., 2017; Wang et al., 2020) extends the dialogue modelling task to the visual modality. Mixed-initiative visual dialogue, as e.g. in Ilinykh et al. (2019) , aims at modeling interactions in which both dialogue partners can talk and act, which could be an interesting setting for visual exploration tasks. We believe that these successes in neural dialogue modelling and the integration of different modalities as in visual dialogue can lead to new possibilities for interactive systems in VIS, as we will discuss below.", |
|
"cite_spans": [ |
|
{ |
|
"start": 204, |
|
"end": 224, |
|
"text": "(Huang et al., 2020;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 225, |
|
"end": 239, |
|
"text": "Khademi, 2020)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 258, |
|
"end": 276, |
|
"text": "(Das et al., 2017;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 277, |
|
"end": 295, |
|
"text": "Wang et al., 2020)", |
|
"ref_id": "BIBREF59" |
|
}, |
|
{ |
|
"start": 401, |
|
"end": 422, |
|
"text": "Ilinykh et al. (2019)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Language & Vision", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Uncertainty We believe that a fruitful direction for more flexible NLP-based systems in VIS is to look at scenarios where users might not have a concrete manipulation task or goal in mind, but want to explore a complex visual model. Numerous applications, such as in medicine (Meuschke et al., 2016 (Meuschke et al., , 2017 or cultural-technical scenarios , require the visual exploration of complex models. Figure 2 shows an example of a 3D-mesh of an aneurysm and a corresponding, made-up dialogue that would support the user in exploring the model (Meuschke et al., 2018) . On the NLP side, this setting involves a high degree of uncertainty. The user investigates a certain region of the model she is interested in, develops an understanding of the visual landscape and/or just learns how to handle it best. Thus, we argue that complex visual models like in Figure 2 probably call for different and more flexible types of interactions, as compared to NLIs discussed in Section 3.1. A user analyzing a barplot might be interested in minima, maxima, trends or outliers, which correspond to fixed goals. In contrast to that, a neurosurgeon inspecting an aneurysm in virtual reality is much more interested in the how than in the what and the goal might not be precisely formulated beforehand by the surgeon but evolving through the back and forth of the interaction with the visual model. An important question that arises from that is, if users would really use natural language for exploring a visual model or if they would rather prefer the use of e.g. a controller or touch gesture. For many cases this might indeed be true, but we argue, that certain scenarios in visual exploration especially demand for verbal problem solution: recommendation of possibilities (\"show me how to reach the largest vessel from here\", \"how to achieve a blood pressure increase in this region\"), problemsolution-suggestions, tutorial-like action descriptions the user has to mimic, e.g. in educational scenarios or future state simulations that are highly hypothetical (\"what would the vessel behave if we changed the blood flow drastically to ...\"). These cases are in fact simulations of possible solutions helping the user to visualize and explore the solution space, which are much more convenient and intuitive expressed using natural language which can be supported by strong dialogue models that adapt to the context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 276, |
|
"end": 298, |
|
"text": "(Meuschke et al., 2016", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 299, |
|
"end": 323, |
|
"text": "(Meuschke et al., , 2017", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 551, |
|
"end": 574, |
|
"text": "(Meuschke et al., 2018)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 408, |
|
"end": 416, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 862, |
|
"end": 870, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Future Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Visual Grounding Visual language grounding in these scenarios captures not only the grounding of words into the scene (e.g. \"vessel\") but also the grounding of movements and gazes like pointer gestures (e.g. \"here\") which shows that the interplay of context awareness and multi-modal visual grounding are prerequisites for dialogue that humans would describe as \"intuitive\" and flexible. In contrast to systems like (Narechania et al., 2020) which are restricted to visual output and systems like (Adiwardana et al., 2020) which respond verbally, dialogue systems in visual models should be able to handle multi-modal responses, as illustrated in Figure 2 . The highlighting, scaling, coloring or fading of certain visual properties is an important part of the response which not only contains text but rather text and a visual action combined. Figure 2 shows an example for a collaborative, mixed-initiative interaction where the the dialogue flow is not entirely centered on user queries. used dialogue modelling for generating visual story lines in collaboration with a user and obtained promising results in leveraging visual exploration scenarios. In contrast to concise, goal-based visual analytics (see Section 3.1), hard-coded grammars might be too restricted to handle the high uncertainty and the complex underlying reasoning in explorative scenarios. The visual analytics task differs from the visual exploration task considering the fact that it is not driven by a concise goal. Introducing mixedinitiative dialogue in visual exploration enhances the users' ability of communicating uncertainties and supports experimenting and iterative engagement with the environment as applied in active visual problem solving or in educational settings. When no concrete goal can be formulated, a NLI has to adapt to the user and present contextual information like hints, explanations or react to expressed uncertainties (e.g. 'What does this blue region here show me?', 'How can I slice the vessel and investigate the thickness?', 'How does this spot evolve over time?') which is a form of guidance, an evolving field in visual analytics (Ceneda et al., 2020) . This interaction also is not bound to text-language interaction, but furthermore accommodates gestures, movements or glances and therefore can be categorized as multi-modal. Here, recent advances in NLP could extend the interface flexibility by providing better context-awareness using visually grounded dialogue techniques and contributing to solve the user goal inference problem by re-framing the task as an iterative goal alignment task executed via mixed-initiative dialogue. We think that especially mixed-initiative dialogue would be a challenging but very promising direction and well-suited for inferring user intentions in complex VIS settings because of the usage of direct user feedback and iterative alignment. Furthermore, mixed-initiative dialogue methods could support the setup of user-centred evaluation of more complex visualization techniques, such as in (Lawonn et al., 2014) . In sum, we argue that the role of NLP in interfaces with visual models is to enrich the dialogue between a system and a user (Dimara and Perin, 2019) with more flexible and intuitive ways of dialogue that might include touch-based or controller-based interaction.", |
|
"cite_spans": [ |
|
{ |
|
"start": 416, |
|
"end": 441, |
|
"text": "(Narechania et al., 2020)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 497, |
|
"end": 522, |
|
"text": "(Adiwardana et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 2140, |
|
"end": 2161, |
|
"text": "(Ceneda et al., 2020)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 3039, |
|
"end": 3060, |
|
"text": "(Lawonn et al., 2014)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 3188, |
|
"end": 3212, |
|
"text": "(Dimara and Perin, 2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 647, |
|
"end": 655, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 845, |
|
"end": 853, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Future Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In this paper, we gave a brief introduction on how interaction is understood and modelled in the fields of NLP and VIS. We found that existing work on NLIs in the Visualization domain heavily relies on query-based interactions. We argued that for interacting with highly complex visual models these strict interaction protocols might not be sufficient. Recent developments in Language & Vision investigate dialogue in visual contexts and reach promising results. We believe that this holds interesting research gaps for future work in integrating different variations of NLP-backed dialogue methods into visualizations enabling multi-modal interaction with visual models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank the Michael Stifel Center Jena for the funding of this work, which is part of the \"A Virtual Werkstatt for Digitization in the Sciences\" project funded by the Carl Zeiss Foundation (062017-02).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Towards a human-like open-domain chatbot", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Adiwardana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Fiedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Thoppilan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Apoorv", |
|
"middle": [], |
|
"last": "Kulshreshtha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Nemade", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yifeng", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Quoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Adiwardana, Minh-Thang Luong, D. So, J. Hall, Noah Fiedel, R. Thoppilan, Z. Yang, Apoorv Kul- shreshtha, G. Nemade, Yifeng Lu, and Quoc V. Le. 2020. Towards a human-like open-domain chatbot. ArXiv, abs/2001.09977.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Low-level components of analytic activity in information visualization", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Amar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Eagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Stasko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "IEEE Symposium on Information Visualization, 2005. INFOVIS 2005", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "111--117", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Amar, James Eagan, and John Stasko. 2005. Low-level components of analytic activity in infor- mation visualization. In IEEE Symposium on Infor- mation Visualization, 2005. INFOVIS 2005., pages 111-117. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Vqa: Visual question answering", |
|
"authors": [ |
|
{ |
|
"first": "Stanislaw", |
|
"middle": [], |
|
"last": "Antol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aishwarya", |
|
"middle": [], |
|
"last": "Agrawal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiasen", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lawrence", |
|
"middle": [], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the IEEE international conference on computer vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2425--2433", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stanislaw Antol, Aishwarya Agrawal, Jiasen Lu, Mar- garet Mitchell, Dhruv Batra, C Lawrence Zitnick, and Devi Parikh. 2015. Vqa: Visual question an- swering. In Proceedings of the IEEE international conference on computer vision, pages 2425-2433.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Inspecting data using natural language queries", |
|
"authors": [ |
|
{ |
|
"first": "Franscesca", |
|
"middle": [], |
|
"last": "Bacci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Federico", |
|
"middle": [ |
|
"Maria" |
|
], |
|
"last": "Cau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Spano", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ICCSA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franscesca Bacci, Federico Maria Cau, and L. D. Spano. 2020. Inspecting data using natural language queries. In ICCSA.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Fluent response generation for conversational question answering", |
|
"authors": [ |
|
{ |
|
"first": "Ashutosh", |
|
"middle": [], |
|
"last": "Baheti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Ritter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Small", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2005.10464" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashutosh Baheti, Alan Ritter, and Kevin Small. 2020. Fluent response generation for conversational ques- tion answering. arXiv preprint arXiv:2005.10464.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A multi-level typology of abstract visualization tasks", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Brehmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Munzner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "IEEE Transactions on Visualization and Computer Graphics", |
|
"volume": "19", |
|
"issue": "", |
|
"pages": "2376--2385", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Brehmer and T. Munzner. 2013. A multi-level ty- pology of abstract visualization tasks. IEEE Trans- actions on Visualization and Computer Graphics, 19:2376-2385.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Vria: A web-based framework for creating immersive analytics experiences", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"William" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Scott", |
|
"middle": [], |
|
"last": "Butcher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nigel", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Panagiotis D", |
|
"middle": [], |
|
"last": "Ritsos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Transactions on Visualization and Computer Graphics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter William Scott Butcher, Nigel W John, and Panagi- otis D Ritsos. 2020. Vria: A web-based framework for creating immersive analytics experiences. IEEE Transactions on Visualization and Computer Graph- ics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Guide me in analysis: A framework for guidance designers", |
|
"authors": [ |
|
{ |
|
"first": "Davide", |
|
"middle": [], |
|
"last": "Ceneda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Andrienko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Andrienko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Gschwandtner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Miksch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikolaus", |
|
"middle": [], |
|
"last": "Piccolotto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Schreck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Streit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Suschnigg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Tominski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Computer Graphics Forum", |
|
"volume": "39", |
|
"issue": "", |
|
"pages": "269--288", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Davide Ceneda, N. Andrienko, G. Andrienko, T. Gschwandtner, S. Miksch, Nikolaus Piccolotto, T. Schreck, M. Streit, Josef Suschnigg, and C. Tominski. 2020. Guide me in analysis: A frame- work for guidance designers. Computer Graphics Forum, 39:269 -288.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Designing and evaluating multimodal interactions for facilitating visual analysis with dashboards", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Chowdhury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdul", |
|
"middle": [], |
|
"last": "Moeid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Hoque", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Md", |
|
"middle": [], |
|
"last": "Kabir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Sohorab Hossain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Islam", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "IEEE Access", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "60--71", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "I. Chowdhury, Abdul Moeid, Enamul Hoque, M. Kabir, Md. Sohorab Hossain, and M. M. Islam. 2021. De- signing and evaluating multimodal interactions for facilitating visual analysis with dashboards. IEEE Access, 9:60-71.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Introduction for speech and language for interactive robots", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Cuay\u00e1huitl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazunori", |
|
"middle": [], |
|
"last": "Komatani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Skantze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Comput. Speech Lang", |
|
"volume": "34", |
|
"issue": "", |
|
"pages": "83--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Cuay\u00e1huitl, Kazunori Komatani, and Gabriel Skantze. 2015. Introduction for speech and lan- guage for interactive robots. Comput. Speech Lang., 34:83-86.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Visual dialog. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kottur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Avi", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deshraj", |
|
"middle": [], |
|
"last": "Yadav", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Jos\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Moura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1080--1089", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Das, S. Kottur, K. Gupta, Avi Singh, Deshraj Ya- dav, Jos\u00e9 M. F. Moura, D. Parikh, and Dhruv Ba- tra. 2017. Visual dialog. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1080-1089.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "What is interaction for data visualization?", |
|
"authors": [ |
|
{ |
|
"first": "Evanthia", |
|
"middle": [], |
|
"last": "Dimara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Perin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "IEEE transactions on visualization and computer graphics", |
|
"volume": "26", |
|
"issue": "1", |
|
"pages": "119--129", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Evanthia Dimara and Charles Perin. 2019. What is in- teraction for data visualization? IEEE transactions on visualization and computer graphics, 26(1):119- 129.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Towards the understanding of interaction in information visualization", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Figueiras", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "19th International Conference on Information Visualisation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "140--147", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Figueiras. 2015. Towards the understanding of in- teraction in information visualization. 2015 19th International Conference on Information Visualisa- tion, pages 140-147.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Unified pragmatic models for generating and following instructions", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Fried", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Andreas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1711.04987" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Fried, Jacob Andreas, and Dan Klein. 2017. Unified pragmatic models for generating and follow- ing instructions. arXiv preprint arXiv:1711.04987.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Speaker-follower models for vision-and-language navigation", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Fried", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ronghang", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Volkan", |
|
"middle": [], |
|
"last": "Cirik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rohrbach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Andreas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis-Philippe", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taylor", |
|
"middle": [], |
|
"last": "Berg-Kirkpatrick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Saenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Darrell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3314--3325", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Fried, Ronghang Hu, Volkan Cirik, Anna Rohrbach, Jacob Andreas, Louis-Philippe Morency, Taylor Berg-Kirkpatrick, Kate Saenko, Dan Klein, and Trevor Darrell. 2018. Speaker-follower mod- els for vision-and-language navigation. In Advances in Neural Information Processing Systems, pages 3314-3325.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Quda: Natural language queries for visual data analytics", |
|
"authors": [ |
|
{ |
|
"first": "Siwei", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Ge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siliang", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siwei Fu, Kai Xiong, X. Ge, Y. Wu, Siliang Tang, and W. Chen. 2020. Quda: Natural language queries for visual data analytics. ArXiv, abs/2005.03257.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Making the v in vqa matter: Elevating the role of image understanding in visual question answering", |
|
"authors": [ |
|
{ |
|
"first": "Yash", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tejas", |
|
"middle": [], |
|
"last": "Khot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douglas", |
|
"middle": [], |
|
"last": "Summers-Stay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6325--6334", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and D. Parikh. 2017. Making the v in vqa matter: Elevating the role of image under- standing in visual question answering. 2017 IEEE Conference on Computer Vision and Pattern Recog- nition (CVPR), pages 6325-6334.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "End-to-end neural pipeline for goal-oriented dialogue systems using gpt-2", |
|
"authors": [ |
|
{ |
|
"first": "Donghoon", |
|
"middle": [], |
|
"last": "Ham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeong-Gwan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Youngsoo", |
|
"middle": [], |
|
"last": "Jang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kee-Eung", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Donghoon Ham, Jeong-Gwan Lee, Youngsoo Jang, and Kee-Eung Kim. 2020. End-to-end neural pipeline for goal-oriented dialogue systems using gpt-2. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Image captioning through image transformer. ArXiv, abs", |
|
"authors": [ |
|
{ |
|
"first": "Sen", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Tavakoli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Rosenhahn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Pugeault", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sen He, Wentong Liao, H. Tavakoli, M. Yang, B. Rosenhahn, and N. Pugeault. 2020. Image captioning through image transformer. ArXiv, abs/2004.14231.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Image captioning: Transforming objects into words", |
|
"authors": [ |
|
{ |
|
"first": "Simao", |
|
"middle": [], |
|
"last": "Herdade", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armin", |
|
"middle": [], |
|
"last": "Kappeler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Boakye", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Soares", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simao Herdade, Armin Kappeler, K. Boakye, and J. Soares. 2019. Image captioning: Transforming objects into words. In NeurIPS.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Association for Computing Machinery", |
|
"authors": [ |
|
{ |
|
"first": "Kasper", |
|
"middle": [], |
|
"last": "Hornbaek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antti", |
|
"middle": [], |
|
"last": "Oulasvirta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5040--5052", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3025453.3025765" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kasper Hornbaek and Antti Oulasvirta. 2017. What Is Interaction?, page 5040-5052. Association for Com- puting Machinery, New York, NY, USA.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Aligned dual channel graph convolutional network for visual question answering", |
|
"authors": [ |
|
{ |
|
"first": "Qingbao", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jielong", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changmeng", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junying", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qing", |
|
"middle": [], |
|
"last": "Ho-Fung Leung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7166--7176", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.642" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qingbao Huang, Jielong Wei, Yi Cai, Changmeng Zheng, Junying Chen, Ho-fung Leung, and Qing Li. 2020. Aligned dual channel graph convolutional net- work for visual question answering. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7166-7176, On- line. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "A natural-language-based visual query approach of uncertain human trajectories", |
|
"authors": [ |
|
{ |
|
"first": "Zhaosong", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ye", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shengjie", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kejie", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weixia", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingjie", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minfeng", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingliang", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "IEEE Transactions on Visualization and Computer Graphics", |
|
"volume": "26", |
|
"issue": "1", |
|
"pages": "1256--1266", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhaosong Huang, Ye Zhao, Wei Chen, Shengjie Gao, Kejie Yu, Weixia Xu, Mingjie Tang, Minfeng Zhu, and Mingliang Xu. 2019. A natural-language-based visual query approach of uncertain human trajecto- ries. IEEE Transactions on Visualization and Com- puter Graphics, 26(1):1256-1266.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Meetup! a corpus of joint activity dialogues in a visual environment", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Ilinykh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sina", |
|
"middle": [], |
|
"last": "Zarrie\u00df", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Schlangen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N. Ilinykh, Sina Zarrie\u00df, and D. Schlangen. 2019. Meetup! a corpus of joint activity dialogues in a vi- sual environment. ArXiv, abs/1907.05084.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Human-centric dialog training via offline reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Natasha", |
|
"middle": [], |
|
"last": "Jaques", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Judy", |
|
"middle": [ |
|
"Hanwen" |
|
], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asma", |
|
"middle": [], |
|
"last": "Ghandeharioun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Craig", |
|
"middle": [], |
|
"last": "Ferguson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Agata", |
|
"middle": [], |
|
"last": "Lapedriza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shixiang", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rosalind", |
|
"middle": [], |
|
"last": "Picard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3985--4003", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.327" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Natasha Jaques, Judy Hanwen Shen, Asma Ghande- harioun, Craig Ferguson, Agata Lapedriza, Noah Jones, Shixiang Gu, and Rosalind Picard. 2020. Human-centric dialog training via offline reinforce- ment learning. In Proceedings of the 2020 Confer- ence on Empirical Methods in Natural Language Processing (EMNLP), pages 3985-4003, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Learning to execute instructions in a Minecraft dialogue", |
|
"authors": [ |
|
{ |
|
"first": "Prashant", |
|
"middle": [], |
|
"last": "Jayannavar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anjali", |
|
"middle": [], |
|
"last": "Narayan-Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hockenmaier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2589--2602", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.232" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Prashant Jayannavar, Anjali Narayan-Chen, and Julia Hockenmaier. 2020. Learning to execute instruc- tions in a Minecraft dialogue. In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 2589-2602, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Show, ask, attend, and answer: A strong baseline for visual question answering", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Kazemi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Elqursh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V. Kazemi and A. Elqursh. 2017. Show, ask, attend, and answer: A strong baseline for visual question answering. ArXiv, abs/1704.03162.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Multimodal neural graph memory networks for visual question answering", |
|
"authors": [ |
|
{ |
|
"first": "Mahmoud", |
|
"middle": [], |
|
"last": "Khademi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7177--7188", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.643" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mahmoud Khademi. 2020. Multimodal neural graph memory networks for visual question answering. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 7177- 7188, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Intelligent assistant for exploring data visualizations", |
|
"authors": [ |
|
{ |
|
"first": "Abhinav", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jillian", |
|
"middle": [], |
|
"last": "Aurisano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Eugenio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "FLAIRS Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhinav Kumar, Jillian Aurisano, B. D. Eugenio, and A. Johnson. 2020. Intelligent assistant for exploring data visualizations. In FLAIRS Conference.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Comparative evaluation of feature line techniques for shape depiction", |
|
"authors": [ |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Lawonn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Baer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Saalfeld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernhard", |
|
"middle": [], |
|
"last": "Preim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. of Vision, Modeling and Visualization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "31--38", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kai Lawonn, Alexandra Baer, Patrick Saalfeld, and Bernhard Preim. 2014. Comparative evaluation of feature line techniques for shape depiction. In Proc. of Vision, Modeling and Visualization, pages 31-38, Darmstadt.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Visualization and extraction of carvings for heritage conservation", |
|
"authors": [ |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Lawonn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Trostmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernhard", |
|
"middle": [], |
|
"last": "Preim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Hildebrandt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "IEEE transactions on visualization and computer graphics", |
|
"volume": "23", |
|
"issue": "1", |
|
"pages": "801--810", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kai Lawonn, Erik Trostmann, Bernhard Preim, and Klaus Hildebrandt. 2016. Visualization and extrac- tion of carvings for heritage conservation. IEEE transactions on visualization and computer graph- ics, 23(1):801-810.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Deep reinforcement learning for dialogue generation", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Monroe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Ritter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.01541" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Will Monroe, Alan Ritter, Michel Galley, Jianfeng Gao, and Dan Jurafsky. 2016. Deep rein- forcement learning for dialogue generation. arXiv preprint arXiv:1606.01541.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Adversarial learning for neural dialogue generation", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Monroe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianlin", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S\u00e9bastien", |
|
"middle": [], |
|
"last": "Jean", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Ritter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1701.06547" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Will Monroe, Tianlin Shi, S\u00e9bastien Jean, Alan Ritter, and Dan Jurafsky. 2017. Adversar- ial learning for neural dialogue generation. arXiv preprint arXiv:1701.06547.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Rethinking supervised learning and reinforcement learning in task-oriented dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "Ziming", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Kiseleva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maarten", |
|
"middle": [], |
|
"last": "De Rijke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3537--3546", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.findings-emnlp.316" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ziming Li, Julia Kiseleva, and Maarten de Rijke. 2020. Rethinking supervised learning and reinforcement learning in task-oriented dialogue systems. In Find- ings of the Association for Computational Linguis- tics: EMNLP 2020, pages 3537-3546, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "RikiNet: Reading Wikipedia pages for natural question answering", |
|
"authors": [ |
|
{ |
|
"first": "Dayiheng", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yeyun", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiusheng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daxin", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiancheng", |
|
"middle": [], |
|
"last": "Lv", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6762--6771", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.604" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dayiheng Liu, Yeyun Gong, Jie Fu, Yu Yan, Jiusheng Chen, Daxin Jiang, Jiancheng Lv, and Nan Duan. 2020. RikiNet: Reading Wikipedia pages for nat- ural question answering. In Proceedings of the 58th Annual Meeting of the Association for Computa- tional Linguistics, pages 6762-6771, Online. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "A survey on information visualization: recent advances and challenges. The Visual Computer", |
|
"authors": [ |
|
{ |
|
"first": "Shixia", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weiwei", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yingcai", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mengchen", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "1373--1393", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shixia Liu, Weiwei Cui, Yingcai Wu, and Mengchen Liu. 2014. A survey on information visualization: recent advances and challenges. The Visual Com- puter, 30(12):1373-1393.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Automatic viewpoint selection for exploration of timedependent cerebral aneurysm data", |
|
"authors": [ |
|
{ |
|
"first": "Monique", |
|
"middle": [], |
|
"last": "Meuschke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wito", |
|
"middle": [], |
|
"last": "Engelke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oliver", |
|
"middle": [], |
|
"last": "Beuing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernhard", |
|
"middle": [], |
|
"last": "Preim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Lawonn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Bildverarbeitung f\u00fcr die Medizin", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "352--357", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Monique Meuschke, Wito Engelke, Oliver Beuing, Bernhard Preim, and Kai Lawonn. 2017. Auto- matic viewpoint selection for exploration of time- dependent cerebral aneurysm data. In Bildverar- beitung f\u00fcr die Medizin, pages 352-357. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Clustering of aortic vortex flow in cardiac 4d pc-mri data", |
|
"authors": [ |
|
{ |
|
"first": "Monique", |
|
"middle": [], |
|
"last": "Meuschke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Lawonn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "K\u00f6hler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Uta", |
|
"middle": [], |
|
"last": "Preim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernhard", |
|
"middle": [], |
|
"last": "Preim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Bildverarbeitung f\u00fcr die Medizin", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "182--187", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Monique Meuschke, Kai Lawonn, Benjamin K\u00f6hler, Uta Preim, and Bernhard Preim. 2016. Clustering of aortic vortex flow in cardiac 4d pc-mri data. In Bildverarbeitung f\u00fcr die Medizin, pages 182-187. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Exploration of blood flow patterns in cerebral aneurysms during the cardiac cycle", |
|
"authors": [ |
|
{ |
|
"first": "Monique", |
|
"middle": [], |
|
"last": "Meuschke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Vo\u00df", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernhard", |
|
"middle": [], |
|
"last": "Preim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Lawonn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Computers & Graphics", |
|
"volume": "72", |
|
"issue": "", |
|
"pages": "12--25", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Monique Meuschke, Samuel Vo\u00df, Bernhard Preim, and Kai Lawonn. 2018. Exploration of blood flow pat- terns in cerebral aneurysms during the cardiac cycle. Computers & Graphics, 72:12-25.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Trends in integration of vision and language research: A survey of tasks, datasets, and methods", |
|
"authors": [ |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Mogadala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marimuthu", |
|
"middle": [], |
|
"last": "Kalimuthu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dietrich", |
|
"middle": [], |
|
"last": "Klakow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.09358" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aditya Mogadala, Marimuthu Kalimuthu, and Dietrich Klakow. 2019. Trends in integration of vision and language research: A survey of tasks, datasets, and methods. arXiv preprint arXiv:1907.09358.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Nl4dv: A toolkit for generating analytic specifications for data visualization from natural language queries", |
|
"authors": [ |
|
{ |
|
"first": "Arpit", |
|
"middle": [], |
|
"last": "Narechania", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arjun", |
|
"middle": [], |
|
"last": "Srinivasan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Stasko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Transactions on Visualization and Computer Graphics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arpit Narechania, Arjun Srinivasan, and John Stasko. 2020. Nl4dv: A toolkit for generating analytic speci- fications for data visualization from natural language queries. IEEE Transactions on Visualization and Computer Graphics.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Dynamic fusion network for multidomain end-to-end task-oriented dialog", |
|
"authors": [ |
|
{ |
|
"first": "Libo", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wanxiang", |
|
"middle": [], |
|
"last": "Che", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6344--6354", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.565" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Libo Qin, Xiao Xu, Wanxiang Che, Yue Zhang, and Ting Liu. 2020. Dynamic fusion network for multi- domain end-to-end task-oriented dialog. In Pro- ceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 6344- 6354, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Storytelling with dialogue: A Critical Role Dungeons and Dragons Dataset", |
|
"authors": [ |
|
{ |
|
"first": "Revanth", |
|
"middle": [], |
|
"last": "Rameshkumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Bailey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5121--5134", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.459" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Revanth Rameshkumar and Peter Bailey. 2020. Story- telling with dialogue: A Critical Role Dungeons and Dragons Dataset. In Proceedings of the 58th Annual Meeting of the Association for Computational Lin- guistics, pages 5121-5134, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "A survey of natural language generation techniques with a focus on dialogue systems-past, present and future directions", |
|
"authors": [ |
|
{ |
|
"first": "Sashank", |
|
"middle": [], |
|
"last": "Santhanam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samira", |
|
"middle": [], |
|
"last": "Shaikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.00500" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sashank Santhanam and Samira Shaikh. 2019. A sur- vey of natural language generation techniques with a focus on dialogue systems-past, present and future directions. arXiv preprint arXiv:1906.00500.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Eviza: A natural language interface for visual analysis", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Setlur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Battersby", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melanie", |
|
"middle": [], |
|
"last": "Tory", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Gossweiler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angel", |
|
"middle": [ |
|
"X" |
|
], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 29th Annual Symposium on User Interface Software and Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V. Setlur, S. Battersby, Melanie Tory, R. Gossweiler, and Angel X. Chang. 2016. Eviza: A natural lan- guage interface for visual analysis. Proceedings of the 29th Annual Symposium on User Interface Soft- ware and Technology.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Sentifiers: Interpreting vague intent modifiers in visual analysis using word co-occurrence and sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Vidya", |
|
"middle": [], |
|
"last": "Setlur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arathi", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2009.12701" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vidya Setlur and Arathi Kumar. 2020. Sentifiers: In- terpreting vague intent modifiers in visual analysis using word co-occurrence and sentiment analysis. arXiv preprint arXiv:2009.12701.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "ChartDialogs: Plotting from Natural Language Instructions", |
|
"authors": [ |
|
{ |
|
"first": "Yutong", |
|
"middle": [], |
|
"last": "Shao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ndapa", |
|
"middle": [], |
|
"last": "Nakashole", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3559--3574", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.328" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yutong Shao and Ndapa Nakashole. 2020. ChartDi- alogs: Plotting from Natural Language Instructions. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 3559-3574, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Image-chat: Engaging grounded conversations", |
|
"authors": [ |
|
{ |
|
"first": "Kurt", |
|
"middle": [], |
|
"last": "Shuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Humeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2414--2429", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.219" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kurt Shuster, Samuel Humeau, Antoine Bordes, and Ja- son Weston. 2020. Image-chat: Engaging grounded conversations. In Proceedings of the 58th Annual Meeting of the Association for Computational Lin- guistics, pages 2414-2429, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "How to ask what to say?: Strategies for evaluating natural language interfaces for data visualization", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Srinivasan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Stasko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Keefe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melanie", |
|
"middle": [], |
|
"last": "Tory", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Computer Graphics and Applications", |
|
"volume": "40", |
|
"issue": "", |
|
"pages": "96--103", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Srinivasan, J. Stasko, Daniel F. Keefe, and Melanie Tory. 2020. How to ask what to say?: Strategies for evaluating natural language interfaces for data vi- sualization. IEEE Computer Graphics and Applica- tions, 40:96-103.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Plotthread: Creating expressive storyline visualizations using reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Tan", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Renzhong", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xinke", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Knittel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steffen", |
|
"middle": [], |
|
"last": "Koch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Ertl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lingyun", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peiran", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yingcai", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Transactions on Visualization and Computer Graphics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tan Tang, Renzhong Li, Xinke Wu, Shuhan Liu, Jo- hannes Knittel, Steffen Koch, Thomas Ertl, Lingyun Yu, Peiran Ren, and Yingcai Wu. 2020. Plotthread: Creating expressive storyline visualizations using re- inforcement learning. IEEE Transactions on Visual- ization and Computer Graphics.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Interaction for visualization", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Tominski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Interaction for Visualization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Tominski. 2015. Interaction for visualization. In Interaction for Visualization.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Show and tell: A neural image caption generator", |
|
"authors": [ |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Toshev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samy", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dumitru", |
|
"middle": [], |
|
"last": "Erhan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3156--3164", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oriol Vinyals, Alexander Toshev, Samy Bengio, and Dumitru Erhan. 2015. Show and tell: A neural im- age caption generator. In Proceedings of the IEEE conference on computer vision and pattern recogni- tion, pages 3156-3164.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "Vd-bert: A unified vision and dialog transformer with bert", |
|
"authors": [ |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shafiq", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Joty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Lyu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Irwin", |
|
"middle": [], |
|
"last": "King", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Hoi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yue Wang, Shafiq R. Joty, Michael R. Lyu, Irwin King, Caiming Xiong, and S. Hoi. 2020. Vd-bert: A uni- fied vision and dialog transformer with bert. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "A virtual reality memory palace variant aids knowledge retrieval from scholarly articles", |
|
"authors": [ |
|
{ |
|
"first": "Fumeng", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Novotny", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Badre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cullen", |
|
"middle": [], |
|
"last": "Jackson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Laidlaw", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Transactions on Visualization and Computer Graphics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fumeng Yang, Jing Qian, Johannes Novotny, David Badre, Cullen Jackson, and David Laidlaw. 2020. A virtual reality memory palace variant aids knowl- edge retrieval from scholarly articles. IEEE Trans- actions on Visualization and Computer Graphics.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "Toward a deeper understanding of the role of interaction in information visualization", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Yi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Stasko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Jacko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "IEEE Transactions on Visualization and Computer Graphics", |
|
"volume": "13", |
|
"issue": "", |
|
"pages": "1224--1231", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. S. Yi, Y. Kang, J. Stasko, and J. Jacko. 2007. Toward a deeper understanding of the role of interaction in information visualization. IEEE Transactions on Vi- sualization and Computer Graphics, 13:1224-1231.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "The hidden information state model: A practical framework for pomdp-based spoken dialogue management", |
|
"authors": [ |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Milica", |
|
"middle": [], |
|
"last": "Ga\u0161i\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Keizer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fran\u00e7ois", |
|
"middle": [], |
|
"last": "Mairesse", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jost", |
|
"middle": [], |
|
"last": "Schatzmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Blaise", |
|
"middle": [], |
|
"last": "Thomson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [ |
|
"Yu" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Computer Speech & Language", |
|
"volume": "24", |
|
"issue": "2", |
|
"pages": "150--174", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steve Young, Milica Ga\u0161i\u0107, Simon Keizer, Fran\u00e7ois Mairesse, Jost Schatzmann, Blaise Thomson, and Kai Yu. 2010. The hidden information state model: A practical framework for pomdp-based spoken dia- logue management. Computer Speech & Language, 24(2):150-174.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "Pomdp-based statistical spoken dialog systems: A review", |
|
"authors": [ |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Milica", |
|
"middle": [], |
|
"last": "Ga\u0161i\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Blaise", |
|
"middle": [], |
|
"last": "Thomson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Williams", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the IEEE", |
|
"volume": "101", |
|
"issue": "5", |
|
"pages": "1160--1179", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steve Young, Milica Ga\u0161i\u0107, Blaise Thomson, and Ja- son D Williams. 2013. Pomdp-based statistical spo- ken dialog systems: A review. Proceedings of the IEEE, 101(5):1160-1179.", |
|
"links": null |
|
}, |
|
"BIBREF58": { |
|
"ref_id": "b58", |
|
"title": "Flowsense: A natural language interface for visual data exploration within a dataflow system", |
|
"authors": [ |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Silva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Transactions on Visualization and Computer Graphics", |
|
"volume": "26", |
|
"issue": "", |
|
"pages": "1--11", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bowen Yu and C. T. Silva. 2020. Flowsense: A natural language interface for visual data exploration within a dataflow system. IEEE Transactions on Visualiza- tion and Computer Graphics, 26:1-11.", |
|
"links": null |
|
}, |
|
"BIBREF59": { |
|
"ref_id": "b59", |
|
"title": "Baby-Walk: Going farther in vision-and-language navigation by taking baby steps", |
|
"authors": [ |
|
{ |
|
"first": "Wang", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hexiang", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiacheng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiwei", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vihan", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Ie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Sha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2539--2556", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.229" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wang Zhu, Hexiang Hu, Jiacheng Chen, Zhiwei Deng, Vihan Jain, Eugene Ie, and Fei Sha. 2020. Baby- Walk: Going farther in vision-and-language naviga- tion by taking baby steps. In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 2539-2556, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Goal-oriented NLI as used inYu and Silva (2020), created from: https://visflow.org/demo/ 3 Existing Work 3.", |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"text": "(2020); Huang et al. (2019); Yu and Silva (2020); Fu et al. (2020); Chowdhury et al. (2021); Setlur et al.", |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"text": "Visual exploration of a visual model of an aneurysm and an exemplary mixed-initiative dialogue", |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure" |
|
} |
|
} |
|
} |
|
} |