{ "paper_id": "2022", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T11:41:35.909950Z" }, "title": "", "authors": [ { "first": "Jacob", "middle": [], "last": "Andreas", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Karthik", "middle": [], "last": "Narasimhan", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Aida", "middle": [], "last": "Nematzadeh", "suffix": "", "affiliation": {}, "email": "" }, { "first": "", "middle": [], "last": "Deepmind", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Catherine", "middle": [], "last": "Wong", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Robert", "middle": [ "D" ], "last": "Hawkins", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Rakesh", "middle": [ "R" ], "last": "Menon", "suffix": "", "affiliation": {}, "email": "" } ], "year": "", "venue": null, "identifiers": {}, "abstract": "", "pdf_parse": { "paper_id": "2022", "_pdf_hash": "", "abstract": [], "body_text": [ { "text": "To a growing extent, advances across machine learning application domains are driven by advances in NLP. In computer vision, image captions are used to shape learned representations of images [Frome et al., 2013 , Mu et al., 2020 , Radford et al., 2021 , Desai and Johnson 2021 . In programming languages, textual code comments are used to guide and constrain models for example-based program synthesis [Yaghmazadeh et al., 2017 , Austin et al., 2021 , Wong et al., 2021 . In robotics and more general policy learning settings, rules and instructions are used to enable generalization to new environments and goals [Zhong et al., 2020 , Narasimhan et al., 2018 , Sharma et al., 2020 . Within NLP, rich natural-language annotations and task descriptions are used to improve the performance and interpretability of models for text categorization and question answering [Hancock et al., 2018 , Weller et al., 2020 , Efrat et al., 2020 . And in cognitive science, experimental evidence suggests that language shapes many other aspects of human cognition (e.g. Jones et al., 1991) . At present, however, most research on learning from language takes place within individual application domains (and mostly outside of the NLP community). While many approaches to language supervision are domain-general, and closely connected to \"core\" NLP research, there are currently no venues where researchers from across the field can meet to share ideas and draw connections between their disparate lines of research. Our workshop will offer a central meeting point for research on language-based supervision, enabling researchers within and beyond NLP to discuss how language processing models and algorithms can be brought to bear on problems beyond the textual realm (e.g. visual recognition, robotics, program synthesis, sequential decision making). Existing workshops like RoboNLP, SPLU, and ViGiL focus on models for multi-modality; inspired by the relationship between language and human cognitive development, our workshop will emphasize broader use of language not just as an input modality but a fundamental source of information about the structure of tasks and problem domains. 
In keeping with this interdisciplinary focus, our workshop format differs in two ways from a standard NLP workshop: first, it places special emphasis on speakers and attendees who would not typically attend NLP conferences; second, it replaces the standard panel discussion with a series of workshop-wide breakout sessions aimed at seeding cross-institutional collaborations around new tasks, datasets, and models.", "cite_spans": [ { "start": 192, "end": 211, "text": "[Frome et al., 2013", "ref_id": null }, { "start": 211, "end": 228, "text": ", Mu et al., 2020", "ref_id": null }, { "start": 228, "end": 250, "text": ", Radford et al., 2021", "ref_id": null }, { "start": 250, "end": 276, "text": ", Desai and Johnson, 2021]", "ref_id": null }, { "start": 401, "end": 426, "text": "[Yaghmazadeh et al., 2017", "ref_id": null }, { "start": 426, "end": 447, "text": ", Austin et al., 2021", "ref_id": null }, { "start": 447, "end": 467, "text": ", Wong et al., 2021]", "ref_id": null }, { "start": 611, "end": 630, "text": "[Zhong et al., 2020", "ref_id": null }, { "start": 630, "end": 655, "text": ", Narasimhan et al., 2018", "ref_id": null }, { "start": 655, "end": 677, "text": ", Sharma et al., 2020]", "ref_id": null }, { "start": 861, "end": 882, "text": "[Hancock et al., 2018", "ref_id": null }, { "start": 882, "end": 903, "text": ", Weller et al., 2020", "ref_id": null }, { "start": 903, "end": 924, "text": ", Efrat et al., 2020]", "ref_id": null }, { "start": 1048, "end": 1067, "text": "Jones et al., 1991)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": null } ], "back_matter": [], "bib_entries": {},
10", "links": null }, "BIBREF2": { "ref_id": "b2", "title": "Single-Turn Debate Does Not Help Humans Answer Hard Reading-Comprehension Questions Alicia", "authors": [ { "first": "Harsh", "middle": [], "last": "Parrish", "suffix": "" }, { "first": "Ethan", "middle": [], "last": "Trivedi", "suffix": "" }, { "first": "Angelica", "middle": [], "last": "Perez", "suffix": "" }, { "first": "Nikita", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Jason", "middle": [], "last": "Nangia", "suffix": "" }, { "first": "", "middle": [], "last": "Phang", "suffix": "" }, { "first": "R", "middle": [], "last": "Samuel", "suffix": "" }, { "first": ".", "middle": [ "." ], "last": "Bowman", "suffix": "" } ], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Single-Turn Debate Does Not Help Humans Answer Hard Reading-Comprehension Questions Alicia Parrish, Harsh Trivedi, Ethan Perez, Angelica Chen, Nikita Nangia, Jason Phang and Samuel R. Bowman . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 17", "links": null }, "BIBREF3": { "ref_id": "b3", "title": "29 A survey on improving NLP models with human explanations Mareike Hartmann and Daniel Sonntag", "authors": [ { "first": "Mohit", "middle": [], "last": "Bansal", "suffix": "" }, { "first": ".", "middle": [ "." ], "last": "", "suffix": "" } ], "year": null, "venue": "When Can Models Learn From Explanations? A Formal Framework for Understanding the Roles of Explanation Data Peter Hase and", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "When Can Models Learn From Explanations? A Formal Framework for Understanding the Roles of Explanation Data Peter Hase and Mohit Bansal . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 29 A survey on improving NLP models with human explanations Mareike Hartmann and Daniel Sonntag . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 40", "links": null } }, "ref_entries": {} } }