{ "paper_id": "2022", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T03:10:33.748541Z" }, "title": "", "authors": [], "year": "", "venue": null, "identifiers": {}, "abstract": "", "pdf_parse": { "paper_id": "2022", "_pdf_hash": "", "abstract": [], "body_text": [ { "text": "Welcome to UnImplicit: The Second Workshop on Understanding Implicit and Underspecified Language. The focus of this workshop is on implicit and underspecified phenomena in language, which pose serious challenges to standard natural language processing models as they often require incorporating greater context, using symbolic inference and common-sense reasoning, or more generally, going beyond strictly lexical and compositional meaning constructs. This challenge spans all phases of the NLP model's life cycle: from collecting and annotating relevant data, through devising computational methods for modeling such phenomena, to evaluating and designing proper evaluation metrics. In this workshop, our goal is to bring together theoreticians and practitioners from the entire NLP cycle, from annotation and benchmarking to modeling and applications, and to provide an umbrella for the development, discussion and standardization of the study of understanding implicit and underspecified language. In total, we received 11 submissions (6 of which non-archival), out of which 10 were accepted and 1 was withdrawn. All accepted submissions are presented as posters and two works are additionally presented in an oral presentation. The workshop also includes three invited talks on topics related to implicit language. The program committee consisted of 22 researchers, who we'd like to thank for providing helpful and constructive reviews on the papers. We'd also like to thank all authors for their submissions and interest in our workshop. ", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": null } ], "back_matter": [], "bib_entries": { "BIBREF1": { "ref_id": "b1", "title": "Pre-trained Language Models' Interpretation of Evaluativity Implicature: Evidence from Gradable Adjectives Usage in Context Yan Cong Searching for PETs: Using Distributional and Sentiment-Based Methods to Find Potentially Euphemistic Terms Patrick Lee, Martha Gavidia, Anna Feldman and Jing", "authors": [], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Pre-trained Language Models' Interpretation of Evaluativity Implicature: Evidence from Gradable Adjectives Usage in Context Yan Cong Searching for PETs: Using Distributional and Sentiment-Based Methods to Find Potentially Euphemistic Terms Patrick Lee, Martha Gavidia, Anna Feldman and Jing Peng 11:00 -12:00 Poster 12:00 -13:30 Lunch 13:30 -14:15 Session B 14:15 -15:00 Invited Talk 2 15:00 -15:30 Break 15:30 -16:15 Invited Talk 3 16:15 -16:45 Session C 16:45 -17:00 Closing Remarks viii", "links": null } }, "ref_entries": { "TABREF1": { "html": null, "type_str": "table", "content": "