|
{ |
|
"paper_id": "N09-1030", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:42:42.120149Z" |
|
}, |
|
"title": "May All Your Wishes Come True: A Study of Wishes and How to Recognize Them", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Goldberg", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Wisconsin-Madison", |
|
"location": { |
|
"postCode": "53706", |
|
"settlement": "Madison", |
|
"region": "WI", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "goldberg@cs.wisc.edu" |
|
}, |
|
{ |
|
"first": "Nathanael", |
|
"middle": [], |
|
"last": "Fillmore", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Wisconsin-Madison", |
|
"location": { |
|
"postCode": "53706", |
|
"settlement": "Madison", |
|
"region": "WI", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "nathanae@cs.wisc.edu" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Andrzejewski", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Wisconsin-Madison", |
|
"location": { |
|
"postCode": "53706", |
|
"settlement": "Madison", |
|
"region": "WI", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Zhiting", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Wisconsin-Madison", |
|
"location": { |
|
"postCode": "53706", |
|
"settlement": "Madison", |
|
"region": "WI", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "zhiting@cs.wisc.edu" |
|
}, |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Gibson", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Wisconsin-Madison", |
|
"location": { |
|
"postCode": "53706", |
|
"settlement": "Madison", |
|
"region": "WI", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "bgibson@cs.wisc.edu" |
|
}, |
|
{ |
|
"first": "Xiaojin", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Wisconsin-Madison", |
|
"location": { |
|
"postCode": "53706", |
|
"settlement": "Madison", |
|
"region": "WI", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "A wish is \"a desire or hope for something to happen.\" In December 2007, people from around the world offered up their wishes to be printed on confetti and dropped from the sky during the famous New Year's Eve \"ball drop\" in New York City's Times Square. We present an in-depth analysis of this collection of wishes. We then leverage this unique resource to conduct the first study on building general \"wish detectors\" for natural language text. Wish detection complements traditional sentiment analysis and is valuable for collecting business intelligence and insights into the world's wants and desires. We demonstrate the wish detectors' effectiveness on domains as diverse as consumer product reviews and online political discussions.", |
|
"pdf_parse": { |
|
"paper_id": "N09-1030", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "A wish is \"a desire or hope for something to happen.\" In December 2007, people from around the world offered up their wishes to be printed on confetti and dropped from the sky during the famous New Year's Eve \"ball drop\" in New York City's Times Square. We present an in-depth analysis of this collection of wishes. We then leverage this unique resource to conduct the first study on building general \"wish detectors\" for natural language text. Wish detection complements traditional sentiment analysis and is valuable for collecting business intelligence and insights into the world's wants and desires. We demonstrate the wish detectors' effectiveness on domains as diverse as consumer product reviews and online political discussions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Each year, New York City rings in the New Year with the famous \"ball drop\" in Times Square. In December 2007, the Times Square Alliance, coproducer of the Times Square New Year's Eve Celebration, launched a Web site called the Virtual Wishing Wall 1 that allowed people around the world to submit their New Year's wishes. These wishes were then printed on confetti and dropped from the sky at midnight on December 31, 2007 in sync with the ball drop.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We obtained access to this set of nearly 100,000 New Year's wishes, which we call the \"WISH corpus.\" Table 1 shows a selected sample of the WISH 1 http://www.timessquarenyc.org/nye/nye interactive.html corpus. Some are far-reaching fantasies and aspirations, while others deal with everyday concerns like economic and medical distress. We analyze this first-of-its-kind corpus in Section 2.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 108, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The New Oxford American Dictionary defines \"wish\" as \"a desire or hope for something to happen.\" How wishes are expressed, and how such wishful expressions can be automatically recognized, are open questions in natural language processing. Leveraging the WISH corpus, we conduct the first study on building general \"wish detectors\" for natural language text, and demonstrate their effectiveness on domains as diverse as consumer product reviews and online political discussions. Such wish detectors have tremendous value in collecting business intelligence and public opinions. We discuss the wish detectors in Section 3, and experimental results in Section 4.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Studying wishes is valuable in at least two aspects:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relation to Prior Work", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "1. Being a special genre of subjective expression, wishes add a novel dimension to sentiment analysis. Sentiment analysis is often used as an automatic market research tool to collect valuable business intelligence from online text (Pang and Lee, 2008; Shanahan et al., 2005; Koppel and Shtrimberg, 2004; Mullen and Malouf, 2008) . Wishes differ from the recent focus of sentiment analysis, namely opinion mining, by revealing what people explicitly want to happen, not just what they like or dislike (Ding et al., 2008; Hu and Liu, 2004) . For example, wishes in product reviews could contain new feature requests. Consider the following (real) prod-peace on earth peace world peace happy new year love health and happiness to be happy i wish for world peace i wish for health and happiness for my family let there be peace on earth i wish u to call me if you read this 555-1234 to find my true love i wish for a puppy for the war in iraq to end peace on earth please a free democratic venezuela may the best of 2007 be the worst of 2008 to be financially stable a little goodness for everyone would be nice i hope i get accepted into a college that i like i wish to get more sex in 2008 please let name be healthy and live all year to be emotionally stable and happy to take over the world Table 1 : Example wishes and their frequencies in the WISH corpus. uct review excerpt: \"Great camera. Indoor shots with a flash are not quite as good as 35mm. I wish the camera had a higher optical zoom so that I could take even better wildlife photos.\" The first sentence contains positive opinion, the second negative opinion. However, wishful statements like the third sentence are often annotated as non-opinion-bearing in sentiment analysis corpora (Hu and Liu, 2004; Ding et al., 2008) , even though they clearly contain important information. 
An automatic \"wish detector\" text-processing tool can be useful for product manufacturers, advertisers, politicians, and others looking to discover what people want.", |
|
"cite_spans": [ |
|
{ |
|
"start": 232, |
|
"end": 252, |
|
"text": "(Pang and Lee, 2008;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 275, |
|
"text": "Shanahan et al., 2005;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 276, |
|
"end": 304, |
|
"text": "Koppel and Shtrimberg, 2004;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 305, |
|
"end": 329, |
|
"text": "Mullen and Malouf, 2008)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 501, |
|
"end": 520, |
|
"text": "(Ding et al., 2008;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 521, |
|
"end": 538, |
|
"text": "Hu and Liu, 2004)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1746, |
|
"end": 1764, |
|
"text": "(Hu and Liu, 2004;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1765, |
|
"end": 1783, |
|
"text": "Ding et al., 2008)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1292, |
|
"end": 1299, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Relation to Prior Work", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "2. Wishes can tell us a lot about people: their innermost feelings, perceptions of what they're lacking, and what they desire (Speer, 1939) . Many psychology researchers have attempted to quantify the contents of wishes and how they vary with factors such as location, gender, age, and personality type (Speer, 1939; Milgram and Riedel, 1969; Ehrlichman and Eichenstein, 1992; King and Broyles, 1997) . These studies have been small scale with only dozens or hundreds of participants. The WISH corpus provides the first large-scale collection of wishes as a window into the world's desires.", |
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 139, |
|
"text": "(Speer, 1939)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 316, |
|
"text": "(Speer, 1939;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 317, |
|
"end": 342, |
|
"text": "Milgram and Riedel, 1969;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 343, |
|
"end": 376, |
|
"text": "Ehrlichman and Eichenstein, 1992;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 377, |
|
"end": 400, |
|
"text": "King and Broyles, 1997)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relation to Prior Work", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "Beyond sentiment analysis, classifying sentences as wishes is an instance of non-topical classification. Tasks under this heading include computational humor (Mihalcea and Strapparava, 2005) , genre classification (Boese and Howe, 2005), authorship attribution (Argamon and Shimoni, 2003) , and metaphor detection (Krishnakumaran and Zhu, 2007) , among others (Mishne et al., 2007; Mihalcea and Liu, 2006) . We share the common goal of classifying text into a unique set of target categories (in our case, wishful and non-wishful), but use different techniques catered to our specific task. Our feature-generation technique for wish detection resembles template-based methods for information extraction (Brin, 1999; Agichtein and Gravano, 2000) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 158, |
|
"end": 190, |
|
"text": "(Mihalcea and Strapparava, 2005)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 288, |
|
"text": "(Argamon and Shimoni, 2003)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 314, |
|
"end": 344, |
|
"text": "(Krishnakumaran and Zhu, 2007)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 381, |
|
"text": "(Mishne et al., 2007;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 382, |
|
"end": 405, |
|
"text": "Mihalcea and Liu, 2006)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 703, |
|
"end": 715, |
|
"text": "(Brin, 1999;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 716, |
|
"end": 744, |
|
"text": "Agichtein and Gravano, 2000)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relation to Prior Work", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "We analyze the WISH corpus with a variety of statistical methods. Our analyses not only reveal what people wished for on New Year's Eve, but also provide insight for the development of wish detectors in Section 3.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analyzing the WISH Corpus", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The complete WISH corpus contains nearly 100,000 wishes collected over a period of 10 days in December 2007, most written in English, with the remainder in Portuguese, Spanish, Chinese, French, and other languages. For this paper, we consider only the 89,574 English wishes. Most of these English wishes contain optional geographic meta data provided by the wisher, indicating a variety of countries (not limited to English-speaking) around the world. We perform minimal preprocessing, including TreeBank-style tokenization, downcasing, and punctuation removal. Each wish is treated as a single entity, regardless of whether it contains multiple sentences. After preprocessing, the average length of a wish is 8 tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analyzing the WISH Corpus", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "As a first step in understanding the content of the wishes, we asked five annotators to manually annotate a random subsample of 5,000 wishes. Sections 2.1 and 2.2 report results on this subsample.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Topic and Scope of Wishes", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The wishes were annotated in terms of two at- tributes: topic and scope. We used 11 pre-defined topic categories, and their distribution in this subsample of the WISH corpus is shown in Figure 1 (a). The most frequent topic is love, while health, happiness, and peace are also common themes. Many wishes also fell into an other category, including specific individual requests (\"i wish for a new puppy\"), solicitations or advertisements (\"call me 555-1234\", \"visit website.com\"), or sinister thoughts (\"to take over the world\"). The 5,000 wishes were also manually assigned a scope. The scope of a wish refers to the range of people that are targeted by the wish. We used 6 pre-defined scope categories: self (\"I want to be happy\"), family (\"For a cure for my husband\"), specific person by name (\"Prayers for name\"), country (\"Bring our troops home!\"), world (\"Peace to everyone in the world\"), and other. In cases where mul-tiple scope labels applied, the broadest scope was selected. Figure 1 (b) shows the scope distribution. It is bimodal: over one third of the wishes are narrowly directed at one's self, while broad wishes at the world level are also frequent. The in-between scopes are less frequent.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 186, |
|
"end": 194, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 986, |
|
"end": 994, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Topic and Scope of Wishes", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "As mentioned earlier, wishers had the option to enter a city/country when submitting wishes. Of the manually annotated wishes, about 4,000 included valid location information, covering all 50 states in the U.S., and all continents except Antarctica.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Wishes Differ by Geographic Location", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We noticed a statistically significant difference between wishes submitted from the United States (about 3600) versus non-U.S. (about 400), both in terms of their topic and scope distributions. For each comparison, we performed a Pearson \u03c7 2 -test using location as the explanatory variable and either topic or scope as the response variable. 2 The null hypothesis is that the variables are independent. For both tests we reject the null hypothesis, with p < 0.001 for topic, and p = 0.006 for scope. This indicates a dependence between location and topic/scope. Asterisks in Figure 2 denote the labels that differ significantly between U.S. and non-U.S. wishes. 3 In particular, we observed that there are significantly more wishes about love, peace, and travel from non-U.S. locales, and more about religion from the U.S. There are significantly more world-scoped wishes from non-U.S. locales, and more countryand family-scoped wishes from the U.S.", |
|
"cite_spans": [ |
|
{ |
|
"start": 343, |
|
"end": 344, |
|
"text": "2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 663, |
|
"end": 664, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 576, |
|
"end": 584, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Wishes Differ by Geographic Location", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We also compared wishes from \"red states\" versus \"blue states\" (U.S. states that voted a majority for the Republican and Democratic presidential candidates in 2008, respectively), but found no significant differences. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Wishes Differ by Geographic Location", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We now move beyond the annotated subsample and examine the full set of 89,574 English wishes. We noticed that a small fraction (4%) of unique wishes account for a relatively large portion (16%) of wish occurrences, while there are also many wishes that only occur once. The question naturally arises: do wishes obey Zipf's Law (Zipf, 1932; Manning and Sch\u00fctze, 1999) ? If so, we should expect the frequency of a unique wish to be inversely proportional to its rank, when sorted by frequency. Figure 3 plots rank versus frequency on a log-log scale and reveals an approximately linear negative slope, thus suggesting that wishes do follow Zipf's law. It also shows that low-occurrence wishes dominate, hence learning might be hindered by data sparseness.", |
|
"cite_spans": [ |
|
{ |
|
"start": 327, |
|
"end": 339, |
|
"text": "(Zipf, 1932;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 366, |
|
"text": "Manning and Sch\u00fctze, 1999)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 492, |
|
"end": 500, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Wishes Follow Zipf's Law", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The 11 topics in Section 2.1 were manually predefined based on domain knowledge. In contrast, in this section we applied Latent Dirichlet Allocation (LDA) (Blei et al., 2003) to identify the latent topics in the full set of 89,574 English wishes in an unsupervised fashion. The goal is to validate and complement the study in Section 2.1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 174, |
|
"text": "(Blei et al., 2003)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Topic Modeling for Wishes", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "To apply LDA to the wishes, we treated each individual wish as a short document. We used topics, Collapsed Gibbs Sampling (Griffiths and Steyvers, 2004) for inference, hyperparameters \u03b1 = 0.5 and \u03b2 = 0.1, and ran Markov Chain Monte Carlo for 2000 iterations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 122, |
|
"end": 152, |
|
"text": "(Griffiths and Steyvers, 2004)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Topic Modeling for Wishes", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "The resulting 12 LDA topics are shown in Table 2, in the form of the highest probability words p(word|topic) in each topic. We manually added summary descriptors for readability. With LDA, it is also possible to observe which words were assigned to which topics in each wish. For example, LDA assigned most words in the wish \"world(8) peace(8) and my friends(4) in iraq(1) to come(1) home(1)\" to two topics: peace and troops (topic numbers in parentheses). Interestingly, these LDA topics largely agree with the pre-defined topics in Section 2.1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Topic Modeling for Wishes", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "We now study the novel NLP task of wish detection, i.e., classifying individual sentences as being wishes or not. Importantly, we want our approach to transfer to domains other than New Year's wishes, including consumer product reviews and online political discussions. It should be pointed out that wishes are highly domain dependent. For example, \"I wish for world peace\" is a common wish on New Year's Eve, but is exceedingly rare in product reviews; and vice versa: \"I want to have instant access to the volume\" may occur in product reviews, but is an un- likely New Year's wish. For this initial study, we do assume that there are some labeled training data in the target domains of interest.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Building Wish Detectors", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To transfer the knowledge learned from the outof-domain WISH corpus to other domains, our key insight is the following: while the content of wishes (e.g., \"world peace\") may not transfer across domains, the ways wishes are expressed (e.g., \"I wish for \") may. We call these expressions wish templates. Our novel contribution is an unsupervised method for discovering candidate templates from the WISH corpus which, when applied to other target domains, improve wish detection in those domains.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Building Wish Detectors", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Before describing our template discovery method, we first describe two simple wish detectors, which serve as baselines.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Two Simple Wish Detectors", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Two Simple Wish Detectors", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "[Manual]: It may seem easy to locate wishes. Perhaps looking for sentences containing the phrases \"i wish,\" \"i hope,\" or some other simple patterns is sufficient for identifying the vast majority of wishes in a domain. To test this hypothesis, we asked two native English speakers (not the annotators, nor affiliated with the project; no exposure to any of the wish datasets) to come up with text patterns that might be used to express wishes. They were shown three dictionary definitions of \"to wish (v)\" and \"wish (n)\". They produced a ranked list of 13 templates; see Table 3 . The underscore matches any string. These templates can be turned into a simple rule-based classifier: If part of a sentence matches one of the templates, the sentence is i wish i hope i want hopefully if only would be better if would like if should would that can't believe didn't don't believe didn't do want i can has classified as a wish. By varying the depth of the list, one can produce different precision/recall behaviors. Overall, we expect [Manual] to have relatively high precision but low recall.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 571, |
|
"end": 578, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Two Simple Wish Detectors", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Another simple method for detecting wishes is to train a standard word-based text classifier using the labeled training set in the target domain. Specifically, we represent each sentence as a binary word-indicator vector, normalized to sum to 1. We then train a linear Support Vector Machine (SVM). This method may have higher recall, but precision may suffer. For instance, the sentence \"Her wish was carried out by her husband\" is not a wish, but could be misclassified as one because of the word \"wish.\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "[Words]:", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Note that neither of the two baseline methods uses the WISH corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "[Words]:", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "We now present our method to automatically discover high quality wish templates using the WISH corpus. The key idea is to exploit redundancy in how the same wish content is expressed. For example, as we see in Table 1 , both \"world peace\" and \"i wish for world peace\" are common wishes. Similarly, both \"health and happiness\" and \"i wish for health and happiness\" appear in the WISH corpus. It is thus reasonable to speculate that \"i wish for \" is a good wish template. Less obvious templates can be discovered in this way, too, such as \"let there be \" from \"peace on earth\" and \"let there be peace on earth.\"", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 210, |
|
"end": 217, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Automatically Discovering Wish Templates", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We formalize this intuition as a bipartite graph, illustrated in Figure 4 . Let W = {w 1 , . . . , w n } be the set of unique wishes in the WISH corpus. The bipartite graph has two types of nodes: content nodes C and template nodes T , and they are generated as follows. If a wish w j (e.g., \"i wish for world peace\") contains another wish w i (e.g., \"world peace\"), we create a content node c 1 = w i and a template node t 1 =\"i wish for \". We denote this relationship by w j = c 1 + t 1 . Note the order of c 1 and t 1 is insignificant, as how the two combine is determined by the underscore in t 1 , and w j = t 1 + c 1 is just fine. In addition, we place a directed edge from c 1 to t 1 with edge weight count(w j ), the frequency of wish w j in the WISH corpus. Then, a template node appears to be a good one if many heavy edges point to it.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 73, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Automatically Discovering Wish Templates", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "On the other hand, a template is less desirable if it is part of a content node. For example, when w j =\"health and happiness\" and w i =\"health\", we create the template t 2 =\" and happiness\" and the content node c 3 = w i . If there is another wish w k =\"i wish for health and happiness\", then there will be a content node c 2 = w j . The template t 2 thus contains some content words (since it matches c 2 ), and may not generalize well in a new domain. We capture this by backward edges: if \u2203c \u2208 C, and \u2203 string s (s not necessarily in C or W ) such that c = s + t, we add a backward edge from t to c with edge weight count(c ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatically Discovering Wish Templates", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Based on such considerations, we devised the following scheme for scoring templates: where in(t) is the in-degree of node t, defined as the sum of edge weights coming into t; out(t) is the outdegree of node t, defined similarly. In other words, a template receives a high score if it is \"used\" by many frequent wishes but does not match many frequent content-only wishes. To create the final set of template features, we apply the threshold score(t) \u2265 5. This produces a final list of 811 templates. Table 4 lists some of the top templates ranked by score(t).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 500, |
|
"end": 507, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Automatically Discovering Wish Templates", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "score(t) = in(t) \u2212 out(t),", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Automatically Discovering Wish Templates", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "While some of these templates still contain time-or scope-related words (\"for my family\"), they are devoid of specific topical content. Notice that we have automatically identified several of the manually derived templates in Table 3 , and introduce many new variations that a learning algorithm can leverage. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 226, |
|
"end": 233, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Automatically Discovering Wish Templates", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "After discovering wish templates as described above, we use them as features for learning in a new domain (e.g., product reviews). For each sentence in the new domain, we assign binary features indicating which templates match the sentence. Two types of matching are possible. Strict matching requires that the template must match an entire sentence from beginning to end, with at least one word filling in for the underscore. (All matching during the template generation process was strict.) Non-strict matching requires only that template match somewhere within a sentence. Rather than choose one type of matching, we create both strict and non-strict template features (1622 binary features total) and let the machine learning algorithm decide what is most useful.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning with Wish Template Features", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Our third wish detector, [Templates], is a linear SVM with the 1622 binary wish template features. Our fourth wish detector, [Words + Templates], is a linear SVM with both template and word features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning with Wish Template Features", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We experimented with two domains, manually labeled at the sentence-level as wishes or non-wishes. 4 Example wishes are listed in Table 6 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 99, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 136, |
|
"text": "Table 6", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Target Domains and Experimental Setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Products. Consumer product reviews: 1,235 sentences selected from a collection of amazon.com and cnet.com reviews (Hu and Liu, 2004; Ding et al., 2008) . 12% of the sentences are labeled as wishes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 132, |
|
"text": "(Hu and Liu, 2004;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 133, |
|
"end": 151, |
|
"text": "Ding et al., 2008)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Domains and Experimental Setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Politics. Political discussion board postings: 6,379 sentences selected from politics.com (Mullen and Malouf, 2008) . 34% are labeled as wishes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 115, |
|
"text": "(Mullen and Malouf, 2008)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Domains and Experimental Setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We automatically split the corpora into sentences using MxTerminator (Reynar and Ratnaparkhi, 1997) . As preprocessing before learning, we tokenized the text in the Penn TreeBank style, down-cased, and removed all punctuation.",
|
"cite_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 99, |
|
"text": "(Reynar and Ratnaparkhi, 1997)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Domains and Experimental Setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For all four wish detectors, we performed 10-fold cross validation. We used the default parameter in SVM light for all trials (Joachims, 1999) . As the data sets are skewed, we compare the detectors using precision-recall curves and the area under the curve (AUC). For the manual baseline, we produce the curve by varying the number of templates applied (in rank order), which gradually predicts more sentences as wishes (increasing recall at the expense of precision). A final point is added at recall 1.0, corresponding to applying an empty template that matches all sentences. For the SVM-based methods, we vary the threshold applied to the real-valued margin prediction to produce the curves. All curves are interpolated, and AUC measures are computed, using the techniques of (Davis and Goadrich, 2006) . Figure 5 shows the precision-recall curves for the Politics corpus. All curves are averages over 10 folds (i.e., for each of 100 evenly spaced, interpolated recall points, the 10 precision values are averaged). As expected, [Manual] can be very precise with low recall-only the very top few templates achieve high precision and pick out a small number of wishes with \"i wish\" and \"i hope.\" As we introduce more templates to cover more true wishes, precision drops off quickly. [Templates] is similar, with slightly better precision in low recall regions.",
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 142, |
|
"text": "(Joachims, 1999)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 781, |
|
"end": 807, |
|
"text": "(Davis and Goadrich, 2006)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 810, |
|
"end": 818, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Target Domains and Experimental Setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "[Words] is the opposite: bad in high recall but good in low recall regions. [Words + Templates] is the best, taking the best from both kinds of features to dominate other curves. Table 5 shows the average AUC across 10 folds. [Words + Templates] is significantly better than all other detectors under paired t-tests (p = 1 \u00d7 10 \u22127 vs. [Manual] , p = 0.01 vs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 335, |
|
"end": 343, |
|
"text": "[Manual]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 186, |
|
"text": "Table 5", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "[Words], and p = 4 \u00d7 10 \u22127 vs. [Templates] ). All other differences are statistically significant, too. Figure 6 shows the precision-recall curves for the Products corpus. Again, [Words + Templates] mostly dominates other detectors. In terms of average AUC across folds (Table 5) , [Words + Templates] is also the best. However, due to the small size of this corpus, the AUC values have high variance, and the difference between [Words + Templates] and [Words] is not statistically significant under a paired t-test (p = 0.16).", |
|
"cite_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 42, |
|
"text": "[Templates]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 112, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF8" |
|
}, |
|
{ |
|
"start": 270, |
|
"end": 279, |
|
"text": "(Table 5)", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Finally, to understand what is being learned in more detail, we take a closer look at the SVM models' weights for one fold of the Products corpus ( Table 7) . The most positive and negative features make intuitive sense. Note that [Words + Templates] seems to rely on templates for selecting wishes and words for excluding non-wishes. This partially explains the synergy of combining the feature types. Table 7 : Features with the largest magnitude weights in the SVM models for one fold of the Products corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 148, |
|
"end": 156, |
|
"text": "Table 7)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 403, |
|
"end": 410, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We have presented a novel study of wishes from an NLP perspective. Using the first-of-its-kind WISH corpus, we generated domain-independent wish templates that improve wish detection performance across product reviews and political discussion posts. Much work remains in this new research area, including the creation of more types of features. Also, due to the difficulty in obtaining wishannotated training data, we plan to explore semisupervised learning for wish detection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The topic test examined a 2 \u00d7 11 contingency table, while the scope test used a 2 \u00d7 6 contingency table. In both tests, all of the cells in the tables had an expected frequency of at least 5, so the \u03c7 2 approximation is valid. 3 To identify the labels that differ significantly by location, we computed the standardized residuals for the cells in the two contingency tables. Standardized residuals are approximately N (0, 1)-distributed and can be used to locate the major contributors to a significant \u03c7 2 -test statistic (Agresti, 2002). The asterisks in Figure 2 indicate the surprisingly large residuals, i.e., the difference between observed and expected frequencies is outside a 95% confidence interval.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "These wish-annotated corpora are available for download at http://pages.cs.wisc.edu/~goldberg/wish_data.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank the Times Square Alliance for providing the WISH corpus, and the Wisconsin Alumni Research Foundation. AG is supported in part by a Yahoo! Key Technical Challenges Grant.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Snowball: Extracting relations from large plain-text collections", |
|
"authors": [ |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Agichtein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luis", |
|
"middle": [], |
|
"last": "Gravano", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the 5th ACM International Conference on Digital Libraries", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "85--94", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eugene Agichtein and Luis Gravano. 2000. Snowball: Extracting relations from large plain-text collections. In In Proceedings of the 5th ACM International Con- ference on Digital Libraries, pages 85-94.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Categorical Data Analysis", |
|
"authors": [ |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Agresti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan Agresti. 2002. Categorical Data Analysis. Wiley- Interscience, second edition.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Automatically categorizing written texts by author gender. Literary and Linguistic Computing", |
|
"authors": [ |
|
{ |
|
"first": "Shlomo", |
|
"middle": [], |
|
"last": "Argamon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anat Rachel", |
|
"middle": [], |
|
"last": "Shimoni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "17", |
|
"issue": "", |
|
"pages": "401--412", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shlomo Argamon and Anat Rachel Shimoni. 2003. Au- tomatically categorizing written texts by author gen- der. Literary and Linguistic Computing, 17:401-412.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Latent dirichlet allocation", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Blei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Jordan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "993--1022", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David M. Blei, Andrew Y. Ng, and Michael I. Jordan. 2003. Latent dirichlet allocation. Journal of Machine Learning Research, 3:993-1022.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Genre classification of web documents", |
|
"authors": [ |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Sugar Boese", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adele", |
|
"middle": [], |
|
"last": "Howe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 20th National Conference on Artificial Intelligence (AAAI-05)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elizabeth Sugar Boese and Adele Howe. 2005. Genre classification of web documents. In Proceedings of the 20th National Conference on Artificial Intelligence (AAAI-05), Poster paper.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Extracting patterns and relations from the world wide web", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sergey Brin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "WebDB '98: Selected papers from the International Workshop on The World Wide Web and Databases", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "172--183", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sergey Brin. 1999. Extracting patterns and relations from the world wide web. In WebDB '98: Selected papers from the International Workshop on The World Wide Web and Databases, pages 172-183. Springer- Verlag.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The relationship between precision-recall and roc curves", |
|
"authors": [ |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Davis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Goadrich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "ICML '06: Proceedings of the 23rd international conference on Machine learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jesse Davis and Mark Goadrich. 2006. The relationship between precision-recall and roc curves. In ICML '06: Proceedings of the 23rd international conference on Machine learning, New York, NY, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A holistic lexicon-based approach to opinion mining", |
|
"authors": [ |
|
{ |
|
"first": "Xiaowen", |
|
"middle": [], |
|
"last": "Ding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "WSDM '08: Proceedings of the international conference on Web search and web data mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "231--240", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaowen Ding, Bing Liu, and Philip S. Yu. 2008. A holistic lexicon-based approach to opinion mining. In WSDM '08: Proceedings of the international confer- ence on Web search and web data mining, pages 231- 240. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Private wishes: Gender similarities and difference", |
|
"authors": [ |
|
{ |
|
"first": "Howard", |
|
"middle": [], |
|
"last": "Ehrlichman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rosalind", |
|
"middle": [], |
|
"last": "Eichenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Sex Roles", |
|
"volume": "26", |
|
"issue": "9", |
|
"pages": "399--422", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Howard Ehrlichman and Rosalind Eichenstein. 1992. Private wishes: Gender similarities and difference. Sex Roles, 26(9):399-422.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Finding scientific topics", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Griffiths", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Steyvers", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the National Academy of Sciences", |
|
"volume": "101", |
|
"issue": "", |
|
"pages": "5228--5235", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Griffiths and Mark Steyvers. 2004. Finding sci- entific topics. Proceedings of the National Academy of Sciences, 101(suppl. 1):5228-5235.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Mining and summarizing customer reviews", |
|
"authors": [ |
|
{ |
|
"first": "Minqing", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of KDD '04, the ACM SIGKDD international conference on Knowledge discovery and data mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "168--177", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minqing Hu and Bing Liu. 2004. Mining and summa- rizing customer reviews. In Proceedings of KDD '04, the ACM SIGKDD international conference on Knowl- edge discovery and data mining, pages 168-177. ACM Press.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Making large-scale svm learning practical", |
|
"authors": [ |
|
{ |
|
"first": "Thorsten", |
|
"middle": [], |
|
"last": "Joachims", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Advances in Kernel Methods -Support Vector Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thorsten Joachims. 1999. Making large-scale svm learning practical. In B. Sch\u00f6lkopf, C. Burges, and A. Smola, editors, Advances in Kernel Methods -Sup- port Vector Learning. MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Wishes, gender, personality, and well-being", |
|
"authors": [ |
|
{ |
|
"first": "Laura", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "King", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sheri", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Broyles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Journal of Personality", |
|
"volume": "65", |
|
"issue": "1", |
|
"pages": "49--76", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laura A. King and Sheri J. Broyles. 1997. Wishes, gen- der, personality, and well-being. Journal of Personal- ity, 65(1):49-76.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Good news or bad news? let the market decide", |
|
"authors": [ |
|
{ |
|
"first": "Moshe", |
|
"middle": [], |
|
"last": "Koppel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Itai", |
|
"middle": [], |
|
"last": "Shtrimberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "AAAI Spring Symposium on Exploring Attitude and Affect in Text", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "86--88", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moshe Koppel and Itai Shtrimberg. 2004. Good news or bad news? let the market decide. In AAAI Spring Symposium on Exploring Attitude and Affect in Text, pages 86-88.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Hunting elusive metaphors using lexical resources", |
|
"authors": [ |
|
{ |
|
"first": "Saisuresh", |
|
"middle": [], |
|
"last": "Krishnakumaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaojin", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the Workshop on Computational Approaches to Figurative Language", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "13--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saisuresh Krishnakumaran and Xiaojin Zhu. 2007. Hunting elusive metaphors using lexical resources. In Proceedings of the Workshop on Computational Approaches to Figurative Language, pages 13-20, Rochester, New York, April. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Foundations of Statistical Natural Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher D. Manning and Hinrich Sch\u00fctze. 1999. Foundations of Statistical Natural Language Process- ing. The MIT Press, Cambridge, Massachusetts.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "A corpus-based approach to finding happiness", |
|
"authors": [ |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hugo", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of AAAI-CAAW-06, the Spring Symposia on Computational Approaches to Analyzing Weblogs", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rada Mihalcea and Hugo Liu. 2006. A corpus-based ap- proach to finding happiness. In Proceedings of AAAI- CAAW-06, the Spring Symposia on Computational Ap- proaches to Analyzing Weblogs.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Making computers laugh: Investigations in automatic humor recognition", |
|
"authors": [ |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlo", |
|
"middle": [], |
|
"last": "Strapparava", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rada Mihalcea and Carlo Strapparava. 2005. Making computers laugh: Investigations in automatic humor recognition. In Empirical Methods in Natural Lan- guage Processing.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Developmental and experiential factors in making wishes", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Norman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Milgram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1969, |
|
"venue": "Child Development", |
|
"volume": "40", |
|
"issue": "3", |
|
"pages": "763--771", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Norman A. Milgram and Wolfgang W. Riedel. 1969. Developmental and experiential factors in making wishes. Child Development, 40(3):763-771.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Moodviews: Tracking and searching mood-annotated blog posts", |
|
"authors": [ |
|
{ |
|
"first": "Gilad", |
|
"middle": [], |
|
"last": "Mishne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Krisztian", |
|
"middle": [], |
|
"last": "Balog", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings International Conf. on Weblogs and Social Media (ICWSM-2007)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "323--324", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gilad Mishne, Krisztian Balog, Maarten de Rijke, and Breyten Ernsting. 2007. Moodviews: Tracking and searching mood-annotated blog posts. In Proceed- ings International Conf. on Weblogs and Social Media (ICWSM-2007), pages 323-324.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Taking sides: User classification for informal online political discourse", |
|
"authors": [ |
|
{ |
|
"first": "Tony", |
|
"middle": [], |
|
"last": "Mullen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Malouf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Internet Research", |
|
"volume": "18", |
|
"issue": "", |
|
"pages": "177--190", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tony Mullen and Robert Malouf. 2008. Taking sides: User classification for informal online political dis- course. Internet Research, 18:177-190.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Opinion mining and sentiment analysis. Foundations and Trends in Information Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lillian", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "1--135", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bo Pang and Lillian Lee. 2008. Opinion mining and sentiment analysis. Foundations and Trends in Infor- mation Retrieval, 2(1-2):1-135.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "A maximum entropy approach to identifying sentence boundaries", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Jeffrey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adwait", |
|
"middle": [], |
|
"last": "Reynar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ratnaparkhi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Fifth Conference on Applied Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey C. Reynar and Adwait Ratnaparkhi. 1997. A maximum entropy approach to identifying sentence boundaries. In Fifth Conference on Applied Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Computing attitude and affect in text", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Shanahan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Qu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Janyce", |
|
"middle": [], |
|
"last": "Wiebe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Shanahan, Yan Qu, and Janyce Wiebe, editors. 2005. Computing attitude and affect in text. Springer, Dordrecht, The Netherlands.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Oral and written wishes of rural and city school children", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Speer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1939, |
|
"venue": "Child Development", |
|
"volume": "10", |
|
"issue": "3", |
|
"pages": "151--155", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George S. Speer. 1939. Oral and written wishes of rural and city school children. Child Development, 10(3):151-155.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Selected Studies of the Principle of Relative Frequency in Language", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Zipf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1932, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. K. Zipf. 1932. Selected Studies of the Principle of Relative Frequency in Language. Harvard University Press.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Topic and scope distributions based on manual annotations of a random sample of 5,000 wishes in the WISH corpus.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"text": "(a) Wish topics differ by Location (b) Wish scopes differ by Location", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"text": "Geographical breakdown of topic and scope distributions based on approximately 4,000 locationtagged wishes. Asterisks indicate statistically significant differences.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"text": "The rank vs. frequency plot of wishes, approximately obeying Zipf's law. Note the log-log scale.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"text": "The bipartite graph to create templates.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF6": { |
|
"text": "Politics domain precision-recall curves.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF8": { |
|
"text": "Products domain precision-recall curves.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"text": "Topic Summary Top words in the topic, sorted by p(word|topic) 0 NewYear year, new, happy, 2008, best, everyone, great, years, wishing, prosperous, may, hope 1 Troops all, god, home, come, may, safe, s, us, bless, troops, bring, iraq, return, 2008, true, dreams 2 Election wish, end, no, more, 2008 Life more, better, life, one, live, time, make, people, than, everyone, day, wish, every, each 4Prosperity health, happiness, good, family, friends, all, love, prosperity, wealth, success, wish, peace 5 Love love, me,find, wish, true, life, meet, want, man, marry, call, someone, boyfriend, fall, him 6 Career get, wish, job, out, t, hope, school, better, house, well, want, back, don, college, married 7 Lottery wish, win, 2008, money, want, make, become, lottery, more, great, lots, see, big, times", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>8</td><td>Peace</td><td>peace, world, all, love, earth, happiness, everyone, joy, may, 2008, prosperity, around</td></tr><tr><td>9</td><td>Religion</td><td>love, forever, jesus, know, loves, together, u, always, 2, 3, 4, much, best, mom, christ</td></tr><tr><td>10</td><td>Family</td><td>healthy, happy, wish, 2008, family, baby, life, children, long, safe, husband, stay, marriage</td></tr><tr><td>11</td><td>Health</td><td>com, wish, s, me, lose, please, let, cancer, weight, cure, mom, www, mother, visit, dad</td></tr></table>" |
|
}, |
|
"TABREF1": { |
|
"text": "Wish topics learned from Latent Dirichlet Allocation. Words are sorted by p(word|topic).", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
}, |
|
"TABREF2": { |
|
"text": "Manual templates for identifying wishes.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
}, |
|
"TABREF4": { |
|
"text": "Top templates according to Equation 1.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
}, |
|
"TABREF5": { |
|
"text": "Politics 0.67 \u00b1 0.03 0.77 \u00b1 0.03 0.73 \u00b1 0.03 0.80 \u00b1 0.03 Products 0.49 \u00b1 0.13 0.52 \u00b1 0.16 0.47 \u00b1 0.16 0.56 \u00b1 0.16", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Corpus</td><td>[Manual]</td><td>[Words]</td><td>[Templates] [Words + Templates]</td></tr><tr><td/><td/><td/><td>[Templates] is similar,</td></tr></table>" |
|
}, |
|
"TABREF6": { |
|
"text": "AUC results (10-fold averages \u00b1 one standard deviation).", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Products:</td></tr><tr><td>the only area i wish apple had improved upon would be the screen</td></tr><tr><td>i just want music to eminate from it when i want how i want</td></tr><tr><td>the dial on the original zen was perfect and i wish it was on this model</td></tr><tr><td>i would like album order for my live albums and was just wondering</td></tr><tr><td>Politics:</td></tr><tr><td>all children should be allowed healthcare</td></tr><tr><td>please call on your representatives in dc and ask them to please stop the waste in iraq</td></tr><tr><td>i hope that this is a new beginning for the middle east</td></tr><tr><td>may god bless and protect the brave men and that we will face these dangers in the future</td></tr></table>" |
|
}, |
|
"TABREF7": { |
|
"text": "", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |