{ "paper_id": "P11-1040", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T08:47:05.636998Z" }, "title": "Event Discovery in Social Media Feeds", "authors": [ { "first": "Edward", "middle": [], "last": "Benson", "suffix": "", "affiliation": { "laboratory": "Artificial Intelligence Laboratory", "institution": "Massachusetts Institute of Technology", "location": {} }, "email": "" }, { "first": "Aria", "middle": [], "last": "Haghighi", "suffix": "", "affiliation": { "laboratory": "Artificial Intelligence Laboratory", "institution": "Massachusetts Institute of Technology", "location": {} }, "email": "aria42@csail.mit.edu" }, { "first": "Regina", "middle": [], "last": "Barzilay", "suffix": "", "affiliation": { "laboratory": "Artificial Intelligence Laboratory", "institution": "Massachusetts Institute of Technology", "location": {} }, "email": "regina@csail.mit.edu" } ], "year": "", "venue": null, "identifiers": {}, "abstract": "We present a novel method for record extraction from social streams such as Twitter. Unlike typical extraction setups, these environments are characterized by short, one sentence messages with heavily colloquial speech. To further complicate matters, individual messages may not express the full relation to be uncovered, as is often assumed in extraction tasks. We develop a graphical model that addresses these problems by learning a latent set of records and a record-message alignment simultaneously; the output of our model is a set of canonical records, the values of which are consistent with aligned messages. We demonstrate that our approach is able to accurately induce event records from Twitter messages, evaluated against events from a local city guide. Our method achieves significant error reduction over baseline methods. 1", "pdf_parse": { "paper_id": "P11-1040", "_pdf_hash": "", "abstract": [ { "text": "We present a novel method for record extraction from social streams such as Twitter. Unlike typical extraction setups, these environments are characterized by short, one sentence messages with heavily colloquial speech. To further complicate matters, individual messages may not express the full relation to be uncovered, as is often assumed in extraction tasks. We develop a graphical model that addresses these problems by learning a latent set of records and a record-message alignment simultaneously; the output of our model is a set of canonical records, the values of which are consistent with aligned messages. We demonstrate that our approach is able to accurately induce event records from Twitter messages, evaluated against events from a local city guide. Our method achieves significant error reduction over baseline methods. 1", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Abstract", "sec_num": null } ], "body_text": [ { "text": "We propose a method for discovering event records from social media feeds such as Twitter. The task of extracting event properties has been well studied in the context of formal media (e.g., newswire), but data sources such as Twitter pose new challenges. Social media messages are often short, make heavy use of colloquial language, and require situational context for interpretation (see examples in Figure 1 ). Not all properties of an event may be expressed in a single message, and the mapping between messages and canonical event records is not obvious. Seated at @carnegiehall waiting for @CraigyFerg's show to begin RT @leerader : getting REALLY stoked for #CraigyAtCarnegie sat night. 
Craig, , want to join us for dinner at the pub across the street? 5pm, be there! @DJPaulyD absolutely killed it at Terminal 5 last night. @DJPaulyD : DJ Pauly D Terminal 5 NYC Insanity ! #ohyeah @keadour @kellaferr24", "cite_spans": [], "ref_spans": [ { "start": 402, "end": 410, "text": "Figure 1", "ref_id": "FIGREF1" } ], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Craig, nice seeing you at #noelnight this weekend @becksdavis! These properties of social media streams make existing extraction techniques significantly less effective. Despite these challenges, this data exhibits an important property that makes learning amenable: the multitude of messages referencing the same event. Our goal is to induce a comprehensive set of event records given a seed set of example records, such as a city event calendar table. While such resources are widely available online, they are typically high precision, but low recall. Social media is a natural place to discover new events missed by curation, but mentioned online by someone planning to attend.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "We formulate our approach as a structured graphical model which simultaneously analyzes individual messages, clusters them according to event, and induces a canonical value for each event property. At the message level, the model relies on a conditional random field component to extract field values such as location of the event and artist name. We bias local decisions made by the CRF to be consistent with canonical record values, thereby facilitating consistency within an event cluster. We employ a factorgraph model to capture the interaction between each of these decisions. Variational inference techniques allow us to effectively and efficiently make predictions on a large body of messages.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "A seed set of example records constitutes our only source of supervision; we do not observe alignment between these seed records and individual messages, nor any message-level field annotation. The output of our model consists of an event-based clustering of messages, where each cluster is represented by a single multi-field record with a canonical value chosen for each field.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "We apply our technique to construct entertainment event records for the city calendar section of NYC.com using a stream of Twitter messages. Our method yields up to a 63% recall against the city table and up to 85% precision evaluated manually, significantly outperforming several baselines.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "A large number of information extraction approaches exploit redundancy in text collections to improve their accuracy and reduce the need for manually annotated data (Agichtein and Gravano, 2000; Yangarber et al., 2000; Zhu et al., 2009; Mintz et al., 2009a; Yao et al., 2010b; Hasegawa et al., 2004; Shinyama and Sekine, 2006) . Our work most closely relates to methods for multi-document information extraction which utilize redundancy in input data to increase the accuracy of the extraction process. 
For instance, Mann and Yarowsky (2005) explore methods for fusing extracted information across multiple documents by performing extraction on each document independently and then merging extracted relations by majority vote. This idea of consensus-based extraction is also central to our method. However, we incorporate this idea into our model by simultaneously clustering output and labeling documents rather than performing the two tasks in serial fashion. Another important difference is inherent in the input data we are processing: it is not clear a priori which extraction decisions should agree with each other. Identifying messages that re-fer to the same event is a large part of our challenge.", "cite_spans": [ { "start": 165, "end": 194, "text": "(Agichtein and Gravano, 2000;", "ref_id": "BIBREF0" }, { "start": 195, "end": 218, "text": "Yangarber et al., 2000;", "ref_id": "BIBREF13" }, { "start": 219, "end": 236, "text": "Zhu et al., 2009;", "ref_id": "BIBREF16" }, { "start": 237, "end": 257, "text": "Mintz et al., 2009a;", "ref_id": "BIBREF9" }, { "start": 258, "end": 276, "text": "Yao et al., 2010b;", "ref_id": "BIBREF15" }, { "start": 277, "end": 299, "text": "Hasegawa et al., 2004;", "ref_id": "BIBREF3" }, { "start": 300, "end": 326, "text": "Shinyama and Sekine, 2006)", "ref_id": "BIBREF12" }, { "start": 517, "end": 541, "text": "Mann and Yarowsky (2005)", "ref_id": "BIBREF8" } ], "ref_spans": [], "eq_spans": [], "section": "Related Work", "sec_num": "2" }, { "text": "Our work also relates to recent approaches for relation extraction with distant supervision (Mintz et al., 2009b; Bunescu and Mooney, 2007; Yao et al., 2010a) . These approaches assume a database and a collection of documents that verbalize some of the database relations. In contrast to traditional supervised IE approaches, these methods do not assume that relation instantiations are annotated in the input documents. For instance, the method of Mintz et al. (2009b) induces the mapping automatically by bootstrapping from sentences that directly match record entries. These mappings are used to learn a classifier for relation extraction. Yao et al. (2010a) further refine this approach by constraining predicted relations to be consistent with entity types assignment. To capture the complex dependencies among assignments, Yao et al. (2010a) use a factor graph representation. Despite the apparent similarity in model structure, the two approaches deal with various types of uncertainties. The key challenge for our method is modeling message to record alignment which is not an issue in the previous set up.", "cite_spans": [ { "start": 92, "end": 113, "text": "(Mintz et al., 2009b;", "ref_id": "BIBREF10" }, { "start": 114, "end": 139, "text": "Bunescu and Mooney, 2007;", "ref_id": "BIBREF1" }, { "start": 140, "end": 158, "text": "Yao et al., 2010a)", "ref_id": "BIBREF14" }, { "start": 449, "end": 469, "text": "Mintz et al. (2009b)", "ref_id": "BIBREF10" }, { "start": 643, "end": 661, "text": "Yao et al. (2010a)", "ref_id": "BIBREF14" }, { "start": 829, "end": 847, "text": "Yao et al. (2010a)", "ref_id": "BIBREF14" } ], "ref_spans": [], "eq_spans": [], "section": "Related Work", "sec_num": "2" }, { "text": "Finally, our work fits into a broader area of text processing methods designed for social-media streams. Examples of such approaches include methods for conversation structure analysis (Ritter et al., 2010) and exploration of geographic language variation (Eisenstein et al., 2010) from Twitter messages. 
To our knowledge no work has yet addressed record extraction from this growing corpus.", "cite_spans": [ { "start": 185, "end": 206, "text": "(Ritter et al., 2010)", "ref_id": "BIBREF11" }, { "start": 256, "end": 281, "text": "(Eisenstein et al., 2010)", "ref_id": "BIBREF2" } ], "ref_spans": [], "eq_spans": [], "section": "Related Work", "sec_num": "2" }, { "text": "Here we describe the key latent and observed random variables of our problem. A depiction of all random variables is given in Figure 2 .", "cite_spans": [], "ref_spans": [ { "start": 126, "end": 134, "text": "Figure 2", "ref_id": "FIGREF2" } ], "eq_spans": [], "section": "Problem Formulation", "sec_num": "3" }, { "text": "Message (x): Each message x is a single posting to Twitter. We use x j to represent the j th token of x, and we use x to denote the entire collection of messages. Messages are always observed during training and testing.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Problem Formulation", "sec_num": "3" }, { "text": "A record is a representation of the canonical properties of an event. We use R i to denote the i th record and R i to denote the value of the th property of that record. In the figure above, R 1 1 =\"Craig Ferguson\" and R 2 1 =\"Carnegie Hall.\" Each tweet x i is associated with a labeling over tokens y i and is aligned to a record via the A i variable. See Section 3 for further details.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Record (R):", "sec_num": null }, { "text": "x i y i x i\u22121 y i\u22121 x i+1 y i+1 A i\u22121 A i+1 A i", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Record (R):", "sec_num": null }, { "text": "record's values for the schema ARTIST, VENUE . Throughout, we assume a known fixed number K of records R 1 , . . . , R K , and we use R to denote this collection of records. For tractability, we consider a finite number of possibilities for each R k which are computed from the input x (see Section 5.1 for details). Records are observed during training and latent during testing.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Record (R):", "sec_num": null }, { "text": "We assume that each message has a sequence labeling, where the labels consist of the record fields (e.g., ARTIST and VENUE) as well as a NONE label denoting the token does not correspond to any domain field. Each token x j in a message has an associated label y j . Message labels are always latent during training and testing.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Message Labels (y):", "sec_num": null }, { "text": "We assume that each message is aligned to some record such that the event described in the message is the one represented by that record. Each message x i is associated with an alignment variable A i that takes a value in {1, . . . , K}. We use A to denote the set of alignments across all x i . Multiple messages can and do align to the same record. As discussed in Section 4, our model will encourage tokens associated with message labels to be \"similar\" to corresponding aligned record values. Alignments are always latent during training and testing.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Message to Record Alignment (A):", "sec_num": null }, { "text": "Our model can be represented as a factor graph which takes the form,", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Model", "sec_num": "4" }, { "text": "P (R, A, y|x) \u221d i \u03c6 SEQ (x i , y i ) (Seq. 
Labeling) \u03c6 U N Q (R ) (Rec. Uniqueness) \uf8eb \uf8ed i, \u03c6 P OP (x i , y i , R A i ) \uf8f6 \uf8f8 (Term Popularity) i \u03c6 CON (x i , y i , R A i ) (Rec. Consistency)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Model", "sec_num": "4" }, { "text": "where R denotes the sequence R 1 , . . . , R K of record values for a particular domain field . Each of the potentials takes a standard log-linear form:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Model", "sec_num": "4" }, { "text": "\u03c6(z) = \u03b8 T f (z)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Model", "sec_num": "4" }, { "text": "where \u03b8 are potential-specific parameters and f (\u2022) is a potential-specific feature function. We describe each potential separately below.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Model", "sec_num": "4" }, { "text": "The sequence labeling factor is similar to a standard sequence CRF (Lafferty et al., 2001) , where the potential over a message label sequence decomposes Figure 3 : Factor graph representation of our model. Circles represent variables and squares represent factors. For readability, we depict the graph broken out as a set of templates; the full graph is the combination of these factor templates applied to each variable. See Section 4 for further details. over pairwise cliques:", "cite_spans": [ { "start": 67, "end": 90, "text": "(Lafferty et al., 2001)", "ref_id": "BIBREF5" } ], "ref_spans": [ { "start": 154, "end": 162, "text": "Figure 3", "ref_id": null } ], "eq_spans": [], "section": "Sequence Labeling Factor", "sec_num": "4.1" }, { "text": "X i Y i \u03c6 SEQ R k R k+1 R k\u22121 \u03c6 UNQ th field (across records) \u03c6 P OP R k A i Y i X i A i Y i X i R k R +1 k \u03c6 CON k k th record", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Sequence Labeling Factor", "sec_num": "4.1" }, { "text": "\u03c6 SEQ (x, y) = exp{\u03b8 T SEQ f SEQ (x, y)} = exp \uf8f1 \uf8f2 \uf8f3 \u03b8 T SEQ j f SEQ (x, y j , y j+1 ) \uf8fc \uf8fd \uf8fe", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Sequence Labeling Factor", "sec_num": "4.1" }, { "text": "This factor is meant to encode the typical message contexts in which fields are evoked (e.g. going to see X tonight). Many of the features characterize how likely a given token label, such as ARTIST, is for a given position in the message sequence conditioning arbitrarily on message text context.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Sequence Labeling Factor", "sec_num": "4.1" }, { "text": "The feature function f SEQ (x, y) for this component encodes each token's identity; word shape 2 ; whether that token matches a set of regular expressions encoding common emoticons, time references, and venue types; and whether the token matches a bag of words observed in artist names (scraped from Wikipedia; 21,475 distinct tokens from 22,833 distinct names) or a bag of words observed in New York City venue names (scraped from NYC.com; 304 distinct tokens from 169 distinct names). 3 The only edge feature is label-to-label.", "cite_spans": [ { "start": 487, "end": 488, "text": "3", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Sequence Labeling Factor", "sec_num": "4.1" }, { "text": "One challenge with Twitter is the so-called echo chamber effect: when a topic becomes popular, or \"trends,\" it quickly dominates the conversation online. 
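(As an aside on the sequence-labeling factor above: the token-level features it relies on, such as word shape, time expressions, and gazetteer membership, can be sketched roughly as follows. This is a hypothetical illustration with made-up feature names, regular expressions, and word lists; the paper does not publish its exact feature templates.)

```python
import re

# Rough, hypothetical sketch of token-level features for the phi_SEQ factor.
TIME_RE = re.compile(r"^\d{1,2}(:\d{2})?(am|pm)?$", re.IGNORECASE)
VENUE_WORDS = {"hall", "terminal", "club", "theatre", "theater", "lounge"}  # assumed examples

def word_shape(token):
    # Collapse characters into a coarse shape such as "Xx", "XXX", or "xxx".
    shape = ["X" if c.isupper() else "x" if c.islower() else "d" if c.isdigit() else "-"
             for c in token]
    collapsed = [shape[0]] if shape else []
    for s in shape[1:]:
        if s != collapsed[-1]:      # collapse runs, e.g. "Xxxxx" -> "Xx"
            collapsed.append(s)
    return "".join(collapsed)

def token_features(tokens, j, artist_vocab, venue_vocab):
    tok = tokens[j]
    low = tok.lower()
    return {
        "word=" + low: 1.0,
        "shape=" + word_shape(tok): 1.0,
        "is_time": 1.0 if TIME_RE.match(low) else 0.0,
        "venue_word": 1.0 if low in VENUE_WORDS else 0.0,
        "in_artist_gazetteer": 1.0 if low in artist_vocab else 0.0,
        "in_venue_gazetteer": 1.0 if low in venue_vocab else 0.0,
    }

# Example:
# token_features("Seated at carnegiehall tonight".split(), 2,
#                artist_vocab={"craig", "ferguson"}, venue_vocab={"carnegiehall"})
```

Returning to the effect of trending topics on the record set: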
As a result some events may have only a few referent messages while other more popular events may have thousands or more. In such a circumstance, the messages for a popular event may collect to form multiple identical record clusters. Since we fix the number of records learned, such behavior inhibits the discovery of less talked-about events. Instead, we would rather have just two records: one with two aligned messages and another with thousands. To encourage this outcome, we introduce a potential that rewards fields for being unique across records.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Record Uniqueness Factor", "sec_num": "4.2" }, { "text": "The uniqueness potential \u03c6 U N Q (R ) encodes the preference that each of the values R , . . . , R K for each field do not overlap textually. This factor factorizes over pairs of records:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Record Uniqueness Factor", "sec_num": "4.2" }, { "text": "\u03c6 U N Q (R ) = k =k \u03c6 U N Q (R k , R k )", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Record Uniqueness Factor", "sec_num": "4.2" }, { "text": "where R k and R k are the values of field for two records R k and R k . The potential over this pair of values is given by:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Record Uniqueness Factor", "sec_num": "4.2" }, { "text": "\u03c6 U N Q (R k , R k ) = exp{\u2212\u03b8 T SIM f SIM (R k , R k )}", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Record Uniqueness Factor", "sec_num": "4.2" }, { "text": "where f SIM is computes the likeness of the two values at the token level:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Record Uniqueness Factor", "sec_num": "4.2" }, { "text": "f SIM (R k , R k ) = |R k \u2229 R k | max(|R k |, |R k |)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Record Uniqueness Factor", "sec_num": "4.2" }, { "text": "This uniqueness potential does not encode any preference for record values; it simply encourages each field to be distinct across records.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Record Uniqueness Factor", "sec_num": "4.2" }, { "text": "The term popularity factor \u03c6 P OP is the first of two factors that guide the clustering of messages. Because speech on Twitter is colloquial, we would like these clusters to be amenable to many variations of the canonical record properties that are ultimately learned. The \u03c6 P OP factor accomplishes this by representing a lenient compatibility score between a message x, its labels y, and some candidate value v for a record field (e.g., Dave Matthews Band).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Term Popularity Factor", "sec_num": "4.3" }, { "text": "This factor decomposes over tokens, and we align each token x j with the best matching token v k in v (e.g., Dave). The token level sum is scaled by the length of the record value being matched to avoid a preference for long field values.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Term Popularity Factor", "sec_num": "4.3" }, { "text": "\u03c6 P OP (x, y, R A = v) = j max k \u03c6 P OP (x j , y j , R A = v k ) |v|", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Term Popularity Factor", "sec_num": "4.3" }, { "text": "This token-level component may be thought of as a compatibility score between the labeled token x j and the record field assignment R A = v. 
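(A brief aside before completing the token-level definition: the uniqueness similarity f_SIM introduced in Section 4.2 can be sketched as plain token overlap. This is a minimal illustration that assumes whitespace tokenization and treats values as token sets; the paper does not specify its tokenizer.)

```python
def f_sim(value_a, value_b):
    """Token-overlap similarity between two candidate field values,
    e.g. f_sim("Terminal 5", "Terminal Five") = 0.5.
    A sketch of f_SIM from Section 4.2."""
    a = set(value_a.lower().split())
    b = set(value_b.lower().split())
    if not a or not b:
        return 0.0
    return len(a & b) / max(len(a), len(b))
```

Returning to the popularity factor's token-level score: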
Given that token x j aligns with the token v k , the token-level component returns the sum of three parts, subject to the constraint that y j = :", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Term Popularity Factor", "sec_num": "4.3" }, { "text": "\u2022 IDF (x j )I[x j = v k ]", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Term Popularity Factor", "sec_num": "4.3" }, { "text": ", an equality indicator between tokens x j and v k , scaled by the inverse document frequency of x j", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Term Popularity Factor", "sec_num": "4.3" }, { "text": "\u2022 \u03b1IDF (x j ) I[x j\u22121 = v k\u22121 ] + I[x j+1 = v k+1 ]", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Term Popularity Factor", "sec_num": "4.3" }, { "text": ", a small bonus of \u03b1 = 0.3 for matches on adjacent tokens, scaled by the IDF of x j", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Term Popularity Factor", "sec_num": "4.3" }, { "text": "\u2022 I[x j = v k and x contains v]/|v|, a bonus for a complete string match, scaled by the size of the value. This is equivalent to this token's contribution to a complete-match bonus.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Term Popularity Factor", "sec_num": "4.3" }, { "text": "While the uniqueness factor discourages a flood of messages for a single event from clustering into multiple event records, we also wish to discourage messages from multiple events from clustering into the same record. When such a situation occurs, the model may either resolve it by changing inconsistent token labelings to the NONE label or by reassigning some of the messages to a new cluster. We encourage the latter solution with a record consistency factor \u03c6 CON . The record consistency factor is an indicator function on the field values of a record being present and labeled correctly in a message. While the popularity factor encourages agreement on a per-label basis, this factor influences the joint behavior of message labels to agree with the aligned record. For a given record, message, and labeling, \u03c6 CON (x, y, R A ) = 1 if \u03c6 P OP (x, y, R A ) > 0 for all , and 0 otherwise.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Record Consistency Factor", "sec_num": "4.4" }, { "text": "The weights of the CRF component of our model, \u03b8 SEQ , are the only weights learned at training time, using a distant supervision process described in Section 6. The weights of the remaining three factors were hand-tuned 4 using our training data set.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Parameter Learning", "sec_num": "4.5" }, { "text": "Our goal is to predict a set of records R. Ideally we would like to compute P (R|x), marginalizing out the nuisance variables A and y. We approximate this posterior using variational inference. 5 Concretely, we approximate the full posterior over latent variables using a mean-field factorization:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Inference", "sec_num": "5" }, { "text": "P (R, A, y|x) \u2248 Q(R, A, y) = K k=1 q(R k ) n i=1 q(A i )q(y i )", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Inference", "sec_num": "5" }, { "text": "where each variational factor q(\u2022) represents an approximation of that variable's posterior given observed random variables. 
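(To make the factorization concrete, the sketch below sets up one categorical factor per latent variable. Variable and function names are illustrative, not taken from the released code, and the initialization follows Section 5.1 only loosely.)

```python
import numpy as np

def init_mean_field(n_messages, K, candidate_values, message_lengths, n_labels, rng):
    """Schematic containers for the mean-field factors q(A_i), q(R_k^l), q(y_i)."""
    # q(A_i): a distribution over the K records for each message,
    # roughly "uniform plus a small amount of noise" as in Section 5.1.
    qA = rng.dirichlet(np.full(K, 50.0), size=n_messages)

    # q(R_k^l): for each record k and field l, a distribution over the finite
    # candidate value set precomputed from the corpus (uniform here for
    # simplicity; the paper initializes randomly and biases with a baseline).
    qR = {(k, field): np.full(len(values), 1.0 / len(values))
          for field, values in candidate_values.items()
          for k in range(K)}

    # q(y_i): per-token label marginals for each message (uniform here; the
    # paper initializes them from forward-backward under phi_SEQ alone).
    qY = [np.full((length, n_labels), 1.0 / n_labels) for length in message_lengths]
    return qA, qR, qY

# Example with illustrative values:
# rng = np.random.default_rng(0)
# qA, qR, qY = init_mean_field(
#     n_messages=3, K=2,
#     candidate_values={"ARTIST": ["craig ferguson", "dj pauly d"],
#                       "VENUE": ["carnegie hall", "terminal 5"]},
#     message_lengths=[7, 9, 5], n_labels=3, rng=rng)
```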
The variational distribution Q(\u2022) makes the (incorrect) assumption that the posteriors amongst factors are independent. The goal of variational inference is to set factors q(\u2022) to optimize the variational objective:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Inference", "sec_num": "5" }, { "text": "min Q(\u2022) KL(Q(R, A, y) P (R, A, y|x))", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Inference", "sec_num": "5" }, { "text": "We optimize this objective using coordinate descent on the q(\u2022) factors. For instance, for the case of q(y i ) the update takes the form:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Inference", "sec_num": "5" }, { "text": "q(y i ) \u2190 E Q/q(y i ) log P (R, A, y|x)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Inference", "sec_num": "5" }, { "text": "where Q/q(y i ) denotes the expectation under all variables except y i . When computing a mean field update, we only need to consider the potentials involving that variable. The complete updates for each of the kinds of variables (y, A, and R ) can be found in Figure 4 . We briefly describe the computations involved with each update. q(y) update: The q(y) update for a single message yields an implicit expression in terms of pairwise cliques in y. We can compute arbitrary Message labeling update:", "cite_spans": [], "ref_spans": [ { "start": 261, "end": 269, "text": "Figure 4", "ref_id": null } ], "eq_spans": [], "section": "Inference", "sec_num": "5" }, { "text": "ln q(y) \u221d E Q/q(y) ln \u03c6 SEQ (x, y) + ln \u03c6 P OP (x, y, R A )\u03c6 CON (x, y, R A ) = ln \u03c6 SEQ (x, y) + E Q/q(y) ln \u03c6 P OP (x, y, R A )\u03c6 CON (x, y, R A ) = ln \u03c6 SEQ (x, y) + z,v, q(A = z)q(y j = )q(R z = v) ln \u03c6 P OP (x, y, R z = v)\u03c6 CON (x, y, R z = v)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Inference", "sec_num": "5" }, { "text": "Mention record alignment update:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Inference", "sec_num": "5" }, { "text": "ln q(A = z) \u221d E Q/q(A) ln \u03c6 SEQ (x, y) + ln \u03c6 P OP (x, y, R A )\u03c6 CON (x, y, R A ) \u221d E Q/q(A) ln \u03c6 P OP (x, y, R A )\u03c6 CON (x, y, R A ) = z,v, q(R z = v) ln \u03c6 P OP (x, y, R z = v)\u03c6 CON (x, y, R z = v) = z,v, q(R z = v)q(y j i = ) ln \u03c6 P OP (x, y, R z = v)\u03c6 CON (x, y, R z = v)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Inference", "sec_num": "5" }, { "text": "Record Field update:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Inference", "sec_num": "5" }, { "text": "ln q(R k = v) \u221d E Q/q(R k ) k ln \u03c6 U N Q (R k , v) + i ln [\u03c6 P OP (x i , y i , v)\u03c6 CON (x i , y i , v)] = k =k,v q(R k = v ) ln \u03c6 U N Q (v, v ) + i q(A i = k) j q(y j i = ) ln \u03c6 P OP (x, y, R z = v, j)\u03c6 CON (x, y, R z = v, j)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Inference", "sec_num": "5" }, { "text": "Figure 4: The variational mean-field updates used during inference (see Section 5). Inference consists of performing updates for each of the three kinds of latent variables: message labels (y), record alignments (A), and record field values (R ). All are relatively cheap to compute except for the record field update q(R k ) which requires looping potentially over all messages. Note that at inference time all parameters are fixed and so we only need to perform updates for latent variable factors. 
marginals for this distribution by using the forwardsbackwards algorithm on the potentials defined in the update. Therefore computing the q(y) update amounts to re-running forward backwards on the message where there is an expected potential term which involves the belief over other variables. Note that the popularity and consensus potentials (\u03c6 P OP and \u03c6 CON ) decompose over individual message tokens so this can be tractably computed.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Inference", "sec_num": "5" }, { "text": "q(A) update: The update for individual record alignment reduces to being log-proportional to the expected popularity and consensus potentials. q(R k ) update: The update for the record field distribution is the most complex factor of the three. It requires computing expected similarity with other record field values (the \u03c6 U N Q potential) and looping over all messages to accumulate a contribution from each, weighted by the probability that it is aligned to the target record.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Inference", "sec_num": "5" }, { "text": "Since a uniform initialization of all factors is a saddle-point of the objective, we opt to initialize the q(y) factors with the marginals obtained using just the CRF parameters, accomplished by running forwards-backwards on all messages using only the \u03c6 SEQ potentials. The q(R) factors are initialized randomly and then biased with the output of our baseline model. The q(A) factor is initialized to uniform plus a small amount of noise.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Initializing Factors", "sec_num": "5.1" }, { "text": "To simplify inference, we pre-compute a finite set of values that each R k is allowed to take, conditioned on the corpus. To do so, we run the CRF component of our model (\u03c6 SEQ ) over the corpus and extract, for each , all spans that have a token-level probability of being labeled greater than \u03bb = 0.1. We further filter this set down to only values that occur at least twice in the corpus.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Initializing Factors", "sec_num": "5.1" }, { "text": "This simplification introduces sparsity that we take advantage of during inference to speed performance. Because each term in \u03c6 P OP and \u03c6 CON includes an indicator function based on a token match between a field-value and a message, knowing the possible values v of each R k enables us to precompute the combinations of (x, , v) for which nonzero factor values are possible. For each such tuple, we can also precompute the best alignment position k for each token x j .", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Initializing Factors", "sec_num": "5.1" }, { "text": "Data We apply our approach to construct a database of concerts in New York City. We used Twitter's public API to collect roughly 4.7 Million tweets across three weekends that we subsequently filter down to 5,800 messages. The messages have an average length of 18 tokens, and the corpus vocabulary comprises 468,000 unique words 6 . We obtain labeled gold records using data scraped from the NYC.com music event guide; totaling 110 extracted records. 
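(A quick aside on the candidate precomputation of Section 5.1: the sketch below collects, for one field, the contiguous spans whose tokens exceed the token-level probability threshold and that occur at least twice in the corpus. It is a simplified reconstruction; the argument names and data layout are assumptions.)

```python
from collections import Counter

def candidate_values(messages, token_marginals, field, threshold=0.1, min_count=2):
    """messages: list of token lists; token_marginals: parallel list of
    per-token {label: probability} dicts from the CRF component."""
    counts = Counter()
    for tokens, marginals in zip(messages, token_marginals):
        span = []
        # A sentinel at the end flushes a span that runs to the last token.
        for tok, probs in zip(tokens + [""], marginals + [{}]):
            if probs.get(field, 0.0) > threshold:
                span.append(tok)
            elif span:
                counts[" ".join(span).lower()] += 1
                span = []
    return [value for value, count in counts.items() if count >= min_count]
```

Returning to the gold event data: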
Each gold record had two fields of interest: ARTIST and VENUE.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Evaluation Setup", "sec_num": "6" }, { "text": "The first weekend of data (messages and events) was used for training and the second two weekends were used for testing.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Evaluation Setup", "sec_num": "6" }, { "text": "Preprocessing Only a small fraction of Twitter messages are relevant to the target extraction task. Directly processing the raw unfiltered stream would prohibitively increase computational costs and make learning more difficult due to the noise inherent in the data. To focus our efforts on the promising portion of the stream, we perform two types of filter-ing. First, we only retain tweets whose authors list some variant of New York as their location in their profile. Second, we employ a MIRA-based binary classifier (Ritter et al., 2010) to predict whether a message mentions a concert event. After training on 2,000 hand-annotated tweets, this classifier achieves an F 1 of 46.9 (precision of 35.0 and recall of 71.0) when tested on 300 messages. While the two-stage filtering does not fully eliminate noise in the input stream, it greatly reduces the presence of irrelevant messages to a manageable 5,800 messages without filtering too many 'signal' tweets.", "cite_spans": [ { "start": 522, "end": 543, "text": "(Ritter et al., 2010)", "ref_id": "BIBREF11" } ], "ref_spans": [], "eq_spans": [], "section": "Evaluation Setup", "sec_num": "6" }, { "text": "We also filter our gold record set to include only records in which each field value occurs at least once somewhere in the corpus, as these are the records which are possible to learn given the input. This yields 11 training and 31 testing records.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Evaluation Setup", "sec_num": "6" }, { "text": "Training The first weekend of data (2,184 messages and 11 records after preprocessing) is used for training. As mentioned in Section 4, the only learned parameters in our model are those associated with the sequence labeling factor \u03c6 SEQ . While it is possible to train these parameters via direct annotation of messages with label sequences, we opted instead to use a simple approach where message tokens from the training weekend are labeled via their intersection with gold records, often called \"distant supervision\" (Mintz et al., 2009b) . Concretely, we automatically label message tokens in the training corpus with either the ARTIST or VENUE label if they belonged to a sequence that matched a gold record field, and with NONE otherwise. This is the only use that is made of the gold records throughout training. \u03b8 SEQ parameters are trained using this labeling with a standard conditional likelihood objective.", "cite_spans": [ { "start": 521, "end": 542, "text": "(Mintz et al., 2009b)", "ref_id": "BIBREF10" } ], "ref_spans": [], "eq_spans": [], "section": "Evaluation Setup", "sec_num": "6" }, { "text": "Testing The two weekends of data used for testing totaled 3,662 tweets after preprocessing and 31 gold records for evaluation. The two weekends were tested separately and their results were aggregated across weekends.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Evaluation Setup", "sec_num": "6" }, { "text": "Our model assumes a fixed number of records K = 130. 
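(The distant-supervision labeling used for training can be sketched as a span-matching pass over each message. This is an illustrative reconstruction rather than the authors' code; matching is done on lowercased whitespace tokens.)

```python
def distant_labels(tokens, gold_records):
    """Label tokens with a field name when they fall inside a span that exactly
    matches that field's value in some gold record, and with NONE otherwise."""
    labels = ["NONE"] * len(tokens)
    lowered = [t.lower() for t in tokens]
    for record in gold_records:          # e.g. {"ARTIST": "DJ Pauly D", "VENUE": "Terminal 5"}
        for field, value in record.items():
            span = value.lower().split()
            for start in range(len(lowered) - len(span) + 1):
                if lowered[start:start + len(span)] == span:
                    for j in range(start, start + len(span)):
                        labels[j] = field
    return labels

# Example: for the message "@DJPaulyD absolutely killed it at Terminal 5 last night"
# and a gold record with VENUE "Terminal 5", the tokens "Terminal" and "5"
# receive the VENUE label and all other tokens receive NONE.
```

Returning to the test-time procedure: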
7 We rank these records according to a heuristic ranking function that favors the uniqueness of a record's field values across the set and the number of messages in the testing corpus that have token overlap with these values. This ranking function is intended to push garbage collection records to the bottom of the list. Finally, we retain the top k records, throwing away the rest. Results in Section 7 are reported as a function of this k.", "cite_spans": [ { "start": 53, "end": 54, "text": "7", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Evaluation Setup", "sec_num": "6" }, { "text": "Baseline We compare our system against three baselines that employ a voting methodology similar to Mann and Yarowsky (2005) . The baselines label each message and then extract one record for each combination of labeled phrases. Each extraction is considered a vote for that record's existence, and these votes are aggregated across all messages. Our List Baseline labels messages by finding string overlaps against a list of musical artists and venues scraped from web data (the same lists used as features in our CRF component). The CRF Baseline is most similar to Mann and Yarowsky (2005) 's CRF Voting method and uses the maximum likelihood CRF labeling of each message. The Low Threshold Baseline generates all possible records from labelings with a token-level likelihood greater than \u03bb = 0.1. The output of these baselines is a set of records ranked by the number of votes cast for each, and we perform our evaluation against the top k of these records.", "cite_spans": [ { "start": 99, "end": 123, "text": "Mann and Yarowsky (2005)", "ref_id": "BIBREF8" }, { "start": 566, "end": 590, "text": "Mann and Yarowsky (2005)", "ref_id": "BIBREF8" } ], "ref_spans": [], "eq_spans": [], "section": "Evaluation Setup", "sec_num": "6" }, { "text": "The evaluation of record construction is challenging because many induced music events discussed in Twitter messages are not in our gold data set; our gold records are precise but incomplete. Because of this, we evaluate recall and precision separately. Both evaluations are performed using hard zero-one loss at record level. This is a harsh evaluation criterion, but it is realistic for real-world use.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Evaluation", "sec_num": "7" }, { "text": "Recall We evaluate recall, shown in Figure 5 , against the gold event records for each weekend. This shows how well our model could do at replacing the a city event guide, providing Twitter users chat about events taking place.", "cite_spans": [], "ref_spans": [ { "start": 36, "end": 44, "text": "Figure 5", "ref_id": "FIGREF3" } ], "eq_spans": [], "section": "Evaluation", "sec_num": "7" }, { "text": "We perform our evaluation by taking the top k records induced, performing a stable marriage matching against the gold records, and then evaluating the resulting matched pairs. Stable marriage matching is a widely used approach that finds a bipartite matching between two groups such that no pairing exists in which both participants would prefer some other pairing (Irving et al., 1987) . With our hard loss function and no duplicate gold records, this amounts to the standard recall calculation. 
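(Under that zero-one loss the computation reduces to counting exact record matches, as in the minimal sketch below; it assumes each record is a field-to-string dictionary and normalizes values by lowercasing.)

```python
def record_recall(predicted_top_k, gold_records):
    """Record-level recall under zero-one loss: a gold record counts as
    recovered if some predicted record matches it exactly on every field.
    Each prediction is matched at most once, which is all the bipartite
    matching needs to do when the loss is zero-one and golds are distinct."""
    def normalize(rec):
        return tuple(sorted((field, value.strip().lower()) for field, value in rec.items()))

    remaining = [normalize(r) for r in predicted_top_k]
    matched = 0
    for gold in gold_records:
        g = normalize(gold)
        if g in remaining:
            remaining.remove(g)
            matched += 1
    return matched / len(gold_records) if gold_records else 0.0

# Example:
# record_recall([{"ARTIST": "DJ Pauly D", "VENUE": "Terminal 5"}],
#               [{"ARTIST": "dj pauly d", "VENUE": "terminal 5"},
#                {"ARTIST": "Craig Ferguson", "VENUE": "Carnegie Hall"}])
# -> 0.5
```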
We choose this bipartite matching technique because it generalizes nicely to allow for other forms of loss calculation (such as token-level loss).", "cite_spans": [ { "start": 365, "end": 386, "text": "(Irving et al., 1987)", "ref_id": "BIBREF4" } ], "ref_spans": [], "eq_spans": [], "section": "Evaluation", "sec_num": "7" }, { "text": "Precision To evaluate precision we assembled a list of the distinct records produced by all models and then manually determined if each record was correct. This determination was made blind to which model produced the record. We then used this aggregate list of correct records to measure precision for each individual model, shown in Figure 6 . By construction, our baselines incorporate a hard constraint that each relation learned must be expressed in entirety in at least one message. Our model only incorporates a soft version of this constraint via the \u03c6 CON factor, but this constraint clearly has the ability to boost precision. To show it's effect, we additionally evaluate our model, labeled Our Work + Con, with this constraint applied in hard form as an output filter.", "cite_spans": [], "ref_spans": [ { "start": 335, "end": 343, "text": "Figure 6", "ref_id": "FIGREF5" } ], "eq_spans": [], "section": "Evaluation", "sec_num": "7" }, { "text": "The downward trend in precision that can be seen in Figure 6 is the effect of our ranking algorithm, which attempts to push garbage collection records towards the bottom of the record list. As we incorporate these records, precision drops. These lines trend up for two of the baselines because the rank-396 ing heuristic is not as effective for them. These graphs confirm our hypothesis that we gain significant benefit by intertwining constraints on extraction consistency in the learning process, rather than only using this constraint to filter output.", "cite_spans": [], "ref_spans": [ { "start": 52, "end": 60, "text": "Figure 6", "ref_id": "FIGREF5" } ], "eq_spans": [], "section": "Evaluation", "sec_num": "7" }, { "text": "One persistent problem is a popular phrase appearing in many records, such as the value \"New York\" filling many ARTIST slots. The uniqueness factor \u03b8 U N Q helps control this behavior, but it is a relatively blunt instrument. Ideally, our model would learn, for each field , the degree to which duplicate values are permitted. It is also possible that by learning, rather than hand-tuning, the \u03b8 CON , \u03b8 P OP , and \u03b8 U N Q parameters, our model could find a balance that permits the proper level of duplication for a particular domain.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Analysis", "sec_num": "7.1" }, { "text": "Other errors can be explained by the lack of constituent features in our model, such as the selection of VENUE values that do not correspond to noun phrases. Further, semantic features could help avoid learning syntactically plausible artists like \"Screw the Rain\" because of the message:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Analysis", "sec_num": "7.1" }, { "text": "Screw the rainArtist! Grab an umbrella and head down to Webster HallVenue for some American rock and roll.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Analysis", "sec_num": "7.1" }, { "text": "Our model's soft string comparison-based clustering can be seen at work when our model uncovers records that would have been impossible without this approach. One such example is correcting the misspelling of venue names (e.g. 
Terminal Five \u2192 Terminal 5) even when no message about the event spells the venue correctly.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Analysis", "sec_num": "7.1" }, { "text": "Still, the clustering can introduce errors by combining messages that provide orthogonal field contributions yet have overlapping tokens (thus escaping the penalty of the consistency factor). An example of two messages participating in this scenario is shown below; the shared term \"holiday\" in the second message gets relabeled as ARTIST:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Analysis", "sec_num": "7.1" }, { "text": "Come check out the holiday cheerArtist parkside is bursting.. Pls tune in to TV Guide NetworkVenue TONIGHT at 8 pm for 25 Most Hilarious Holiday TV Moments...", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Analysis", "sec_num": "7.1" }, { "text": "While our experiments utilized binary relations, we believe our general approach should be useful for n-ary relation recovery in the social media domain. Because short messages are unlikely to express high arity relations completely, tying extraction and clustering seems an intuitive solution. In such a scenario, the record consistency constraints imposed by our model would have to be relaxed, perhaps examining pairwise argument consistency instead.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Analysis", "sec_num": "7.1" }, { "text": "We presented a novel model for record extraction from social media streams such as Twitter. Our model operates on a noisy feed of data and extracts canonical records of events by aggregating information across multiple messages. Despite the noise of irrelevant messages and the relatively colloquial nature of message language, we are able to extract records with relatively high accuracy. There is still much room for improvement using a broader array of features on factors.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion", "sec_num": "8" }, { "text": "Data and code available at http://groups.csail.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "e.g.: xxx, XXX, Xxx, or other 3 These are just features, not a filter; we are free to extract any artist or venue regardless of their inclusion in this list.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "Their values are: \u03b8UNQ = \u221210, \u03b8 Phrase P OP = 5, \u03b8 Token P OP = 10, \u03b8CON = 2e85 SeeLiang and Klein (2007) for an overview of variational techniques.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "Only considering English tweets and not counting user names (so-called -mentions.)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "Chosen based on the training set", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null } ], "back_matter": [ { "text": "The authors gratefully acknowledge the support of the DARPA Machine Reading Program under AFRL prime contract no. FA8750-09-C-0172. Any opinions, findings, and conclusions expressed in this material are those of the author(s) and do not necessarily reflect the views of DARPA, AFRL, or the US government. 
Thanks also to Tal Wagner for his development assistance and the MIT NLP group for their helpful comments.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Acknowledgements", "sec_num": "9" } ], "bib_entries": { "BIBREF0": { "ref_id": "b0", "title": "Snowball: Extracting relations from large plain-text collections", "authors": [ { "first": "Eugene", "middle": [], "last": "Agichtein", "suffix": "" }, { "first": "Luis", "middle": [], "last": "Gravano", "suffix": "" } ], "year": 2000, "venue": "Proceedings of DL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Eugene Agichtein and Luis Gravano. 2000. Snowball: Extracting relations from large plain-text collections. In Proceedings of DL.", "links": null }, "BIBREF1": { "ref_id": "b1", "title": "Learning to extract relations from the web using minimal supervision", "authors": [ { "first": "C", "middle": [], "last": "Razvan", "suffix": "" }, { "first": "Raymond", "middle": [ "J" ], "last": "Bunescu", "suffix": "" }, { "first": "", "middle": [], "last": "Mooney", "suffix": "" } ], "year": 2007, "venue": "Proceedings of the ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Razvan C. Bunescu and Raymond J. Mooney. 2007. Learning to extract relations from the web using mini- mal supervision. In Proceedings of the ACL.", "links": null }, "BIBREF2": { "ref_id": "b2", "title": "A latent variable model for geographic lexical variation", "authors": [ { "first": "J", "middle": [], "last": "Eisenstein", "suffix": "" }, { "first": "B", "middle": [], "last": "O'connor", "suffix": "" }, { "first": ".", "middle": [ "." ], "last": "Smith", "suffix": "" } ], "year": 2010, "venue": "Proceedings of the", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "J Eisenstein, B O'Connor, and N Smith. . . . 2010. A latent variable model for geographic lexical variation. Proceedings of the 2010 . . . , Jan.", "links": null }, "BIBREF3": { "ref_id": "b3", "title": "Discovering relations among named entities from large corpora", "authors": [ { "first": "Takaaki", "middle": [], "last": "Hasegawa", "suffix": "" }, { "first": "Satoshi", "middle": [], "last": "Sekine", "suffix": "" }, { "first": "Ralph", "middle": [], "last": "Grishman", "suffix": "" } ], "year": 2004, "venue": "Proceedings of ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Takaaki Hasegawa, Satoshi Sekine, and Ralph Grishman. 2004. Discovering relations among named entities from large corpora. In Proceedings of ACL.", "links": null }, "BIBREF4": { "ref_id": "b4", "title": "An efficient algorithm for the optimal stable marriage", "authors": [ { "first": "Robert", "middle": [ "W" ], "last": "Irving", "suffix": "" }, { "first": "Paul", "middle": [], "last": "Leather", "suffix": "" }, { "first": "Dan", "middle": [], "last": "Gusfield", "suffix": "" } ], "year": 1987, "venue": "J. ACM", "volume": "34", "issue": "", "pages": "532--543", "other_ids": {}, "num": null, "urls": [], "raw_text": "Robert W. Irving, Paul Leather, and Dan Gusfield. 1987. An efficient algorithm for the optimal stable marriage. J. 
ACM, 34:532-543, July.", "links": null }, "BIBREF5": { "ref_id": "b5", "title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", "authors": [ { "first": "John", "middle": [], "last": "Lafferty", "suffix": "" }, { "first": "Andrew", "middle": [], "last": "Mccallum", "suffix": "" }, { "first": "Fernando", "middle": [], "last": "Pereira", "suffix": "" } ], "year": 2001, "venue": "Proceedings of International Conference of Machine Learning (ICML)", "volume": "", "issue": "", "pages": "282--289", "other_ids": {}, "num": null, "urls": [], "raw_text": "John Lafferty, Andrew McCallum, and Fernando Pereira. 2001. Conditional random fields: Probabilistic mod- els for segmenting and labeling sequence data. In Proceedings of International Conference of Machine Learning (ICML), pages 282-289.", "links": null }, "BIBREF6": { "ref_id": "b6", "title": "Structured Bayesian nonparametric models with variational inference (tutorial)", "authors": [ { "first": "P", "middle": [], "last": "Liang", "suffix": "" }, { "first": "D", "middle": [], "last": "Klein", "suffix": "" } ], "year": 2007, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "P. Liang and D. Klein. 2007. Structured Bayesian non- parametric models with variational inference (tutorial).", "links": null }, "BIBREF7": { "ref_id": "b7", "title": "Association for Computational Linguistics (ACL)", "authors": [], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "In Association for Computational Linguistics (ACL).", "links": null }, "BIBREF8": { "ref_id": "b8", "title": "Multi-field information extraction and cross-document fusion", "authors": [ { "first": "Gideon", "middle": [ "S" ], "last": "Mann", "suffix": "" }, { "first": "David", "middle": [], "last": "Yarowsky", "suffix": "" } ], "year": 2005, "venue": "Proceeding of the ACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Gideon S. Mann and David Yarowsky. 2005. Multi-field information extraction and cross-document fusion. In Proceeding of the ACL.", "links": null }, "BIBREF9": { "ref_id": "b9", "title": "Distant supervision for relation extraction without labeled data", "authors": [ { "first": "Mike", "middle": [], "last": "Mintz", "suffix": "" }, { "first": "Steven", "middle": [], "last": "Bills", "suffix": "" } ], "year": 2009, "venue": "Proceedings of ACL/IJCNLP", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Mike Mintz, Steven Bills, Rion Snow, and Dan Juraf- sky. 2009a. Distant supervision for relation extraction without labeled data. In Proceedings of ACL/IJCNLP.", "links": null }, "BIBREF10": { "ref_id": "b10", "title": "Distant supervision for relation extraction without labeled data", "authors": [ { "first": "Mike", "middle": [], "last": "Mintz", "suffix": "" }, { "first": "Steven", "middle": [], "last": "Bills", "suffix": "" } ], "year": 2009, "venue": "Proceedings of the ACL", "volume": "", "issue": "", "pages": "1003--1011", "other_ids": {}, "num": null, "urls": [], "raw_text": "Mike Mintz, Steven Bills, Rion Snow, and Daniel Juraf- sky. 2009b. Distant supervision for relation extrac- tion without labeled data. 
In Proceedings of the ACL, pages 1003-1011.", "links": null }, "BIBREF11": { "ref_id": "b11", "title": "Unsupervised modeling of twitter conversations", "authors": [ { "first": "A", "middle": [], "last": "Ritter", "suffix": "" }, { "first": "B", "middle": [], "last": "Cherry", "suffix": "" }, { "first": "", "middle": [], "last": "Dolan", "suffix": "" } ], "year": 2010, "venue": "Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "172--180", "other_ids": {}, "num": null, "urls": [], "raw_text": "A Ritter, C Cherry, and B Dolan. 2010. Unsupervised modeling of twitter conversations. Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Com- putational Linguistics, pages 172-180.", "links": null }, "BIBREF12": { "ref_id": "b12", "title": "Preemptive information extraction using unrestricted relation discovery", "authors": [ { "first": "Yusuke", "middle": [], "last": "Shinyama", "suffix": "" }, { "first": "Satoshi", "middle": [], "last": "Sekine", "suffix": "" } ], "year": 2006, "venue": "Proceedings of HLT/NAACL", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Yusuke Shinyama and Satoshi Sekine. 2006. Preemp- tive information extraction using unrestricted relation discovery. In Proceedings of HLT/NAACL.", "links": null }, "BIBREF13": { "ref_id": "b13", "title": "Automatic acquisition of domain knowledge for information extraction", "authors": [ { "first": "Roman", "middle": [], "last": "Yangarber", "suffix": "" }, { "first": "Ralph", "middle": [], "last": "Grishman", "suffix": "" }, { "first": "Pasi", "middle": [], "last": "Tapanainen", "suffix": "" }, { "first": "Silja", "middle": [], "last": "Huttunen", "suffix": "" } ], "year": 2000, "venue": "Proceedings of COLING", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Roman Yangarber, Ralph Grishman, Pasi Tapanainen, and Silja Huttunen. 2000. Automatic acquisition of domain knowledge for information extraction. In Pro- ceedings of COLING.", "links": null }, "BIBREF14": { "ref_id": "b14", "title": "Collective cross-document relation extraction without labelled data", "authors": [ { "first": "Limin", "middle": [], "last": "Yao", "suffix": "" }, { "first": "Sebastian", "middle": [], "last": "Riedel", "suffix": "" }, { "first": "Andrew", "middle": [], "last": "Mccallum", "suffix": "" } ], "year": 2010, "venue": "Proceedings of the EMNLP", "volume": "", "issue": "", "pages": "1013--1023", "other_ids": {}, "num": null, "urls": [], "raw_text": "Limin Yao, Sebastian Riedel, and Andrew McCallum. 2010a. Collective cross-document relation extraction without labelled data. In Proceedings of the EMNLP, pages 1013-1023.", "links": null }, "BIBREF15": { "ref_id": "b15", "title": "Cross-document relation extraction without labelled data", "authors": [ { "first": "Limin", "middle": [], "last": "Yao", "suffix": "" }, { "first": "Sebastian", "middle": [], "last": "Riedel", "suffix": "" }, { "first": "Andrew", "middle": [], "last": "Mccallum", "suffix": "" } ], "year": 2010, "venue": "Proceedings of EMNLP", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Limin Yao, Sebastian Riedel, and Andrew McCallum. 2010b. Cross-document relation extraction without la- belled data. 
In Proceedings of EMNLP.", "links": null }, "BIBREF16": { "ref_id": "b16", "title": "StatSnowball: a statistical approach to extracting entity relationships", "authors": [ { "first": "Jun", "middle": [], "last": "Zhu", "suffix": "" }, { "first": "Zaiqing", "middle": [], "last": "Nie", "suffix": "" }, { "first": "Xiaojing", "middle": [], "last": "Liu", "suffix": "" }, { "first": "Bo", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Ji-Rong", "middle": [], "last": "Wen", "suffix": "" } ], "year": 2009, "venue": "Proceedings of WWW", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Jun Zhu, Zaiqing Nie, Xiaojing Liu, Bo Zhang, and Ji- Rong Wen. 2009. StatSnowball: a statistical approach to extracting entity relationships. In Proceedings of WWW.", "links": null } }, "ref_entries": { "FIGREF0": { "type_str": "figure", "uris": null, "text": "mit.edu/rbg/code/twitter", "num": null }, "FIGREF1": { "type_str": "figure", "uris": null, "text": "Examples of Twitter messages, along with automatically extracted records.", "num": null }, "FIGREF2": { "type_str": "figure", "uris": null, "text": "The key variables of our model. A collection of K latent records R k , each consisting of a set of L properties.", "num": null }, "FIGREF3": { "type_str": "figure", "uris": null, "text": "Recall against the gold records. The horizontal axis is the number of records kept from the ranked model output, as a multiple of the number of golds. The CRF lines terminate because of low record yield.", "num": null }, "FIGREF5": { "type_str": "figure", "uris": null, "text": "Precision, evaluated manually by crossreferencing model output with event mentions in the input data. The CRF and hard-constrained consensus lines terminate because of low record yield.", "num": null } } } }