|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:57:48.675693Z" |
|
}, |
|
"title": "Hierarchical Graph Convolutional Networks for Jointly Resolving Cross-document Coreference of Entity and Event Mentions", |
|
"authors": [ |
|
{ |
|
"first": "Duy", |
|
"middle": [], |
|
"last": "Phung", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "VinAI Research", |
|
"location": { |
|
"country": "Vietnam" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Tuan", |
|
"middle": [ |
|
"Ngo" |
|
], |
|
"last": "Nguyen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Oregon", |
|
"location": { |
|
"settlement": "Eugene", |
|
"region": "Oregon", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "tnguyen@cs.uoregon.edu" |
|
}, |
|
{ |
|
"first": "Thien", |
|
"middle": [ |
|
"Huu" |
|
], |
|
"last": "Nguyen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Oregon", |
|
"location": { |
|
"settlement": "Eugene", |
|
"region": "Oregon", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper studies the problem of crossdocument event coreference resolution (CDECR) that seeks to determine if event mentions across multiple documents refer to the same real-world events. Prior work has demonstrated the benefits of the predicateargument information and document context for resolving the coreference of event mentions. However, such information has not been captured effectively in prior work for CDECR. To address these limitations, we propose a novel deep learning model for CDECR that introduces hierarchical graph convolutional neural networks (GCN) to jointly resolve entity and event mentions. As such, sentencelevel GCNs enable the encoding of important context words for event mentions and their arguments while the document-level GCN leverages the interaction structures of event mentions and arguments to compute document representations to perform CDECR. Extensive experiments are conducted to demonstrate the effectiveness of the proposed model.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper studies the problem of crossdocument event coreference resolution (CDECR) that seeks to determine if event mentions across multiple documents refer to the same real-world events. Prior work has demonstrated the benefits of the predicateargument information and document context for resolving the coreference of event mentions. However, such information has not been captured effectively in prior work for CDECR. To address these limitations, we propose a novel deep learning model for CDECR that introduces hierarchical graph convolutional neural networks (GCN) to jointly resolve entity and event mentions. As such, sentencelevel GCNs enable the encoding of important context words for event mentions and their arguments while the document-level GCN leverages the interaction structures of event mentions and arguments to compute document representations to perform CDECR. Extensive experiments are conducted to demonstrate the effectiveness of the proposed model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Event coreference resolution (ECR) aims to cluster event-triggering expressions in text such that all event mentions in a group refer to the same unique event in real world. We are interested in cross-document ECR (CDECR) where event mentions might appear in the same or different documents. For instance, consider the following sentences (event mentions) S1 and S2 that involve \"leaving\" and \"left\" (respectively) as event trigger words (i.e., the predicates): S1: O'Brien was forced into the drastic step of leaving the 76ers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "S2: Jim O'Brien left the 76ers after one season as coach.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "An CDECR system in this case would need to recognize that both event mentions in S1 and S2 refer to the same event.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A major challenge in CDECR involves the necessity to model entity mentions (e.g., \"Jim O'Brien\") that participate into events and reveal their spatio-temporal information (Yang et al., 2015 ) (called event arguments). In particular, as event mentions might be presented in different sentences/documents, an important evidence for predicting the coreference of two event mentions is to realize that the two event mentions have the same participants in the real world and/or occur at the same location and time (i.e., same arguments).", |
|
"cite_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 189, |
|
"text": "(Yang et al., 2015", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Motivated by this intuition, prior work for CDECR has attempted to jointly resolve crossdocument coreference for entities and events so the two tasks can mutually benefit from each other (iterative clustering) (Lee et al., 2012) . In fact, this iterative and joint modeling approach has recently led to the state-of-the-art performance for CDECR (Barhom et al., 2019; Meged et al., 2020) . Our model for CDECR follows this joint coreference resolution method; however, we advance it by introducing novel techniques to address two major limitations from previous work (Yang et al., 2015; Kenyon-Dean et al., 2018; Barhom et al., 2019) , i.e., the inadequate mechanisms to capture the argument-related information for representing event mentions and the use of only lexical features to represent input documents.", |
|
"cite_spans": [ |
|
{ |
|
"start": 210, |
|
"end": 228, |
|
"text": "(Lee et al., 2012)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 346, |
|
"end": 367, |
|
"text": "(Barhom et al., 2019;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 368, |
|
"end": 387, |
|
"text": "Meged et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 567, |
|
"end": 586, |
|
"text": "(Yang et al., 2015;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 587, |
|
"end": 612, |
|
"text": "Kenyon-Dean et al., 2018;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 613, |
|
"end": 633, |
|
"text": "Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "As the first limitation with the event argumentrelated evidence, existing methods for CDECR have mainly captured the direct information of event arguments for event mention representations, thus failing to explicitly encode other important context words in the sentences to reveal fine-grained nature of relations between arguments and triggers for ECR (Yang et al., 2015; Barhom et al., 2019) . For instance, consider the coreference prediction between the event mentions in S1 and the following sentence S3 (with \"leave\" as the event trigger word):", |
|
"cite_spans": [ |
|
{ |
|
"start": 353, |
|
"end": 372, |
|
"text": "(Yang et al., 2015;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 393, |
|
"text": "Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "S3: The baseball coach Jim O'Brien decided to leave the The arguments for the event mention in S3 involves \"The baseball coach Jim O'Brien\", \"the club\", and \"Monday\". If an entity coreference resolution system considers the entity mention pairs (\"O'Brien\" in S1, \"The baseball coach Jim O'Brien\" in S3) and (\"the 76ers\" in S1 and \"the club\" in S3) as being coreferred, a CDECR system, which only concerns event arguments and their coreference information, would incorrectly predict the coreference between the event mentions in S1 and S3 in this case. However, if the CDECR system further models the important words for the relations between event triggers and arguments (i.e., the words \"was forced\" in S1 and \"decided\" in S3), it can realize the unwillingness of the subject for the position ending event in S1 and the self intent to leave the position for the event in S3. As such, this difference can help the system to reject the event coreference for S1 and S3.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To this end, we propose to explicitly identify and capture important context words for event triggers and arguments in representation learning for CDECR. In particular, our motivation is based on the shortest dependency paths between event triggers and arguments that have been used to reveal important context words for their relations (Li et al., 2013; Sha et al., 2018; Veyseh et al., 2020a Veyseh et al., , 2021 . As an example, Figure 1 shows the dependency tree of S1 where the shortest dependency path between \"O'Brien\" and \"leaving\" can successfully include the important context word \"forced\". As such, for each event mention, we leverage the shortest dependency paths to build a pruned and argumentcustomized dependency tree to simultaneously contain event triggers, arguments and the important words in a single structure. Afterward, the structure will be exploited to learn richer representation vectors for CDECR.", |
|
"cite_spans": [ |
|
{ |
|
"start": 337, |
|
"end": 354, |
|
"text": "(Li et al., 2013;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 355, |
|
"end": 372, |
|
"text": "Sha et al., 2018;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 393, |
|
"text": "Veyseh et al., 2020a", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 394, |
|
"end": 415, |
|
"text": "Veyseh et al., , 2021", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 433, |
|
"end": 441, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Second, for document representations, previous work on CDECR has proved that input documents also provide useful context information for event mentions (e.g., document topics) to enhance the clustering performance (Kenyon-Dean et al., 2018) . However, the document information is only captured via lexical features in prior work, e.g., TF-IDF vectors (Kenyon-Dean et al., 2018; Barhom et al., 2019) , leading to the poor generalization to unseen words/tokens and inability to encapsulate latent semantic information for CDECR. To this end, we propose to learn distributed representation vectors for input documents to enrich event mention representations and improve the generalization of the models for CDECR. In particular, as entity and event mentions are the main objects of interest for CDECR, our motivation is to focus on the context information from these objects to induce document representations for the models. To implement this idea, we propose to represent input documents via interaction graphs between their entity and event mentions, serving as the structures to generate document representation vectors. Based on those motivations, we introduce a novel hierarchical graph convolutional neural network (GCN) that involves two levels of GCN models to learn representation vectors for the iterative and joint model for CDECR. In particular, sentence-level GCNs will consume the pruned dependency trees to obtain context-enriched representation vectors for event and entity mentions while a document-level GCN will be run over the entityevent interaction graphs, leveraging the mention representations from the sentence-level GCNs as the inputs to generate document representations for CDECR. Extensive experiments show that the proposed model achieves the state-of-the-art resolution performance for both entities and events on the ECB+ dataset. To our knowledge, this is the first work that utilizes GCNs and entity-event interaction graphs for coreference resolution.", |
|
"cite_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 240, |
|
"text": "(Kenyon-Dean et al., 2018)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 336, |
|
"end": 377, |
|
"text": "TF-IDF vectors (Kenyon-Dean et al., 2018;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 378, |
|
"end": 398, |
|
"text": "Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "ECR is considered as a more challenging task than entity coreference resolution due to the more complex structures of event mentions that require argument reasoning (Yang et al., 2015) . Previous work for within-document event resolution includes pairwise classifiers (Ahn, 2006; , spectral graph clustering methods , information propagation (Liu et al., 2014) , markov logic networks (Lu et al., 2016) , and deep learning (Nguyen et al., 2016) . For only crossdocument event resolution, prior work has considered mention-pair classifiers for coreference that use granularities of event slots and lexical features of event mentions for the features (Cybulska and Vossen, 2015b,a) . Within-and cross-document event coreference have also been solved simultaneously in previous work (Lee et al., 2012; Bejan and Harabagiu, 2010; Adrian Bejan and Harabagiu, 2014; Yang et al., 2015; Choubey and Huang, 2017; Kenyon-Dean et al., 2018) . The most related works to us involve the joint models for entity and event coreference resolution that use contextualized word embeddings to capture the dependencies between the two tasks and lead to the state-of-the-art performance for CDECR (Lee et al., 2012; Barhom et al., 2019; Meged et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 165, |
|
"end": 184, |
|
"text": "(Yang et al., 2015)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 268, |
|
"end": 279, |
|
"text": "(Ahn, 2006;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 360, |
|
"text": "(Liu et al., 2014)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 385, |
|
"end": 402, |
|
"text": "(Lu et al., 2016)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 444, |
|
"text": "(Nguyen et al., 2016)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 649, |
|
"end": 679, |
|
"text": "(Cybulska and Vossen, 2015b,a)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 780, |
|
"end": 798, |
|
"text": "(Lee et al., 2012;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 799, |
|
"end": 825, |
|
"text": "Bejan and Harabagiu, 2010;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 826, |
|
"end": 859, |
|
"text": "Adrian Bejan and Harabagiu, 2014;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 860, |
|
"end": 878, |
|
"text": "Yang et al., 2015;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 879, |
|
"end": 903, |
|
"text": "Choubey and Huang, 2017;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 904, |
|
"end": 929, |
|
"text": "Kenyon-Dean et al., 2018)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1175, |
|
"end": 1193, |
|
"text": "(Lee et al., 2012;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1194, |
|
"end": 1214, |
|
"text": "Barhom et al., 2019;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1215, |
|
"end": 1234, |
|
"text": "Meged et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Finally, regarding the modeling perspective, our work is related to the models that use GCNs to learn representation vectors for different NLP tasks, e.g., event detection (Lai et al., 2020; Veyseh et al., 2019) and target opinion word extraction (Veyseh et al., 2020b), applying both sentence-and document-level graphs (Sahu et al., 2019; Tran et al., 2020; Nan et al., 2020; . However, to our knowledge, none of the prior work has employed GCNs for ECR.", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 190, |
|
"text": "(Lai et al., 2020;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 191, |
|
"end": 211, |
|
"text": "Veyseh et al., 2019)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 320, |
|
"end": 339, |
|
"text": "(Sahu et al., 2019;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 358, |
|
"text": "Tran et al., 2020;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 359, |
|
"end": 376, |
|
"text": "Nan et al., 2020;", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Given a set of input documents D, the goal of CDECR is to cluster event mentions in the documents of D according to their coreference. Our model for CDECR follows (Barhom et al., 2019) that simultaneously clusters entity mentions in D to benefit from the inter-dependencies between entities and events for coreference resolution. In this section, we will first describe the overall framework of our iterative method for joint entity and event coreference resolution based on (Barhom et al., 2019 ) (a summary of the framework is given in Algorithm 1). The novel hierarchical GCN model for inducing mention 1 representation vectors will be discussed afterward. Iterative Clustering for CDECR: Following (Barhom et al., 2019) , we first cluster the input document set D into different topics to improve the coreference performance (the set of document topics is called T ). As we use the ECB+ dataset (Cybulska and Vossen, 2014) to evaluate the models in this work, the training phase directly utilizes the golden topics of the documents while the test phase applies the K-mean algorithm for document clustering as in (Barhom et al., 2019) . Afterward, given a topic t 2 T with the corresponding document subset D t \u21e2 D, our CDECR model initializes the entity and event cluster configurations E 0 t and V 0 t (respectively) where: E 0 t involves withindocument clusters of the entity mentions in the documents in D t , and V 0 t simply puts each event mention presented in D t into its own cluster (lines 2 and 3 in Algorithm 1). In the training phase, E 0 t is obtained from the golden within-document coreference information of the entity mentions (to reduce noise) while the within-document entity mention clusters returned by Stanford CoreNLP (Manning et al., 2014) are used for E 0 t in the test phase, following (Barhom et al., 2019) . For convenience, the sets of entity and event mentions in D t are called", |
|
"cite_spans": [ |
|
{ |
|
"start": 163, |
|
"end": 184, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 475, |
|
"end": 495, |
|
"text": "(Barhom et al., 2019", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 702, |
|
"end": 723, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 899, |
|
"end": 926, |
|
"text": "(Cybulska and Vossen, 2014)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1116, |
|
"end": 1137, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1745, |
|
"end": 1767, |
|
"text": "(Manning et al., 2014)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1816, |
|
"end": 1837, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "M E t and M V t respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Algorithm 1 Training algorithm", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "1: for t 2 T do 2: E 0 t Within-doc clusters of entity mentions 3: V 0 t Singleton event mentions in M V t 4: k 1 5:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "while 9 meaningful cluster-pair merge do 6: //Entities 7:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Generate entity mention representations", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "RE(me i , V k 1 t ) for all me i 2 M E t 8:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Compute entity mention-pair coreference scores SE(me i , me j ) 9:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Train RE and SE using the gold entity mention clusters 10:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "E k t Agglomeratively cluster M E t based on SE(me i , me j ) 11: //Events 12: Generate event mention representations RV (mv i , E k t ) for all mv i 2 M V t 13:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Compute event mention-pair coreference scores SV (mv i , mv j ) 14:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Train RV and SV using the gold entity mention clusters 15:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "V k t Agglomeratively cluster M V t based on SV (mv i , mv j ) 16: k k + 1 17:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "end while 18: end for Given the initial configurations, our iterative algorithm involves a sequence of clustering iterations, generating new cluster configurations E k V k t (alternating the clustering). Starting with the entity clustering at iteration k, each entity mention m e i is first transformed into a representation vector R E (m e i , V k 1 t ) (line 7 in Algorithm 1) that is conditioned on not only the specific context of m e i but also the current event cluster configuration V k 1 t (i.e., to capture the event-entity inter-dependencies). Afterward, a scoring function S E (m e i , m e j ) is used to compute the coreference probability/score for each pair of entity mentions, leveraging their mention representations", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "R E (m e i , V k 1 t ) and R E (m e j , V k 1 t )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "at the current iteration (R E and S E will be discussed in the next section) (line 8). An agglomerative clustering algorithm then utilizes these coreference scores to cluster the entity mentions, leading to a new configuration E k t for entity mention clusters (line 10). Given E k t , the same procedure is applied to cluster the event mentions for iteration k, including: (i) obtaining an event representation vector", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "R V (m v i , E k t )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "for each event mention m v i based on the current entity cluster configuration E k t , (ii) computing coreference scores for the event mention pairs via the scoring function", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "S V (m v i , m v j )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": ", and (iii) performing agglomerative clustering for the event mentions to produce the new configuration V k t for event clusters (lines 12, 13, and 15). Note that during the training process, the parameters of the representation and scoring functions for entities R E and S E (or for events with R V and S V ) are updated/optimized after the coreference scores are computed for all the entity (or event) mention pairs. This corresponds to lines 9 and 14 in Algorithm 1 (not used in the test phase). In particular, the loss function to optimize R E and S E in line 9 is based on the cross-entropy over every pair of entity mentions (m e i , m e j ) in M E t :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "L ent,coref t = P me i 6 =me j y ent ij log S E (m e i , m e j ) (1 y ent ij ) log(1 S E (m e i , m e j )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where y ent ij is a golden binary variable to indicate whether m e i and m e j corefer or not (symmetrically for L env,coref t to train R V and S V in line 14). Also, we use the predicted configurations E k t and V k 1 t in the mention representation computation (instead of the golden clusters as in y ent ij for the loss functions) during both the training and test phases to achieve a consistency (Barhom et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 400, |
|
"end": 421, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Finally, we note that the agglomerative clustering in each iteration of our model also starts with the initial clusters as in E 0 t and V 0 t , and greedily merges multiple cluster pairs with the high-est cluster-pair scores until all the scores are below a predefined threshold . In this way, the algorithm first focuses on high-precision merging operations and postpones less precise ones until more information is available. The cluster-pair score S C (c i , c j ) for two clusters c i and c j (mention sets) at some algorithm step is based on averaging mention linkage coreference scores: Barhom et al., 2019) where \u21e4 can be E or V depending on whether c i and c j are entity or event clusters (respectively).", |
|
"cite_spans": [ |
|
{ |
|
"start": 593, |
|
"end": 613, |
|
"text": "Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "S C (c i , c j ) = 1 |c i ||c j | P m i 2c i ,m j 2c j S \u21e4 (m i , m j ) (", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Mention Representations: Let m be a mention (event or entity) in a sentence W = w 1 , w 2 , . . . , w n of n words (w i is the i-th word) where w a is the head word of m. To prepare W for the mention representation computation and achieve a fair comparison with (Barhom et al., 2019) , we first convert each word w i 2 W into a vector x i using the ELMo embeddings (Peters et al., 2018) . Here, x i is obtained by running the pre-trained ELMo model over W and averaging the hidden vectors for w i at the three layers in ELMo. This transforms W into a sequence of vectors X = x 1 , x 2 , . . . , x n for the next steps. The mention representations in our work are based on two major elements, i.e., the modeling of important context words for event triggers and arguments, and the induction of document presentations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 262, |
|
"end": 283, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 365, |
|
"end": 386, |
|
"text": "(Peters et al., 2018)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "(i) Modeling Important Context Words: A motivation for representation learning in our model is to capture event arguments and important context words (for the relations between event triggers and arguments) to enrich the event mention representations. In this work, we employ a symmetric intuition to compute a representation vector for an entity mention m, aiming to encode associated predicates (i.e., event triggers that accept m as an argument W ) and important context words (for the relations between the entity mention and associated event triggers). As such, following (Barhom et al., 2019) , we first identify the attached arguments (if m is an event mention) or predicates (if m is an entity mention) in W for m using a semantic role labeling (SRL) system. In particular, we focus on four semantic roles of interest: Arg0, Arg1, Location, and Time. For convenience, let A m = {w i 1 , . . . , w io } \u21e2 W be the set of head words of the attached event arguments or event triggers for m in W based on the SRL system and the four roles (o is the number of head words). In particular, if m is an event mention, A m would involve the head words of the entity mentions that fill the four semantic roles for m in W . In contrast, if m is an entity mention, A m would capture the head words of the event triggers that take m as an argument with one of the four roles in W . Afterward, to encode the important context words for the relations between m and the words in A m , we employ the shortest dependency paths P i j between the head word w a of m and the words w i j 2 A m . As such, starting with the dependency tree of G m = {N m , E m } of W (N m involves the words in W ), we build a pruned tree\u011c m = {N m ,\u00ca m } of G m to explicitly focus on the mention m and its important context words in the paths P i j . Concretely, the node setN m of G m contains all the words in the paths", |
|
"cite_spans": [ |
|
{ |
|
"start": 577, |
|
"end": 598, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "P i j (N m = S j=1..o P i j )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "while the edge set\u00ca m preserves the dependency connections in G m of the words in", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "N m (\u00ca m = {(a, b)|a, b 2N m , (a, b) 2 E m }).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "As such, the pruned tree\u011c m helps to gather the relevant context words for the coreference resolution of m and organize them into a single dependency graph, serving as a rich structure to learn a representation vector for m 2 (e.g., in Figure 1 ). (ii) Graph Convolutional Networks: The context words and structure in\u011c m suggest the use of Graph Convolutional Networks (GCN) (Kipf and Welling, 2017; Nguyen and Grishman, 2018) to learn the representation vector for m (called the sentencelevel GCN). In particular, the GCN model in this work involves several layers (i.e., L layers in our case) to compute the representation vectors for the nodes in the graph\u011c m at different abstract levels. The input vector h 0 v for the node v 2N m for GCN is set to the corresponding ELMo-based vectors in X. After L layers, we obtain the hidden vectors h L v (in the last layer) for the nodes v 2N m . We call sent 2N m ) ] the sentence-level GCN vector that will be used later to represent m (v m is the corresponding node of the head word of m inN m ). (iii) GCN Interaction: Our discussion about the sentence-level GCN so far has been agnostic to whether m is an event or entity mention and the straightforward approach is to apply the same GCN model for both entity and event mentions. However, this approach might limit the flexibility of the GCN model to focus on the necessary aspects of information that are specific to each coreference resolution task (i.e., events or entities). For example, event coreference might need to weight the information from event arguments and important context words more than those for entity coreference (Yang et al., 2015) . To this end, we propose to apply two separate sentence-level GCN models for event and entity mentions, which share the architecture but differ from the parameters, to enhance representation learning (called G ent and G evn for entities and events respectively). 
In addition, to introduce a new source of training signals and promote the knowledge transfer between the two GCN networks, we propose to regularize the GCN models so they produce similar representation vectors for the same input sentence W . In particular, we apply both GCN models G ent and G evn over the full dependency tree 3 G m using the ELMo-based vectors X for W as the input. This produces the hidden vectors h ent 1 , . . . , h ent n and h env 1 , . . . , h env n in the last layers of G ent and G evn for W . Afterward, we compute two versions of representation vectors for W based on max-pooling:", |
|
"cite_spans": [ |
|
{ |
|
"start": 1634, |
|
"end": 1653, |
|
"text": "(Yang et al., 2015)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 236, |
|
"end": 244, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 904, |
|
"end": 910, |
|
"text": "2N m )", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "(m) = [h L vm , max_pool(h L v |v", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "h ent = max_pool(h ent 1 , . . . , h ent n ), h env = max_pool(h env 1 , . . . , h env n )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": ". Finally, the mean squared difference between h ent and h env is introduced into the overall loss function to regularize the models: L reg m = ||h ent h env || 2 2 . As this loss is specific to mention m, it will be computed independently for event and entity mentions and added into the corresponding training loss (i.e., lines 9 or 14 in Algorithm 1). In particular, the overall training loss for entity mention coreference resolution in line 9 of Algorithm 1 is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "L ent t = \u21b5 ent L ent,coref t +(1 \u21b5 ent ) P me2M E t L reg me", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "while those for event mention coreference resolution is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "L env t = \u21b5 env L env,coref t + (1 \u21b5 env ) P mv2M V t L reg mv (line 14)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": ". Here, \u21b5 ent and \u21b5 env are the trade-off parameters. (iv) Document Representation: As motivated in the introduction, we propose to learn representation vectors for input documents to enrich mention representations and improve the generalization of the models (over the lexical features for documents). As such, our principle is to employ entity and event mentions (the main objects of interest in CDECR) and their interactions/structures to represent input documents (i.e., documents as interaction graphs of entity and event mentions). Given the mention m of interest and its correspond-ing document d 2 D t , we start by building an interaction graph G doc = {N doc , E doc } where the node set N doc involves all the entity and event mentions in d. For the edge set E doc , we leverage two types of information to connect the nodes in N doc : (i) predicate-argument information: we establish a link between the nodes for an event mention x and an entity mention y in N doc if y is an argument of x for one of the four semantic roles, i.e., Arg0, Arg1, Location, and Time (identified by the SRL system), and (ii) entity mention coreference: we connect any pairs of nodes in N doc that correspond to two coreferring entity mentions in d (using the gold within-document entity coreference in training and the predicted one in testing, as in E 0 t ). In this way, G doc helps to emphasize on important objects of d and enables intra-and intersentence interactions between event mentions (via entity mentions/arguments) to produce effective document representations for CDECR.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In the next step, we feed the interaction graph G doc into a GCN model G doc (called the documentlevel GCN) using the sentence-level GCN-based representations of the entity and event mentions in N doc (i.e., sent(m)) as the initial vectors for the nodes (thus called a hierarchical GCN model). The hidden vectors produced by the last layer of G doc for the nodes in G doc is called {h d u |u 2 N doc }. Finally, we obtain the representation vector doc(m) for d based on the max-pooling:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "doc(m) = max_pool(h d u |u 2 N doc ). (v)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Final Representation: Given the representation vectors learned so far, we form the final representation vector for m", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "(R E (m, V k 1 t ) or R V (m, E k t )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "in lines 7 or 12 of Algorithm 1) by concatenating the following vectors:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "(1) The sentence-and document-level GCNbased representation vectors for m (i.e., sent(m) and doc(m)).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "( 2)The cluster-based representation", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "cluster(m) = [Arg0 m , Arg1 m , Location m , Time m ].", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Taking Arg0 m as an example, it is computed by considering the mention m 0 that is associated with m via the semantic role Arg0 in W using the SRL system. Here, m 0 is an event mention if m is an entity mention and vice versa (Arg0 = 0 if m 0 does not exist). As such, let c be the cluster in the current configuration (i.e., V k 1 t or E k t ) that contain m 0 . We then obtain Arg0 m by averaging the ELMo-based vectors (i.e., X = x 1 , . . . , x n ) of the head words of the mentions in c: Arg0 m = 1/|c| P q2c x head(q) (head(q) is the index of the head word of mention q in W ). Note that as the current entity cluster configuration E k t is used to generate the cluster-based representations if m is an event mention (and vice verse), it serves as the main mechanism to enable the two coreference tasks to interact and benefit from each other. These vectors are inherited from (Barhom et al., 2019) for a fair comparison.", |
|
"cite_spans": [ |
|
{ |
|
"start": 883, |
|
"end": 904, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Finally, given two mentions m 1 and m 2 , and their corresponding representation vectors R(m 1 ) and R(m 2 ) (as computed above), the coreference score functions S E and S V send the concatenated vector [R(m 1 ), R(m 2 ), R(m 1 ) R(m 2 )] to twolayer feed-forward networks (separate ones for S E and S V ) that involve the sigmoid function in the end to produce coreference score for m 1 and m 2 . Here, is the element-wise product. This completes the description of our CDECR model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Dataset: We use the ECB+ dataset (Cybulska and Vossen, 2014) to evaluate the CDECR models in this work. Note that ECB+ is the largest dataset with both within-and cross-document annotation for the coreference of entity and event mentions so far. We follow the setup and split for this dataset in prior work to ensure a fair comparison (Cybulska and Vossen, 2014; Kenyon-Dean et al., 2018; Barhom et al., 2019; Meged et al., 2020) . In particular, this setup employs the annotation subset that has been validated for correctness by (Cybulska and Vossen, 2014) and involves a larger portion of the dataset for training. In ECB+, only a part of the mentions are annotated. This setup thus utilizes gold-standard event and entity mentions in the evaluation and does not require special treatment for unannotated mentions (Barhom et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 60, |
|
"text": "(Cybulska and Vossen, 2014)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 389, |
|
"end": 409, |
|
"text": "Barhom et al., 2019;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 410, |
|
"end": 429, |
|
"text": "Meged et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 531, |
|
"end": 558, |
|
"text": "(Cybulska and Vossen, 2014)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 817, |
|
"end": 838, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Note that there is a different setup for ECB+ that is applied in (Yang et al., 2015) and (Choubey and Huang, 2017) . In this setup, the full ECB+ dataset is employed, including the portions with known annotation errors. In test time, such prior work utilizes the predicted mentions from a mention extraction tool (Yang et al., 2015) . To handle the partial annotation in ECB+, those prior work only evaluates the systems on the predicted mentions that are also annotated as the gold mentions. However, as shown by (Upadhyay et al., 2016) , this ECB+ setup has several limitations (e.g., the ignorance of clusters with a single mention and the separate evaluation for each sub-topic). Following (Barhom et al., 2019) , we thus do not evaluate the systems on this setup, i.e., not comparing our model with those models in (Yang et al., 2015) and (Choubey and Huang, 2017) due to the incompatibility. Hyper-Parameters: To achieve a fair comparison, we utilize the preprocessed data and extend the implementation for the model in (Barhom et al., 2019) to include our novel hierarchical GCN model. The development dataset of ECB+ is used to tune the hyper-parameters of the proposed model (called HGCN). The suggested values and the resources for our model are reported in Appendix A. Comparison: Following (Barhom et al., 2019) , we compare HGCN with the following baselines:", |
|
"cite_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 84, |
|
"text": "(Yang et al., 2015)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 89, |
|
"end": 114, |
|
"text": "(Choubey and Huang, 2017)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 313, |
|
"end": 332, |
|
"text": "(Yang et al., 2015)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 514, |
|
"end": 537, |
|
"text": "(Upadhyay et al., 2016)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 694, |
|
"end": 715, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 820, |
|
"end": 839, |
|
"text": "(Yang et al., 2015)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 844, |
|
"end": 869, |
|
"text": "(Choubey and Huang, 2017)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1026, |
|
"end": 1047, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1302, |
|
"end": 1323, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "(i) LEMMA (Barhom et al., 2019) : This first clusters documents to topics and then groups event mentions that are in the same document clusters and share the head lemmas.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 31, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "(ii) CV (Cybulska and Vossen, 2015b) : This is a supervised learning method for CDECR that leverages discrete features to represent event mentions and documents. We compare with the best reported results for this method as in (Barhom et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 36, |
|
"text": "(Cybulska and Vossen, 2015b)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 226, |
|
"end": 247, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "(iii) KCP (Kenyon-Dean et al., 2018) : This is a neural network model for CDECR. Both event mentions and document are represented via word embeddings and hand-crafted binary features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 36, |
|
"text": "(Kenyon-Dean et al., 2018)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "(iv) C-KCP (Barhom et al., 2019) : This is the KCP model that is retrained and tuned using the same document clusters in the test phase as in (Barhom et al., 2019) and our model. (v) BSE (Barhom et al., 2019) : This is a joint resolution model for cross-document coreference of entity and event mentions, using ELMo to compute representations for the mentions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 32, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 163, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 187, |
|
"end": 208, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "(v) BSE-DJ (Barhom et al., 2019) : This is a variant of BSE that does not use the cluster-based representations cluster(m) in the mention representations, thus performing event and entity coreference resolution separately.", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 32, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "(vii) MCS (Meged et al., 2020) : An extension of BSE where some re-ranking features are included. Note that BSE and MCS are the current state-ofthe-art (SOTA) models for CDECR on ECB+.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 30, |
|
"text": "(Meged et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For cross-document entity coreference resolution, we compare our model with the LEMMA and BSE models in (Barhom et al., 2019) , the only works that report the performance for event mentions on ECB+ so far. Following (Barhom et al., 2019) , we use the common coreference resolution metrics to evaluate the models in this work, including MUC (Vilain et al., 1995) , B 3 , CEAF-e (Luo, 2005) , and CoNLL F1 (average of three previous metrics). The official CoNLL scorer in (Pradhan et al., 2014) is employed to compute these metrics. Tables 1 and 2 show the performance (F1 scores) of the models for cross-document resolution for entity and event mentions (respectively). Note that we also report the performance of a variant (called HGCN-DJ) of the proposed HGCN model where the cluster-based representations cluster(m) are excluded (thus separately doing event and entity resolution as BSE-DJ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 125, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 237, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 361, |
|
"text": "(Vilain et al., 1995)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 377, |
|
"end": 388, |
|
"text": "(Luo, 2005)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 492, |
|
"text": "(Pradhan et al., 2014)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 531, |
|
"end": 545, |
|
"text": "Tables 1 and 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Model MUC B 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "CEAF-e CoNLL LEMMA (Barhom et al., 2019) 78.1 77.8 73.6 76.5 CV (Cybulska and Vossen, 2015b) 73.0 74.0 64.0 73.0 KCP (Kenyon-Dean et al. 2019) 69.0 69.0 69.0 69.0 CKCP (Barhom et al., 2019) 73.4 75.9 71.5 73.6 BSE-DJ (Barhom et al., 2019) 79.4 80.4 75.9 78.5 BSE (Barhom et al., 2019) 80.9 80.3 77.3 79.5 MCS (Meged et al., 2020) 81 CEAF-e CoNLL LEMMA (Barhom et al., 2019) 76.7 65.6 60.0 67.4 BSE-DJ (Barhom et al., 2019) 78.7 69.9 61.6 70.0 BSE (Barhom et al., 2019) 79 As can be seen, HGCN outperforms the all the baselines models on both entity and event coreference resolution (over different evaluation metrics). In particular, the CoNLL F1 score of HGCN for event coreference is 1.3% better than those for MCS (the prior SOTA model) while the CoNLL F1 improvement of HGCN over BSE (the prior SOTA model for entity coreference on ECB+) is 1.2%. These performance gaps are significant with p < 0.001, thus demonstrating the effectiveness of the proposed model for CDECR. Importantly, HGCN is significantly better than BSE, the most direct baseline of the proposed model, on both entity and event coreference regardless of whether the cluster-based representations cluster(m) for joint entity and event resolution is used or not. This testifies to the benefits of the proposed hierarchical model for representation learning for CDECR. Finally, we evaluate the full HGCN model when ELMo embeddings are replaced with BERT embeddings (Devlin et al., 2019) , leading to the CoNLL F1 scores of 79.7% and 72.3% for event and entity coreference (respectively). This performance is either worse (for events) or comparable (for entities) than those for EMLo, thus showing the advantages of ELMo for our tasks on ECB+. Ablation Study: To demonstrate the benefits of the proposed components for our CDECR model, we evaluate three groups of ablated/varied models for HGCN. 
First, for the effectiveness of the pruned dependency tree\u011c m and the sentence-level GCN models G ent and G env , we consider the following baselines: (i) HGCN-Sentence GCNs: this model removes the sentence-level GCN models G ent and G env from HGCN and directly feed the ELMobased vectors X of the mention heads into the document-level GCN G doc (the representation vector sent(m) is thus not included), and (ii) HGCN-Pruned Tree: this model replaces the pruned dependency tree\u011c m with the full dependency tree G m in the computation. Second, for the advantage of the GCN interaction between G ent and G env , we examine two baselines: (iii) HGCN with One Sent GCN: this baseline only uses one sentencelevel GCN model for both entity and event mentions (the regularization loss L reg m for G ent and G env is thus not used as well), and (iv) HGCN-L reg m : this baseline still uses two sentence-level GCN models but excludes the regularization term L reg m from the training losses. Finally, for the benefits of the document-level GCN G doc , we study the following variants: (v) HGCN-G doc : this model removes the document-level GCN model from HGCN, thus excluding the document representations doc(m) from the mention representations, (vi) HGCN-G doc +TFIDF: this model also excludes G doc , but it includes the TF-IDF vectors for documents, based on uni-, bi-and tri-grams, in the mention representations (inherited from (Kenyon-Dean et al., 2018)), and (vii) HGCN-G doc +MP: instead of using the GCN model G doc , this model aggregates mention representation vectors produced by the sentence-level GCNs (sent(m)) to obtain the document representations doc(m) using maxpooling. Table 3 presents the performance of the models for event coreference on the ECB+ test set. 
It is clear from the table that all the ablated baselines are significantly worse than the full model HGCN (with p < 0.001), thereby confirming the necessity of the proposed GCN models (the sentence-level GCNs with pruned trees and document-level GCN) and the GCN interaction mechanism for HGCN and CDECR. In addition, the same trends for the model performance are also observed for entity coreference in this ablation study (the results are shown in Appendix B), thus further demonstrating the benefits of the proposed components in this work. Finally, we show the distribution of the error types of our HGCN model in Appendix C for future improvement.", |
|
"cite_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 40, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 64, |
|
"end": 92, |
|
"text": "(Cybulska and Vossen, 2015b)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 117, |
|
"end": 142, |
|
"text": "(Kenyon-Dean et al. 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 189, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 217, |
|
"end": 238, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 263, |
|
"end": 284, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 329, |
|
"text": "(Meged et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 373, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 401, |
|
"end": 422, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 447, |
|
"end": 468, |
|
"text": "(Barhom et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1435, |
|
"end": 1456, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 3547, |
|
"end": 3554, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We present a model to jointly resolve the crossdocument coreference of entity and event mentions. Our model introduces a novel hierarchical GCN that captures both sentence and document context for the representations of entity and event mentions. In particular, we design pruned dependency trees to capture important context words for sentencelevel GCNs while interaction graphs between entity and event mentions are employed for documentlevel GCN. In the future, we plan to explore better mechanisms to identify important context words for CDECR.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We use mentions to refer to both event and entity mentions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "t and V k t for entities and events (respectively) after each iteration. As such, each iteration k performs two independent clustering steps where entity mentions are clustered first to produce E k t , followed by event mention clustering to obtain", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "If A m is empty, the pruned tree\u011c m only contains the head word of m.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We tried the pruned dependency tree\u011c m in this regularization, but the full dependency tree G m led to better results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research has been supported by the Army Research Office (ARO) grant W911NF-21-1-0112. This research is also based upon work supported by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via IARPA Contract No. 2019-19051600006 under the Better Extraction from Text Towards Enhanced Retrieval (BETTER) Program. The views and conclusions contained herein are those of the authors and should not be interpreted as necessarily representing the official policies, either expressed or implied, of ARO, ODNI, IARPA, the Department of Defense, or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for governmental purposes notwithstanding any copyright annotation therein. This document does not contain technology or technical data controlled under either the U.S. International Traffic in Arms Regulations or the U.S. Export Administration Regulations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Unsupervised event coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Adrian", |
|
"middle": [], |
|
"last": "Cosmin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanda", |
|
"middle": [], |
|
"last": "Bejan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Harabagiu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cosmin Adrian Bejan and Sanda Harabagiu. 2014. Un- supervised event coreference resolution. In Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "The stages of event extraction", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Ahn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the Workshop on Annotating and Reasoning about Time and Events", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Ahn. 2006. The stages of event extraction. In Proceedings of the Workshop on Annotating and Reasoning about Time and Events.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Revisiting joint modeling of cross-document entity and event coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Shany", |
|
"middle": [], |
|
"last": "Barhom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vered", |
|
"middle": [], |
|
"last": "Shwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Eirew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Bugert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shany Barhom, Vered Shwartz, Alon Eirew, Michael Bugert, Nils Reimers, and Ido Dagan. 2019. Re- visiting joint modeling of cross-document entity and event coreference resolution. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Unsupervised event coreference resolution with rich linguistic features", |
|
"authors": [ |
|
{ |
|
"first": "Cosmin", |
|
"middle": [], |
|
"last": "Bejan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanda", |
|
"middle": [], |
|
"last": "Harabagiu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cosmin Bejan and Sanda Harabagiu. 2010. Unsuper- vised event coreference resolution with rich linguis- tic features. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Graph-based event coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Zheng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 2009 Workshop on Graph-based Methods for Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zheng Chen and Heng Ji. 2009. Graph-based event coreference resolution. In Proceedings of the 2009 Workshop on Graph-based Methods for Natural Lan- guage Processing (TextGraphs-4).", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A pairwise event coreference model, feature impact and evaluation for event coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Zheng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ji", |
|
"middle": [], |
|
"last": "Heng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Haralick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Workshop on Events in Emerging Text Types", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zheng Chen, Heng Ji, and Robert Haralick. 2009. A pairwise event coreference model, feature impact and evaluation for event coreference resolution. In Proceedings of the Workshop on Events in Emerging Text Types.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Event coreference resolution by iteratively unfolding inter-dependencies among events", |
|
"authors": [ |
|
{ |
|
"first": "Prafulla", |
|
"middle": [], |
|
"last": "Kumar Choubey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruihong", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Prafulla Kumar Choubey and Ruihong Huang. 2017. Event coreference resolution by iteratively unfold- ing inter-dependencies among events. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Using a sledgehammer to crack a nut? lexical diversity and event coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Agata", |
|
"middle": [], |
|
"last": "Cybulska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piek", |
|
"middle": [], |
|
"last": "Vossen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Agata Cybulska and Piek Vossen. 2014. Using a sledgehammer to crack a nut? lexical diversity and event coreference resolution. In LREC.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Translating granularity of event slots into features for event coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Agata", |
|
"middle": [], |
|
"last": "Cybulska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piek", |
|
"middle": [], |
|
"last": "Vossen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the The 3rd Workshop on EVENTS: Definition, Detection, Coreference, and Representation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Agata Cybulska and Piek Vossen. 2015a. Translat- ing granularity of event slots into features for event coreference resolution. In Proceedings of the The 3rd Workshop on EVENTS: Definition, Detection, Coreference, and Representation.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "\"bag of events\" approach to event coreference resolution. supervised classification of event templates",
|
"authors": [ |
|
{ |
|
"first": "Agata", |
|
"middle": [], |
|
"last": "Cybulska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"J M" |
|
], |
|
"last": "Piek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Vossen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Int. J. Comput. Linguistics Appl", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Agata Cybulska and Piek T. J. M. Vossen. 2015b. \"bag of events\" approach to event coreference resolution. supervised classification of event templates. In Int. J. Comput. Linguistics Appl.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In NAACL-HLT.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "spaCy 2: Natural language understanding with Bloom embeddings, convolutional neural networks and incremental parsing", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Honnibal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ines", |
|
"middle": [], |
|
"last": "Montani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Honnibal and Ines Montani. 2017. spaCy 2: Natural language understanding with Bloom embed- dings, convolutional neural networks and incremen- tal parsing. In To appear.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Resolving event coreference with supervised representation learning and clusteringoriented regularization", |
|
"authors": [ |
|
{ |
|
"first": "Kian", |
|
"middle": [], |
|
"last": "Kenyon-Dean", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jackie Chi Kit", |
|
"middle": [], |
|
"last": "Cheung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doina", |
|
"middle": [], |
|
"last": "Precup", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kian Kenyon-Dean, Jackie Chi Kit Cheung, and Doina Precup. 2018. Resolving event coreference with supervised representation learning and clustering- oriented regularization. In Proceedings of the Sev- enth Joint Conference on Lexical and Computa- tional Semantics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Semisupervised classification with graph convolutional networks", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Kipf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Welling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas N. Kipf and Max Welling. 2017. Semi- supervised classification with graph convolutional networks. In ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Event detection: Gate diversity and syntactic importance scores for graph convolution neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Tuan", |
|
"middle": [], |
|
"last": "Viet Dac Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thien Huu", |
|
"middle": [], |
|
"last": "Ngo Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Viet Dac Lai, Tuan Ngo Nguyen, and Thien Huu Nguyen. 2020. Event detection: Gate diversity and syntactic importance scores for graph convolution neural networks. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Joint entity and event coreference resolution across documents", |
|
"authors": [ |
|
{ |
|
"first": "Heeyoung", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marta", |
|
"middle": [], |
|
"last": "Recasens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angel", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Heeyoung Lee, Marta Recasens, Angel Chang, Mihai Surdeanu, and Dan Jurafsky. 2012. Joint entity and event coreference resolution across documents. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Joint event extraction via structured prediction with global features", |
|
"authors": [ |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ji", |
|
"middle": [], |
|
"last": "Heng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qi Li, Heng Ji, and Liang Huang. 2013. Joint event extraction via structured prediction with global fea- tures. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Supervised within-document event coreference using information propagation", |
|
"authors": [ |
|
{ |
|
"first": "Zhengzhong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Araki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teruko", |
|
"middle": [], |
|
"last": "Mitamura", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhengzhong Liu, Jun Araki, Eduard Hovy, and Teruko Mitamura. 2014. Supervised within-document event coreference using information propagation. In LREC.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Joint inference for event coreference resolution", |
|
"authors": [ |
|
{

"first": "Jing",

"middle": [],

"last": "Lu",

"suffix": ""

},

{

"first": "Deepak",

"middle": [],

"last": "Venugopal",

"suffix": ""

},

{

"first": "Vibhav",

"middle": [],

"last": "Gogate",

"suffix": ""

},

{

"first": "Vincent",

"middle": [],

"last": "Ng",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jing Lu, Deepak Venugopal, Vibhav Gogate, and Vin- cent Ng. 2016. Joint inference for event coreference resolution. In COLING.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "On coreference resolution performance metrics", |
|
"authors": [ |
|
{ |
|
"first": "Xiaoqiang", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaoqiang Luo. 2005. On coreference resolution per- formance metrics. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "The Stanford CoreNLP natural language processing toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Bauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jenny", |
|
"middle": [], |
|
"last": "Finkel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bethard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Mcclosky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "ACL System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher Manning, Mihai Surdeanu, John Bauer, Jenny Finkel, Steven Bethard, and David McClosky. 2014. The Stanford CoreNLP natural language pro- cessing toolkit. In ACL System Demonstrations.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Paraphrasing vs coreferring: Two sides of the same coin",
|
"authors": [ |
|
{ |
|
"first": "Yehudit", |
|
"middle": [], |
|
"last": "Meged", |
|
"suffix": "" |
|
}, |
|
{

"first": "Avi",

"middle": [],

"last": "Caciularu",

"suffix": ""

},

{

"first": "Vered",

"middle": [],

"last": "Shwartz",

"suffix": ""

},

{

"first": "Ido",

"middle": [],

"last": "Dagan",

"suffix": ""

}
|
], |
|
"year": 2020,
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yehudit Meged, Avi Caciularu, Vered Shwartz, and Ido Dagan. 2020. Paraphrasing vs coreferring: Two sides of the same coin. In ArXiv abs/2004.14979.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Reasoning with latent structure refinement for document-level relation extraction", |
|
"authors": [ |
|
{ |
|
"first": "Guoshun", |
|
"middle": [], |
|
"last": "Nan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhijiang", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Sekulic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guoshun Nan, Zhijiang Guo, Ivan Sekulic, and Wei Lu. 2020. Reasoning with latent structure refinement for document-level relation extraction. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Cross-task instance representation interactions and label dependencies for joint information extraction with graph convolutional networks", |
|
"authors": [ |
|
{ |
|
"first": "Minh", |
|
"middle": [], |
|
"last": "Van Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Viet Dac", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thien Huu", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minh Van Nguyen, Viet Dac Lai, and Thien Huu Nguyen. 2021. Cross-task instance representation interactions and label dependencies for joint in- formation extraction with graph convolutional net- works. In NAACL-HLT.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "New york university 2016 system for kbp event nugget: A deep learning approach", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Thien Huu Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Meyers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thien Huu Nguyen, Adam Meyers, and Ralph Grish- man. 2016. New york university 2016 system for kbp event nugget: A deep learning approach. In TAC.",
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Graph convolutional networks with argument-aware pooling for event detection", |
|
"authors": [ |
|
{ |
|
"first": "Huu", |
|
"middle": [], |
|
"last": "Thien", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thien Huu Nguyen and Ralph Grishman. 2018. Graph convolutional networks with argument-aware pool- ing for event detection. In AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Scikit-learn: Machine learning in Python", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pedregosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Varoquaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gramfort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Thirion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Grisel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Blondel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Prettenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Dubourg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Vanderplas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Passos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Cournapeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Brucher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Perrot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Duchesnay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duch- esnay. 2011. Scikit-learn: Machine learning in Python. In Journal of Machine Learning Research.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. In NAACL-HLT.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Scoring coreference partitions of predicted mentions: A reference implementation", |
|
"authors": [ |
|
{ |
|
"first": "Xiaoqiang", |
|
"middle": [], |
|
"last": "Sameer Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marta", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Recasens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Strube", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sameer Pradhan, Xiaoqiang Luo, Marta Recasens, Ed- uard Hovy, Vincent Ng, and Michael Strube. 2014. Scoring coreference partitions of predicted men- tions: A reference implementation. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Inter-sentence relation extraction with document-level graph convolutional neural network", |
|
"authors": [ |
|
{ |
|
"first": "Fenia", |
|
"middle": [], |
|
"last": "Sunil Kumar Sahu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Makoto", |
|
"middle": [], |
|
"last": "Christopoulou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sophia", |
|
"middle": [], |
|
"last": "Miwa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ananiadou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sunil Kumar Sahu, Fenia Christopoulou, Makoto Miwa, and Sophia Ananiadou. 2019. Inter-sentence relation extraction with document-level graph convo- lutional neural network. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Jointly extracting event triggers and arguments by dependency-bridge rnn and tensor-based argument interaction", |
|
"authors": [ |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Sha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feng", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baobao", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifang", |
|
"middle": [], |
|
"last": "Sui", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lei Sha, Feng Qian, Baobao Chang, and Zhifang Sui. 2018. Jointly extracting event triggers and argu- ments by dependency-bridge rnn and tensor-based argument interaction. In AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Combination strategies for semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llu\u00eds", |
|
"middle": [], |
|
"last": "M\u00e0rquez I Villodre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Carreras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Comas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "In Journal of Artificial Intelligence Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Surdeanu, Llu\u00eds M\u00e0rquez i Villodre, X. Carreras, and P. Comas. 2007. Combination strategies for se- mantic role labeling. In Journal of Artificial Intelli- gence Research.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "The dots have their values: Exploiting the node-edge connections in graph-based neural models for document-level relation extraction", |
|
"authors": [ |
|
{ |
|
"first": "Minh", |
|
"middle": [ |
|
"Trung" |
|
], |
|
"last": "Hieu Minh Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thien Huu", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "EMNLP Findings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hieu Minh Tran, Minh Trung Nguyen, and Thien Huu Nguyen. 2020. The dots have their values: Exploit- ing the node-edge connections in graph-based neu- ral models for document-level relation extraction. In EMNLP Findings.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Graph convolutional networks for event causality identification with rich document-level structures", |
|
"authors": [ |
|
{ |
|
"first": "Minh", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thien", |
|
"middle": [], |
|
"last": "Huu Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minh Tran and Thien Huu Nguyen. 2021. Graph con- volutional networks for event causality identifica- tion with rich document-level structures. In NAACL- HLT.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Revisiting the evaluation for cross document event coreference", |
|
"authors": [ |
|
{ |
|
"first": "Shyam", |
|
"middle": [], |
|
"last": "Upadhyay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christos", |
|
"middle": [], |
|
"last": "Christodoulopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shyam Upadhyay, Nitish Gupta, Christos Christodoulopoulos, and Dan Roth. 2016. Re- visiting the evaluation for cross document event coreference. In COLING.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Inducing rich interaction structures between words for document-level event argument extraction", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Amir Pouran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franck", |
|
"middle": [], |
|
"last": "Veyseh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dernoncourt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Varun", |
|
"middle": [], |
|
"last": "Quan Hung Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidan", |
|
"middle": [], |
|
"last": "Manjunatha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajiv", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doo", |
|
"middle": [ |
|
"Soon" |
|
], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thien Huu", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 25th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Pouran Ben Veyseh, Franck Dernoncourt, Quan Hung Tran, Varun Manjunatha, Lidan Wang, Rajiv Jain, Doo Soon Kim, Walter Chang, and Thien Huu Nguyen. 2021. Inducing rich interac- tion structures between words for document-level event argument extraction. In Proceedings of the 25th Pacific-Asia Conference on Knowledge Discov- ery and Data Mining (PAKDD).", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Graph based neural networks for event factuality prediction using syntactic and semantic structures", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Amir Pouran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thien", |
|
"middle": [], |
|
"last": "Veyseh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dejing", |
|
"middle": [], |
|
"last": "Huu Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Pouran Ben Veyseh, Thien Huu Nguyen, and De- jing Dou. 2019. Graph based neural networks for event factuality prediction using syntactic and se- mantic structures. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Graph transformer networks with syntactic and semantic structures for event argument extraction", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Amir Pouran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tuan", |
|
"middle": [], |
|
"last": "Veyseh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thien Huu", |
|
"middle": [], |
|
"last": "Ngo Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "EMNLP Findings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Pouran Ben Veyseh, Tuan Ngo Nguyen, and Thien Huu Nguyen. 2020a. Graph transformer net- works with syntactic and semantic structures for event argument extraction. In EMNLP Findings.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Introducing syntactic structures into target opinion word extraction with deep learning",
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Amir Pouran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nasim", |
|
"middle": [], |
|
"last": "Veyseh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nouri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020,
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Pouran Ben Veyseh, Nasim Nouri, Franck Der- noncourt, Dejing Dou, and Thien Huu Nguyen. 2020b. Introducing syntactic structures into target opinion word extraction with deep learning. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "A model-theoretic coreference scoring scheme",
|
"authors": [ |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Vilain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Burger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Aberdeen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995,
|
"venue": "Sixth Message Understanding Conference (MUC-6)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marc Vilain, John Burger, John Aberdeen, Dennis Con- nolly, and Lynette Hirschman. 1995. A model- theoretic coreference scoring scheme. In Sixth Mes- sage Understanding Conference (MUC-6).", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "A hierarchical distance-dependent Bayesian model for event coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Bishan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Frazier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "TACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bishan Yang, Claire Cardie, and Peter Frazier. 2015. A hierarchical distance-dependent Bayesian model for event coreference resolution. In TACL.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"num": null, |
|
"text": "The pruned dependency tree for the event mention in S1. The trigger is red while argument heads are blue. club on Thursday.", |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table><tr><td>Model</td><td>MUC</td><td>B 3</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "The cross-document event coreference resolution performance (F1) on the ECB+ test set." |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "The cross-document entity coreference resolution performance (F1) on the ECB+ test set." |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "The CDECR F1 scores on the ECB+ test set." |
|
} |
|
} |
|
} |
|
} |