|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:57:55.327134Z" |
|
}, |
|
"title": "GTN-ED: Event Detection Using Graph Transformer Networks", |
|
"authors": [ |
|
{ |
|
"first": "Sanghamitra", |
|
"middle": [], |
|
"last": "Dutta", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Tanay", |
|
"middle": [ |
|
"Kumar" |
|
], |
|
"last": "Saha", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University", |
|
"location": {} |
|
}, |
|
"email": "tsaha@dataminr.com" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Tetreault", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University", |
|
"location": {} |
|
}, |
|
"email": "jtetreault@dataminr.com" |
|
}, |
|
{ |
|
"first": "Alejandro", |
|
"middle": [], |
|
"last": "Jaimes", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University", |
|
"location": {} |
|
}, |
|
"email": "ajaimes@dataminr.com" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Recent works show that the graph structure of sentences, generated from dependency parsers, has potential for improving event detection. However, they often only leverage the edges (dependencies) between words, and discard the dependency labels (e.g., nominal-subject), treating the underlying graph edges as homogeneous. In this work, we propose a novel framework for incorporating both dependencies and their labels using a recently proposed technique called Graph Transformer Networks (GTN). We integrate GTNs to leverage dependency relations on two existing homogeneousgraph-based models, and demonstrate an improvement in the F1 score on the ACE dataset.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Recent works show that the graph structure of sentences, generated from dependency parsers, has potential for improving event detection. However, they often only leverage the edges (dependencies) between words, and discard the dependency labels (e.g., nominal-subject), treating the underlying graph edges as homogeneous. In this work, we propose a novel framework for incorporating both dependencies and their labels using a recently proposed technique called Graph Transformer Networks (GTN). We integrate GTNs to leverage dependency relations on two existing homogeneousgraph-based models, and demonstrate an improvement in the F1 score on the ACE dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Event detection is an important task in natural language processing, which encompasses predicting important incidents in texts, e.g., news, tweets, messages, and manuscripts (Yang and Mitchell, 2016; Nguyen et al., 2016; Feng et al., 2016; Du and Cardie, 2020; McClosky et al., 2011; Ji and Grishman, 2008; Liao and Grishman, 2010; Li et al., 2013; Yang et al., 2019) . As an example, consider the following sentence: The plane arrived back to base safely. Here, the word arrived is an event trigger that denotes an event of the type \"Movement:Transport,\" while \"The plane\" and \"base\" are its arguments. Given a sentence, the objective of the event detection task is to predict all such event triggers and their respective types.", |
|
"cite_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 199, |
|
"text": "(Yang and Mitchell, 2016;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 220, |
|
"text": "Nguyen et al., 2016;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 221, |
|
"end": 239, |
|
"text": "Feng et al., 2016;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 240, |
|
"end": 260, |
|
"text": "Du and Cardie, 2020;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 283, |
|
"text": "McClosky et al., 2011;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 284, |
|
"end": 306, |
|
"text": "Ji and Grishman, 2008;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 307, |
|
"end": 331, |
|
"text": "Liao and Grishman, 2010;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 332, |
|
"end": 348, |
|
"text": "Li et al., 2013;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 349, |
|
"end": 367, |
|
"text": "Yang et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Recent works on event detection (Nguyen and Grishman, 2018; Liu et al., 2018; Yan et al., 2019; Balali et al., 2020) employ graph based methods (Graph Convolution Networks (Kipf and Welling, 2017) ) using the dependency graph (shown in Fig. 1 ) generated from syntactic dependencyparsers. These methods are able to capture useful non-local dependencies between words that are * S. Dutta was a research intern at Dataminr. relevant for event detection. However, in most of these works (with the notable exception of Cui et al. (2020) ), the graph is treated as a homogeneous graph, and the dependency labels (i.e., edge-types in the graph) are ignored.", |
|
"cite_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 59, |
|
"text": "(Nguyen and Grishman, 2018;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 60, |
|
"end": 77, |
|
"text": "Liu et al., 2018;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 78, |
|
"end": 95, |
|
"text": "Yan et al., 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 96, |
|
"end": 116, |
|
"text": "Balali et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 144, |
|
"end": 196, |
|
"text": "(Graph Convolution Networks (Kipf and Welling, 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 515, |
|
"end": 532, |
|
"text": "Cui et al. (2020)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 236, |
|
"end": 242, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Dependency labels can often better inform whether a word is a trigger or not. Consider the two sentences in Fig. 1 . In both the sentences, there is an edge between \"police\" and \"fired\". A model that does not take into account dependency labels will only have access to the information that they are connected. However, in the first sentence, \"fired\" is an event trigger of type \"Conflict:Attack,\" whereas in the second sentence, it is of type \"Personnel:End Position.\" The fact that the edge label between \"police\" and \"fired\" is a nominal-subject or an object relation serves as an indicator of the type of event trigger. Hence, leveraging the dependency labels can help improve the event detection performance.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 114, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
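
{

"text": "As a minimal illustration of the dependency labels in question, the following sketch prints (head, dependent, label) triples for two short sentences. The sentences are hypothetical stand-ins for the Fig. 1 examples (the figure text is not reproduced here), and spaCy is used purely for illustration in place of the Stanford CoreNLP parser used in this paper.\n\nimport spacy\n\nnlp = spacy.load(\"en_core_web_sm\")\nsentences = [\"The police fired tear gas at the crowd.\",  # active: 'police' is nsubj of 'fired'\n             \"The police chief was fired yesterday.\"]   # passive: the subject is nsubjpass\nfor sent in sentences:\n    doc = nlp(sent)\n    # one (head, dependent, label) triple per dependency edge\n    print([(tok.head.text, tok.text, tok.dep_) for tok in doc])\n\nThe differing labels on the edge into \"fired\" (nominal subject vs. passive nominal subject) are exactly the kind of signal that a homogeneous graph discards.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},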
|
{ |
|
"text": "In this work, we propose a simple method to employ the dependency labels into existing models inspired from a recently proposed technique called Graph Transformer Networks (GTN) (Yun et al., 2019) . GTNs enable us to learn a soft selection of edge-types and composite relations (e.g., multi-hop connections, called meta-paths) among the words, thus producing heterogeneous adjacency matrices.", |
|
"cite_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 196, |
|
"text": "(Yun et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We integrate GTNs into two homogeneousgraph-based models (that previously ignored the dependency relations), namely, a simple gated-graphconvolution-based model inspired by Nguyen and Grishman (2018); Liu et al. (2018) ; Balali et al. (2020) , and the near-state-of-the-art MOGANED model (Yan et al., 2019) , enabling them to now leverage the dependency relations as well. Our method demonstrates a relative improvement in the F1 score on the ACE dataset (Walker et al., 2006) for both models, proving the value of leveraging dependency relations for a graph-based model. While the goal of this paper is not to establish a state-ofthe-art (SOTA) method, but rather to show the merit of our approach, we do note that the improvements with our method approach the current SOTA (Cui et al., 2020 ) (which leverages dependency relations using embeddings instead of GTNs).", |
|
"cite_spans": [ |
|
{ |
|
"start": 201, |
|
"end": 218, |
|
"text": "Liu et al. (2018)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 221, |
|
"end": 241, |
|
"text": "Balali et al. (2020)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 288, |
|
"end": 306, |
|
"text": "(Yan et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 455, |
|
"end": 476, |
|
"text": "(Walker et al., 2006)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 775, |
|
"end": 792, |
|
"text": "(Cui et al., 2020", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To summarize, our main contribution is a method of enabling existing homogeneous-graph-based models to exploit dependency labels for event detection, inspired from GTNs. Incorporating GTNs in NLP tasks has received less attention (also see recent related work Veyseh et al. (2020)). Notations: We denote matrices and vectors in bold, e.g., A (matrix) or a (vector). Note that, A(u, v) denotes the element at index (u, v) in matrix A.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we incorporate GTNs onto two homogeneous-graph-based models: (i) Model I: a gated-graph-convolution-based model inspired by Nguyen and Grishman (2018); Liu et al. 2018; Balali et al. (2020) ; and (ii) Model II: MOGANED model (Yan et al., 2019) . Both models have a similar initial embedding and BiLSTM module, followed by a graph-based module (where their differences lie), and finally a classification module. Embedding and BiLSTM Module: Our initial module (shown in Fig. 2 ) is similar to existing works (e.g., Yan et al. (2019) ). Each word of the sentence is represented by a token which consists of the word embedding, the POS tag embedding, the Named-Entity type embedding, and its positional embedding. For a sentence of n words, we denote this sequence of tokens as", |
|
"cite_spans": [ |
|
{ |
|
"start": 183, |
|
"end": 203, |
|
"text": "Balali et al. (2020)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 239, |
|
"end": 257, |
|
"text": "(Yan et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 528, |
|
"end": 545, |
|
"text": "Yan et al. (2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 483, |
|
"end": 489, |
|
"text": "Fig. 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
|
|
{ |
|
"text": "Next, we introduce a BiLSTM to encode X into its context", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
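
{

"text": "A minimal PyTorch sketch of this embedding + BiLSTM module follows (an illustration, not the authors' code; the vocabulary sizes are placeholders, while the embedding and hidden dimensions follow Appendix C):\n\nimport torch\nimport torch.nn as nn\n\nclass EmbeddingBiLSTM(nn.Module):\n    def __init__(self, n_words=20000, n_pos=50, n_ner=20, max_len=50):\n        super().__init__()\n        self.word = nn.Embedding(n_words, 100)      # word embedding\n        self.pos = nn.Embedding(n_pos, 50)          # POS-tag embedding\n        self.ner = nn.Embedding(n_ner, 50)          # Named-Entity-type embedding\n        self.position = nn.Embedding(max_len, 50)   # positional embedding\n        # token x_i is the 250-dim concatenation; p_i is 2*100 = 200-dim\n        self.bilstm = nn.LSTM(250, 100, batch_first=True, bidirectional=True)\n\n    def forward(self, words, pos, ner, positions):\n        x = torch.cat([self.word(words), self.pos(pos),\n                       self.ner(ner), self.position(positions)], dim=-1)\n        p, _ = self.bilstm(x)  # P = p_0, ..., p_{n-1}\n        return p",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Proposed Method",

"sec_num": "2"

},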
|
|
{ |
|
"text": ", and || denotes the concatenation operation. P is then fed to the graphbased module, as discussed next. Graph-Based Module: We first introduce the basic unit of both Model I and II, i.e., gatedgraph-convolution network (see Fig. 3 ). Let", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 225, |
|
"end": 231, |
|
"text": "Fig. 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
|
|
{ |
|
"text": "f u (H k , A) = n\u22121 v=0 G k A (u, v)(W k A h k v + b k A ). (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Here, W k A and b k A are the weight matrix and bias item for the adjacency matrix A at layer k, and G k A (u, v) is the gated-importance, given by", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
|
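
{

"text": "The gated-graph-convolution operation of equation (1) can be sketched in PyTorch as follows (a sketch under the definitions above, not the authors' implementation; a sigmoid stands in for the unspecified activation $\\sigma$ and the dimension is a placeholder):\n\nimport torch\nimport torch.nn as nn\n\nclass GatedGraphConv(nn.Module):\n    def __init__(self, dim):\n        super().__init__()\n        self.W = nn.Linear(dim, dim)   # W_A h_v + b_A\n        self.att = nn.Linear(dim, 1)   # w_att h_v + b_att (scalar gate per node)\n\n    def forward(self, H, A):\n        # H: (n, dim) node states; A: (n, n) adjacency matrix\n        gate = torch.sigmoid(self.att(H)).squeeze(-1)  # sigma(w_att h_v + b_att)\n        G = A * gate.unsqueeze(0)                      # G(u, v) = A(u, v) * gate(v)\n        return G @ self.W(H)                           # f_u = sum_v G(u, v)(W h_v + b)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Proposed Method",

"sec_num": "2"

},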
|
{ |
|
"text": "A dependency parser, e.g., Stanford Core NLP (Manning et al., 2014), generates a directed heterogeneous graph G for each sentence (recall Fig. 1 ). Existing works typically do not use the dependency labels (e.g., nominal-subject); they only derive three homogeneous adjacency matrices from G as follows: (i) A f wd where A f wd (i, j) = 1 if there is an edge from node i to j; (ii) A rev where A rev (i, j) = 1 if there is an edge from node j to i; and (iii) A loop which is an identity matrix.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 144, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
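
{

"text": "The three homogeneous adjacency matrices can be built directly from the parser's edge list; a minimal NumPy sketch (the indices and sentence length in the usage line are hypothetical):\n\nimport numpy as np\n\ndef build_adjacency(n, edges):\n    # edges: (head, dependent) index pairs from the dependency parse\n    A_fwd = np.zeros((n, n))\n    for i, j in edges:\n        A_fwd[i, j] = 1.0        # directed edge i -> j\n    A_rev = A_fwd.T.copy()       # reversed edges\n    A_loop = np.eye(n)           # self-loops\n    return A_fwd, A_rev, A_loop\n\n# toy 5-word sentence with a hypothetical parse\nA_fwd, A_rev, A_loop = build_adjacency(5, [(1, 0), (1, 3), (3, 2), (1, 4)])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Proposed Method",

"sec_num": "2"

},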
|
{ |
|
"text": "For Model I (see Fig. 4 (Left)), the output of the k-th layer (input to k +1-th layer) is given by h k+1 u =ReLu( A\u2208{A f wd ,Arev,A loop } f u (H k , A)). The first layer of gated-graph-convolution network captures dependencies between immediate neighbors (1-hop). To capture K-hop dependencies, Model I has K consecutive layers of such gated- Figure 5 : GTN to obtain heterogeneous adjacency matrix of meta-path length 1 (Recall Fig. 1 for the graph) .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 17, |
|
"end": 23, |
|
"text": "Fig. 4", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 352, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 430, |
|
"end": 451, |
|
"text": "Fig. 1 for the graph)", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "graph-convolution networks. The output of this graph-based module is then fed to a multi-layer perceptron (MLP) with attention weights for classifying each word into its event-type (or \"not an event\").", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
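
{

"text": "Model I's graph-based module thus amounts to K stacked layers of this unit, summed over the three adjacency matrices; a sketch reusing the GatedGraphConv class from above:\n\nimport torch\nimport torch.nn as nn\n\nclass ModelIGraphModule(nn.Module):\n    def __init__(self, dim, K):\n        super().__init__()\n        # one gated convolution per adjacency matrix, per layer\n        self.layers = nn.ModuleList(\n            [nn.ModuleList([GatedGraphConv(dim) for _ in range(3)])\n             for _ in range(K)])\n\n    def forward(self, H, A_fwd, A_rev, A_loop):\n        for convs in self.layers:\n            H = torch.relu(sum(conv(H, A) for conv, A\n                               in zip(convs, (A_fwd, A_rev, A_loop))))\n        return H  # then fed to the attention-weighted MLP classifier",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Proposed Method",

"sec_num": "2"

},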
|
{ |
|
"text": "In Model II, instead of passing the BiLSTM output P through a series of K consecutive gatedgraph-convolution layers (to capture K-hop connections), this model separately aggregates the outputs of T parallel graph-convolution layers with separate adjacency matrices representing hops of length 1, 2, . . . , T (see Fig. 4 (Right) ). Let H 0 (= P ) be the input and H 1 be the output of the graph-based module of Model II (which effectively has only one layer, i.e., k=0). In Yan et al. (2019) , the homogeneous adjacency matrices A f wd , A rev , and A loop are considered with their corresponding t-hop adjacency matrices A t f wd , A t rev , and A t loop (multiplied t times) respectively. The output of the graph-based module is given by:", |
|
"cite_spans": [ |
|
{ |
|
"start": 474, |
|
"end": 491, |
|
"text": "Yan et al. (2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 328, |
|
"text": "Fig. 4 (Right)", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "h 1 u = T \u22121 t=0 w att,t v t where v t = A\u2208{A f wd ,Arev,A loop } \u03c3(f u (H 0 , A t ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": ". Here, w att,t is an attention-weight (further details in (Yan et al., 2019) ) and \u03c3(\u2022) is the exponential linear unit 1 . Finally, these outputs are passed through an MLP with attention weights for classification. Remark. The reason for using only three matrices instead of a separate adjacency matrix for each edge-type is that it results in an explosion of parameters for the gated-graph-convolution network, as individual weight matrices have to be learnt for each type of edge (see also Nguyen and Grishman (2018)). In this work, we replace the homogeneous matrices A f wd and A rev with heterogeneous adjacency matrices without a significant overhead in the number of parameters, as discussed next. Obtaining Heterogeneous Adjacency Matrices With GTN: Consider a directed heterogeneous graph G with each edge belonging to one of L types. This graph can be represented using a set of L adjacency matrices {A f wd,0 , A f wd,1 , . . . , A f wd,L\u22121 },", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 77, |
|
"text": "(Yan et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "1 The gated-importance G k A (u, v) has subtle differences between Model I and II. each corresponding to a different edge-type (dependency label). A f wd,l (i, j) = 1 if there is a directed edge from node i to j of type l. A GTN obtains a heterogeneous adjacency matrix by learning a convex combination Q f wd = L\u22121 l=0 \u03b1 l A f wd,l (see Fig. 5 ) where \u03b1 = softmax(w) and w is a weight vector that the model learns. The matrix Q f wd is a heterogeneous adjacency matrix with an \"appropriately weighted\" edge between any two nodes that have an edge in any of the L original matrices.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 338, |
|
"end": 344, |
|
"text": "Fig. 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
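
{

"text": "The soft selection itself is tiny; a sketch of the convex combination (following Yun et al. (2019), in our notation; an illustration rather than the authors' implementation):\n\nimport torch\nimport torch.nn as nn\n\nclass SoftAdjacency(nn.Module):\n    # learns Q = sum_l alpha_l A_l with alpha = softmax(w); only L scalars\n    def __init__(self, L):\n        super().__init__()\n        self.w = nn.Parameter(torch.zeros(L))\n\n    def forward(self, A_stack):\n        # A_stack: (L, n, n), one 0/1 adjacency matrix per dependency label\n        alpha = torch.softmax(self.w, dim=0)\n        return torch.einsum(\"l,lnm->nm\", alpha, A_stack)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Proposed Method",

"sec_num": "2"

},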
|
{ |
|
"text": "For Model I, we first generate a set of L adjacency matrices (for L edge-types) corresponding to the directed forward edges, and another set of L adjacency matrices corresponding to the reverse edges. Next, we learn heterogeneous adjacency matrices, i.e., A f wd = Q f wd and A rev = Q rev . Our technique enables baseline Model I to leverage dependency relations by learning only 2L more scalar parameters which is significantly less than learning individual weight matrices for L edge-types.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "For Model II, we not only aim to learn heterogeneous adjacency matrices to replace the homogeneous A f wd and A rev , but also learn heterogeneous adjacency matrices that have an \"appropriately weighted\" edge between every two nodes that are t-hops apart in the original graph G (called a meta-path of length t) so as to replace A t f wd and A t rev . Specifically, for the case of t = 2, GTN first learns two convex combinations Q f wd,0 and Q f wd,1 (each corresponds to meta-paths of length 1), and then computes the product Q f wd,0 Q f wd,1 . Similarly, one can compute a product of t such adjacency matrices to learn meta-paths of length t.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We replace all t-hop adjacency matrices with heterogeneous adjacency matrices of meta-path length t, learnt through GTNs, e.g., A t f wd is replaced by Q f wd,0 Q f wd,1 . . . Q f wd,t\u22121 , where each Q f wd,i is a convex combination of L-adjacency matrices corresponding to the directed forward edges. Similar heterogeneous adjacency matrices of meta-path length t are learnt for the reverse edges as well to replace A t rev . This modification enables the baseline Model II to leverage the dependency relations, by only learning 2Lt more scalar parameters for each t, which is practicable.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
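
{

"text": "A meta-path adjacency of length t is then just a product of t independently parameterized soft adjacencies; a sketch reusing the SoftAdjacency class above:\n\ndef meta_path_adjacency(soft_adjs, A_stack):\n    # soft_adjs: list of t SoftAdjacency modules; A_stack: (L, n, n)\n    Q = soft_adjs[0](A_stack)\n    for layer in soft_adjs[1:]:\n        Q = Q @ layer(A_stack)   # Q_{fwd,0} Q_{fwd,1} ... Q_{fwd,t-1}\n    return Q                     # stands in for A^t_fwd (or A^t_rev) in Model II",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Proposed Method",

"sec_num": "2"

},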
|
{ |
|
"text": "Dataset and Evaluation Metrics: We use the benchmark ACE2005 English dataset (Walker et al., 2006) with the same data split as in prior works (where the sentences from 529, 30, and 40 docu-", |
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 98, |
|
"text": "(Walker et al., 2006)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3" |
|
}, |
|
|
{ |
|
"text": "72.9 76.4 74.6 2 72.1 75.9 74.0 3 73.8 73.6 73.7 ments are used as the training, validation, and test set). We use the Stanford CoreNLP toolkit (Manning et al., 2014) for sentence splitting, tokenizing, POS-tagging and dependency parsing. We use word embeddings trained over the New York Times corpus with Skip-gram algorithm following existing works (Yan et al., 2019) . We evaluate the Precision (P), Recall (R) and F1 score. Model Settings: For Model I, the number of consecutive layers of gated-graph-convolution networks (K) is varied from 1 to 3. For Model II, we use the code 2 with same hyper parameter settings. Performance: For both Models I and II, GTNs demonstrate an improvement of about 1 point F1 score (see Tables 1 and 2 ). The 76.8 F1 score for Model II with GTNs is also quite close to the SOTA performance of 77.6 for this task (Cui et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 166, |
|
"text": "(Manning et al., 2014)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 351, |
|
"end": 369, |
|
"text": "(Yan et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 848, |
|
"end": 866, |
|
"text": "(Cui et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 723, |
|
"end": 737, |
|
"text": "Tables 1 and 2", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To explain the role of GTNs, we examined all the predictions on the validation set using the baseline Model II (no GTNs) and the proposed Model II (with GTNs). We include some specific instances here that we found interesting and insightful.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Examining Specific Predictions For Insights:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We observe that using GTNs makes the predictions more \"precise,\" by reducing the number of false-positive event trigger detections. For instance, He's now national director of Win Without War, and former Congressman Bob Dornan, Republican of California. Here, \"former\" is the only event trigger (type Personnel:End-Position), as is correctly identified by our model. However, the baseline model also falsely identifies \"War,\" as an event trigger of type Conflict:Attack. Another example is: In a monstrous conflict of interest, [...] . Here, the baseline falsely identifies \"conflict,\" as a trigger of type Conflict:Attack. Our model is able to identify \"War,\" and \"conflict\" as non-triggers based on their context in the sentence, while the baseline seems to be over-emphasizing on their literal meaning.", |
|
"cite_spans": [ |
|
{ |
|
"start": 528, |
|
"end": 533, |
|
"text": "[...]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Examining Specific Predictions For Insights:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In some cases, the baseline model also leads to misclassification. For instance, The Apache troop opened its tank guns, [...] . Here, \"opened,\" is an event trigger of type Conflict:Attack, as is correctly identified by our model; however, the baseline misclassifies it as type Movement:Transport.", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 125, |
|
"text": "[...]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Examining Specific Predictions For Insights:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Another interesting example is: [...] Beatriz walked into the USCF Offices in New Windsor and immediately fired 17 staff members. Here, \"walked\" is an event trigger of type Movement:Transport, and \"fired\" is of type Personnel:End-Position. The baseline model misclassifies \"fired\" as Conflict:Attack, while using GTNs help classify it correctly. However, using GTNs can sometimes miss certain event triggers while attempting to be more precise, e.g., \"walked\" is missed when using GTNs while the baseline model identifies it correctly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Examining Specific Predictions For Insights:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Lastly, there are examples where both the baseline and proposed models make the same errors. E.g., I visited all their families. or, I would have shot the insurgent too. Here, both models misclassify \"visited,\" (type Contact:Meet) as Movement:Transport, and \"shot,\" (type Life:Die) as Conflict:Attack. As future work, we are examining alternate techniques that better inform the context of the event trigger in such sentences. Another interesting example is: \"It is legal, and it is done.\" Both models miss \"it,\" (type Transaction:Transfer-Money). For this example (and some other similar examples of anaphora resolution), we believe that it might be quite non-intuitive to classify the event trigger from the sentence alone, and dependencies among sentences from the same article might need to be leveraged to better inform the context, as we will examine in future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Examining Specific Predictions For Insights:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We developed a novel method of enabling existing event extraction models to leverage dependency relations without a significant rise in the number of parameters to be learnt. Our method relies on GTN, and demonstrates an improvement in F1 score over two strong baseline models that do not leverage dependency relations. The benefits of using GTN in an NLP task suggests that other NLP tasks could be improved in the future.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "https://github.com/ll0iecas/MOGANED", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/nlpcl-lab/ace2005-preprocessing", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "There are some subtle differences in the graph-attention mechanisms of Model I and II. In particular, for Model II, the gated-importance G 0 A (u, v) in equation 1is redefined as follows:, \u03b3 is LeakyReLU (with negative input slope \u03b1), and W c,A and W att,A are weight matrices. Further details are provided in Yan et al. (2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 310, |
|
"end": 327, |
|
"text": "Yan et al. (2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A More Details on the MOGANED model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use the same data split as several existing works (Nguyen et al., 2016; Nguyen and Grishman, 2018; Liu et al., 2018; Balali et al., 2020; Yan et al., 2019; Cui et al., 2020; Ji and Grishman, 2008; Liao and Grishman, 2010; Li et al., 2013) , where the sentences from 529, 30, and 40 documents are used as the training, validation, and test set. For preprocessing, we directly used the following code 3 which uses the Stanford Core NLP toolkit (Manning et al., 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 74, |
|
"text": "(Nguyen et al., 2016;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 75, |
|
"end": 101, |
|
"text": "Nguyen and Grishman, 2018;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 102, |
|
"end": 119, |
|
"text": "Liu et al., 2018;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 120, |
|
"end": 140, |
|
"text": "Balali et al., 2020;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 141, |
|
"end": 158, |
|
"text": "Yan et al., 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 159, |
|
"end": 176, |
|
"text": "Cui et al., 2020;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 177, |
|
"end": 199, |
|
"text": "Ji and Grishman, 2008;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 224, |
|
"text": "Liao and Grishman, 2010;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 225, |
|
"end": 241, |
|
"text": "Li et al., 2013)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 445, |
|
"end": 467, |
|
"text": "(Manning et al., 2014)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Data Preprocessing", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For both the models, we select 100 as the dimension of the word embeddings, and 50 as the dimension of all the other embeddings, i.e., POS-tag embedding, Named-Entity-type embedding, and positional embedding. Following prior work, we restrict the length of each sentence to be 50 (truncating long sentences if necessary). We select the hidden units of the BiLSTM network as 100. We choose a batch size of 10, and Adam with initial learning rate of 0.0002. We select the dimension of the graph representation to be 150. When using GTNs, the number of edge-types (L) is 35, which is determined by the number of unique types of dependency relations, e.g., nsubj, case, etc., as obtained from the dependency parser.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C Hyper Parameter Setting", |
|
"sec_num": null |
|
} |
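
,

{

"text": "For reference, the hyperparameters reported above, collected as a plain Python dictionary (a summary of this appendix, not the authors' configuration file):\n\nHPARAMS = {\n    \"word_emb_dim\": 100,\n    \"pos_emb_dim\": 50,\n    \"ner_emb_dim\": 50,\n    \"position_emb_dim\": 50,\n    \"max_sentence_len\": 50,\n    \"bilstm_hidden_units\": 100,\n    \"batch_size\": 10,\n    \"optimizer\": \"Adam\",\n    \"learning_rate\": 0.0002,\n    \"graph_repr_dim\": 150,\n    \"num_edge_types_L\": 35,\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "C Hyper Parameter Setting",

"sec_num": null

}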
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Joint event extraction along shortest dependency paths using graph convolutional networks", |
|
"authors": [ |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Balali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masoud", |
|
"middle": [], |
|
"last": "Asadpour", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ricardo", |
|
"middle": [], |
|
"last": "Campos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Jatowt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2003.08615" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ali Balali, Masoud Asadpour, Ricardo Campos, and Adam Jatowt. 2020. Joint event extraction along shortest dependency paths using graph convolu- tional networks. arXiv preprint arXiv:2003.08615.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Event detection with relation-aware graph convolutional neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Shiyao", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tingwen", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenyu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuebin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinqiao", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2002.10757" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shiyao Cui, Bowen Yu, Tingwen Liu, Zhenyu Zhang, Xuebin Wang, and Jinqiao Shi. 2020. Event detec- tion with relation-aware graph convolutional neural networks. arXiv preprint arXiv:2002.10757.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Event extraction by answering (almost) natural questions", |
|
"authors": [ |
|
{ |
|
"first": "Xinya", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.13625" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xinya Du and Claire Cardie. 2020. Event extrac- tion by answering (almost) natural questions. arXiv preprint arXiv:2004.13625.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A languageindependent neural network for event detection", |
|
"authors": [ |
|
{ |
|
"first": "Xiaocheng", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lifu", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Duyu", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "66--71", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaocheng Feng, Lifu Huang, Duyu Tang, Heng Ji, Bing Qin, and Ting Liu. 2016. A language- independent neural network for event detection. In Proceedings of the 54th Annual Meeting of the As- sociation for Computational Linguistics (Volume 2: Short Papers), pages 66-71.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Refining event extraction through cross-document inference", |
|
"authors": [ |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of ACL-08: Hlt", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "254--262", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Heng Ji and Ralph Grishman. 2008. Refining event ex- traction through cross-document inference. In Pro- ceedings of ACL-08: Hlt, pages 254-262.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Semisupervised classification with graph convolutional networks", |
|
"authors": [ |
|
{

"first": "Thomas",

"middle": [

"N"

],

"last": "Kipf",

"suffix": ""

},

{

"first": "Max",

"middle": [],

"last": "Welling",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "5th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas N. Kipf and Max Welling. 2017. Semi- supervised classification with graph convolutional networks. In 5th International Conference on Learn- ing Representations, ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Joint event extraction via structured prediction with global features", |
|
"authors": [ |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ji", |
|
"middle": [], |
|
"last": "Heng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "73--82", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qi Li, Heng Ji, and Liang Huang. 2013. Joint event extraction via structured prediction with global fea- tures. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 73-82.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Using document level cross-event inference to improve event extraction", |
|
"authors": [ |
|
{ |
|
"first": "Shasha", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "789--797", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shasha Liao and Ralph Grishman. 2010. Using doc- ument level cross-event inference to improve event extraction. In Proceedings of the 48th Annual Meet- ing of the Association for Computational Linguistics, pages 789-797.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Jointly multiple events extraction via attentionbased graph information aggregation", |
|
"authors": [ |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhunchen", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heyan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1247--1256", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiao Liu, Zhunchen Luo, and Heyan Huang. 2018. Jointly multiple events extraction via attention- based graph information aggregation. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1247-1256, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "The stanford corenlp natural language processing toolkit", |
|
"authors": [ |
|
{

"first": "Christopher",

"middle": [

"D"

],

"last": "Manning",

"suffix": ""

},

{

"first": "Mihai",

"middle": [],

"last": "Surdeanu",

"suffix": ""

},

{

"first": "John",

"middle": [],

"last": "Bauer",

"suffix": ""

},

{

"first": "Jenny",

"middle": [

"Rose"

],

"last": "Finkel",

"suffix": ""

},

{

"first": "Steven",

"middle": [],

"last": "Bethard",

"suffix": ""

},

{

"first": "David",

"middle": [],

"last": "McClosky",

"suffix": ""

}
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of 52nd annual meeting of the association for computational linguistics: system demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher D Manning, Mihai Surdeanu, John Bauer, Jenny Rose Finkel, Steven Bethard, and David Mc- Closky. 2014. The stanford corenlp natural language processing toolkit. In Proceedings of 52nd annual meeting of the association for computational linguis- tics: system demonstrations, pages 55-60.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Event extraction as dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Mcclosky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1626--1635", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David McClosky, Mihai Surdeanu, and Christopher D Manning. 2011. Event extraction as dependency parsing. In Proceedings of the 49th Annual Meet- ing of the Association for Computational Linguistics: Human Language Technologies, pages 1626-1635.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Joint event extraction via recurrent neural networks", |
|
"authors": [ |
|
{

"first": "Thien",

"middle": [

"Huu"

],

"last": "Nguyen",

"suffix": ""

},

{

"first": "Kyunghyun",

"middle": [],

"last": "Cho",

"suffix": ""

},

{

"first": "Ralph",

"middle": [],

"last": "Grishman",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "300--309", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thien Huu Nguyen, Kyunghyun Cho, and Ralph Gr- ishman. 2016. Joint event extraction via recurrent neural networks. In Proceedings of the 2016 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, pages 300-309.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Graph convolutional networks with argument-aware pooling for event detection", |
|
"authors": [ |
|
{

"first": "Thien",

"middle": [

"Huu"

],

"last": "Nguyen",

"suffix": ""

},

{

"first": "Ralph",

"middle": [],

"last": "Grishman",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 32nd AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5900--5907", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thien Huu Nguyen and Ralph Grishman. 2018. Graph convolutional networks with argument-aware pool- ing for event detection. In Proceedings of the 32nd AAAI Conference on Artificial Intelligence, pages 5900-5907.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Graph transformer networks with syntactic and semantic structures for event argument extraction", |
|
"authors": [ |
|
{

"first": "Amir",

"middle": [

"Pouran",

"Ben"

],

"last": "Veyseh",

"suffix": ""

},

{

"first": "Tuan",

"middle": [

"Ngo"

],

"last": "Nguyen",

"suffix": ""

},

{

"first": "Thien",

"middle": [

"Huu"

],

"last": "Nguyen",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.13391" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Pouran Ben Veyseh, Tuan Ngo Nguyen, and Thien Huu Nguyen. 2020. Graph transformer networks with syntactic and semantic structures for event argument extraction. arXiv preprint arXiv:2010.13391.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Ace 2005 multilingual training corpus. Linguistic Data Consortium", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Walker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [], |
|
"last": "Strassel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julie", |
|
"middle": [], |
|
"last": "Medero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuaki", |
|
"middle": [], |
|
"last": "Maeda", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "57", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher Walker, Stephanie Strassel, Julie Medero, and Kazuaki Maeda. 2006. Ace 2005 multilin- gual training corpus. Linguistic Data Consortium, Philadelphia, 57:45.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Event detection with multi-order graph convolution and aggregated attention", |
|
"authors": [ |
|
{ |
|
"first": "Haoran", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaolong", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiangbin", |
|
"middle": [], |
|
"last": "Meng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiafeng", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xueqi", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5770--5774", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haoran Yan, Xiaolong Jin, Xiangbin Meng, Jiafeng Guo, and Xueqi Cheng. 2019. Event detection with multi-order graph convolution and aggregated atten- tion. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 5770-5774.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Joint extraction of events and entities within a document context", |
|
"authors": [ |
|
{ |
|
"first": "Bishan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1609.03632" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bishan Yang and Tom Mitchell. 2016. Joint extrac- tion of events and entities within a document context. arXiv preprint arXiv:1609.03632.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Exploring pre-trained language models for event extraction and generation", |
|
"authors": [ |
|
{ |
|
"first": "Sen", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dawei", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linbo", |
|
"middle": [], |
|
"last": "Qiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhigang", |
|
"middle": [], |
|
"last": "Kan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongsheng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5284--5294", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sen Yang, Dawei Feng, Linbo Qiao, Zhigang Kan, and Dongsheng Li. 2019. Exploring pre-trained lan- guage models for event extraction and generation. In Proceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 5284- 5294.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Graph transformer networks", |
|
"authors": [ |
|
{ |
|
"first": "Seongjun", |
|
"middle": [], |
|
"last": "Yun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minbyul", |
|
"middle": [], |
|
"last": "Jeong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raehyun", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaewoo", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hyunwoo J", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11983--11993", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seongjun Yun, Minbyul Jeong, Raehyun Kim, Jaewoo Kang, and Hyunwoo J Kim. 2019. Graph trans- former networks. In Advances in Neural Informa- tion Processing Systems, pages 11983-11993.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A question answering-based framework for one-step event argument extraction", |
|
"authors": [ |
|
{ |
|
"first": "Yunyan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guangluan", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daoyu", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenglong", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingyuan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tinglei", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Access", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "65420--65431", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yunyan Zhang, Guangluan Xu, Yang Wang, Daoyu Lin, Feng Li, Chenglong Wu, Jingyuan Zhang, and Tinglei Huang. 2020. A question answering-based framework for one-step event argument extraction. IEEE Access, 8:65420-65431.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Examples of syntactic dependency parsing.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"text": "Embedding and BiLSTM Module.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"text": "Basic Gated-Graph-Convolution Network.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"text": "(Left) Model I; (Right) Model II.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"content": "<table><tr><td colspan=\"4\">: Performance of gated-graph-convolution-</td></tr><tr><td colspan=\"4\">based models (Model I) for varying number of consec-</td></tr><tr><td colspan=\"4\">utive convolution layers (K): (Left) Baseline models</td></tr><tr><td colspan=\"4\">with no GTNs; (Right) Proposed models with GTNs.</td></tr><tr><td>Method</td><td>P</td><td>R</td><td>F1</td></tr><tr><td>Baseline (no GTNs)</td><td colspan=\"3\">79.5 72.3 75.7</td></tr><tr><td colspan=\"4\">Proposed (with GTNs) 80.9 73.2 76.8</td></tr></table>", |
|
"type_str": "table", |
|
"text": "", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Performance of MOGANED (Model II).", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |