|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:58:13.481525Z" |
|
}, |
|
"title": "Learning Clause Representation from Dependency-Anchor Graph for Connective Prediction", |
|
"authors": [ |
|
{ |
|
"first": "Yanjun", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Pennsylvania State University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Pennsylvania State University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Passonneau", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Pennsylvania State University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Semantic representation that supports the choice of an appropriate connective between pairs of clauses inherently addresses discourse coherence, which is important for tasks such as narrative understanding, argumentation, and discourse parsing. We propose a novel clause embedding method that applies graph learning to a data structure we refer to as a dependencyanchor graph. The dependency anchor graph incorporates two kinds of syntactic information, constituency structure and dependency relations, to highlight the subject and verb phrase relation. This enhances coherencerelated aspects of representation. We design a neural model to learn a semantic representation for clauses from graph convolution over latent representations of the subject and verb phrase. We evaluate our method on two new datasets: a subset of a large corpus where the source texts are published novels, and a new dataset collected from students' essays. The results demonstrate a significant improvement over tree-based models, confirming the importance of emphasizing the subject and verb phrase. The performance gap between the two datasets illustrates the challenges of analyzing student's written text, plus a potential evaluation task for coherence modeling and an application for suggesting revisions to students.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Semantic representation that supports the choice of an appropriate connective between pairs of clauses inherently addresses discourse coherence, which is important for tasks such as narrative understanding, argumentation, and discourse parsing. We propose a novel clause embedding method that applies graph learning to a data structure we refer to as a dependencyanchor graph. The dependency anchor graph incorporates two kinds of syntactic information, constituency structure and dependency relations, to highlight the subject and verb phrase relation. This enhances coherencerelated aspects of representation. We design a neural model to learn a semantic representation for clauses from graph convolution over latent representations of the subject and verb phrase. We evaluate our method on two new datasets: a subset of a large corpus where the source texts are published novels, and a new dataset collected from students' essays. The results demonstrate a significant improvement over tree-based models, confirming the importance of emphasizing the subject and verb phrase. The performance gap between the two datasets illustrates the challenges of analyzing student's written text, plus a potential evaluation task for coherence modeling and an application for suggesting revisions to students.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The clause is a fundamental unit in coherent text. Much work in NLP investigates how clauses combine to form larger units, ultimately spanning a whole discourse (Wang et al., 2017; Ji and Eisenstein, 2014) ; how to decompose complex sentences into distinct propositions (Wang et al., 2018; Narayan et al., 2017) ; how to identify explicit or implicit semantic relations between clauses (Lee and Goldwasser, 2019; Rutherford and Xue, 2015) , or how to select a connective to link multiple clauses into a complex sentence (Nie et al., 2019; Malmi et al., 2018) . In this paper, we", |
|
"cite_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 180, |
|
"text": "(Wang et al., 2017;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 205, |
|
"text": "Ji and Eisenstein, 2014)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 270, |
|
"end": 289, |
|
"text": "(Wang et al., 2018;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 290, |
|
"end": 311, |
|
"text": "Narayan et al., 2017)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 386, |
|
"end": 412, |
|
"text": "(Lee and Goldwasser, 2019;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 413, |
|
"end": 438, |
|
"text": "Rutherford and Xue, 2015)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 520, |
|
"end": 538, |
|
"text": "(Nie et al., 2019;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 539, |
|
"end": 558, |
|
"text": "Malmi et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{

"text": "Figure 1: P1: Bob cooked Tia a burger. P2: Bob cooked himself a burger. Q1: Bob was hungry. Q2: Tia was hungry. Q3: Bob was thirsty. Q4: Tia was thirsty. Licensed connectives: (P1, Q1) alth; (P1, Q2) bec; (P1, Q3) none; (P1, Q4) alth; (P2, Q1) bec; (P2, Q2) alth; (P2, Q3) alth; (P2, Q4) none, where alth(ough) marks contrast and bec(ause) marks a precondition. For the propositions Pm, Qn to be joined by although or because, Pm and Qn must have some semantic commonality to allow for contrast or causation.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "P1",

"sec_num": null

},
|
{ |
|
"text": "Four cases allow although. The two cases that allow because have a strong semantic relation between the predicates (make someone a burger, be hungry) and, there is no conflict in the to-object of the first clause and the subject of the second. The remaining two cases have no commonality, and neither connective can occur.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "P1", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "focus on clause representation to support accurate connective prediction, a task which is important for coherence modeling (Pishdad et al., 2020) , finegrained opinion mining (Wiegand et al., 2015) , argument mining (Kuribayashi et al., 2019; Jo et al., 2020) and argumentation (Park and Cardie, 2014) . We present a case for a model that learns from a novel graph we refer to as a dependency-anchor graph, which retains information from dependency parses and constituency parses of input sentences that is critical for identification of the core proposition of a clause, while omitting structural information that is less relevant. We assume that determining whether two clauses can be joined by a connective, and what connective to choose, depends primarily on the main verb in each clause, and on the arguments that occur in both clauses, particularly the grammatical subject. There are a large number of connectives and connective phrases in English; e.g., the Penn Discourse Tree Bank (Prasad et al., 2008) has 141. Here we illustrate the nature of the problem with respect to the two connectives, although and because. Figure 1 illustrates how the choice of connective to join two simple clauses P m and Q n , and whether a connective is appropriate at all, depends on the main verbs and their arguments. Use of although requires only some dimension of contrast between the joined clauses, while because requires that Q n be a precondition for P m . The table lists two variants of P m with cook as the main verb, one with three distinct entities (Bob, Tia, a burger) and one with two (Bob, burger). These are considered in turn with four variants of Q n where the predicate is either closely related to cook (e.g.be hungry) or not (e.g., be thirsty), and the two propositions share an argument or not. In two of the eight cases, neither connective makes sense because there is no other cohesive relation (e.g., coreference, association) between the clauses. In four cases, there is some similarity and some contrast, which licenses although, and in two cases the more restrictive condition that licenses because is present. These examples illustrate that both the choice of verb, and the grammatical relations of the arguments to the verb, affect whether a connective can be used, and which one. Adding modifiers on the subject or object, or VP or sentence adverbials, would have little effect on choice of connective in these sentences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 145, |
|
"text": "(Pishdad et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 175, |
|
"end": 197, |
|
"text": "(Wiegand et al., 2015)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 242, |
|
"text": "(Kuribayashi et al., 2019;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 243, |
|
"end": 259, |
|
"text": "Jo et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 278, |
|
"end": 301, |
|
"text": "(Park and Cardie, 2014)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 990, |
|
"end": 1011, |
|
"text": "(Prasad et al., 2008)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1125, |
|
"end": 1131, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "P1", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We assume that training clause representations based on connective prediction will be useful for developing representations that capture aspects of coherence, such as those shown in Fig 1. Pishdad et al. (2020) examine a series of coherence evaluation tasks that capture different aspects of coherence. They argue that connective substitution is one of four critical tests of coherence modeling. For example, connectives that express temporal succession should not be substitutable for connectives that express simultaneity, as doing so would change the meaning. Studies of students' writing skills look at connectives with respect to quality of students' argumentative writing (Kuhn et al., 2016) , and whether automated assessments differ for low-skilled versus high-skilled writers (Perin and Lauterbach, 2018) . Although students can fill in correct connectives eliminated from source texts, they typically do not use connectives as precisely in their own writing (Millis et al., 1993) . NLP applications aimed at supporting student revision use connectives as an indication of writing quality (Nguyen et al., 2016; Afrin and Litman, 2018) , but do not help students choose correct connectives. To better evaluate model performance in connective selection, and to highlight differences between text from skillful versus developing writers, we provide two large datasets of clauses linked by connectives drawn from published fiction and from students' written text. We demonstrate the potential for a model trained on expert data to identify incorrect uses of connectives in students' writing, where students frequently misuse connectives like and.", |
|
"cite_spans": [ |
|
{ |
|
"start": 182, |
|
"end": 210, |
|
"text": "Fig 1. Pishdad et al. (2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 678, |
|
"end": 697, |
|
"text": "(Kuhn et al., 2016)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 785, |
|
"end": 813, |
|
"text": "(Perin and Lauterbach, 2018)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 968, |
|
"end": 989, |
|
"text": "(Millis et al., 1993)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1098, |
|
"end": 1119, |
|
"text": "(Nguyen et al., 2016;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1120, |
|
"end": 1143, |
|
"text": "Afrin and Litman, 2018)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "P1", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our contributions are: 1) a data structure we refer to as a Dependency-Anchor graph that incorporates information from both dependency and constituency trees; 2) DAnCE (Dependency-Anchor graph representation for Clause Embedding), a novel neural architecture that exploits bi-LSTMs at the lower layers for learning inter-word influences, and graph learning of relational structure encoded in the dependency-anchor graph; 3) two datasets for carefully edited versus student text. Our approach outperforms the state-of-the-art on connective prediction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "P1", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The question of whether latent representations of sentence meaning can benefit from syntax has been addressed in work that compares recurrence and recursion, and finds the main benefit of recursive models to be better treatment of long-distance dependencies . Two recent works compare tree-based models derived from dependency parses with constituency parses on semantic relatedness tasks, with no clear advantage of one grammar formalism over the other (Tai et al., 2015; Ahmed et al., 2019) . As discussed in (Tai et al., 2015) , dependency trees provide a more compact structure than constituency trees, through shorter paths from the root to leaf words. Further, all of a verb's arguments are its direct dependents. The recursive structure of constituency trees, on the other hand, facilitates identification of subtrees that span more of the leaf words as one moves up the tree, and that have a compositional contribution to the meaning of the sentence. Tree-based models take input from syntactic parses and compose the latent vectors through a uni-directional traversal, where the parent node representation is the sum of the child nodes. For both formalisms, many parameters are needed to encode the child-to-parent representations. For this reason, previous work strictly limits the model dimensionality (Tai et al., 2015; Ahmed et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 454, |
|
"end": 472, |
|
"text": "(Tai et al., 2015;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 473, |
|
"end": 492, |
|
"text": "Ahmed et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 511, |
|
"end": 529, |
|
"text": "(Tai et al., 2015)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1313, |
|
"end": 1331, |
|
"text": "(Tai et al., 2015;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1332, |
|
"end": 1351, |
|
"text": "Ahmed et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To combine advantageous features from both kinds of grammar formalism, we propose dependency-anchor graphs as a compact represen-tation that highlights the core elements of a proposition. We construct a graph with only selected components from two kinds of parse trees, thus limiting the number of parameters to learn. The subject of a clause and the main verb phrase are the two outer nodes in the graph, where we refer to the verb phrase node as the anchor. The subject arc from a dependency parse points from the anchor node to the subject. The anchor node is a subgraph that retains the dependency structure of words within the verb phrase. Other syntactic relations (e.g., involving words in the subject phrase or adverbial phrases), are ignored.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To encode the graph, we propose DAnCE, which applies graph convolution (GCN) (Kipf and Welling, 2017) to encode the arc between the subject and verb phrase. The input to the graph convolution comes from a bidirectional LSTM (Hochreiter and Schmidhuber, 1997) that encodes all the input tokens, including the words outside the subject and verb phrase. The interaction that DAnCE captures between subject and verb phrase has been essential in word representation but missing in tree based models (Weir et al., 2016; White et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 494, |
|
"end": 513, |
|
"text": "(Weir et al., 2016;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 514, |
|
"end": 533, |
|
"text": "White et al., 2018)", |
|
"ref_id": "BIBREF43" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We demonstrate the effectiveness of the dependency-anchor graph and DAnCE architecture through its superior performance over baselines, including tree-based models. The rest of the paper is organized as follows: we first present related work and give a detailed discussion of the Dependency-Anchor Graph and DAnCE. Then we present the datasets, experiments and discussion.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Much research has addressed ways to learn high quality clause representations. Xu et al. (2015) propose a shortest dependency path LSTM for sentence representation in the task of relation classification. Dai and Huang (2018) propose a BiLSTM based model that combines paragraph vectors and word vectors into clause embeddings for a situation entity classification task. Connective prediction has often been addressed: Ji and Eisenstein (2015) and Rutherford et al. (2017) use recursive neural networks with parse trees as input to predict connectives and discourse relations, with solid improvements on PDTB. Malmi et al. (2018) use a decomposable attention model to predict connectives on sentences pairs extracted from Wikipedia. Our work draws on the idea of incorporating syntax into representation for connective prediction, is constructed from its phrase-structure parse (top left) and dependency parse (bottom). Words spanning the VP subtree of the constituency parse (orange nodes) become a single anchor node whose internal structure preserves the dependencies among words in the VP. The nsubj dependent of the main verb is promoted to be a dependent of the entire anchor. specifically for clauses. Sileo et al. (2019) propose a large dataset with 170M sentence pairs with connectives for unsupervised sentence representation learning, and apply it on the SentEval task. Nie et al. 2019develop universal sentence embeddings from a connective prediction task, and create a large corpus extracted from published fiction. They achieve state-of-theart performance on predicting connectives, as well as on sentence embedding benchmarks from Sen-tEval (Conneau and Kiela, 2018) . Our work modifies the corpus from (Nie et al., 2019) to restrict the pairs of sentences for connective prediction to simple sentences. Our goal is to generate clause embeddings specifically for connective prediction, rather than universal sentence representation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 95, |
|
"text": "Xu et al. (2015)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 204, |
|
"end": 224, |
|
"text": "Dai and Huang (2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 418, |
|
"end": 442, |
|
"text": "Ji and Eisenstein (2015)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 447, |
|
"end": 471, |
|
"text": "Rutherford et al. (2017)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 609, |
|
"end": 628, |
|
"text": "Malmi et al. (2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1208, |
|
"end": 1227, |
|
"text": "Sileo et al. (2019)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 1655, |
|
"end": 1680, |
|
"text": "(Conneau and Kiela, 2018)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1717, |
|
"end": 1735, |
|
"text": "(Nie et al., 2019)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The input to DAnCE is a graph for each simple sentence that includes syntactic information from a phrase structure parse to identify the VP, and from a dependency parse to identify the grammatical subject, and dependencies within the VP.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DAnCE Architecture", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The anchor VP and its subject serve as nodes in a graph, as illustrated in Figure 2 . The Stanford CoreNLP dependency grammar has 58 dependency relations, eight of which are a type of subject (Van Valin, 2001; Schuster and Manning, 2016) . The subject in our dependency-anchor graphs can originate as any of these eight types, and is represented as a node with a single subject edge to the anchor. The anchor node has internal graph structure, that replicates the dependency relations among the words in the VP. We align two syntax parses by the words then extract the depen-dencies between words inside the anchor. Each dependency-anchor graph constitutes a complete proposition. The dependency relation from the anchor to the subject, and the other dependencies for words within the VP, differentiate words by their closeness to the root verb of the dependency parse. Words outside the subject-anchor are omitted from the graph to maintain the focus of subject-VP, but they are encoded by the BiLSTM as part of the sequence and contribute to the hidden states.", |
|
"cite_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 209, |
|
"text": "(Van Valin, 2001;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 210, |
|
"end": 237, |
|
"text": "Schuster and Manning, 2016)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 83, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dependency-Anchor graph", |
|
"sec_num": "4.1" |
|
}, |
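
To make the data structure concrete, here is a minimal Python sketch of building a dependency-anchor graph. It is illustrative only: the paper identifies the VP from a constituency parse, while this sketch approximates the anchor from a spaCy dependency parse alone; it assumes the en_core_web_sm model is installed and that the clause has an overt subject, and all function and variable names are ours.

```python
# Illustrative sketch of dependency-anchor graph construction (not the authors' code).
import spacy

nlp = spacy.load("en_core_web_sm")  # assumes this model is installed

# Subset of the eight subject dependency types mentioned above.
SUBJECT_DEPS = {"nsubj", "nsubjpass", "csubj", "csubjpass"}

def dependency_anchor_graph(sentence):
    doc = nlp(sentence)
    root = next(t for t in doc if t.dep_ == "ROOT")                  # main verb
    subject = next(t for t in root.children if t.dep_ in SUBJECT_DEPS)
    # Approximate the VP anchor as the root verb plus its non-subject dependents.
    anchor = [root] + [t for c in root.children
                       if c.dep_ not in SUBJECT_DEPS for t in c.subtree]
    anchor_ids = {t.i for t in anchor}
    # Directed dependency arcs retained inside the anchor, labeled by relation.
    arcs = [(t.head.i, t.i, t.dep_) for t in anchor
            if t.head.i in anchor_ids and t.head.i != t.i]
    # A single subject edge links the whole anchor to the subject node.
    return {"subject": subject.i, "anchor": sorted(anchor_ids), "arcs": arcs}

print(dependency_anchor_graph("Bob cooked Tia a burger."))
```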
|
{ |
|
"text": "To learn a semantic representation from a dependency-anchor graph, DAnCE has the three layers illustrated in Figure 3 . An initial embedding lookup layer retrieves word embeddings. A BiL-STM layer captures the hidden states over the input words at each time step. Finally, a graph convolution layer takes the subject word representation from the BiLSTM (the S node in Figure 3) , and an anchor embedding that is generated from an separate module (the A node in Figure 3) , to produce the final learned semantic representation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 117, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 368, |
|
"end": 377, |
|
"text": "Figure 3)", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 461, |
|
"end": 470, |
|
"text": "Figure 3)", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The input sequence of words x i \u2208 X is first fed into a pre-trained word embedding lookup layer, using GloVe (Pennington et al., 2014) , with a bidirectional LSTM of dimension 2D, where D is the dimension of hidden states in the BiLSTM:", |
|
"cite_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 134, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h i = f (x i , h i\u22121 ), h i \u2208 R 2D", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
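
A minimal PyTorch sketch of the embedding lookup and BiLSTM layers behind Eq. (1); the class name and dimensions are illustrative assumptions, not taken from the paper.

```python
import torch
import torch.nn as nn

class ClauseEncoder(nn.Module):
    def __init__(self, vocab_size, emb_dim=300, hidden=512):
        super().__init__()
        # The paper initializes this lookup table from pre-trained GloVe vectors.
        self.emb = nn.Embedding(vocab_size, emb_dim)
        self.bilstm = nn.LSTM(emb_dim, hidden, bidirectional=True, batch_first=True)

    def forward(self, token_ids):
        # Returns h_i in R^{2D} for every time step (D = hidden size per direction).
        h, _ = self.bilstm(self.emb(token_ids))
        return h
```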
|
{ |
|
"text": "The BiLSTM captures long-term dependencies within the clause. The resulting latent representation for the subject is fed directly to the graph convolution layer. The anchor embedding h A is computed with two alternative settings: Flat-Anchor (FA) and Graph-Anchor (GA). The main difference between the two settings is that FA treats the anchor as a sequence of words with their BiLSTM hidden states h i , and ignores the dependency relations within the anchor. GA turns the dependencies into an adjacency matrix and then generates h A G i as the anchor node representation by encoding the BiLSTM hidden states within the matrix through graph attention (GAT) (Velickovi\u0107 et al., 2018) . GAT will attend to whatever nodes are within the anchor, thus it fits well for learning the anchor representation for any length anchor. We first explain the derivation of h A G i . Following (Marcheggiani and Titov, 2017) , we treat the dependency arcs within the anchor as directed. Given the latent representations of a pair of nodes within the anchor h i , h j , and a one-hot vector for each dependency arc arc i,j , we compute an attention coefficient e i,j :", |
|
"cite_spans": [ |
|
{ |
|
"start": 658, |
|
"end": 683, |
|
"text": "(Velickovi\u0107 et al., 2018)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 878, |
|
"end": 908, |
|
"text": "(Marcheggiani and Titov, 2017)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "e i,j = a(W h h i , W d [h j ||arc i,j ])", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where || is the concatenation operation, and a, W h , W d are learned parameters for the head and the dependent. Then we apply softmax and a Leaky ReLU activation to normalize the attention weights:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u03b1 i.j = LeakyReLU ( exp(e i,j ) m\u2208N A (i) exp(e i,m ) ) (3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where N A (i) represents all nodes in the anchor that are linked to i, including itself. Leaky Relu activation on e i,j enables the network to learn the importance of node j and arc i, j to node i. Therefore, \u03b1 i,j is a vector, whose length is the number of anchor words, that represents differential attention on word pairs associated with their dependency relations. We apply the attention weights on the node features from the first BiLSTM layer:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h A G i = j\u2208N A (i) \u03b1 i.j W A G h j", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Again, there are two alternative settings to generate the anchor embedding. We use maxpool over all the nodes in anchor N A :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h A = M axpool(|| i\u2208N A h i ) if FA M axpool(|| i\u2208N A h A G i ) if GA", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
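
The anchor-embedding step (Eqs. 2-5) can be sketched in PyTorch as below. This is a simplified guess at the computation, not the authors' code: the form of the scorer a, the masking, and the tensor shapes are our assumptions, and we apply LeakyReLU before the softmax as in standard GAT, whereas Eq. (3) writes it outside.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class AnchorEmbedding(nn.Module):
    def __init__(self, dim, n_arc_types, graph_anchor=True):
        super().__init__()
        self.graph_anchor = graph_anchor                           # GA vs. FA setting
        self.W_h = nn.Linear(dim, dim, bias=False)                 # head transform
        self.W_d = nn.Linear(dim + n_arc_types, dim, bias=False)   # dependent + arc one-hot
        self.a = nn.Linear(2 * dim, 1, bias=False)                 # attention scorer (Eq. 2)
        self.W_ag = nn.Linear(dim, dim, bias=False)                # W^{A_G} in Eq. 4

    def forward(self, h, arc_onehot, adj):
        # h: (n, dim) BiLSTM states of the n anchor words
        # arc_onehot: (n, n, n_arc_types) one-hot arc labels
        # adj: (n, n) 0/1 mask of directed dependency arcs, with self loops
        if not self.graph_anchor:
            return h.max(dim=0).values                             # FA branch of Eq. 5
        n, dim = h.shape
        heads = self.W_h(h).unsqueeze(1).expand(n, n, dim)         # row i holds W_h h_i
        deps = self.W_d(torch.cat([h.unsqueeze(0).expand(n, n, dim),
                                   arc_onehot], dim=-1))           # [h_j || arc_ij]
        e = self.a(torch.cat([heads, deps], dim=-1)).squeeze(-1)   # Eq. 2
        e = F.leaky_relu(e).masked_fill(adj == 0, float("-inf"))
        alpha = F.softmax(e, dim=-1)                               # Eq. 3, over N_A(i)
        h_ag = alpha @ self.W_ag(h)                                # Eq. 4
        return h_ag.max(dim=0).values                              # GA branch of Eq. 5
```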
|
{ |
|
"text": "The third layer applies graph convolution (GCN) to the subject hidden states from BiLSTM and subject and anchor nodes, where the subject node is the hidden state from the BiLSTM and the anchor node is the anchor embedding h A . Given a node i, we first compute its GCN node embedding h k+1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "S i from its neighbor N (i), including a self loop, i \u2208 N (i): h k+1 S i = ReLu( j\u2208N (i) W S k h S k j + b S k )", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where k represents the k-order neighbor (the maximum hop between two nodes). W S k and b S k are the learned weights and bias. We use k = 1, as there is only one edge between the subject and anchor, thus h k=0 S i is either the anchor embedding h A or the BiLSTM output for the subject word. The node representation is thus more informative by merging with its relevant neighbor through graph convolution, and enhances the final aggregation. Once the GCN node features are obtained, we compute the final embedding h K S as the average over all node features N k at the last layer K,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h K S = 1 |N K | v\u2208N K h K v , h K S \u2208 R 2D", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
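
Because the subject-anchor graph has only two nodes and one edge, the final GCN layer (Eqs. 6-7) reduces to a very small computation. The following sketch is our simplified reading, with shared weights for both nodes as in Eq. (6); the class name is ours.

```python
import torch
import torch.nn as nn

class SubjectAnchorGCN(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.W = nn.Linear(dim, dim, bias=False)  # W_S in Eq. 6
        self.b = nn.Parameter(torch.zeros(dim))   # b_S in Eq. 6

    def forward(self, h_subject, h_anchor):
        # k = 1: each node aggregates itself and its single neighbor (Eq. 6).
        s = torch.relu(self.W(h_subject) + self.W(h_anchor) + self.b)
        a = torch.relu(self.W(h_anchor) + self.W(h_subject) + self.b)
        # With two nodes and a shared W the two updates coincide; we still
        # average over node features as in Eq. 7.
        return (s + a) / 2
```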
|
{ |
|
"text": "5 Data Collection", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "This section introduces two corpora we use in our experiments. They differ in genre, size, and distribution of connectives, as well as a contrast between spontaneous student writing and carefully edited text. They also differ in the way they were annotated, and in whether they include negative examples. DeSSE (Decomposed Sentences from Student Essays) consists of sentences from students' opinion essays, 78% of which are complex. The annotation of DeSSE rewrites complex sentences into atomic tensed clauses, omitting any discourse connectives. Sentences are considered complex if there are at least two clauses with tensed verbs, thus a sentence consisting of a subject, verb and its clausal argument are not considered complex. The corpus also includes complex sentences with relative clauses rather than connectives, which serve as negative examples for connective prediction. We assume that a model should be able to discriminate between cases where two clauses have a cohesive relation other than one given by a connective. This is analogous to the motivation for inclusion of adversarial examples in a recent corpus for natural language arguments (Niven and Kao, 2019) . In that work, it was shown that transformer models that appeared to perform well without the adversarial examples were exploiting accidental correlations, given that performance degraded significantly once adversarial examples were included. Previous work has shown similar results that neural models for summarization learn more about the position of lead sentences in news articles than about the actual meanings of sentences, due to the lead bias in news (Kedzie et al., 2018) . DeSSE consists of 39K source sentences, with 68 connectives of the 141 connective words and phrases identified in PDTB. Most connectives occur with very low frequency. More than 50% of pairs are connected by and, punctuation, or no connective. Fifty-five of the 68 connectives are rare with frequencies below 1% of the total. A detailed distribution is shown in appendix A. 1 Our second corpus is a modification of the Book corpus, which consists of connective prediction data taken from published novels (Nie et al., 2019) . The Book corpus extracts pairs of simple or complex sentences from source texts, where a connective linked the pair. The original Book corpus contains 15 connectives, and two subsets of 8 and 5 connectives. We created subsets consisting of connectives that joined simple clauses: Book-Simpl 5 with their 5 connectives (285K clause pairs), and Book-Simpl 8 with their 8 (359K clause pairs). Table 1 shows that the average clause length for DeSSE is longer than in Book-Simpl, with one-fifth the total vocabulary. In comparison to Book-Simpl, the language in DeSSE is less formal and coherent.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1156, |
|
"end": 1177, |
|
"text": "(Niven and Kao, 2019)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1638, |
|
"end": 1659, |
|
"text": "(Kedzie et al., 2018)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 2036, |
|
"end": 2037, |
|
"text": "1", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 2167, |
|
"end": 2185, |
|
"text": "(Nie et al., 2019)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 2578, |
|
"end": 2585, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Neural architecture", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "DeSSE includes identification of complex sentences with tensed clauses, and excludes infinitival or gerundive clauses, as a first step towards training corpora for clause identification. It covers a wide range of intra-sentential syntactic and semantic phenomena. It includes all tensed clauses occurring in conjoined structures, including subordinating conjunctions, along with relative clauses, parentheticals, and conjoined verb phrases. It excludes clausal arguments of verbs, because the semantic relationship of the clausal argument in its sentence is given by the verb semantics. The annotation process is unique in that it involves identifying where to split the source sentence into distinct clauses, and how to rephrase the source sentence into a set of complete, independent clauses that omit any discourse connectives. It is designed for developing connective prediction, sentence segmentation and decomposition, and semantic representation. Figure 4 illustrates intra-sentential connectives (then, but) that join two clauses. In example 1), the first clause (in parentheses) contains a free relative clause as a verb argument (in italics). In example 2), the first clause contains a clausal argument of the main verb. In both cases, however, the entire first clause is the first argument of the connective. 2 We collected over 17,000 opinion essays written by U.S. university students in a large undergraduate social science class. Students watched video clips about race relations, and wrote essays in a blog environment to share their opinions with the class. We selected 39K sentences out of 173K for annotation, corresponding to the first 3,592 essays.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1320, |
|
"end": 1321, |
|
"text": "2", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 954, |
|
"end": 962, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "DeSSE", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Amazon Mechanical Turk (AMT) is a popular crowdsourcing platform for NLP annotation. While it facilitates data collection, using untrained annotators requires care. In a series of pilot tasks on AMT, we iteratively designed annotation instructions and an annotation interface, while monitoring quality. Figure 5 illustrates two steps in the annotation: identification of n split points between tensed clauses, and rephrasing the source into n+1 simple clauses, where any connectives are dropped. The final version of the instructions describes the two annotation steps, provides a list of connectives, and illustrates a positive and negative example. 3 The ten most frequent connectives in DeSSE are and, because, when, as, so, or, for, if, also, but. We postprocess the corpus to identify pairs of clauses from complex sentences, and any connectives. The resulting dataset has the following distribution: a single atomic clause (22%), two clauses (45%) or more than two clauses (33%). Given sentences with exactly two atomic clauses in the source, 30% joined them with a discourse connective.", |
|
"cite_spans": [ |
|
{ |
|
"start": 651, |
|
"end": 652, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 303, |
|
"end": 311, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "DeSSE", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Nie et al. (2019) presented the Book corpus, which has 15 frequently used connectives and 4.7M pairs of sentences. Their goal was to exploit the semantic relationship given by the connective prediction task to improve sentence representation, as noted above. The Book corpus contains two versions, Book-5 with 5 connectives: and, but, if, because, when; and Book-8, an extended version with 3 more connectives: before, though, so. The sentences linked by a connective can be simple or complex.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Book-Simpl", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "To create a subset of the Book corpus that is more parallel to DeSSE, we selected Book corpus examples where the connective linked two simple clauses. The new Book-Simpl dataset has a distribution of connectives similar to the Book corpus (see appendix A).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Book-Simpl", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "For our experiments to predict connectives, we use the same classifier used in (Nie et al., 2019) . An input pair of sentence vectors representing the clauses to be joined by a connective are concatenated with vectors resulting from three pairwise vector operations: averaging, subtraction and multiplication. The concatenated vectors are fed into three fully-connected layers, then projected to a lower dimension prior to softmax over the classification categories.", |
|
"cite_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 97, |
|
"text": "(Nie et al., 2019)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "6" |
|
}, |
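
A sketch of this classifier in PyTorch, with the pairwise features described above; layer sizes are placeholders rather than the published hyperparameters.

```python
import torch
import torch.nn as nn

class ConnectiveClassifier(nn.Module):
    def __init__(self, dim, n_classes, hidden=256):
        super().__init__()
        # Input: the two clause vectors plus their average, difference, and product.
        self.mlp = nn.Sequential(
            nn.Linear(5 * dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, n_classes),  # lower-dimensional projection before softmax
        )

    def forward(self, u, v):
        feats = torch.cat([u, v, (u + v) / 2, u - v, u * v], dim=-1)
        return self.mlp(feats)  # logits; softmax / cross-entropy applied outside
```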
|
{ |
|
"text": "Experiments on Book-Simpl predict the correct connective, given positive examples of clause pairs. Experiments on DeSSE predict the correct connective, given positive and negative examples. We compare DAnCE with four baselines on both datasets, reporting accuracy and F1. Student writing is much less coherent than much of the text that applies NLP to tasks related to discourse structure, N=3,466) and DeSSE (N=3, 894) .", |
|
"cite_spans": [ |
|
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "such as discourse connective prediction, discourse parsing, and semantic representation of clauses. We find all models perform better on Book-Simpl than DeSSE, and DAnCE-FA yields good performance on both corpora.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We use three kinds of architecture as baselines: Bag-of-words feed-forward networks (BoW), Treebased LSTM (Tree-LSTM), and sequential LSTM (Seq-LSTM). For BoW group, we include Glove-CNN (Kim, 2014) , a widely used convolutional network for text classification that takes word vectors as input and generates sentence vectors. The Tree-LSTM group includes two models: Dependency Tree-LSTM (Tree) (Tai et al., 2015) , which encodes the dependency parse, and an improved version of Tree-LSTM with attention (Tr-Attn) (Ahmed et al., 2019) . The Seq-LSTM group consists of DisSent (Nie et al., 2019; Conneau et al., 2017 ), a BiLSTM model with max-pooling over all hidden units of sentences, and self attention. Hyperparameters are shown in Appendix D. Our experiments ask two questions: 1) How does DAnCE, which relies on graph convolution, and whose input is a Dependency-Anchor graph, compare with tree-based models? 2) How does DAnCE compare against the two types of models that do not rely on syntax (sequence-based and BOW). The two anchor settings for DAnCE enable us also to test alternative DAnCE settings (DAnCE-FA and DAnCE-GA). Our question here is whether learning from dependency relations within verb phrases through graph attention produces better representations for connective prediction. 4 Table 2 reports mean accuracy and the standard deviation from 16 bootstrapped iterations on 90% of the test data, and F1 for the full test data. Bootstrapped F1 standard deviation shows the same magnitude as accuracy therefore is omitted from the table. Overall, all models report higher accuracy and F1 on Book-Simpl than DeSSE, which suggests that including \"no connectives\" increases the difficulty of the learning task. For Book-Simpl, increasing the number of connectives also increases the prediction difficulty, reflected in lower accuracy and F1 scores for all models on Book-Simpl 8 in comparison to Book-Simpl 5. DAnCE outperforms all baselines, DisSent falls between the two tree variants, and the BoW model has the lowest performance. On DeSSE 5 and 8, however, it is the BoW model that shows the highest accuracy. DisSent achieves the highest F1 on both versions of DeSSE. DAnCE-FA has higher accuracy but slightly lower F1 than DisSent, and both greatly outperform the two tree models and DAnCE-GA.", |
|
"cite_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 198, |
|
"text": "(Kim, 2014)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 395, |
|
"end": 413, |
|
"text": "(Tai et al., 2015)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 514, |
|
"end": 534, |
|
"text": "(Ahmed et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 576, |
|
"end": 594, |
|
"text": "(Nie et al., 2019;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 595, |
|
"end": 615, |
|
"text": "Conneau et al., 2017", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1304, |
|
"end": 1311, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Baselines and settings", |
|
"sec_num": "6.1" |
|
}, |
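
The bootstrapped test protocol described above (16 iterations, each on a random 90% sample of the test set) can be sketched as follows; the helper name is ours.

```python
import random
from statistics import mean, stdev

def bootstrap_accuracy(preds, golds, iters=16, frac=0.9, seed=0):
    """Mean and standard deviation of accuracy over bootstrap samples."""
    rng = random.Random(seed)
    pairs = list(zip(preds, golds))
    accs = []
    for _ in range(iters):
        sample = rng.sample(pairs, int(frac * len(pairs)))  # 90% without replacement
        accs.append(sum(p == g for p, g in sample) / len(sample))
    return mean(accs), stdev(accs)
```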
|
{ |
|
"text": "Recall that DeSSE includes adversarial samples, hence evaluation on DeSSE may be more revealing in comparison to Book-Simpl. Figure 6 gives a breakdown of F1 by connective on DeSSE 5 and 8 for DAnCE-FA, DisSent and Tree-Attn. It is surprising that for DeSSE 5, Tree-Attn fails completely on and, so, as, while it outperforms DisSent and DAnCE-FA on because, no connective. On DeSSE 8, Tree-Attn fails to predict and, for, if. DAnCE-FA and DisSent have similar F1 scores Table 3 : Ablation studies of DAnCE.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 125, |
|
"end": 133, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF4" |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 477, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Pairs (Obs, Pred) Pairs (and, but) 24.0% (and, but) 18.0% (and, and) 17.0% (and, and) 12.0% (because, but) 7.8% (None, but) 7.3% (None, but)", |
|
"cite_spans": [ |
|
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DeSSE 5 DeSSE 8 (Obs, Pred)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "6.8% (because, but) 5.6% (so, but) 4.1% (and, when) 4.8% on and, when, if, but DAnCE-FA rarely or never predicts the connectives so, for and as. It may be that subtle differences in meaning based on sentence elements apart from the subject and verb phrase are predictive, given the failure of DAnCE-FA to perform at all well on these connectives. To summarize, DAnCE-FA performs comparably to DisSent and shows improvements over tree-based models. DAnCE-GA is worse than DAnCE-FA, which might be attributed to the noisy information introduced by dependency arcs within the anchor.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DeSSE 5 DeSSE 8 (Obs, Pred)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We conducted an ablation test on DAnCE-GA to address the following questions: 1) does the performance drop if subject and verb are not highlighted? and 2) do undirected dependency arcs result in better performance within the anchor. To address the first question, we remove the GCN layer (-GCN). To address the second, we remove the directionality of dependency arcs inside the anchor to produce a symmetric adjacency matrix (-DIR). Table 3 presents F1 scores on the for sets of connectives from Book-Simpl and DeSSE. Compared to the DAnCE variants presented in Table 2 , removing the emphasis on subject and verb significantly lowers the performance, especially on DeSSE. Using a symmetric adjacency matrix for graph attention results in lower performance on Book-Simpl, but surprisingly higher F1 on DeSSE. This shows that our emphasis on the subject and verb phrase enhances clause representation. However, incorporating more dependency arcs within the anchor degrades the performance. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 433, |
|
"end": 440, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 562, |
|
"end": 569, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Experiment", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Here we discuss the potential to suggest an alternative connective for students when their choice of connective differs from a connective predicted by a model that has been trained on professionally written text. The benefits of this analysis are two-fold: it explores the feasibility of an education application to help students revise their choice of connective, and it allows us to examine DAnCE's ability to model aspects of coherence that pertain to choice of connective. For all pairs of sentences in DeSSE 5 and 8, we compared the observed choice made by the student writer with the prediction from DAnCE-FA trained on Book Simpl 5 or Book Simpl 8. Table 4 shows the five most frequent pairs of student choice in DeSSE 5 or DeSSE 8 versus the prediction from the model trained on Book Simpl 5 (left columns) or trained on Book Simpl 8 (right columns). As illustrated, in many of the cases where students used and, the model trained on text from professional writers predicts but. Figure 5 shows a few examples where a student used the semantically neutral conjunction and, the model predicted a more specific conjunction, and the model's prediction seems more precise. Future work will investigate in detail the feasibility of suggesting alternative connectives.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 656, |
|
"end": 663, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 987, |
|
"end": 995, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
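
The comparison behind Table 4 is a simple tally of (observed, predicted) connective pairs; a minimal sketch, with hypothetical helper and argument names:

```python
from collections import Counter

def disagreement_table(observed, predicted, top=5):
    """Most frequent (observed, predicted) pairs, as percentages of all pairs."""
    pairs = Counter(zip(observed, predicted))
    total = sum(pairs.values())
    return [(obs, pred, 100 * n / total)
            for (obs, pred), n in pairs.most_common(top)]
```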
|
{ |
|
"text": "This paper presented the dependency-anchor graph, a new data structure emphasizing the propositional structure of clauses, and DAnCE, a neural architecture with a distinct module for learning verb phrase representation, and graph convolution for semantic relation between the verb phrase and its subject.DAnCE shows good performance on two datasets for connective prediction, and introduces a potential application that could help students revise their writing through improved choice of connectives. Future work will extend DAnCE for coherence modeling within and across sentences, and for applications to support students' revisions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "A Connective distributions in Book-Simpl and DeSSE The instruction illustrate the two phases of annotation. The annotator first chooses whether to add one or more split points to an input sentence, where the word after a split point represents the first word of a new segment. Once an annotator has identified the split points, which happens on the first page of the AMT interface, shown as Figure 9 , a second view of the interface appears. Figure 10 shows the second view when annotators rewrite the segments. Every span of words defined by split points (or the original sentence if no split points), appears in its own text entry box for the annotator to rewrite. Annotators cannot submit if they remove all the words from a text entry box. They are instructed to rewrite each text span as a complete sentence, and to leave out the discourse connectives. Several auto-checking and warnings are applied in the interface to reassure the quality. If a rewrite contains a discourse connective, a warning box pops up asking if they should drop the discourse connective before submitting it. A warning box will show up if annotators use vocabulary outside the original sentence. To prevent annotators from failing to rewrite, we monitored the output, checking for cases where they submitted the text spans with no rewriting. Annotators are prohibited to submit if the interface detects an empty rewrite box or", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 391, |
|
"end": 399, |
|
"text": "Figure 9", |
|
"ref_id": "FIGREF7" |
|
}, |
|
{ |
|
"start": 442, |
|
"end": 451, |
|
"text": "Figure 10", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Size and but because if when before though so Book 5 3054K 31 32 15 5 16 na na na Book-Simpl 5 285k 33 28 8 5 27 na na na Book 8 3435K 28 28 5 13 14 6 3 2 Book-Simpl 8 359K 29 24 4 7 23 8 3 1 Table 6 : Number of sentence pairs (Size), and the distribution of connectives (as percentages) for the original Book corpus and our modified version.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 46, |
|
"end": 235, |
|
"text": "Book 5 3054K 31 32 15 5 16 na na na Book-Simpl 5 285k 33 28 8 5 27 na na na Book 8 3435K 28 28 5 13 14 6 3 2 Book-Simpl 8 359K 29 24 4 7 23 8 3 1 Table 6", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Corpus", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "the total lengths of the rewrites are too short compared to the source sentence. We warned annotators by email that if they failed to produce complete sentences in the rewrite boxes, they would be blocked. Some annotators were blocked, but most responded positively to the warnings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To test the clarity of instruction and interface, the initial 500 sentences were used for evaluating the task quality, each labeled by three turkers (73 turkers overall), using three measures of consistency, all in [0, 1] . Average pairwise boundary similarity (Fournier, 2013) , a very conservative measure of whether annotators produce the same number of segments with boundaries at nearly the same locations, was 0.55. Percent agreement on number of output substrings was 0.80. On annotations with the same number of segments, we measured the average Jaccard score (ratio of set intersection to set union) of words in segments from different annotators, which was 0.88, and words from rephrasings, which was 0.73. With all metrics close to 1, and boundary similarity above 0.5, we concluded quality was already high. During the actual data collection, quality was higher because we monitored quality on daily basis and communicated with turkers who had questions.", |
|
"cite_spans": [ |
|
|
{ |
|
"start": 261, |
|
"end": 277, |
|
"text": "(Fournier, 2013)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C Quality control in DeSSE", |
|
"sec_num": null |
|
}, |
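
The Jaccard agreement measure used above has a direct implementation; a minimal sketch with illustrative inputs:

```python
def jaccard(words_a, words_b):
    """Ratio of set intersection to set union over two annotators' word sets."""
    a, b = set(words_a), set(words_b)
    return len(a & b) / len(a | b) if a | b else 1.0

# e.g., two annotators' rewrites of the same segment
print(jaccard("bob cooked tia a burger".split(),
              "bob cooked a burger for tia".split()))
```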
|
{ |
|
"text": "All the methods take GloVe word embeddings as input. Due to the size difference between Book-Simpl and DeSSE, we use different dimensionalities for the word embeddings (w) and classifier hidden layers (h) with the two corpora on all baseline systems: for Book-Simpl, D w = 300, D h = 512; for DeSSe D w = 100, D h = 256. We train Dis-Sent using the original D h = 4096 for Book-Simpl, and reduce it to 256 for DeSSE. Apart from this one change to DisSent, we use the published settings for all baseline systems. We train DAnCE using the same vector dimensions as for DisSent. Because DAnCE has twice the number of parameters as Dis-Sent, we use the smaller classifier dimensionality of 256 on both corpora. We use SGD as optimizer for DAnCE, with the learning rate at 0.01. Learning rates between [0.1,0.001] did not show obvious performance differences, and 0.01 converged faster. We use earlystopping to prevent overfitting. We did not use dropout, due to a negative impact on performance (cf. (Nie et al., 2019) ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 996, |
|
"end": 1014, |
|
"text": "(Nie et al., 2019)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "D Experiment Settings", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "All training is done on 4 Nvidia RTX 2080 Ti GPUs. The longest training time is 35 hours, for DAnCE on Book-Simpl 8. During testing, we perform 16 bootstrap iterations", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "D Experiment Settings", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "DeSSE is available at https://github.com/ serenayj/DeSSE. DAnCE is available at https:// github.com/serenayj/DAnCE.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As in(Webber and Joshi, 1998), we take connectives to be predicates whose arguments are the clauses they join.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The interface checked for connectives remaining in step two to warn annotators. Details about the interface and quality control are included in appendix B.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We attempt to train a fine-tune BERT on our dataset, however due to the size of the training set we make no success in finetuning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "If you have not experienced what they have experienced), then you will never truly understand", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "(If you have not experienced what they have experi- enced), then you will never truly understand.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "I believe that talking about race more in a civil way can only improve our society), but I can see why other people may have a different opinion", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "(I believe that talking about race more in a civil way can only improve our society), but I can see why other people may have a different opinion.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Annotation and classification of sentence-level revision improvement", |
|
"authors": [ |
|
{ |
|
"first": "Tazin", |
|
"middle": [], |
|
"last": "Afrin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diane", |
|
"middle": [], |
|
"last": "Litman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Thirteenth Workshop on Innovative Use of NLP for Building Educational Applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "240--246", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tazin Afrin and Diane Litman. 2018. Annotation and classification of sentence-level revision improve- ment. In Proceedings of the Thirteenth Workshop on Innovative Use of NLP for Building Educational Ap- plications, pages 240-246.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Improving tree-LSTM with tree attention", |
|
"authors": [ |
|
{ |
|
"first": "Mahtab", |
|
"middle": [], |
|
"last": "Ahmed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Muhammad", |
|
"middle": [ |
|
"Rifayat" |
|
], |
|
"last": "Samee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Mercer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "2019 IEEE 13th International Conference on Semantic Computing (ICSC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "247--254", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mahtab Ahmed, Muhammad Rifayat Samee, and Robert E. Mercer. 2019. Improving tree-LSTM with tree attention. In 2019 IEEE 13th International Con- ference on Semantic Computing (ICSC), pages 247- 254. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "SentEval: An evaluation toolkit for universal sentence representations", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau and Douwe Kiela. 2018. SentEval: An evaluation toolkit for universal sentence representa- tions. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018).", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Supervised learning of universal sentence representations from natural language inference data", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "670--680", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1070" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, Douwe Kiela, Holger Schwenk, Lo\u00efc Barrault, and Antoine Bordes. 2017. Supervised learning of universal sentence representations from natural language inference data. In Proceedings of the 2017 Conference on Empirical Methods in Nat- ural Language Processing, pages 670-680, Copen- hagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Building contextaware clause representations for situation entity type classification", |
|
"authors": [ |
|
{ |
|
"first": "Zeyu", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruihong", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3305--3315", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zeyu Dai and Ruihong Huang. 2018. Building context- aware clause representations for situation entity type classification. In Proceedings of the 2018 Confer- ence on Empirical Methods in Natural Language Processing, pages 3305-3315.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Evaluating text segmentation using boundary edit distance", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Fournier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1702--1712", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Fournier. 2013. Evaluating text segmentation us- ing boundary edit distance. In Proceedings of the 51st Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 1702-1712.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Representation learning for text-level discourse parsing", |
|
"authors": [ |
|
{ |
|
"first": "Yangfeng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "13--24", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P14-1002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yangfeng Ji and Jacob Eisenstein. 2014. Representa- tion learning for text-level discourse parsing. In Pro- ceedings of the 52nd Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), pages 13-24, Baltimore, Maryland. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "One vector is not enough: Entity-augmented distributed semantics for discourse relations", |
|
"authors": [ |
|
{ |
|
"first": "Yangfeng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "329--344", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yangfeng Ji and Jacob Eisenstein. 2015. One vector is not enough: Entity-augmented distributed semantics for discourse relations. Transactions of the Associa- tion for Computational Linguistics, 3:329-344.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Machine-aided annotation for finegrained proposition types in argumentation", |
|
"authors": [ |
|
{ |
|
"first": "Yohan", |
|
"middle": [], |
|
"last": "Jo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elijah", |
|
"middle": [], |
|
"last": "Mayfield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Reed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1008--1018", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yohan Jo, Elijah Mayfield, Chris Reed, and Eduard Hovy. 2020. Machine-aided annotation for fine- grained proposition types in argumentation. In Pro- ceedings of The 12th Language Resources and Eval- uation Conference, pages 1008-1018.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Content selection in deep learning models of summarization", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Kedzie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [], |
|
"last": "Mckeown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iii", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1818--1828", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1208" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Kedzie, Kathleen McKeown, and Hal Daum\u00e9 III. 2018. Content selection in deep learning models of summarization. In Proceedings of the 2018 Con- ference on Empirical Methods in Natural Language Processing, pages 1818-1828, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Convolutional neural networks for sentence classification", |
|
"authors": [ |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1746--1751", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoon Kim. 2014. Convolutional neural networks for sentence classification. In Proceedings of the 2014 Conference on Empirical Methods in Natural Lan- guage Processing (EMNLP), pages 1746-1751.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Semisupervised classification with graph convolutional networks", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Kipf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Welling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Conference on Learning Representations (ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas N. Kipf and Max Welling. 2017. Semi- supervised classification with graph convolutional networks. In International Conference on Learning Representations (ICLR).", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Tracing the development of argumentive writing in a discourse-rich context", |
|
"authors": [ |
|
{ |
|
"first": "Deanna", |
|
"middle": [], |
|
"last": "Kuhn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Hemberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Valerie", |
|
"middle": [], |
|
"last": "Khait", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Written Communication", |
|
"volume": "33", |
|
"issue": "1", |
|
"pages": "92--121", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deanna Kuhn, Laura Hemberger, and Valerie Khait. 2016. Tracing the development of argumentive writ- ing in a discourse-rich context. Written Communica- tion, 33(1):92-121.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "An empirical study of span representations in argumentation structure parsing", |
|
"authors": [ |
|
{ |
|
"first": "Tatsuki", |
|
"middle": [], |
|
"last": "Kuribayashi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroki", |
|
"middle": [], |
|
"last": "Ouchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naoya", |
|
"middle": [], |
|
"last": "Inoue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Reisert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toshinori", |
|
"middle": [], |
|
"last": "Miyoshi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4691--4698", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tatsuki Kuribayashi, Hiroki Ouchi, Naoya Inoue, Paul Reisert, Toshinori Miyoshi, Jun Suzuki, and Kentaro Inui. 2019. An empirical study of span representa- tions in argumentation structure parsing. In Proceed- ings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4691-4698.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Multi-relational script learning for discourse relations", |
|
"authors": [ |
|
{ |
|
"first": "I-Ta", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Goldwasser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4214--4226", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1413" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "I-Ta Lee and Dan Goldwasser. 2019. Multi-relational script learning for discourse relations. In Proceed- ings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4214-4226, Florence, Italy. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "SegBot: a generic neural text segmentation model with pointer network", |
|
"authors": [ |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aixin", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shafiq", |
|
"middle": [], |
|
"last": "Joty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Joint Conference on Artificial Intelligence (IJCAI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4166--4172", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jing Li, Aixin Sun, and Shafiq Joty. 2018. SegBot: a generic neural text segmentation model with pointer network. In Proceedings of the 27th International Joint Conference on Artificial Intelligence (IJCAI), pages 4166-4172.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "When are tree structures necessary for deep learning of representations?", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2304--2314", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Minh-Thang Luong, Dan Jurafsky, and Ed- uard Hovy. 2015. When are tree structures necessary for deep learning of representations? In Proceed- ings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 2304-2314.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Automatic prediction of discourse connectives", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Malmi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniele", |
|
"middle": [], |
|
"last": "Pighin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Krause", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mikhail", |
|
"middle": [], |
|
"last": "Kozhevnikov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Malmi, Daniele Pighin, Sebastian Krause, and Mikhail Kozhevnikov. 2018. Automatic prediction of discourse connectives. In Proceedings of the Eleventh International Conference on Language Re- sources and Evaluation (LREC 2018).", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Encoding sentences with graph convolutional networks for semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "Diego", |
|
"middle": [], |
|
"last": "Marcheggiani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1506--1515", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diego Marcheggiani and Ivan Titov. 2017. Encoding sentences with graph convolutional networks for se- mantic role labeling. In Proceedings of the 2017 Conference on Empirical Methods in Natural Lan- guage Processing (EMNLP), pages 1506-1515.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "The impact of connectives on the memory for expository texts", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Keith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Millis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Arthur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karl", |
|
"middle": [], |
|
"last": "Graesser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Haberlandt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Applied Cognitive Psychology", |
|
"volume": "7", |
|
"issue": "4", |
|
"pages": "317--339", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keith K Millis, Arthur C Graesser, and Karl Haberlandt. 1993. The impact of connectives on the memory for expository texts. Applied Cognitive Psychology, 7(4):317-339.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Split and rephrase", |
|
"authors": [ |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Gardent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shay", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anastasia", |
|
"middle": [], |
|
"last": "Shimorina", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "EMNLP 2017: Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "617--627", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shashi Narayan, Claire Gardent, Shay Cohen, and Anastasia Shimorina. 2017. Split and rephrase. In EMNLP 2017: Conference on Empirical Methods in Natural Language Processing, pages 617-627.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Instant feedback for increasing the presence of solutions in peer reviews", |
|
"authors": [ |
|
{ |
|
"first": "Huy", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenting", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diane", |
|
"middle": [], |
|
"last": "Litman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6--10", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huy Nguyen, Wenting Xiong, and Diane Litman. 2016. Instant feedback for increasing the presence of solu- tions in peer reviews. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demon- strations, pages 6-10.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "DisSent: Learning sentence representations from explicit discourse relations", |
|
"authors": [ |
|
{ |
|
"first": "Allen", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erin", |
|
"middle": [], |
|
"last": "Bennett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4497--4510", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Allen Nie, Erin Bennett, and Noah Goodman. 2019. DisSent: Learning sentence representations from ex- plicit discourse relations. In Proceedings of the 57th Annual Meeting of the Association for Compu- tational Linguistics, pages 4497-4510.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Probing neural network comprehension of natural language arguments", |
|
"authors": [ |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Niven", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hung-Yu", |
|
"middle": [], |
|
"last": "Kao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4658--4664", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Timothy Niven and Hung-Yu Kao. 2019. Probing neu- ral network comprehension of natural language ar- guments. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 4658-4664.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Identifying appropriate support for propositions in online user comments", |
|
"authors": [ |
|
{ |
|
"first": "Joonsuk", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the first workshop on argumentation mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "29--38", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joonsuk Park and Claire Cardie. 2014. Identifying appropriate support for propositions in online user comments. In Proceedings of the first workshop on argumentation mining, pages 29-38.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. Glove: Global vectors for word rep- resentation. In Proceedings of the 2014 conference on empirical methods in natural language process- ing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Assessing text-based writing of low-skilled college students", |
|
"authors": [ |
|
{ |
|
"first": "Dolores", |
|
"middle": [], |
|
"last": "Perin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Lauterbach", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Journal of Artificial Intelligence in Education", |
|
"volume": "28", |
|
"issue": "1", |
|
"pages": "56--78", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dolores Perin and Mark Lauterbach. 2018. Assessing text-based writing of low-skilled college students. International Journal of Artificial Intelligence in Ed- ucation, 28(1):56-78.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Ran Zhang, and Afsaneh Fazly. 2020. How coherent are neural models of coherence?", |
|
"authors": [ |
|
{ |
|
"first": "Leila", |
|
"middle": [], |
|
"last": "Pishdad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Federico", |
|
"middle": [], |
|
"last": "Fancellu", |
|
"suffix": "" |
|
},
{
"first": "Ran",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Afsaneh",
"middle": [],
"last": "Fazly",
"suffix": ""
}
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6126--6138", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leila Pishdad, Federico Fancellu, Ran Zhang, and Af- saneh Fazly. 2020. How coherent are neural mod- els of coherence? In Proceedings of the 28th Inter- national Conference on Computational Linguistics, pages 6126-6138.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "The penn discourse treebank 2.0", |
|
"authors": [ |
|
{ |
|
"first": "Rashmi", |
|
"middle": [], |
|
"last": "Prasad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Dinesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eleni", |
|
"middle": [], |
|
"last": "Miltsakaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Livio", |
|
"middle": [], |
|
"last": "Robaldo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Aravind", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Webber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rashmi Prasad, Nikhil Dinesh, Alan Lee, Eleni Milt- sakaki, Livio Robaldo, Aravind K Joshi, and Bon- nie L Webber. 2008. The penn discourse treebank 2.0. In LREC. Citeseer.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "A systematic study of neural discourse models for implicit discourse relation", |
|
"authors": [ |
|
{ |
|
"first": "Attapol", |
|
"middle": [], |
|
"last": "Rutherford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vera", |
|
"middle": [], |
|
"last": "Demberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "281--291", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Attapol Rutherford, Vera Demberg, and Nianwen Xue. 2017. A systematic study of neural discourse mod- els for implicit discourse relation. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 1, Long Papers, pages 281-291.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Improving the inference of implicit discourse relations via classifying explicit discourse connectives", |
|
"authors": [ |
|
{ |
|
"first": "Attapol", |
|
"middle": [], |
|
"last": "Rutherford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "799--808", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Attapol Rutherford and Nianwen Xue. 2015. Improv- ing the inference of implicit discourse relations via classifying explicit discourse connectives. In Pro- ceedings of the 2015 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 799-808.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Enhanced English universal dependencies: An improved representation for natural language understanding tasks", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2371--2378", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Schuster and Christopher D Manning. 2016. Enhanced English universal dependencies: An im- proved representation for natural language under- standing tasks. In Proceedings of the Tenth Interna- tional Conference on Language Resources and Eval- uation (LREC'16), pages 2371-2378.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Mining discourse markers for unsupervised sentence representation learning", |
|
"authors": [ |
|
{ |
|
"first": "Damien", |
|
"middle": [], |
|
"last": "Sileo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Van De Cruys", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Camille", |
|
"middle": [], |
|
"last": "Pradel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philippe", |
|
"middle": [], |
|
"last": "Muller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "3477--3486", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1351" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Damien Sileo, Tim Van De Cruys, Camille Pradel, and Philippe Muller. 2019. Mining discourse mark- ers for unsupervised sentence representation learn- ing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 3477-3486, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Improved semantic representations from tree-structured long short-term memory networks", |
|
"authors": [ |
|
{ |
|
"first": "Kai Sheng", |
|
"middle": [], |
|
"last": "Tai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1556--1566", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kai Sheng Tai, Richard Socher, and Christopher D Manning. 2015. Improved semantic representations from tree-structured long short-term memory net- works. In Proceedings of the 53rd Annual Meet- ing of the Association for Computational Linguistics and the 7th International Joint Conference on Natu- ral Language Processing (Volume 1: Long Papers), pages 1556-1566.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "An introduction to syntax", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Robert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Van Valin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert D. Van Valin. 2001. An introduction to syntax. Cambridge University Press.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Graph attention networks", |
|
"authors": [ |
|
{ |
|
"first": "Petar", |
|
"middle": [], |
|
"last": "Velickovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillem", |
|
"middle": [], |
|
"last": "Cucurull", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arantxa", |
|
"middle": [], |
|
"last": "Casanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adriana", |
|
"middle": [], |
|
"last": "Romero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Li\u00f3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Sixth International Conference on Learning Representations (ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Petar Velickovi\u0107, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Li\u00f3, and Yoshua Bengio. 2018. Graph attention networks. In Sixth Inter- national Conference on Learning Representations (ICLR).", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "A two-stage parsing method for text-level discourse analysis", |
|
"authors": [ |
|
{ |
|
"first": "Yizhong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sujian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "184--188", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yizhong Wang, Sujian Li, and Houfeng Wang. 2017. A two-stage parsing method for text-level discourse analysis. In Proceedings of the 55th Annual Meet- ing of the Association for Computational Linguistics (Volume 2: Short Papers), pages 184-188.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Toward fast and accurate neural discourse segmentation", |
|
"authors": [ |
|
{ |
|
"first": "Yizhong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sujian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfeng", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "962--967", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yizhong Wang, Sujian Li, and Jingfeng Yang. 2018. Toward fast and accurate neural discourse segmen- tation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 962-967.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Anchoring a lexicalized tree adjoining grammar for discourse", |
|
"authors": [ |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Webber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "ACL/COLING Workshop on Discourse Relations and Discourse Markers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "86--92", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bonnie Webber and Aravind Joshi. 1998. Anchoring a lexicalized tree adjoining grammar for discourse. In ACL/COLING Workshop on Discourse Relations and Discourse Markers, pages 86-92.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Aligning packed dependency trees: a theory of composition for distributional semantics", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Weir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julie", |
|
"middle": [], |
|
"last": "Weeds", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Reffin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Kober", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Computational Linguistics", |
|
"volume": "42", |
|
"issue": "4", |
|
"pages": "727--761", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Weir, Julie Weeds, Jeremy Reffin, and Thomas Kober. 2016. Aligning packed dependency trees: a theory of composition for distributional semantics. Computational Linguistics, 42(4):727-761.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Lexicosyntactic inference in neural models", |
|
"authors": [ |
|
{ |
|
"first": "Aaron", |
|
"middle": [], |
|
"last": "Steven White", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Rawlins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4717--4724", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aaron Steven White, Rachel Rudinger, Kyle Rawlins, and Benjamin Van Durme. 2018. Lexicosyntactic inference in neural models. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4717-4724.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Opinion holder and target extraction for verb-based opinion predicates-the problem is not solved", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Wiegand", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Schulder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Ruppenhofer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 6th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "148--155", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Wiegand, Marc Schulder, and Josef Ruppen- hofer. 2015. Opinion holder and target extraction for verb-based opinion predicates-the problem is not solved. In Proceedings of the 6th Workshop on Computational Approaches to Subjectivity, Sen- timent and Social Media Analysis, pages 148-155.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Classifying relations via long short term memory networks along shortest dependency paths", |
|
"authors": [ |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lili", |
|
"middle": [], |
|
"last": "Mou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ge", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunchuan", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhi", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 conference on empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1785--1794", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yan Xu, Lili Mou, Ge Li, Yunchuan Chen, Hao Peng, and Zhi Jin. 2015. Classifying relations via long short term memory networks along shortest depen- dency paths. In Proceedings of the 2015 conference on empirical methods in natural language process- ing, pages 1785-1794.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "A dependency-anchor graph for a clause (top right)", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "Overall architecture of DAnCE.", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"text": "Original sentences from DeSSE with intrasentential connectives, where the clause preceding the connective contains a relative clause (example 1), or a clausal argument of the main verb (example 2).", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"text": "Example annotation from DeSSE. Annotators first split a sentence into segments (underlined text), then rewrite the segments into complete sentences, omitting connectives.", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"text": "Breakdown of F1 scores on DeSSE 5 (top) and DeSSE 8 (bottom) from DAnCE-FA, DisSent and Tree-Attn.", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF5": { |
|
"num": null, |
|
"text": "Connective distributions in DeSSE with threshold 1%Here we present a detailed statistics of connective distributions on DeSSE and Book-Simpl.Figure 7 presents the connectives from DeSSE with distribution above 1%. Among 68 connectives, there are thirteen connectives above the threshold and the rest are low frequency connectives. Table 6 shows the Book-Simpl connective distribution compared to Book corpus. As illustrated, the Book-Simpl shares the same distribution as Book corpus on both Book-Simpl 5 and 8.B Annotation instruction in DeSSEHere we present the instructions for annotators, as shown byFigure 8.", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
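The 1% cut is a simple relative-frequency threshold over connective counts, as in the sketch below (the counts are made up for illustration).

```python
# Sketch of the 1% relative-frequency threshold over connective counts
# (hypothetical counts; DeSSE's actual inventory has 68 connectives).
from collections import Counter

counts = Counter({"and": 5200, "but": 2100, "because": 900, "so": 650,
                  "although": 90, "whereas": 12})
total = sum(counts.values())

frequent = {c: n / total for c, n in counts.items() if n / total > 0.01}
rare = [c for c in counts if c not in frequent]
print(sorted(frequent))  # connectives above the 1% threshold
print(sorted(rare))      # low-frequency connectives
```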
|
"FIGREF6": { |
|
"num": null, |
|
"text": "Instruction for DeSSE annotation", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF7": { |
|
"num": null, |
|
"text": "Interface of splitting the sentence Figure 10: Interface of rewriting the segments from Figure 9 into complete sentences", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "Descriptive statistics comparing Book-Simpl and DeSSE, including the number of clause pairs (Size), average clause length, and vocabulary size." |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Group</td><td>Model</td><td>Book-Simpl 5</td><td/><td>Book-Simpl 8</td><td/><td>DeSSE 5</td><td/><td>DeSSE 8</td></tr><tr><td/><td/><td>Acc. (\u03c3)</td><td>F1</td><td>Acc. (\u03c3)</td><td>F1</td><td>Acc. (\u03c3)</td><td>F1</td><td>Acc. (\u03c3)</td><td>F1</td></tr><tr><td colspan=\"7\">BoW 61.89 (1Tree CNN Tree 67.95 (1.10) 59.67 59.69 (1.58) 45.71 20.35 (0.74)</td><td colspan=\"2\">9.84 16.63 (0.77)</td><td>9.29</td></tr><tr><td colspan=\"9\">LSTM 69.08 (0Models Tr-Attn GA 71.51 (1.45) 59.93 65.38 (1.57) 50.28 53.48 (0.46) 14.73 12.01 (0.48)</td><td>9.16</td></tr></table>", |
|
"text": ".64) 49.70 46.31 (1.36) 30.62 53.57 (0.27) 17.70 42.95 (0.22) 9.18 SeqLSTM DisSent 68.58 (1.55) 58.78 62.92 (1.39) 48.11 48.93 (0.31) 25.27 39.86 (0.30) 15.91 .82) 62.30 63.48 (1.40) 49.06 18.51 (0.10) 8.68 17.95 (0.72) 12.01 DAnCE FA 71.83 (0.45) 63.59 65.60 (0.55) 51.26 52.64 (0.38) 22.29 41.75 (0.25) 13.88" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Performance of baselines and our models on Book-Simpl 5 (N=16,538), Book-Simpl 8 (N=18,946), DeSSE 5 (</td></tr></table>", |
|
"text": "" |
|
}, |
|
"TABREF5": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>compare the observed student's usage (Obs) with the model</td></tr><tr><td>prediction (Pred) from DAnCE-FA trained on Book Simpl, and</td></tr><tr><td>sorted each pattern of observation and prediction by frequency.</td></tr><tr><td>The five most frequent of these patterns are shown here for</td></tr><tr><td>DeSSE 5 and DeSSE 8.</td></tr></table>", |
|
"text": "For all pairs of sentences in the DeSSE test sets, we" |
|
}, |
|
"TABREF6": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Student</td><td>and</td><td>DAnCE-FA because</td></tr></table>", |
|
"text": "Clause.1 He said he grew up as a Christian. Clause.2 He then converted to Islam. Student and DAnCE-FA but Clause.1 He trusted his faith. Clause.2 It helped him move on." |
|
}, |
|
"TABREF7": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "Example pairs of clauses from DeSSE 5, showing the connective used by the student alongside the prediction from DAnCE-FA trained on Book Simpl 5." |
|
} |
|
} |
|
} |
|
} |