|
{ |
|
"paper_id": "N18-1018", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:52:57.740009Z" |
|
}, |
|
"title": "Query and Output: Generating Words by Querying Distributed Word Representations for Paraphrase Generation", |
|
"authors": [ |
|
{ |
|
"first": "Shuming", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "MOE Key Lab of Computational Linguistics", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "shumingma@pku.edu.cn" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "MOE Key Lab of Computational Linguistics", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "xusun@pku.edu.cn" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "MOE Key Lab of Computational Linguistics", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sujian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "lisujian@pku.edu.cn" |
|
}, |
|
{ |
|
"first": "Wenjie", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "MOE Key Lab of Computational Linguistics", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Xuancheng", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "MOE Key Lab of Computational Linguistics", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Most recent approaches use the sequenceto-sequence model for paraphrase generation. The existing sequence-to-sequence model tends to memorize the words and the patterns in the training dataset instead of learning the meaning of the words. Therefore, the generated sentences are often grammatically correct but semantically improper. In this work, we introduce a novel model based on the encoder-decoder framework, called Word Embedding Attention Network (WEAN). Our proposed model generates the words by querying distributed word representations (i.e. neural word embeddings), hoping to capturing the meaning of the according words. Following previous work, we evaluate our model on two paraphrase-oriented tasks, namely text simplification and short text abstractive summarization. Experimental results show that our model outperforms the sequence-to-sequence baseline by the BLEU score of 6.3 and 5.5 on two English text simplification datasets, and the ROUGE-2 F1 score of 5.7 on a Chinese summarization dataset. Moreover, our model achieves state-of-the-art performances on these three benchmark datasets. 1", |
|
"pdf_parse": { |
|
"paper_id": "N18-1018", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Most recent approaches use the sequenceto-sequence model for paraphrase generation. The existing sequence-to-sequence model tends to memorize the words and the patterns in the training dataset instead of learning the meaning of the words. Therefore, the generated sentences are often grammatically correct but semantically improper. In this work, we introduce a novel model based on the encoder-decoder framework, called Word Embedding Attention Network (WEAN). Our proposed model generates the words by querying distributed word representations (i.e. neural word embeddings), hoping to capturing the meaning of the according words. Following previous work, we evaluate our model on two paraphrase-oriented tasks, namely text simplification and short text abstractive summarization. Experimental results show that our model outperforms the sequence-to-sequence baseline by the BLEU score of 6.3 and 5.5 on two English text simplification datasets, and the ROUGE-2 F1 score of 5.7 on a Chinese summarization dataset. Moreover, our model achieves state-of-the-art performances on these three benchmark datasets. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Paraphrase is a restatement of the meaning of a text using other words. Many natural language generation tasks are paraphrase-orientated, such as text simplification and short text summarization. Text simplification is to make the text easier to read and understand, especially for poor readers, while short text summarization is to generate a brief sentence to describe the short texts (e.g. posts on the social media). Most recent approaches use sequence-to-sequence model for paraphrase generation (Prakash et al., 2016; Cao et al., 2017) . It compresses the source text information into dense vectors with the neural encoder, and the neural decoder generates the target text using the compressed vectors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 501, |
|
"end": 523, |
|
"text": "(Prakash et al., 2016;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 524, |
|
"end": 541, |
|
"text": "Cao et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Although neural network models achieve success in paraphrase generation, there are still two major problems. One of the problem is that the existing sequence-to-sequence model tends to memorize the words and the patterns in the training dataset instead of the meaning of the words. The main reason is that the word generator (i.e. the output layer of the decoder) does not model the semantic information. The word generator, which consists of a linear transformation and a softmax operation, converts the Recurrent Neural Network (RNN) output from a small dimension (e.g. 500) to a much larger dimension (e.g. 50,000 words in the vocabulary), where each dimension represents the score of each word. The latent assumption of the word generator is that each word is independent and the score is irrelevant to each other. Therefore, the scores of a word and its synonyms may be of great difference, which means the word generator learns the word itself rather than the relationship between words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The other problem is that the word generator has a huge number of parameters. Suppose we have a sequence-to-sequence model with a hidden size of 500 and a vocabulary size of 50,000. The word generator has up to 25 million parameters, which is even larger than other parts of the encoder-decoder model in total. The huge size of parameters will result in slow convergence, because there are a lot of parameters to be learned. Moreover, under the distributed framework, the more parameters a model has, the more bandwidth and memory it consumes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
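To make the size of this output layer concrete, here is a short arithmetic sketch using the sizes quoted above (hidden size 500, vocabulary size 50,000); the variable names are only illustrative.

```python
# Parameter count of a standard softmax word generator, using the sizes
# quoted in the text (hidden size 500, vocabulary size 50,000).
hidden_size = 500
vocab_size = 50_000

# The output layer is a linear map from the RNN state to vocabulary logits.
generator_params = hidden_size * vocab_size
print(generator_params)  # 25,000,000, matching the "25 million" figure
```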
|
{ |
|
"text": "To tackle both of the problems, we propose a novel model called Word Embedding Attention Network (WEAN). The word generator of WEAN is attention based, instead of the simple linear softmax operation. In our attention based word generator, the RNN output is a query, the candidate words are the values, and the corresponding word representations are the keys. In order to predict the word, the attention mechanism is used to select the value matching the query most, by means of querying the keys. In this way, our model generates the words according to the distributed word representations (i.e. neural word embeddings) in a retrieval style rather than the traditional generative style. Our model is able to capture the semantic meaning of a word by referring to its embedding. Besides, the attention mechanism has a much smaller number of parameters compared with the linear transformation directly from the RNN output space to the vocabulary space. The reduction of the parameters can increase the convergence rate and speed up the training process. Moreover, the word embedding is updated from three sources: the input of the encoder, the input of the decoder, and the query of the output layer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Following previous work (Cao et al., 2017) , we evaluate our model on two paraphrase-oriented tasks, namely text simplification and short text abstractive summarization. Experimental results show that our model outperforms the sequence-tosequence baseline by the BLEU score of 6.3 and 5.5 on two English text simplification datasets, and the ROUGE-2 F1 score of 5.7 on a Chinese summarization dataset. Moreover, our model achieves state-of-the-art performances on all of the benchmark datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 42, |
|
"text": "(Cao et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We propose a novel model based on the encoderdecoder framework, which generates the words by querying distributed word representations with the attention mechanism. In this section, we first present the overview of the model architecture. Then, we explain the details of the word generation, especially the way to query word embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Word Embedding Attention Network is based on the encoder-decoder framework, which consists of two components: a source text encoder, and a target text decoder. Figure 1 is an illustration of our model. Given the source texts, the encoder compresses the source texts into dense representation vectors, and the decoder generates the paraphrased texts. To predict a word, the decoder uses the hidden output to query the word embeddings. The word embeddings assess all the candidate words, and return the word whose embedding matches the query most. The selected word is emitted as the predicted token, and its embedding is then used as the input of the LSTM at the next time step. After the back propagation, the word embedding is updated from three sources: the input of the encoder, the input of the decoder, and the query of the output layer. We show the details of our WEAN in the following subsection.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 168, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Overview", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The goal of the source text encoder is to provide a series of dense representation of complex source texts for the decoder. In our model, the source text encoder is a Long Short-term Memory Network (LSTM), which produces the dense representation {h 1 , h 2 , ..., h N } from the source text", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoder and Decoder", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "{x 1 , x 2 , ..., x N }:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoder and Decoder", |
|
"sec_num": "2.2" |
|
}, |
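A minimal sketch of such an encoder, assuming PyTorch; the layer sizes and the toy input are illustrative placeholders, not the paper's released code.

```python
import torch
import torch.nn as nn

embed_size, hidden_size, vocab_size = 256, 256, 50_000
embedding = nn.Embedding(vocab_size, embed_size)
encoder = nn.LSTM(embed_size, hidden_size, num_layers=2, batch_first=True)

# x: a batch of source token ids {x_1, ..., x_N}
x = torch.randint(0, vocab_size, (1, 10))   # (batch=1, N=10)
h, _ = encoder(embedding(x))                # h: (1, 10, hidden) = {h_1, ..., h_N}
```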
|
{ |
|
"text": "The goal of the target text decoder is to generate a series of paraphrased words from the dense representation of source texts. Fisrt, the LSTM of the decoder compute the dense representation of generated words s t . Then, the dense representations are fed into an attention layer to generate the context vector c t , which captures context information of source texts. Attention vector c t is calculated by the weighted sum of encoder hidden states:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoder and Decoder", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "c t = N i=1 \u03b1 ti h i (1) \u03b1 ti = e g(st,h i ) N j=1 e g(st,h j )", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Encoder and Decoder", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where g(s t , h i ) is an attentive score between the decoder hidden state s t and the encoder hidden state h i . In this way, c t and s t respectively represent the context information of source texts and the target texts at the t th time step.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoder and Decoder", |
|
"sec_num": "2.2" |
|
}, |
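A minimal sketch of Equations 1-2, assuming PyTorch and, for concreteness, a bilinear ("general") attentive score g(s_t, h_i) = s_t^T W_a h_i; the function name `attention_context` and the toy shapes are illustrative, not the authors' code.

```python
import torch

def attention_context(s_t, H, W_a):
    """Compute c_t = sum_i alpha_ti * h_i (Eq. 1-2).

    s_t: decoder state, shape (hidden,)
    H:   encoder states h_1..h_N, shape (N, hidden)
    W_a: parameter matrix of the bilinear score, shape (hidden, hidden)
    """
    scores = H @ (W_a @ s_t)              # g(s_t, h_i) for every i, shape (N,)
    alpha = torch.softmax(scores, dim=0)  # Eq. 2: normalized attention weights
    c_t = alpha @ H                       # Eq. 1: weighted sum of encoder states
    return c_t, alpha

# toy usage
N, hidden = 6, 8
H = torch.randn(N, hidden)
s_t = torch.randn(hidden)
W_a = torch.randn(hidden, hidden)
c_t, alpha = attention_context(s_t, H, W_a)
```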
|
{ |
|
"text": "For the current sequence-to-sequence model, the word generator computes the distribution of output words y t in a generative style: where W \u2208 R k\u00d7V is a trainable parameter matrix, k is hidden size, and V is the number of words in the vocabulary. When the vocabulary is large, the number of parameters will be huge. Our model generates the words in a retrieval style rather than the traditional generative style, by querying the word embeddings. We denote the combination of the source context vector c t and the target context vector s t as the query q t :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Generation by Querying Word Embedding", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(y t ) = sof tmax(W s t )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Word Generation by Querying Word Embedding", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "q t = tanh(W c [s t ; c t ])", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Word Generation by Querying Word Embedding", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The candidate words w i and their corresponding embeddings e i are paired as the key-value pairs {w i , e i }(i = 1, 2, ..., n), where n is the number of candidate words. We give the details of how to determine the set of candidate words in Section 2.4. Our model uses q t to query the key-value pairs {w i , e i }(i = 1, 2, ..., n) by evaluating the relevance between the query q t and each word vector e i with a score function f (q t , e i ). The query process can be regarded as the attentive selection of the word embeddings. We borrow the attention energy functions (Luong et al., 2015) as the relevance score function f (q t , e i ):", |
|
"cite_spans": [ |
|
{ |
|
"start": 572, |
|
"end": 592, |
|
"text": "(Luong et al., 2015)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Generation by Querying Word Embedding", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "f (q t , e i ) = \uf8f1 \uf8f4 \uf8f2 \uf8f4 \uf8f3 q T t e i dot q T t W a e i general v T tanh(W q q t + W e e i ) concat", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Word Generation by Querying Word Embedding", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where W q and W e are two trainable parameter matrices, and v T is a trainable parameter vector.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Generation by Querying Word Embedding", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "In implementation, we select the general attention function as the relevance score function, based on the performance on the validation sets. The keyvalue pair with the highest score {w t , e t } is selected. At the test stage, the decoder generates the key w t as the t th predicted word, and inputs the value e t to the LSTM unit at the t + 1 th time step. At the training stage, the scores are normalized as the word probability distribution:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Generation by Querying Word Embedding", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(y t ) = sof tmax(f (q t , e i ))", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Word Generation by Querying Word Embedding", |
|
"sec_num": "2.3" |
|
}, |
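A minimal sketch of the retrieval-style generator in Equations 4-6, assuming PyTorch and the "general" score function; the function name `wean_logits` and the tensor shapes are illustrative, and this is not the released implementation.

```python
import torch

def wean_logits(s_t, c_t, E, W_c, W_a):
    """Score every candidate word embedding against the query (Eq. 4-6).

    s_t: decoder state (hidden,);  c_t: context vector (hidden,)
    E:   candidate word embeddings e_1..e_n, shape (n, embed)
    W_c: (embed, 2*hidden);  W_a: (embed, embed)  -- 'general' score
    """
    q_t = torch.tanh(W_c @ torch.cat([s_t, c_t]))   # Eq. 4: the query
    scores = E @ (W_a.T @ q_t)                      # Eq. 5: f(q_t, e_i) = q_t^T W_a e_i
    return torch.softmax(scores, dim=0)             # Eq. 6: p(y_t) over the candidates

# At test time the highest-scoring word w_t is emitted and its embedding e_t
# is fed to the LSTM at step t+1; at training time the softmax above enters
# the cross-entropy loss.
```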
|
{ |
|
"text": "As described in Section 2.3, the model generates the words in a retrieval style, which selects a word according to its embedding from a set of candidate key-value pairs. We now give the details of how to obtain the set of candidate key-value pairs. We extract the vocabulary from the source text in the training set, and select the n most frequent words as the candidate words. We reuse the embeddings of the decoder inputs as the values of the candidate words, which means that the decoder input and the predicted output share the same vocabulary and word embeddings. Besides, we do not use any pretrained word embeddings in our model, so that all of the parameters are learned from scratch.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Selection of Candidate Key-value Pairs", |
|
"sec_num": "2.4" |
|
}, |
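A minimal sketch of the candidate selection described above, in plain Python; the helper name `build_candidates` and the toy corpus are illustrative.

```python
from collections import Counter

def build_candidates(training_source_texts, n):
    """Pick the n most frequent source-side words as the candidate keys."""
    counts = Counter(tok for sent in training_source_texts for tok in sent.split())
    return [word for word, _ in counts.most_common(n)]

# The values are simply rows of the decoder-input embedding matrix, so the
# decoder input and the predicted output share one vocabulary and one set of
# word embeddings, all learned from scratch (no pretrained vectors).
candidates = build_candidates(["the cat sat", "the dog sat"], n=3)
# candidates == ['the', 'sat', 'cat']  (ties broken by first occurrence)
```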
|
{ |
|
"text": "Although our generator is a retrieval style, WEAN is as differentiable as the sequence-to-sequence model. The objective of training is to minimize the cross entropy between the predicted word probability distribution and the golden one-hot distribution:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L = \u2212 i\u0177 i log p(y i )", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Training", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "We use Adam optimization method to train the model, with the default hyper-parameters: the learning rate \u03b1 = 0.001, and \u03b2 1 = 0.9, \u03b2 2 = 0.999, = 1e \u2212 8.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "2.5" |
|
}, |
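A minimal sketch of the objective (Equation 7) and the quoted Adam settings, assuming PyTorch; `wean_loss` is an illustrative helper, and `model` in the commented-out optimizer line is a placeholder.

```python
import torch
import torch.nn.functional as F

def wean_loss(p, gold):
    """Cross entropy between predicted distributions and one-hot gold words (Eq. 7).

    p:    predicted word distributions, shape (steps, n_candidates)
    gold: gold word indices, shape (steps,)
    """
    return F.nll_loss(torch.log(p.clamp_min(1e-12)), gold)

# Adam with the default hyper-parameters given in the text:
# optimizer = torch.optim.Adam(model.parameters(), lr=0.001,
#                              betas=(0.9, 0.999), eps=1e-8)
```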
|
{ |
|
"text": "Following the previous work (Cao et al., 2017) , we test our model on the following two paraphrase orientated tasks: text simplification and short text abstractive summarization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 46, |
|
"text": "(Cao et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The datasets are both from the alignments between English Wikipedia website 2 and Simple English Wikipedia website. 3 The Simple English Wikipedia is built for \"the children and adults who are learning the English language\", and the articles are composed with \"easy words and short sentences\". Therefore, Simple English Wikipedia is a natural public simplified text corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 116, |
|
"end": 117, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "\u2022 Hwang et al. (2015) . To build the corpus, they first align the complex-simple sentence pairs, score the semantic similarity between the complex sentence and the simple sentence, and classify each sentence pair as a good, good partial, partial, or bad match. Following the previous work (Nisioi et al., 2017) , we discard the unclassified matches, and use the good matches and partial matches with a scaled threshold greater than 0.45. The corpus contains about 150K good matches and 130K good partial matches. We use this corpus as the training set, and the dataset provided by Xu et al. (Xu et al., 2016) as the validation set and the test set. The validation set consists of 2,000 sentence pairs, and the test set contains 359 sentence pairs. Besides, each complex sentence is paired with 8 reference simplified sentences provided by Amazon Mechanical Turk workers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 21, |
|
"text": "Hwang et al. (2015)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 289, |
|
"end": 310, |
|
"text": "(Nisioi et al., 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 581, |
|
"end": 608, |
|
"text": "Xu et al. (Xu et al., 2016)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "Following the previous work (Nisioi et al., 2017; Hu et al., 2015) , we evaluate our model with different metrics on two tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 49, |
|
"text": "(Nisioi et al., 2017;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 50, |
|
"end": 66, |
|
"text": "Hu et al., 2015)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "\u2022 Automatic evaluation. We use the BLEU score (Papineni et al., 2002) as the automatic evaluation metric. BLEU is a widely used metric for machine translation and text simplification, which measures the agreement between the model outputs and the gold references. The references can be either single or multiple. In our experiments, the references are single on PWKP, and multiple on EW-SEW.", |
|
"cite_spans": [ |
|
{ |
|
"start": 46, |
|
"end": 69, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "3.1.2" |
|
}, |
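A minimal sketch of corpus-level BLEU with single or multiple references, assuming NLTK's `corpus_bleu` is available; the toy hypotheses, references, and bigram weights are illustrative, not the paper's exact scoring script.

```python
from nltk.translate.bleu_score import corpus_bleu

# Each hypothesis is paired with a list of references: a single reference
# per sentence on PWKP, up to eight references on EW-SEW.
hypotheses = ["the cat sat on the mat".split()]
references = [["the cat is on the mat".split(),
               "there is a cat on the mat".split()]]

# Bigram BLEU on this toy pair; the real evaluation uses the standard setup.
print(corpus_bleu(references, hypotheses, weights=(0.5, 0.5)))
```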
|
{ |
|
"text": "\u2022 Human evaluation. Human evaluation is essential to evaluate the quality of the model outputs. Following Nisioi et al. (2017) and Zhang et al. (2017) , we ask the human raters to rate the simplified text in three dimensions: Fluency, Adequacy and Simplicity. Fluency assesses whether the outputs are grammatically right and well formed. Adequacy represents the meaning preservation of the simplified text. Both the scores of fluency and adequacy range from 1 to 5 (1 is very bad and 5 is very good). Simplicity shows how simpler the model outputs are than the source text, which ranges from 1 to 5.", |
|
"cite_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 126, |
|
"text": "Nisioi et al. (2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 131, |
|
"end": 150, |
|
"text": "Zhang et al. (2017)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "Our proposed model is based on the encoderdecoder framework. The encoder is implemented on LSTM, and the decoder is based on LSTM with Luong style attention (Luong et al., 2015) . We PWKP BLEU PBMT (Wubben et al., 2012) 46.31 Hybrid (Narayan and Gardent, 2014) 53.94 EncDecA (Zhang and Lapata, 2017) 47.93 DRESS (Zhang and Lapata, 2017) 34.53 DRESS-LS (Zhang and Lapata, 2017) EW-SEW BLEU PBMT-R (Wubben et al., 2012) 67.79 Hybrid (Narayan and Gardent, 2014) 48.97 SBMT-SARI (Xu et al., 2016) 73.62 NTS (Nisioi et al., 2017) 84.70 NTS-w2v (Nisioi et al., 2017) 87.50 EncDecA (Zhang and Lapata, 2017) 88.85 DRESS (Zhang and Lapata, 2017) 77.18 DRESS-LS (Zhang and Lapata, 2017) 80.12 Seq2seq (our implementation)", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 177, |
|
"text": "(Luong et al., 2015)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 198, |
|
"end": 219, |
|
"text": "(Wubben et al., 2012)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 233, |
|
"end": 260, |
|
"text": "(Narayan and Gardent, 2014)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 275, |
|
"end": 299, |
|
"text": "(Zhang and Lapata, 2017)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 312, |
|
"end": 336, |
|
"text": "(Zhang and Lapata, 2017)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 376, |
|
"text": "(Zhang and Lapata, 2017)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 389, |
|
"end": 417, |
|
"text": "PBMT-R (Wubben et al., 2012)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 431, |
|
"end": 458, |
|
"text": "(Narayan and Gardent, 2014)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 475, |
|
"end": 492, |
|
"text": "(Xu et al., 2016)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 503, |
|
"end": 524, |
|
"text": "(Nisioi et al., 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 539, |
|
"end": 560, |
|
"text": "(Nisioi et al., 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 575, |
|
"end": 599, |
|
"text": "(Zhang and Lapata, 2017)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 612, |
|
"end": 636, |
|
"text": "(Zhang and Lapata, 2017)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 652, |
|
"end": 676, |
|
"text": "(Zhang and Lapata, 2017)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Settings", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "88.97 WEAN (our proposal) 94.45 tune our hyper-parameter on the development set. The model has two LSTM layers. The hidden size of LSTM is 256, and the embedding size is 256. We use Adam optimizer (Kingma and Ba, 2014) to learn the parameters, and the batch size is set to be 64. We set the dropout rate (Srivastava et al., 2014) to be 0.4. All of the gradients are clipped when the norm exceeds 5.", |
|
"cite_spans": [ |
|
{ |
|
"start": 304, |
|
"end": 329, |
|
"text": "(Srivastava et al., 2014)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Settings", |
|
"sec_num": "3.1.3" |
|
}, |
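The settings listed above, gathered into a plain Python dictionary for reference; the key names are illustrative, not a released configuration file.

```python
simplification_config = {
    "lstm_layers": 2,
    "hidden_size": 256,
    "embedding_size": 256,
    "optimizer": "Adam",          # Kingma and Ba (2014)
    "batch_size": 64,
    "dropout": 0.4,               # Srivastava et al. (2014)
    "gradient_clip_norm": 5.0,    # clip gradients when the norm exceeds 5
}
```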
|
{ |
|
"text": "We compare our model with several neural text simplification systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.1.4" |
|
}, |
|
{ |
|
"text": "\u2022 Seq2seq is our implementation of the sequence-to-sequence model with attention mechanism, which is the most popular neural model for text generation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.1.4" |
|
}, |
|
{ |
|
"text": "\u2022 NTS and NTS-w2v (Nisioi et al., 2017) are two sequence-to-sequence model with extra mechanism like prediction ranking, and NTS-w2v uses a pretrain word2vec.", |
|
"cite_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 39, |
|
"text": "(Nisioi et al., 2017)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.1.4" |
|
}, |
|
{ |
|
"text": "\u2022 DRESS and DRESS-LS (Zhang and Lapata, 2017) sentence simplification models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 45, |
|
"text": "(Zhang and Lapata, 2017)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.1.4" |
|
}, |
|
{ |
|
"text": "\u2022 EncDecA is a model based on the encoderdecoder with attention, implemented by Zhang and Lapata (2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 80, |
|
"end": 103, |
|
"text": "Zhang and Lapata (2017)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.1.4" |
|
}, |
|
{ |
|
"text": "\u2022 PBMT-R (Wubben et al., 2012) is a phrase based machine translation model which reranks the outputs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.1.4" |
|
}, |
|
{ |
|
"text": "\u2022 Hybrid (Narayan and Gardent, 2014) is a hybrid approach which combines deep semantics and mono-lingual machine translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.1.4" |
|
}, |
|
{ |
|
"text": "\u2022 SBMT-SARI (Xu et al., 2016 ) is a syntaxbased machine translation model which is trained on PPDB dataset (Ganitkevitch et al., 2013) and tuned with SARI.", |
|
"cite_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 28, |
|
"text": "(Xu et al., 2016", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 107, |
|
"end": 134, |
|
"text": "(Ganitkevitch et al., 2013)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.1.4" |
|
}, |
|
{ |
|
"text": "We compare WEAN with state-of-the-art models for text simplification. Table 1 and Table 2 summarize the results of the automatic evaluation. On PWKP dataset, we compare WEAN with PBMT, Hybrid, EncDecA, DRESS and DRESS-LS. WEAN achieves a BLEU score of 54.54, outperforming all of the previous systems. On EW-SEW dataset, we compare WEAN with PBMT-R, Hybrid, SBMT-SARI, and the neural models described above. We do not find any public release code of PBMT-R and SBMT-SARI. Fortunately, Xu et al. (2016) provides the predictions of PBMT-R and SBMT-SARI on EW-SEW test set, so that we can compare our model with these systems.", |
|
"cite_spans": [ |
|
{ |
|
"start": 485, |
|
"end": 501, |
|
"text": "Xu et al. (2016)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 89, |
|
"text": "Table 1 and Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.1.5" |
|
}, |
|
{ |
|
"text": "R-1 R-2 R-L RNN-W (Hu et al., 2015) 17.7 8.5 15.8 RNN (Hu et al., 2015) 21.5 8.9 18.6 RNN-cont-W (Hu et al., 2015) 26.8 16.1 24.1 RNN-cont (Hu et al., 2015) 29.9 17.4 27.2 SRB 33.3 20.0 30.1 CopyNet-W (Gu et al., 2016) 35.0 22.3 32.0 CopyNet (Gu et al., 2016) 34.4 21.6 31.3 RNN-dist 35.2 22.6 32.5 DRGD 37 It shows that the neural models have better performance in BLEU, and WEAN achieves the best BLEU score with 94.45. We perform the human evaluation of WEAN and other related systems, and the results are shown in Table 3 . DRESS-LS is based on the reinforcement learning, and it encourages the fluency, simplicity and relevance of the outputs. Therefore, it achieves a high score in our human evaluation. WEAN gains a even better score than DRESS-LS. Besides, WEAN generates more adequate and simpler outputs than the reference on PWKP. The predictions of SBMT-SARI are the most adequate among the compared systems on EW-SEW. In general, WEAN outperforms all of the other systems, considering the balance of fluency, adequate and simplicity. We conduct significance tests based on t-test. The significance tests suggest that WEAN has a very significant improvement over baseline, with p \u2264 0.001 over DRESS-LS in all of the dimension on PWKP, p \u2264 0.05 over DRESS-LS in the dimension of fluency, p \u2264 0.005 over NTS-w2v in the dimension of simplicity and p \u2264 0.005 over DRESS-LS in the dimension of all.", |
|
"cite_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 35, |
|
"text": "(Hu et al., 2015)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 54, |
|
"end": 71, |
|
"text": "(Hu et al., 2015)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 97, |
|
"end": 114, |
|
"text": "(Hu et al., 2015)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 139, |
|
"end": 156, |
|
"text": "(Hu et al., 2015)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 201, |
|
"end": 218, |
|
"text": "(Gu et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 242, |
|
"end": 259, |
|
"text": "(Gu et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 518, |
|
"end": 525, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "LCSTS", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Large Scale Chinese Social Media Short Text Summarization Dataset (LCSTS): LCSTS is constructed by Hu et al. (2015) . The dataset consists of more than 2,400,000 text-summary pairs, constructed from a famous Chinese social media website called Sina Weibo. 4 It is split into three parts, with 2,400,591 pairs in PART I, 10,666 pairs in PART II and 1,106 pairs in PART III. All the text-summary pairs in PART II and PART III are manually annotated with relevant scores ranged from 1 to 5. We only reserve pairs with scores no less than 3, leaving 8,685 pairs in PART II and 725 pairs in PART III. Following the previous work (Hu et al., 2015) , we use PART I as training set, PART II as validation set, and PART III as test set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 115, |
|
"text": "Hu et al. (2015)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 624, |
|
"end": 641, |
|
"text": "(Hu et al., 2015)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Our evaluation metric is ROUGE score (Lin and Hovy, 2003) , which is popular for summarization evaluation. The metrics compare an automatically produced summary against the reference summaries, by computing overlapping lexical units, including unigram, bigram, trigram, and longest common subsequence (LCS). Following previous work (Rush et al., 2015; Hu et al., 2015) , we use ROUGE-1 (unigram), ROUGE-2 (bi-gram) and ROUGE-L (LCS) as the evaluation metrics in the reported experimental results.", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 57, |
|
"text": "(Lin and Hovy, 2003)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 332, |
|
"end": 351, |
|
"text": "(Rush et al., 2015;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 368, |
|
"text": "Hu et al., 2015)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "3.2.2" |
|
}, |
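A minimal sketch of ROUGE-N F1 from clipped n-gram overlap, in plain Python; official evaluations use the standard ROUGE toolkit, so this is only meant to make the metric concrete, and `rouge_n` is an illustrative helper.

```python
from collections import Counter

def rouge_n(candidate, reference, n=1):
    """ROUGE-N F1 between two token lists via clipped n-gram overlap."""
    grams = lambda toks: Counter(tuple(toks[i:i + n]) for i in range(len(toks) - n + 1))
    c, r = grams(candidate), grams(reference)
    if not c or not r:
        return 0.0
    overlap = sum((c & r).values())  # clipped n-gram matches
    precision, recall = overlap / sum(c.values()), overlap / sum(r.values())
    return 0.0 if overlap == 0 else 2 * precision * recall / (precision + recall)

print(rouge_n("the cat sat on the mat".split(),
              "the cat is on the mat".split(), n=2))  # 0.6
```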
|
{ |
|
"text": "The vocabularies are extracted from the training sets, and the source contents and the summaries share the same vocabularies. We tune the hyperparameters based on the ROUGE scores on the validation sets. In order to alleviate the risk of word segmentation mistakes, we split the Chinese sentences into characters. We prune the vocabulary size to 4,000, which covers most of the common characters. We set the word embedding size and the hidden size to 512, the number of LSTM layers of the encoder is 2, and the number of LSTM layers of the decoder is 1. The batch size is 64, and we do not use dropout (Srivastava et al., 2014) on this dataset. Following the previous work , we implement a beam search optimization, and set the beam size to 5.", |
|
"cite_spans": [ |
|
{ |
|
"start": 602, |
|
"end": 627, |
|
"text": "(Srivastava et al., 2014)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Settings", |
|
"sec_num": "3.2.3" |
|
}, |
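A minimal sketch of the character-level preprocessing described above, in plain Python; `to_chars` is an illustrative helper, not the paper's preprocessing script.

```python
def to_chars(sentence):
    """Split a Chinese sentence into characters, bypassing word segmentation."""
    return [ch for ch in sentence if not ch.isspace()]

print(to_chars("今天 天气 不错"))  # ['今', '天', '天', '气', '不', '错']
```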
|
{ |
|
"text": "We compare our model with the state-of-the-art baselines.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "\u2022 RNN and RNN-cont are two sequence-tosequence baseline with GRU encoder and decoder, provided by Hu et al. (2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 114, |
|
"text": "Hu et al. (2015)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "#Param PWKP EWSEW LCSTS Seq2seq 12.80M 12.80M 2.05M WEAN 0.13M 0.13M 0.52M Table 5 : The number of the parameters in the output layer. The numbers of rest parameters between Seq2seq and WEAN are the same.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 82, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "\u2022 RNN-dist ) is a distractionbased neural model, which the attention mechanism focuses on the different parts of the source content.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "\u2022 CopyNet (Gu et al., 2016) incorporates a copy mechanism to allow part of the generated summary is copied from the source content.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 27, |
|
"text": "(Gu et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "\u2022 SRB ) is a sequence-tosequence based neural model with improving the semantic relevance between the input text and the output summary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "\u2022 DRGD ) is a deep recurrent generative decoder model, combining the decoder with a variational autoencoder.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "\u2022 Seq2seq is our implementation of the sequence-to-sequence model with the attention mechanism.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "We report the ROUGE F1 score of our model and the baseline models on the test sets. Table 4 summarizes the comparison between our model and the baselines. Our model achieves the score of 37.8 ROUGE-1, 25.6 ROUGE-2, and 35.2 ROUGE-L, outperforming all of the previous models. First, we compare our model with the sequence-to-sequence model. It shows that our model significant outperforms the sequenceto-sequence baseline with a large margin of 5.7 ROUGE-1, 5.7 ROUGE-2, and 6.0 ROUGE-L. Then, we compare our model with other related models. The state-of-the-art model is DRGD , which obtains the score of 37.0 ROUGE-1, 24.2 ROUGE-2, and 34.2 ROUGE-L. Our model has a relative gain of 0.8 ROUGE-1, 1.4 ROUGE-2 and 1.0 ROUGE-L over the stateof-the-art models. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "Our WEAN reduces a large number of the parameters in the output layer. To analyze the parameter reduction, we compare our WEAN model with the sequence-to-sequence model. Table 5 lists the number of the parameters in the output layers of two models. Both PWKP and EWSEWhave the vocabulary size of 50000 words and the hidden size of 256, resulting 50000 \u00d7 256 = 12, 800, 000 parameters. LCSTS has a vocabulary size of 4000 and the hidden size of 512, so the seq2seq has 4000 \u00d7 512 = 2, 048, 000 parameters in the output layers. WEAN only has two parameter matrices and one parameter vector at most in Equation 5, without regard to the vocabulary size. It has 256 \u00d7 256 \u00d7 2 + 256 = 131, 328 parameters on PWKP and EWSEW, and 512\u00d7512\u00d72+512 = 524, 800 parameters on LCSTS. Besides, WEAN does not have any extra parameters in the other part of the model. Figure 2 shows the training curve of WEAN and Seq2seq on the PWKP validation set. WEAN achieve near the optimal score in only 2-3 epochs, while Seq2seq takes more than 15 epochs to achieve the optimal score. Therefore, WEAN has much faster convergence rate, compared with Seq2seq. With the much faster training speed, WEAN does not suffer loss in BLEU, and even improve the BLEU score.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 177, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 849, |
|
"end": 857, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Reducing Parameters", |
|
"sec_num": "4.1" |
|
}, |
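A short arithmetic check reproducing the parameter counts above; the variable names are illustrative.

```python
# Output-layer parameters of the baseline Seq2seq generator (vocab x hidden)
seq2seq_pwkp = 50_000 * 256      # 12,800,000  (PWKP and EW-SEW)
seq2seq_lcsts = 4_000 * 512      #  2,048,000  (LCSTS)

# WEAN's 'general' scorer needs at most two matrices and one vector (Eq. 5),
# independent of the vocabulary size.
wean_pwkp = 256 * 256 * 2 + 256  #    131,328  (PWKP and EW-SEW)
wean_lcsts = 512 * 512 * 2 + 512 #    524,800  (LCSTS)

print(seq2seq_pwkp, seq2seq_lcsts, wean_pwkp, wean_lcsts)
```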
|
{ |
|
"text": "Yoghurt or yogurt is a dairy product produced by bacterial fermentation of milk .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Source", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Yoghurt or yogurt is a dairy product made by bacterial fermentation of milk .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ". or yoghurt is a dairy product produced by bacterial fermentation of milk .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NTS", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "It is made by bacterial fermentation of milk .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NTS-w2v", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Yoghurt or yogurt is a dairy product produced by bacterial fermentation of . SBMT-SARI Yogurt or yogurt is a dairy product drawn up by bacterial fermentation of milk .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "PBMT-R", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Yoghurt or yogurt is a dairy product made by bacterial fermentation of milk .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "WEAN", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Depending on the context, another closely-related meaning of constituent is that of a citizen residing in the area governed, represented, or otherwise served by a politician; sometimes this is restricted to citizens who elected the politician.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Source", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The word constituent can also be used to refer to a citizen who lives in the area that is governed, represented, or otherwise served by a politician; sometimes the word is restricted to citizens who elected the politician.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Depending on the context, another closely-related meaning of constituent is that of a citizen living in the area governed, represented, or otherwise served by a politician; sometimes this is restricted to citizens who elected the politician.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NTS", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This is restricted to citizens who elected the politician.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NTS-w2v", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Depending on the context and meaning of closely-related siemens-martin -rrb-is a citizen living in the area, or otherwise, was governed by a 1924-1930 shurba; this is restricted to people who elected it. SBMT-SARI In terms of the context, another closely-related sense of the component is that of a citizen living in the area covered, make up, or if not, served by a policy; sometimes this is limited to the people who elected the policy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "PBMT-R", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Depending on the context, another closely-related meaning of constituent is that of a citizen who lives in the area governed, represented, or otherwise served by a politician; sometimes the word is restricted to citizens who elected the politician. Table 6 shows two examples of different text simplification system outputs on EW-SEW. For the first example, NTS, NTS-w2v and PBMT-R miss some essential constituents, so that the sentences are incomplete and not fluent. SBMT-SARI generates a fluent sentence, but the output does not preserve the original meaning. The predicted sentence of WEAN is fluent, simple, and the same as the reference. For the second example, NTS-w2v omits so many words that it lacks a lot of information. PBMT-R generates some irrelevant words, like 'siemens-martin', '-rrb-', and 'shurba', which hurts the fluency and adequacy of the generated sentence. SBMT-SARI is able to generate a fluent sentence, but the meaning is different from the source text, and even more difficult to understand. Compared with the statistic model, WEAN generates a more fluent sentence. Besides, WEAN can capture the semantic meaning of the word by querying the word embeddings, so the generated sentence is semantically correct, and very close to the original meaning.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 249, |
|
"end": 256, |
|
"text": "Table 6", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "WEAN", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our work is related to the encoder-decoder framework and the attention mechanism . Encoderdecoder framework, like sequence-to-sequence model, has achieved success in machine translation Jean et al., 2015; Luong et al., 2015; ), text summarization (Rush et al., 2015; Chopra et al., 2016; Nallapati et al., 2016; , and other natural language processing tasks . There are many other methods to improve neural attention model (Jean et al., 2015; Luong et al., 2015) . Zhu et al. (2010) constructs a wikipedia dataset, and proposes a tree-based simplification model. Woodsend and Lapata (2011) introduces a datadriven model based on quasi-synchronous grammar, which captures structural mismatches and complex rewrite operations. Wubben et al. 2012presents a method for text simplification using phrase based machine translation with re-ranking the outputs. Kauchak (2013) proposes a text simplification corpus, and evaluates language modeling for text simplification on the proposed corpus. Narayan and Gardent (2014) propose a hybrid approach to sentence simplification which combines deep semantics and monolingual machine translation. Hwang et al. (2015) introduces a parallel simplification corpus by evaluating the similarity between the source text and the simplified text based on WordNet. Glava\u0161 and\u0160tajner 2015propose an unsupervised approach to lexical simplification that makes use of word vectors and require only regular corpora. Xu et al. (2016) design automatic metrics for text simplification. Recently, most works focus on the neural sequenceto-sequence model. Nisioi et al. (2017) present a sequence-to-sequence model, and re-ranks the predictions with BLEU and SARI. Zhang and Lapata (2017) propose a deep reinforcement learning model to improve the simplicity, fluency and adequacy of the simplified texts. Cao et al. (2017) introduce a novel sequence-to-sequence model to join copying and restricted generation for text simplification. Rush et al. (2015) first used an attention-based encoder to compress texts and a neural network language decoder to generate summaries. Following this work, recurrent encoder was introduced to text summarization, and gained better performance (Lopyrev, 2015; Chopra et al., 2016) . Towards Chinese texts, Hu et al. (2015) built a large corpus of Chinese short text summarization. To deal with unknown word problem, Nallapati et al. (2016) proposed a generator-pointer model so that the decoder is able to generate words in source texts. Gu et al. (2016) also solved this issue by incorporating copying mechanism.", |
|
"cite_spans": [ |
|
{ |
|
"start": 186, |
|
"end": 204, |
|
"text": "Jean et al., 2015;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 224, |
|
"text": "Luong et al., 2015;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 266, |
|
"text": "(Rush et al., 2015;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 267, |
|
"end": 287, |
|
"text": "Chopra et al., 2016;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 288, |
|
"end": 311, |
|
"text": "Nallapati et al., 2016;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 442, |
|
"text": "(Jean et al., 2015;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 443, |
|
"end": 462, |
|
"text": "Luong et al., 2015)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 465, |
|
"end": 482, |
|
"text": "Zhu et al. (2010)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 563, |
|
"end": 589, |
|
"text": "Woodsend and Lapata (2011)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 853, |
|
"end": 867, |
|
"text": "Kauchak (2013)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 987, |
|
"end": 1013, |
|
"text": "Narayan and Gardent (2014)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 1134, |
|
"end": 1153, |
|
"text": "Hwang et al. (2015)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1439, |
|
"end": 1455, |
|
"text": "Xu et al. (2016)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 1574, |
|
"end": 1594, |
|
"text": "Nisioi et al. (2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1682, |
|
"end": 1705, |
|
"text": "Zhang and Lapata (2017)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 1823, |
|
"end": 1840, |
|
"text": "Cao et al. (2017)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1953, |
|
"end": 1971, |
|
"text": "Rush et al. (2015)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 2196, |
|
"end": 2211, |
|
"text": "(Lopyrev, 2015;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 2212, |
|
"end": 2232, |
|
"text": "Chopra et al., 2016)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 2258, |
|
"end": 2274, |
|
"text": "Hu et al. (2015)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 2368, |
|
"end": 2391, |
|
"text": "Nallapati et al. (2016)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 2490, |
|
"end": 2506, |
|
"text": "Gu et al. (2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We propose a novel model based on the encoderdecoder framework, which generates the words by querying distributed word representations. Experimental results show that our model outperforms the sequence-to-sequence baseline by the BLEU score of 6.3 and 5.5 on two English text simplification datasets, and the ROUGE-2 F1 score of 5.7 on a Chinese summarization dataset. Moreover, our model achieves state-of-the-art performances on these three benchmark datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The code is available at https://github.com/ lancopku/WEAN", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://en.wikipedia.org 3 http://simple.wikipedia.org", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://weibo.com", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported in part by National Natural Science Foundation of China (No. 61673028), National High Technology Research and Development Program of China (863 Program, No. 2015AA015404), and the National Thousand Young Talents Program. Xu Sun is the corresponding author of this paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2014. Neural machine translation by jointly learning to align and translate. CoRR abs/1409.0473.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Joint copying and restricted generation for paraphrase", |
|
"authors": [ |
|
{ |
|
"first": "Ziqiang", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chuwei", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenjie", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sujian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3152--3158", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ziqiang Cao, Chuwei Luo, Wenjie Li, and Sujian Li. 2017. Joint copying and restricted generation for paraphrase. In Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence. pages 3152- 3158.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Distraction-based neural networks for modeling documents", |
|
"authors": [ |
|
{ |
|
"first": "Qian", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenhua", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Si", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 25th International Joint Conference on Artificial Intelligence (IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qian Chen, Xiaodan Zhu, Zhenhua Ling, Si Wei, and Hui Jiang. 2016. Distraction-based neural networks for modeling documents. In Proceedings of the 25th International Joint Conference on Artificial Intelli- gence (IJCAI 2015). AAAI, New York, NY.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Neural summarization by extracting sentences and words", |
|
"authors": [ |
|
{ |
|
"first": "Jianpeng", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, ACL 2016", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianpeng Cheng and Mirella Lapata. 2016. Neural summarization by extracting sentences and words. In Proceedings of the 54th Annual Meeting of the As- sociation for Computational Linguistics, ACL 2016, August 7-12, 2016, Berlin, Germany, Volume 1: Long Papers.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Learning phrase representations using RNN encoder-decoder for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bart", |
|
"middle": [], |
|
"last": "Van Merrienboer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Aglar G\u00fcl\u00e7ehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1724--1734", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyunghyun Cho, Bart van Merrienboer, \u00c7 aglar G\u00fcl\u00e7ehre, Dzmitry Bahdanau, Fethi Bougares, Hol- ger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using RNN encoder-decoder for statistical machine translation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing, EMNLP 2014. pages 1724-1734.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Abstractive sentence summarization with attentive recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Sumit", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "The 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "93--98", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sumit Chopra, Michael Auli, and Alexander M. Rush. 2016. Abstractive sentence summarization with at- tentive recurrent neural networks. In NAACL HLT 2016, The 2016 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies. pages 93- 98.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "PPDB: the paraphrase database", |
|
"authors": [ |
|
{ |
|
"first": "Juri", |
|
"middle": [], |
|
"last": "Ganitkevitch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Human Language Technologies: Conference of the North American Chapter of the Association of Computational Linguistics, Proceedings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "758--764", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Juri Ganitkevitch, Benjamin Van Durme, and Chris Callison-Burch. 2013. PPDB: the paraphrase database. In Human Language Technologies: Con- ference of the North American Chapter of the Asso- ciation of Computational Linguistics, Proceedings. pages 758-764.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Simplifying lexical simplification: Do we need simplified corpora?", |
|
"authors": [ |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Glava\u0161", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sanja\u0161tajner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics, ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--68", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Goran Glava\u0161 and Sanja\u0160tajner. 2015. Simplifying lexical simplification: Do we need simplified cor- pora? In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics, ACL. pages 63-68.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Incorporating copying mechanism in sequence-to-sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengdong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [ |
|
"O", |
|
"K" |
|
], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, Zhengdong Lu, Hang Li, and Victor O. K. Li. 2016. Incorporating copying mechanism in sequence-to-sequence learning. In Proceedings of the 54th Annual Meeting of the Association for Com- putational Linguistics, ACL 2016.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "LC-STS: A large scale chinese short text summarization dataset", |
|
"authors": [ |
|
{ |
|
"first": "Baotian", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qingcai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fangze", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1967--1972", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Baotian Hu, Qingcai Chen, and Fangze Zhu. 2015. LC- STS: A large scale chinese short text summarization dataset. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Process- ing, EMNLP 2015, Lisbon, Portugal, September 17- 21, 2015. pages 1967-1972.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Aligning sentences from standard wikipedia to simple wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "NAACL HLT 2015", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "211--217", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Hwang, Hannaneh Hajishirzi, Mari Ostendorf, and Wei Wu. 2015. Aligning sentences from stan- dard wikipedia to simple wikipedia. In NAACL HLT 2015. pages 211-217.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "On using very large target vocabulary for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "S\u00e9bastien", |
|
"middle": [], |
|
"last": "Jean", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roland", |
|
"middle": [], |
|
"last": "Memisevic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics, ACL 2015", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--10", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S\u00e9bastien Jean, KyungHyun Cho, Roland Memisevic, and Yoshua Bengio. 2015. On using very large tar- get vocabulary for neural machine translation. In Proceedings of the 53rd Annual Meeting of the As- sociation for Computational Linguistics, ACL 2015. pages 1-10.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Improving text simplification language modeling using unsimplified text data", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Kauchak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1537--1546", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Kauchak. 2013. Improving text simplification language modeling using unsimplified text data. In Proceedings of the 51st Annual Meeting of the Asso- ciation for Computational Linguistics, ACL. pages 1537-1546.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. CoRR abs/1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Deep recurrent generative decoder for abstractive text summarization", |
|
"authors": [ |
|
{ |
|
"first": "Piji", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidong", |
|
"middle": [], |
|
"last": "Bing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihao", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2091--2100", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piji Li, Wai Lam, Lidong Bing, and Zihao Wang. 2017. Deep recurrent generative decoder for ab- stractive text summarization. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, EMNLP 2017, Copenhagen, Denmark, September 9-11, 2017. pages 2091-2100.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Automatic evaluation of summaries using n-gram cooccurrence statistics", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin and Eduard H. Hovy. 2003. Auto- matic evaluation of summaries using n-gram co- occurrence statistics. In Human Language Technol- ogy Conference of the North American Chapter of the Association for Computational Linguistics, HLT- NAACL 2003.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Decoding-history-based adaptive control of attention for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Junyang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuming", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junyang Lin, Shuming Ma, Qi Su, and Xu Sun. 2018. Decoding-history-based adaptive control of attention for neural machine translation. CoRR abs/1802.01812.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Table-to-text generation by structure-aware seq2seq learning", |
|
"authors": [ |
|
{ |
|
"first": "Tianyu", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kexiang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Sha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baobao", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifang", |
|
"middle": [], |
|
"last": "Sui", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianyu Liu, Kexiang Wang, Lei Sha, Baobao Chang, and Zhifang Sui. 2017. Table-to-text genera- tion by structure-aware seq2seq learning. CoRR abs/1711.09724.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Generating news headlines with recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Konstantin Lopyrev. 2015. Generating news head- lines with recurrent neural networks. CoRR abs/1512.01712.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Effective approaches to attention-based neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1412--1421", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang Luong, Hieu Pham, and Christopher D. Man- ning. 2015. Effective approaches to attention-based neural machine translation. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, EMNLP 2015. pages 1412- 1421.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A semantic relevance based neural network for text summarization and text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Shuming", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuming Ma and Xu Sun. 2017. A semantic rele- vance based neural network for text summarization and text simplification. CoRR abs/1710.02318.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Improving semantic relevance for sequence-to-sequence learning of chinese social media text summarization", |
|
"authors": [ |
|
{ |
|
"first": "Shuming", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenjie", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "635--640", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuming Ma, Xu Sun, Jingjing Xu, Houfeng Wang, Wenjie Li, and Qi Su. 2017. Improving semantic relevance for sequence-to-sequence learning of chi- nese social media text summarization. In Proceed- ings of the 55th Annual Meeting of the Association for Computational Linguistics, ACL 2017, Vancou- ver, Canada, July 30 -August 4, Volume 2: Short Papers. pages 635-640.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Abstractive text summarization using sequence-tosequence rnns and beyond", |
|
"authors": [ |
|
{ |
|
"first": "Ramesh", |
|
"middle": [], |
|
"last": "Nallapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C\u00edcero", |
|
"middle": [], |
|
"last": "Nogueira dos Santos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c7aglar", |
|
"middle": [], |
|
"last": "G\u00fcl\u00e7ehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Xiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 20th SIGNLL Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "280--290", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramesh Nallapati, Bowen Zhou, C\u00edcero Nogueira dos Santos, \u00c7 aglar G\u00fcl\u00e7ehre, and Bing Xiang. 2016. Abstractive text summarization using sequence-to- sequence rnns and beyond. In Proceedings of the 20th SIGNLL Conference on Computational Natural Language Learning, CoNLL 2016, Berlin, Germany, August 11-12, 2016. pages 280-290.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Hybrid simplification using deep semantics and machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Gardent", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics, ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "435--445", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shashi Narayan and Claire Gardent. 2014. Hybrid sim- plification using deep semantics and machine trans- lation. In Proceedings of the 52nd Annual Meet- ing of the Association for Computational Linguis- tics, ACL. pages 435-445.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Exploring neural text simplification models", |
|
"authors": [ |
|
{ |
|
"first": "Sergiu", |
|
"middle": [], |
|
"last": "Nisioi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanja", |
|
"middle": [], |
|
"last": "Stajner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [ |
|
"Paolo" |
|
], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liviu", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Dinu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "85--91", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sergiu Nisioi, Sanja Stajner, Simone Paolo Ponzetto, and Liviu P. Dinu. 2017. Exploring neural text sim- plification models. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics, ACL. pages 85-91.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Compu- tational Linguistics. pages 311-318.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Neural paraphrase generation with stacked residual LSTM networks", |
|
"authors": [ |
|
{ |
|
"first": "Aaditya", |
|
"middle": [], |
|
"last": "Prakash", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sadid", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Hasan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathy", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivek", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Datla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashequl", |
|
"middle": [], |
|
"last": "Qadir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joey", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oladimeji", |
|
"middle": [], |
|
"last": "Farri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "COLING 2016, 26th International Conference on Computational Linguistics, Proceedings of the Conference: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2923--2934", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aaditya Prakash, Sadid A. Hasan, Kathy Lee, Vivek V. Datla, Ashequl Qadir, Joey Liu, and Oladimeji Farri. 2016. Neural paraphrase generation with stacked residual LSTM networks. In COLING 2016, 26th International Conference on Computational Lin- guistics, Proceedings of the Conference: Techni- cal Papers, December 11-16, 2016, Osaka, Japan. pages 2923-2934.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "A neural attention model for abstractive sentence summarization", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumit", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "379--389", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander M. Rush, Sumit Chopra, and Jason Weston. 2015. A neural attention model for abstractive sen- tence summarization. In Proceedings of the 2015 Conference on Empirical Methods in Natural Lan- guage Processing, EMNLP 2015, Lisbon, Portugal, September 17-21, 2015. pages 379-389.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Dropout: a simple way to prevent neural 205 networks from overfitting", |
|
"authors": [ |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "15", |
|
"issue": "1", |
|
"pages": "1929--1958", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitish Srivastava, Geoffrey E. Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdi- nov. 2014. Dropout: a simple way to prevent neural 205 networks from overfitting. Journal of Machine Learning Research 15(1):1929-1958.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "meprop: Sparsified back propagation for accelerated deep learning with reduced overfitting", |
|
"authors": [ |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuancheng", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuming", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 34th International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3299--3308", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xu Sun, Xuancheng Ren, Shuming Ma, and Houfeng Wang. 2017a. meprop: Sparsified back propaga- tion for accelerated deep learning with reduced over- fitting. In Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Syd- ney, NSW, Australia, 6-11 August 2017. pages 3299- 3308.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Training simplification and model simplification for deep learning: A minimal effort back propagation method", |
|
"authors": [ |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuancheng", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuming", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bingzhen", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xu Sun, Xuancheng Ren, Shuming Ma, Bingzhen Wei, Wei Li, and Houfeng Wang. 2017b. Training simpli- fication and model simplification for deep learning: A minimal effort back propagation method. CoRR abs/1711.06528.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Label embedding network: Learning label representation for soft training of deep networks", |
|
"authors": [ |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bingzhen", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuancheng", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuming", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xu Sun, Bingzhen Wei, Xuancheng Ren, and Shuming Ma. 2017c. Label embedding network: Learning la- bel representation for soft training of deep networks. CoRR abs/1710.10393.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Sequence to sequence learning with neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Advances in Neural Information Processing Systems 27: Annual Conference on Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3104--3112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural net- works. In Advances in Neural Information Process- ing Systems 27: Annual Conference on Neural Infor- mation Processing Systems 2014. pages 3104-3112.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Neural headline generation on abstract meaning representation", |
|
"authors": [ |
|
{ |
|
"first": "Sho", |
|
"middle": [], |
|
"last": "Takase", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Suzuki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naoaki", |
|
"middle": [], |
|
"last": "Okazaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsutomu", |
|
"middle": [], |
|
"last": "Hirao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masaaki", |
|
"middle": [], |
|
"last": "Nagata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1054--1059", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sho Takase, Jun Suzuki, Naoaki Okazaki, Tsutomu Hi- rao, and Masaaki Nagata. 2016. Neural headline generation on abstract meaning representation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, EMNLP 2016, Austin, Texas, USA, November 1-4, 2016. pages 1054-1059.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Affinity-preserving random walk for multi-document summarization", |
|
"authors": [ |
|
{ |
|
"first": "Kexiang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianyu", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifang", |
|
"middle": [], |
|
"last": "Sui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baobao", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "2017", |
|
"issue": "", |
|
"pages": "210--220", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kexiang Wang, Tianyu Liu, Zhifang Sui, and Baobao Chang. 2017. Affinity-preserving random walk for multi-document summarization. In Proceedings of the 2017 Conference on Empirical Methods in Nat- ural Language Processing, EMNLP 2017, Copen- hagen, Denmark, September 9-11, 2017. pages 210- 220.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Learning to simplify sentences with quasi-synchronous grammar and integer programming", |
|
"authors": [ |
|
{ |
|
"first": "Kristian", |
|
"middle": [], |
|
"last": "Woodsend", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "409--420", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kristian Woodsend and Mirella Lapata. 2011. Learn- ing to simplify sentences with quasi-synchronous grammar and integer programming. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, EMNLP. pages 409- 420.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Sentence simplification by monolingual machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Sander", |
|
"middle": [], |
|
"last": "Wubben", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antal", |
|
"middle": [], |
|
"last": "van den Bosch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "Krahmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "The 50th Annual Meeting of the Association for Computational Linguistics, Proceedings of the Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1015--1024", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sander Wubben, Antal van den Bosch, and Emiel Krahmer. 2012. Sentence simplification by mono- lingual machine translation. In The 50th An- nual Meeting of the Association for Computational Linguistics, Proceedings of the Conference. pages 1015-1024.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Dp-gan: Diversitypromoting generative adversarial network for generating informative and diversified text", |
|
"authors": [ |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuancheng", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junyang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Binzhen", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingjing Xu, Xu Sun, Xuancheng Ren, Junyang Lin, Binzhen Wei, and Wei Li. 2018. Dp-gan: Diversity- promoting generative adversarial network for gen- erating informative and diversified text. CoRR abs/1802.01345.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Optimizing statistical machine translation for text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Napoles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quanze", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "TACL", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "401--415", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Xu, Courtney Napoles, Ellie Pavlick, Quanze Chen, and Chris Callison-Burch. 2016. Optimizing statistical machine translation for text simplification. TACL 4:401-415.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Sentence simplification with deep reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Xingxing", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "584--594", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xingxing Zhang and Mirella Lapata. 2017. Sen- tence simplification with deep reinforcement learn- ing. In Proceedings of the 2017 Conference on Em- pirical Methods in Natural Language Processing, EMNLP 2017, Copenhagen, Denmark, September 9-11, 2017. pages 584-594.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "A monolingual tree-based translation model for sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Zhemin", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Delphine", |
|
"middle": [], |
|
"last": "Bernhard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "COLING 2010", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1353--1361", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhemin Zhu, Delphine Bernhard, and Iryna Gurevych. 2010. A monolingual tree-based translation model for sentence simplification. In COLING 2010. pages 1353-1361.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "An overview of Word Embedding Attention Network.", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "The training curve of WEAN and Seq2seq on the PWKP validation set.", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Automatic evaluation of our model and other related systems on PWKP datasets. The results are reported on the test sets.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Human evaluation of our model and other related systems on PWKP and EW-SEW datasets. The results are reported on the test sets.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF8": { |
|
"content": "<table><tr><td>: ROUGE F1 score on the LCSTS test set. R-1, R-2, and R-L denote ROUGE-1, ROUGE-2, and ROUGE-L, respectively. The models with a suffix of 'W' in the table are word-based, while the rest of mod-els are character-based.</td></tr></table>", |
|
"type_str": "table", |
|
"text": "", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF9": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Two examples of different text simplification system outputs in EW-SEW dataset. Differences from the source texts are shown in bold.", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |