{ "paper_id": "2020", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T15:35:53.130213Z" }, "title": "IR&TM-NJUST@CLSciSumm 20", "authors": [ { "first": "Heng", "middle": [], "last": "Zhang", "suffix": "", "affiliation": { "laboratory": "", "institution": "Nanjing University of Science and Technology", "location": { "postCode": "210094", "settlement": "Nanjing", "country": "China" } }, "email": "zhangcz@njust.edu.cn" }, { "first": "Lifan", "middle": [], "last": "Liu", "suffix": "", "affiliation": { "laboratory": "", "institution": "Nanjing University of Science and Technology", "location": { "postCode": "210094", "settlement": "Nanjing", "country": "China" } }, "email": "" }, { "first": "Ruping", "middle": [], "last": "Wang", "suffix": "", "affiliation": { "laboratory": "", "institution": "Nanjing University of Science and Technology", "location": { "postCode": "210094", "settlement": "Nanjing", "country": "China" } }, "email": "" }, { "first": "Shaohu", "middle": [], "last": "Hu", "suffix": "", "affiliation": { "laboratory": "", "institution": "Nanjing University of Science and Technology", "location": { "postCode": "210094", "settlement": "Nanjing", "country": "China" } }, "email": "" }, { "first": "Shutian", "middle": [], "last": "Ma", "suffix": "", "affiliation": { "laboratory": "", "institution": "Nanjing University of Science and Technology", "location": { "postCode": "210094", "settlement": "Nanjing", "country": "China" } }, "email": "mashutian0608@hotmail.com" }, { "first": "Chengzhi", "middle": [], "last": "Zhang", "suffix": "", "affiliation": { "laboratory": "", "institution": "Nanjing University of Science and Technology", "location": { "postCode": "210094", "settlement": "Nanjing", "country": "China" } }, "email": "" } ], "year": "", "venue": null, "identifiers": {}, "abstract": "This paper mainly introduces our methods for Task 1A and Task 1B of CL-SciSumm 2020. Task 1A is to identify reference text in reference paper. Traditional machine learning models and MLP model are used. We evaluate the performances of these models and submit the final results from the optimal model. Compared with previous work, we optimize the ratio of positive to negative examples after data sampling. In order to construct features for classification, we calculate similarities between reference text and candidate sentences based on sentence vectors. Accordingly, nine similarities are used, of which eight are chosen from what we used in CL-SciSumm 2019 and a new sentence similarity based on fastText is added. Task 1B is to classify the facets of reference text. Unlike the methods used in CL-SciSumm 2019, we construct inputs of models based on word vectors and add deep learning models for classification this year.", "pdf_parse": { "paper_id": "2020", "_pdf_hash": "", "abstract": [ { "text": "This paper mainly introduces our methods for Task 1A and Task 1B of CL-SciSumm 2020. Task 1A is to identify reference text in reference paper. Traditional machine learning models and MLP model are used. We evaluate the performances of these models and submit the final results from the optimal model. Compared with previous work, we optimize the ratio of positive to negative examples after data sampling. In order to construct features for classification, we calculate similarities between reference text and candidate sentences based on sentence vectors. 
Accordingly, nine similarities are used, of which eight are chosen from what we used in CL-SciSumm 2019 and a new sentence similarity based on fastText is added. Task 1B is to classify the facets of reference text. Unlike the methods used in CL-SciSumm 2019, we construct inputs of models based on word vectors and add deep learning models for classification this year.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Abstract", "sec_num": null } ], "body_text": [ { "text": "The rapid growth of papers has provided scholars with various knowledge and methods, which can offer references for development or innovation of the research. But it makes difficult for researchers to get brief summaries quickly from such massive amount of papers (Radev et al., 2002) . Automatic summarization can solve this problem. Researchers express their views on reference paper through citation text. So, citation text can be used to generate summary of paper (Cohan & Goharian, 2018; Qazvinian & Radev, 2008) . However, as a result of researchers' different views (citation), the quality of the summary is not guaranteed and the summary cannot fully restore the original \uf02a Corresponding Author.", "cite_spans": [ { "start": 264, "end": 284, "text": "(Radev et al., 2002)", "ref_id": "BIBREF26" }, { "start": 468, "end": 492, "text": "(Cohan & Goharian, 2018;", "ref_id": "BIBREF4" }, { "start": 493, "end": 517, "text": "Qazvinian & Radev, 2008)", "ref_id": "BIBREF24" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "1 https://github.com/WING-NUS/scisumm-corpus/ information of paper. Therefore, CL-SciSumm proposes to generate summary by the original text corresponding to citation. CL-SciSumm is the first medium-scale shared task on scientific document summarization, with over 500 annotated documents 1 . This competition is organized annually from 2016, and we can view details about CL-SciSumm2020 at the website: https://ornlcda.github.io/SDProc/sharedtasks.html #clscisumm. The introduction of CL-SciSumm2020 is as follows:", "cite_spans": [ { "start": 288, "end": 289, "text": "1", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Given: A topic consisting of a Reference Paper (RP) and Citing Papers (CPs) that all contain citations to the RP. In each CP, the text spans (i.e., citances) have been identified that pertain to a particular citation to the RP.", "cite_spans": [ { "start": 141, "end": 157, "text": "(i.e., citances)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Task 1A: For each citance, identify the spans of text (cited text spans) in the RP that most accurately reflect the citance. These are of the granularity of a sentence fragment, a full sentence, or several consecutive sentences (no more than 5).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Task 1B: For each cited text span, identify what facet of the paper it belongs to, from a predefined set of facets.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Task 2 (optional bonus task): Finally, generate a structured summary of the RP from the cited text spans of the RP. 
The length of the summary should not exceed 250 words.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "In Figure 1 , The blue text span in the citing paper shows the citation text, and the green text span in the reference paper shows the reference text which most accurately reflects the citance. Our team has participated in the CL-SciSumm competition in 2017 (Ma et al., 2017) , 2018 and 2019 (Ma et al., 2019) . For Task 1A, a similarity-based negative sampling strategy is applied to construct the training set. Nine similarity features and sentence vectors are used to represent citation text and candidate sentences. Then we employ traditional machine learning methods and build MLP model to identify the reference text in reference papers. For Task 1B, sentence vectors are generated based on word frequency and word vector. Traditional machine learning models and deep learning models are built to identify the facets. As for Task 2, cosine similarity is calculated between reference sentences and the original abstract based on their sentence vectors. Then sentences are selected to construct summary according to their similarities, and length of the summary does not exceed 250 words.", "cite_spans": [ { "start": 258, "end": 275, "text": "(Ma et al., 2017)", "ref_id": "BIBREF13" }, { "start": 292, "end": 309, "text": "(Ma et al., 2019)", "ref_id": null } ], "ref_spans": [ { "start": 3, "end": 11, "text": "Figure 1", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Compared with previous work, we make changes in following steps. In Task 1A, we optimize ratio of positive to negative examples after negative sampling. The structure and parameters of MLP model are adjusted to get better results. For Task 1B, we first try to use word vector to construct inputs of models. And the result has been improved about 10% at accuracy score.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "As for the related work of Task 1A, most previous teams solved it by using classification models, and they constructed different features as input of models. Some researchers used three types of classification features, namely similarity-based features, rule-based features and location-based features (Jaidka et al., 2017) . Ma et al. (2017) extracted several features at the words level from the citation text spans in the training set to calculate the corresponding similarities, such as IDF similarity, Jaccard similarity, Dice similarity, Word2Vec similarity and so on.", "cite_spans": [ { "start": 302, "end": 323, "text": "(Jaidka et al., 2017)", "ref_id": "BIBREF10" }, { "start": 326, "end": 342, "text": "Ma et al. (2017)", "ref_id": "BIBREF13" } ], "ref_spans": [], "eq_spans": [], "section": "Identification of the citation text spans", "sec_num": "2.1" }, { "text": "In recent years, machine learning models are mostly used for the identification of citation text spans. Mei and Zhai (2008) highlighted the importance of citance, and they proposed a method to generate the abstract of the cited document by extracting the most influential sentences in the document. The machine learning models mainly include classification models and ranking models. Yeh et al. (2017) used classification models, such as SVM (Support Vector Machines), DT (decision trees), KNN (K-Nearest Neighbors) and so on in the identification of citances. 
Their method performed well with competitive results when it was evaluated using the CL-SciSumm 2016 datasets. In ranking models, sentences were sorted based on the integration of multiple features. Lu et al. (2016) constructed word-level (e.g. TF-IDF similarity and Jaccard similarity) and topic-level features (based on LDA model) separately and used the learning-to-rank algorithm to identify cited text spans. Their results showed that Jaccard similarity achieved better F measures, and the performance of topic similarity features varies slightly among different number of topics. Additionally, Moraes et al. (2016) investigated cosine similarity with multiple incremental modifications and SVMs with a tree kernel. They calculated the similarity not only between reference and citance sentences, but also between the reference spans and the citance sentences.", "cite_spans": [ { "start": 104, "end": 123, "text": "Mei and Zhai (2008)", "ref_id": "BIBREF21" }, { "start": 384, "end": 401, "text": "Yeh et al. (2017)", "ref_id": "BIBREF30" }, { "start": 760, "end": 776, "text": "Lu et al. (2016)", "ref_id": "BIBREF12" }, { "start": 1161, "end": 1181, "text": "Moraes et al. (2016)", "ref_id": "BIBREF23" } ], "ref_spans": [], "eq_spans": [], "section": "Identification of the citation text spans", "sec_num": "2.1" }, { "text": "In summary, the current research about identification of citation text spans mainly includes feature construction and model selection. Most of the researches attempt to construct a huge feature system for model training and learning. As for model selection, most of the works are based on traditional machine learning models or sorting algorithms.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Identification of the citation text spans", "sec_num": "2.1" }, { "text": "Task 1B is to identify the facets of reference text. It provides 5 facets in this task. Most teams in previous CL-SciSumm competitions used rulebased methods, because the amounts of different facets of reference text are imbalanced . In the learning process of the classification algorithms, the result tends to focus on the facets with most samples. This problem will have a huge impact on model training (He & Garcia, 2009) . He et al. (2008) reviewed researches about learning from imbalanced data, then they highlighted that the opportunities and challenges to solve this problem would be a new research field in the future research. combined the NN algorithm with the SMOTE algorithm to make training data and extend the penalty factor in the processing of imbalanced datasets, and NN algorithm behaved best on testing data.", "cite_spans": [ { "start": 406, "end": 425, "text": "(He & Garcia, 2009)", "ref_id": "BIBREF6" }, { "start": 428, "end": 444, "text": "He et al. (2008)", "ref_id": "BIBREF7" } ], "ref_spans": [], "eq_spans": [], "section": "Identification of the facets of reference text", "sec_num": "2.2" }, { "text": "There are plenty of researches about identifying the facets of reference text, rule-based methods and statistical-based methods are widely used. Wang et al. (2012) proposed an orderly clue phrase matching method and got 62% accuracy and 42% recall. S\u00e1ndor et al. (2006) presented two natural language processing systems to help researchers rapidly accessing relevant knowledge in text. Agarwal et al. (2011) used two statistical machine learning models, SVM and NB, to classify the facets of reference. And they found that the classification result of SVM was better. 
Aggarwal and Sharma (2016) determined the facets based on the location of the cited text spans. Li et al. (2019) used the Word2Vec and the CNN model to calculate the sentence similarity, and further apply CNN to classify the facets of reference texts. They indicated that the features of high frequency word and subtitle are important in the identification of facets.", "cite_spans": [ { "start": 145, "end": 163, "text": "Wang et al. (2012)", "ref_id": "BIBREF29" }, { "start": 249, "end": 269, "text": "S\u00e1ndor et al. (2006)", "ref_id": "BIBREF28" }, { "start": 386, "end": 407, "text": "Agarwal et al. (2011)", "ref_id": "BIBREF0" }, { "start": 568, "end": 594, "text": "Aggarwal and Sharma (2016)", "ref_id": "BIBREF2" }, { "start": 664, "end": 680, "text": "Li et al. (2019)", "ref_id": "BIBREF11" } ], "ref_spans": [], "eq_spans": [], "section": "Identification of the facets of reference text", "sec_num": "2.2" }, { "text": "In summary, in the researches about classification of facets, the approaches applied in this task mainly include rule-based methods and statistical-based methods. However, because of the limited experimental dataset and the imbalance in the number of samples in different facets, these two methods are difficult to learn the relevant features of the facets more accurately and efficiently.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Identification of the facets of reference text", "sec_num": "2.2" }, { "text": "Before introducing the methodology of each task, we define some concepts to avoid ambiguity in the following description. ", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Methodology", "sec_num": "3" }, { "text": "It is \"Citance\" in introduction of Task 1A, and it consists of one or several sentences from citing paper. See blue highlighted span in Figure 1 .", "cite_spans": [], "ref_spans": [ { "start": 136, "end": 144, "text": "Figure 1", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "Citation text", "sec_num": null }, { "text": "It is \"cited text spans\" in introduction of Task 1B, and it consists of one or several sentences from reference paper. See green highlighted span in Figure 1 .", "cite_spans": [], "ref_spans": [ { "start": 149, "end": 157, "text": "Figure 1", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "Reference text", "sec_num": null }, { "text": "It is the type of reference text, there is a predefined set of facets: \"Method_Citation\", \"Result_Citation\", \"Aim_Citation\", \"Implication_Citation\", \"Hypothesis_Citation\".", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Facets", "sec_num": null }, { "text": "Citation text and candidate sentences as a pair of input to models. And candidate sentences contain reference text as positive samples and sentences selected from reference paper as negative samples.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Candidate sentences", "sec_num": null }, { "text": "In Task 1A, we are given citation text to find the corresponding sentences in the reference paper. This task can be regarded as a binary classification task. For a citation text, it is need to identify the classification labels of all sentences in the reference paper. There are two classification labels: \"1\" or \"0\". If \"1\", it means that the sentence belongs to the correct reference text. If \"0\", it means that the sentence is not. Figure 2 shows our research framework of Task 1A. Firstly, preprocessing is conducted for the data extracted from data set. 
Secondly, training data is constructed by negative sampling. Then, nine similarities are calculated between the citation text and candidate sentences, and these are used as input features for traditional machine learning models. Additionally, an MLP model is built on sentence vectors. Finally, these models are evaluated with Precision (P), Recall (R), and F1-value (F1). Negative sampling: 753 pairs of citation text and reference text are extracted from the annotations in \"Training-Set-2018\" and used as positive samples (label \"1\"). The citation text paired with any other sentence in the reference paper could be regarded as a negative sample (label \"0\"), but the number of such negative samples is far too large. In order to balance positive and negative samples, negative sampling based on sentence-vector similarity is performed. We calculate the average of all word vectors in a sentence to obtain a vector that represents the sentence. Then, cosine similarities are calculated between the citation text and all sentences in the reference paper (apart from the annotated reference text). Next, sentences are chosen from the highest, lowest, and middle similarity levels to form negative samples. Through comparative experiments, the ratio of positive to negative samples is finally set to 1:6 (two sentences with the highest similarity, two with the lowest similarity, and two with medium similarity as negative samples).", "cite_spans": [], "ref_spans": [ { "start": 435, "end": 443, "text": "Figure 2", "ref_id": "FIGREF1" } ], "eq_spans": [], "section": "Task 1A based on negative sampling", "sec_num": "3.1" }, { "text": "Using traditional machine learning models to identify reference text: The first idea is to use traditional machine learning methods to solve Task 1A. We calculate multiple similarities between the citation text and candidate sentences as features. It is worth noting that the candidate sentences consist of the reference text and six negative samples, and that multi-sentence citation text and reference text are each treated as a whole when computing their sentence vectors. Nine similarity indicators are selected; they are shown in Table 2 . Then several machine learning models are trained for classification. These models include Support Vector Machine (SVM) (Cortes and Vapnik, 1995) , Naive Bayes (NB) (McCallum et al., 1998) , K-Nearest Neighbor (KNN) (Altman, 1992) , Decision Tree (DT) (Quinlan, 1987) , Random Forest (RF) (Ho, 1995) and an ensemble learning tool (Xgboost 2 ). Jaccard similarity Segment sentence1 and sentence2 into sets of words, denoted as s1 and s2 respectively, and calculate the size of their intersection divided by the size of their union.
Its formulation is as follows:", "cite_spans": [ { "start": 634, "end": 659, "text": "(Cortes and Vapnik, 1995)", "ref_id": "BIBREF5" }, { "start": 682, "end": 705, "text": "(McCallum et al., 1998)", "ref_id": "BIBREF19" }, { "start": 733, "end": 747, "text": "(Altman, 1992)", "ref_id": "BIBREF3" }, { "start": 769, "end": 784, "text": "(Quinlan, 1987)", "ref_id": "BIBREF25" }, { "start": 806, "end": 816, "text": "(Ho, 1995)", "ref_id": "BIBREF8" } ], "ref_spans": [ { "start": 505, "end": 512, "text": "Table 2", "ref_id": "TABREF1" } ], "eq_spans": [], "section": "Task 1A based on negative sampling", "sec_num": "3.1" }, { "text": "$J(s_1, s_2) = \\frac{|s_1 \\cap s_2|}{|s_1| + |s_2| - |s_1 \\cap s_2|}$", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Task 1A based on negative sampling", "sec_num": "3.1" }, { "text": "Dice similarity Segment sentence1 and sentence2 into sets of words (s1, s2). Its formulation is as follows:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Task 1A based on negative sampling", "sec_num": "3.1" }, { "text": "$\\mathrm{Dice}(s_1, s_2) = \\frac{2\\,|s_1 \\cap s_2|}{|s_1| + |s_2|}$", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Task 1A based on negative sampling", "sec_num": "3.1" }, { "text": "Word Overlap Segment sentence1 and sentence2 into sets of words, and calculate the number of overlapping words between them.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Task 1A based on negative sampling", "sec_num": "3.1" }, { "text": "Segment sentence1 and sentence2 into sets of bigrams, and calculate the number of overlapping bigrams between them.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Bigram Overlap", "sec_num": null }, { "text": "Denote sentence1 and sentence2 as two sequences with words as basic units, and find the longest subsequence (not necessarily consecutive in the original sequences) common to both.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Longest Common Subsequence", "sec_num": null }, { "text": "Denote sentence1 and sentence2 as two strings with words as basic units, and find the longest string that is a substring (required to occupy consecutive positions within the original strings) of both.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Longest Common Substring", "sec_num": null }, { "text": "2 https://github.com/dmlc/xgboost", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Longest Common Substring", "sec_num": null }, { "text": "Calculate the average of the Levenshtein distance (the minimum number of single-character edits required to change one word into the other) over all word pairs between sentence1 and sentence2.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Levenshtein distance", "sec_num": null }, { "text": "Represent words as low-dimensional, dense distributed representations by the Word2Vec algorithm and calculate the average of the cosine similarities between words from the two sentences.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Word2Vec similarity", "sec_num": null }, { "text": "Represent words as low-dimensional, dense distributed representations by the fastText algorithm and calculate the average of the cosine similarities between words from the two sentences.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "fastText 3 similarity", "sec_num": null }, { "text": "The second idea is to use
deep learning models. Word2Vec (Mikolov et al., 2013) and fastText are used to train word vectors. And we calculate the average of all word vectors in sentence to get sentence vectors. Vector of citation text and vector of candidate sentence are concatenated as input of models. We build MLP model and adjust hidden layers and parameters for optimization. The framework of MLP model is shown in Figure 3 . The input of the model is concatenated sentence vector from citation text and reference text. Concatenated sentence vector passes through two hidden layers, and then passes through the sigmoid layer. We get the probability of two labels through the output layer and set a threshold to determine which label the candidate sentence belongs to. It should be noted that the activation function of the hidden layer is Relu, and the number of neural nodes is 128 and 64 respectively. These parameters are finally determined based on comparative experiments.", "cite_spans": [ { "start": 57, "end": 79, "text": "(Mikolov et al., 2013)", "ref_id": "BIBREF22" } ], "ref_spans": [ { "start": 421, "end": 429, "text": "Figure 3", "ref_id": "FIGREF2" } ], "eq_spans": [], "section": "Using MLP model to identify reference text:", "sec_num": null }, { "text": "In Task 1B, it is a multi-label classification task. There are five labels (facets): \"Method_Citation\", \"Result_Citation\", \"Aim_Citation\", \"Implication_Citation\", \"Hypothesis_Citation\". The research framework of Task 1B is shown in Figure 4 . Firstly, 753 pairs of citation text and reference text is extracted from data set. Secondly, training set and test set are split from the extracted data by sampling. Then, sentence vectors are generated from word frequency and word vector based on which traditional machine learning models are used to classify the facets. In addition, the word embedding matrix is used as input, and deep learning models are also applied in Task 1B. In order to test the effects of different models, accuracy score is used. Data sampling: The number of samples in five facets varies greatly (see Figure 5) . Training set and test set should not be divided from all the samples directly. In order to balance all kinds of samples in training set and test set, we randomly select 80% of samples from each label to form training set, and the remaining 20% of the samples are used as test set. Using traditional machine learning models to identify the facets based on sentence vector: As illustrated in the framework, traditional machine learning models are employed in Task 1B based on the input of sentence vectors. By the way, sentence vectors are generated from word frequency and word vector separately. In the first way, nouns, verbs, adverbs, adjectives are selected after part-ofspeech tagging. Then, sentence vectors are generated by One-hot or TF (Term Frequency) based on the selected words. In the second way, fastText and BERT 4 are used to train word vector. And we calculate the average of all word vectors in the sentence to generate the sentence vector. After that, traditional machine learning models introduced in Task 1A are used for the multi-label classification. Besides, we add another ensemble learning tool LightGBM 5 . 
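As a concrete illustration of this sentence-vector construction (used in both Task 1A and Task 1B), the following is a minimal sketch that averages word vectors into sentence vectors and concatenates a vector pair as classifier input. It assumes gensim's FastText implementation; the function names and parameter values are illustrative, not our exact code.

```python
# Minimal sketch: sentence vectors by averaging word vectors.
# Assumption: gensim's FastText; the vector dimension of 200 matches our setup.
import numpy as np
from gensim.models import FastText

def train_word_vectors(tokenized_sentences, dim=200):
    # tokenized_sentences: lists of tokens from the reference and citing papers.
    return FastText(sentences=tokenized_sentences, vector_size=dim,
                    window=5, min_count=1, epochs=10)

def sentence_vector(model, sentence):
    # Average the vectors of all words in the sentence.
    words = sentence.lower().split()
    return np.mean([model.wv[w] for w in words], axis=0)

def pair_input(model, citation_text, candidate_sentence):
    # Concatenate the two sentence vectors as the input of a classifier,
    # e.g. the MLP used in Task 1A.
    return np.concatenate([sentence_vector(model, citation_text),
                           sentence_vector(model, candidate_sentence)])
```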
During testing, if the model cannot assign a label to a sample, we set the sample's label to \"Method_Citation\".", "cite_spans": [], "ref_spans": [ { "start": 232, "end": 240, "text": "Figure 4", "ref_id": "FIGREF3" }, { "start": 823, "end": 832, "text": "Figure 5)", "ref_id": "FIGREF4" } ], "eq_spans": [], "section": "Task 1B based on sentence vector and word embedding", "sec_num": "3.2" }, { "text": "Using deep learning models to identify the facets based on word embedding: We also build deep learning models for the multi-label classification in Task 1B. In this scheme, the word embedding matrix is used as input. Long Short-Term Memory (LSTM) (Hochreiter & Schmidhuber, 1997) and Recurrent Neural Network (RNN) (Rumelhart et al., 1986) models are applied in the feature selection layer separately. They convert the word embedding matrix into a 128-dimensional vector. Then the vector passes through a hidden layer, and we get the probabilities that the sample belongs to the five labels. When a probability is greater than 0.5, we assign the corresponding label to the sample. If the sample fails to obtain a label, we set its label to \"Method_Citation\". In Figure 5 , we also build an MLP model for Task 1B. The word embedding matrix is flattened into a vector, which passes through two hidden layers. Finally, the model outputs the probabilities that the sample belongs to the five labels.", "cite_spans": [ { "start": 243, "end": 275, "text": "(Hochreiter & Schmidhuber, 1997)", "ref_id": "BIBREF9" }, { "start": 311, "end": 335, "text": "(Rumelhart et al., 1986)", "ref_id": "BIBREF27" } ], "ref_spans": [ { "start": 749, "end": 757, "text": "Figure 5", "ref_id": "FIGREF4" } ], "eq_spans": [], "section": "Task 1B based on sentence vector and word embedding", "sec_num": "3.2" }, { "text": "In Task 2, we generate the summary by selecting sentences from the reference text according to their cosine similarity to the original abstract. The steps are as follows: a. Word vectors are trained by fastText. b. Sentence vectors of the reference sentences (identified in Task 1A) and of the original abstract are generated by averaging the vectors of all words in the sentence. c. Cosine similarity is calculated between the reference sentences and the original abstract based on their sentence vectors. d. Sentences are selected according to their similarities to generate the summary, whose length does not exceed 250 words.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Task 2 based on sentence similarity", "sec_num": "3.3" }, { "text": "In this section, we report the results of different models in Task 1A and Task 1B.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experiments and results analysis", "sec_num": "4" }, { "text": "For Task 1A, we use nine similarities as features and apply traditional machine learning models to identify the reference text. An MLP model is also employed with sentence vectors as input. In this section, we report and analyze the results of these models.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experimental result of Task 1A", "sec_num": "4.1" }, { "text": "Results of traditional machine learning models: The input feature vector is generated from the nine similarities. Five classification models in Scikit-learn 6 (Random Forest, Decision Tree, SVM, NB, KNN) are applied. In addition, an ensemble learning model built with Xgboost is employed. Precision, Recall, and F1-value are used to evaluate their performance.
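A minimal sketch of this feature-based setup is given below; it assumes scikit-learn, shows only two of the nine Table 2 similarities, and uses illustrative helper names rather than our exact implementation.

```python
# Minimal sketch of the Task 1A feature-based classification (illustrative;
# only two of the nine Table 2 similarities are shown).
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate

def jaccard(sentence1, sentence2):
    # |s1 ∩ s2| / (|s1| + |s2| - |s1 ∩ s2|) over word sets.
    s1, s2 = set(sentence1.split()), set(sentence2.split())
    inter = len(s1 & s2)
    return inter / (len(s1) + len(s2) - inter)

def dice(sentence1, sentence2):
    # 2|s1 ∩ s2| / (|s1| + |s2|) over word sets.
    s1, s2 = set(sentence1.split()), set(sentence2.split())
    return 2 * len(s1 & s2) / (len(s1) + len(s2))

def evaluate(X, y):
    # X: one row of similarity features per (citation text, candidate) pair;
    # y: 1 if the candidate belongs to the annotated reference text, else 0.
    clf = RandomForestClassifier(n_estimators=100, random_state=0)
    scores = cross_validate(clf, X, y, cv=5,
                            scoring=("precision", "recall", "f1"))
    return {name: vals.mean() for name, vals in scores.items()
            if name.startswith("test_")}
```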
The results of 5-fold cross validation are shown in Table 3 . From Table 3 , we can see that the ensemble learning method Xgboost achieves the optimal F1-value.", "cite_spans": [], "ref_spans": [ { "start": 406, "end": 413, "text": "Table 3", "ref_id": "TABREF2" }, { "start": 421, "end": 428, "text": "Table 3", "ref_id": "TABREF2" } ], "eq_spans": [], "section": "Experimental result of Task 1A", "sec_num": "4.1" }, { "text": "Results of MLP model: Word vectors are trained with two tools: Word2Vec and fastText. The training corpus consists of two parts:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experimental result of Task 1A", "sec_num": "4.1" }, { "text": "(1) Full text of reference papers and citing papers from \"Training-Set-2018\". (2) Full text of reference papers from \"ScisummNet-2019\". The vector dimension is set to 200. Through comparative experiments, we finally determined the optimal parameter settings under these two kinds of word vectors, as shown in Table 4 . The evaluation results of the two models are shown in Table 5 . As shown in Table 5 , the results based on fastText are better than those based on Word2Vec; the F1-value of the best result is 0.64. Compared with the results of the machine learning models, MLP works better.", "cite_spans": [], "ref_spans": [ { "start": 308, "end": 315, "text": "Table 4", "ref_id": "TABREF3" }, { "start": 374, "end": 381, "text": "Table 5", "ref_id": "TABREF4" }, { "start": 399, "end": 406, "text": "Table 5", "ref_id": "TABREF4" } ], "eq_spans": [], "section": "Experimental result of Task 1A", "sec_num": "4.1" }, { "text": "But when we use the trained models to identify the sentences in reference papers for a citation text, the models output far more than 5 sentences. In order to ensure the effect of the final test, we develop a sentence filtering strategy for reference papers: a. We pick out the nouns in the citation text and in the sentences of the reference paper. b. Sentences in the reference paper that share a noun with the citation text are kept as candidates; this is motivated by our finding that 609 of the 753 pairs of citation text and reference text share at least one noun. c. We use the trained models to classify the filtered sentences. d. In the final test, if no sentence in the reference paper shares a noun with the citation text, we test all sentences in the reference paper.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experimental result of Task 1A", "sec_num": "4.1" }, { "text": "For Task 1B, sentence vectors and the word embedding matrix are used as input. Then traditional machine learning models and deep learning models are applied for the multi-label classification. Now, we report and analyze the results of these models.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experimental results of Task 1B", "sec_num": "4.2" }, { "text": "Accuracy score of traditional machine learning models based on one-hot: Sentence vectors are generated by one-hot in two ways (a minimal sketch of this construction follows below): (1) nouns, verbs, adverbs and adjectives are selected only from the citation text; (2) nouns, verbs, adverbs and adjectives are selected from both the citation text and the reference text. Many machine learning models in Scikit-learn and ensemble learning models built with Xgboost and LightGBM are applied for classification. Accuracy score is used to evaluate these models. Random Forest and the two ensemble models work better, and their accuracy scores are demonstrated in Table 6 .
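The sketch below assumes NLTK for part-of-speech tagging and scikit-learn's CountVectorizer; the tag prefixes and helper names are illustrative rather than our exact implementation.

```python
# Minimal sketch of the Task 1B one-hot sentence vectors (illustrative;
# assumes NLTK resources, e.g. nltk.download("averaged_perceptron_tagger")).
import nltk
from sklearn.feature_extraction.text import CountVectorizer

CONTENT_TAG_PREFIXES = ("NN", "VB", "RB", "JJ")  # nouns, verbs, adverbs, adjectives

def content_words(sentence):
    # Keep only nouns, verbs, adverbs and adjectives after POS tagging.
    tagged = nltk.pos_tag(nltk.word_tokenize(sentence))
    return " ".join(word for word, tag in tagged
                    if tag.startswith(CONTENT_TAG_PREFIXES))

def one_hot_vectors(sentences):
    # binary=True yields One-hot presence features; binary=False yields TF counts.
    vectorizer = CountVectorizer(binary=True)
    X = vectorizer.fit_transform([content_words(s) for s in sentences])
    return X, vectorizer
```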
From Table 6 , when sentence vectors are generated by One-hot based on both citation text and reference text, Random Forest works better and its accuracy score is 0.8025.", "cite_spans": [], "ref_spans": [ { "start": 573, "end": 580, "text": "Table 6", "ref_id": "TABREF5" }, { "start": 583, "end": 590, "text": "Table 6", "ref_id": "TABREF5" } ], "eq_spans": [], "section": "Experimental results of Task 1B", "sec_num": "4.2" }, { "text": "We also use TF to generate vectors in the same two ways: citation text only, and citation text plus reference text. Evaluation results of Random Forest, Xgboost and LightGBM are shown in Table 7 . As suggested in Table 7 , when sentence vectors are generated by TF based on both citation text and reference text, Random Forest and LightGBM achieve higher accuracy scores.", "cite_spans": [], "ref_spans": [ { "start": 168, "end": 175, "text": "Table 7", "ref_id": "TABREF6" }, { "start": 194, "end": 201, "text": "Table 7", "ref_id": "TABREF6" } ], "eq_spans": [], "section": "Accuracy score of traditional machine learning models based on TF (Term Frequency):", "sec_num": null }, { "text": "Accuracy score of traditional machine learning models based on fastText: Sentence vectors are generated from fastText word vectors. The sentence vector of the citation text is denoted v1 = (x1, x2, ..., xn), and the sentence vector of the reference text is denoted v2 = (y1, y2, ..., yn). We also compute the element-wise difference |v1-v2| = (|x1-y1|, |x2-y2|, ..., |xn-yn|) and the element-wise product v1*v2 = (x1*y1, x2*y2, ..., xn*yn). We make four combinations of v1 and v2: a. (v1, v2); b. (v1, v2, |v1-v2|); c. (v1, v2, v1*v2); d. (v1, v2, |v1-v2|, v1*v2). In each combination, the vectors are concatenated as the input of the different models. Evaluation results of Random Forest, Xgboost and LightGBM are shown in Figure 7 . As shown in Figure 7 , under all input conditions, LightGBM performs better than the other two models. When v1, v2, |v1-v2| and v1*v2 are concatenated as input, LightGBM reaches the highest accuracy score (0.8280).", "cite_spans": [], "ref_spans": [ { "start": 886, "end": 894, "text": "Figure 7", "ref_id": "FIGREF6" }, { "start": 909, "end": 917, "text": "Figure 7", "ref_id": "FIGREF6" } ], "eq_spans": [], "section": "Accuracy score of traditional machine learning models based on TF (Term Frequency):", "sec_num": null }, { "text": "Accuracy score of traditional machine learning models based on BERT: We train word vectors with BERT and compute sentence vectors in the same way. Evaluation results of the three models are shown in Figure 8 . As illustrated in Figure 8 , under different input conditions, Xgboost performs better than the other two models. When v1, v2, and |v1-v2| are concatenated as input, Xgboost gets the highest accuracy score (0.8217).
But its performance is slightly worse than that of LightGBM with fastText word vectors (see Figure 7) .", "cite_spans": [], "ref_spans": [ { "start": 178, "end": 186, "text": "Figure 8", "ref_id": "FIGREF7" }, { "start": 207, "end": 215, "text": "Figure 8", "ref_id": "FIGREF7" }, { "start": 482, "end": 491, "text": "Figure 7)", "ref_id": "FIGREF6" } ], "eq_spans": [], "section": "Accuracy score of traditional machine learning models based on TF (Term Frequency):", "sec_num": null }, { "text": "Accuracy score of deep learning models based on word embedding: Word vectors trained by fastText and BERT are used to construct the word embedding matrices of citation text and reference text. Then three deep learning models (LSTM, RNN and MLP) are applied with the word embedding matrix as input. Accuracy scores of the three models are shown in Figure 9 . From Figure 9 , we can see that MLP performs best among the three models, but its accuracy score is lower than the previous results of LightGBM and Xgboost (see Figure 7 and Figure 8 ).", "cite_spans": [], "ref_spans": [ { "start": 339, "end": 347, "text": "Figure 9", "ref_id": "FIGREF8" }, { "start": 350, "end": 358, "text": "Figure 9", "ref_id": "FIGREF8" }, { "start": 506, "end": 514, "text": "Figure 7", "ref_id": "FIGREF6" }, { "start": 519, "end": 528, "text": "Figure 8", "ref_id": "FIGREF7" } ], "eq_spans": [], "section": "Accuracy score of traditional machine learning models based on TF (Term Frequency):", "sec_num": null }, { "text": "In Task 1A, training data and test data are constructed by negative sampling, and the ratio of positive to negative examples has been optimized. We then use a deep learning model (MLP) with sentence vectors as input, as well as traditional machine learning models based on nine similarity features, to identify the reference text. MLP proves to be more effective than the traditional machine learning models. As for Task 1B, we use different combinations of sentence vectors as input. Traditional machine learning models and deep learning models have been evaluated on classifying the facets of reference text. In this process, word vectors obtained from the pre-trained model (BERT) perform worse than word vectors trained with fastText on our training corpus, and the traditional machine learning models (LightGBM and Xgboost) work better than the deep learning models.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion and future work", "sec_num": "5" }, { "text": "Generally, word vectors can reflect more semantic information than traditional machine learning features. In Task 1A, we create a suitable amount of training data by negative sampling, so the deep learning model (MLP) works better there. In Task 1B, insufficient training data makes the deep learning models inferior to the traditional machine learning models.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion and future work", "sec_num": "5" }, { "text": "In future work, we can optimize the training set through data augmentation and apply other deep learning models for Task 1A. As for Task 1B, its recognition result is affected by the imbalance of the data.
We will try to expand the training data for the facets with smaller data scale from other data sources, such as structured abstract.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion and future work", "sec_num": "5" }, { "text": "https://github.com/facebookresearch/fastText", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null } ], "back_matter": [ { "text": "This work is supported by National Natural Science Foundation of China (Grant No. 72074113).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Acknowledgements", "sec_num": null } ], "bib_entries": { "BIBREF0": { "ref_id": "b0", "title": "A context-based investigation into source use by information seekers", "authors": [ { "first": "N", "middle": [ "K" ], "last": "Agarwal", "suffix": "" }, { "first": "Y", "middle": [], "last": "Xu", "suffix": "" }, { "first": ")", "middle": [], "last": "Calvin", "suffix": "" }, { "first": "D", "middle": [ "C C" ], "last": "Poo", "suffix": "" } ], "year": 2011, "venue": "Journal of the American Society for Information Science and Technology", "volume": "62", "issue": "6", "pages": "1087--1104", "other_ids": {}, "num": null, "urls": [], "raw_text": "Agarwal, N. K., Xu, Y. (Calvin), & Poo, D. C. C. (2011). A context-based investigation into source use by information seekers. Journal of the American Society for Information Science and Technology, 62(6), 1087-1104.", "links": null }, "BIBREF2": { "ref_id": "b2", "title": "Lexical and Syntactic cues to identify Reference Scope of Citance", "authors": [ { "first": "P", "middle": [], "last": "Aggarwal", "suffix": "" }, { "first": "R", "middle": [], "last": "Sharma", "suffix": "" } ], "year": 2016, "venue": "Proceedings of the Joint Workshop on Bibliometric-Enhanced Information Retrieval and Natural Language Processing for Digital Libraries (BIRNDL)", "volume": "", "issue": "", "pages": "103--112", "other_ids": {}, "num": null, "urls": [], "raw_text": "Aggarwal, P., & Sharma, R. (2016). Lexical and Syntactic cues to identify Reference Scope of Citance. Proceedings of the Joint Workshop on Bibliometric-Enhanced Information Retrieval and Natural Language Processing for Digital Libraries (BIRNDL), 103-112.", "links": null }, "BIBREF3": { "ref_id": "b3", "title": "An introduction to kernel and nearest-neighbor nonparametric regression", "authors": [ { "first": "N", "middle": [ "S" ], "last": "Altman", "suffix": "" } ], "year": 1992, "venue": "The American Statistician", "volume": "46", "issue": "3", "pages": "175--185", "other_ids": {}, "num": null, "urls": [], "raw_text": "Altman, N. S. (1992). An introduction to kernel and nearest-neighbor nonparametric regression. The American Statistician, 46(3), 175-185.", "links": null }, "BIBREF4": { "ref_id": "b4", "title": "Scientific document summarization via citation contextualization and scientific discourse", "authors": [ { "first": "A", "middle": [], "last": "Cohan", "suffix": "" }, { "first": "N", "middle": [], "last": "Goharian", "suffix": "" } ], "year": 2018, "venue": "International Journal on Digital Libraries", "volume": "19", "issue": "2-3", "pages": "287--303", "other_ids": {}, "num": null, "urls": [], "raw_text": "Cohan, A., & Goharian, N. (2018). Scientific document summarization via citation contextualization and scientific discourse. 
International Journal on Digital Libraries, 19(2-3), 287-303.", "links": null }, "BIBREF5": { "ref_id": "b5", "title": "Support-vector networks", "authors": [ { "first": "C", "middle": [], "last": "Cortes", "suffix": "" }, { "first": "V", "middle": [], "last": "Vapnik", "suffix": "" } ], "year": 1995, "venue": "Machine Learning", "volume": "20", "issue": "3", "pages": "273--297", "other_ids": { "DOI": [ "10.1007/BF00994018" ] }, "num": null, "urls": [], "raw_text": "Cortes, C., & Vapnik, V. (1995). Support-vector networks. Machine Learning, 20(3), 273-297. https://doi.org/10.1007/BF00994018", "links": null }, "BIBREF6": { "ref_id": "b6", "title": "Learning from Imbalanced Data", "authors": [ { "first": "H", "middle": [], "last": "He", "suffix": "" }, { "first": "E", "middle": [ "A" ], "last": "Garcia", "suffix": "" } ], "year": 2009, "venue": "IEEE Transactions on Knowledge and Data Engineering", "volume": "21", "issue": "9", "pages": "1263--1284", "other_ids": { "DOI": [ "10.1109/TKDE.2008.239" ] }, "num": null, "urls": [], "raw_text": "He, H., & Garcia, E. A. (2009). Learning from Imbalanced Data. IEEE Transactions on Knowledge and Data Engineering, 21(9), 1263-1284. https://doi.org/10.1109/TKDE.2008.239", "links": null }, "BIBREF7": { "ref_id": "b7", "title": "ADASYN: Adaptive synthetic sampling approach learning", "authors": [ { "first": "H", "middle": [], "last": "He", "suffix": "" }, { "first": "Yang", "middle": [], "last": "Bai", "suffix": "" }, { "first": "E", "middle": [ "A" ], "last": "Garcia", "suffix": "" }, { "first": "", "middle": [], "last": "Shutao Li", "suffix": "" } ], "year": 2008, "venue": "IEEE International Joint Conference on Neural Networks (IEEE World Congress on Computational Intelligence", "volume": "", "issue": "", "pages": "1322--1328", "other_ids": { "DOI": [ "10.1109/IJCNN.2008.4633969" ] }, "num": null, "urls": [], "raw_text": "He, H., Yang Bai, Garcia, E. A., & Shutao Li. (2008). ADASYN: Adaptive synthetic sampling approach learning. 2008 IEEE International Joint Conference on Neural Networks (IEEE World Congress on Computational Intelligence), 1322- 1328. https://doi.org/10.1109/IJCNN.2008.4633969", "links": null }, "BIBREF8": { "ref_id": "b8", "title": "Random decision forests", "authors": [ { "first": "T", "middle": [ "K" ], "last": "Ho", "suffix": "" } ], "year": 1995, "venue": "Proceedings of 3rd International Conference on Document Analysis and Recognition", "volume": "1", "issue": "", "pages": "278--282", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ho, T. K. (1995). Random decision forests. Proceedings of 3rd International Conference on Document Analysis and Recognition, 1, 278-282.", "links": null }, "BIBREF9": { "ref_id": "b9", "title": "Long Short-Term Memory", "authors": [ { "first": "S", "middle": [], "last": "Hochreiter", "suffix": "" }, { "first": "J", "middle": [], "last": "Schmidhuber", "suffix": "" } ], "year": 1997, "venue": "Neural Comput", "volume": "9", "issue": "8", "pages": "1735--1780", "other_ids": { "DOI": [ "10.1162/neco.1997.9.8.1735" ] }, "num": null, "urls": [], "raw_text": "Hochreiter, S., & Schmidhuber, J. (1997). Long Short- Term Memory. Neural Comput., 9(8), 1735-1780. https://doi.org/10.1162/neco.1997.9.8.1735", "links": null }, "BIBREF10": { "ref_id": "b10", "title": "The CL-SciSumm Shared Task 2017: Results and Key Insights. 
BIRNDL@ SIGIR", "authors": [ { "first": "K", "middle": [], "last": "Jaidka", "suffix": "" }, { "first": "M", "middle": [ "K" ], "last": "Chandrasekaran", "suffix": "" }, { "first": "D", "middle": [], "last": "Jain", "suffix": "" }, { "first": "M.-Y", "middle": [], "last": "Kan", "suffix": "" } ], "year": 2017, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Jaidka, K., Chandrasekaran, M. K., Jain, D., & Kan, M.-Y. (2017). The CL-SciSumm Shared Task 2017: Results and Key Insights. BIRNDL@ SIGIR (2).", "links": null }, "BIBREF11": { "ref_id": "b11", "title": "CIST@ CLSciSumm-19: Automatic Scientific Paper Summarization with Citances and Facets. BIRNDL@ SIGIR", "authors": [ { "first": "L", "middle": [], "last": "Li", "suffix": "" }, { "first": "Y", "middle": [], "last": "Zhu", "suffix": "" }, { "first": "Y", "middle": [], "last": "Xie", "suffix": "" }, { "first": "Z", "middle": [], "last": "Huang", "suffix": "" }, { "first": "W", "middle": [], "last": "Liu", "suffix": "" }, { "first": "X", "middle": [], "last": "Li", "suffix": "" }, { "first": "Y", "middle": [], "last": "Liu", "suffix": "" } ], "year": 2019, "venue": "", "volume": "", "issue": "", "pages": "196--207", "other_ids": {}, "num": null, "urls": [], "raw_text": "Li, L., Zhu, Y., Xie, Y., Huang, Z., Liu, W., Li, X., & Liu, Y. (2019). CIST@ CLSciSumm-19: Automatic Scientific Paper Summarization with Citances and Facets. BIRNDL@ SIGIR, 196-207.", "links": null }, "BIBREF12": { "ref_id": "b12", "title": "Recognizing reference spans and classifying their discourse facets", "authors": [ { "first": "K", "middle": [], "last": "Lu", "suffix": "" }, { "first": "J", "middle": [], "last": "Mao", "suffix": "" }, { "first": "G", "middle": [], "last": "Li", "suffix": "" }, { "first": "J", "middle": [], "last": "Xu", "suffix": "" } ], "year": 2016, "venue": "Proceedings of the Joint Workshop on Bibliometric-Enhanced Information Retrieval and Natural Language Processing for Digital Libraries (BIRNDL)", "volume": "", "issue": "", "pages": "139--145", "other_ids": {}, "num": null, "urls": [], "raw_text": "Lu, K., Mao, J., Li, G., & Xu, J. (2016). Recognizing reference spans and classifying their discourse facets. Proceedings of the Joint Workshop on Bibliometric-Enhanced Information Retrieval and Natural Language Processing for Digital Libraries (BIRNDL), 139-145.", "links": null }, "BIBREF13": { "ref_id": "b13", "title": "NJUST @ CLSciSumm-17", "authors": [ { "first": "S", "middle": [], "last": "Ma", "suffix": "" }, { "first": "J", "middle": [], "last": "Xu", "suffix": "" }, { "first": "J", "middle": [], "last": "Wang", "suffix": "" }, { "first": "C", "middle": [], "last": "Zhang", "suffix": "" } ], "year": 2017, "venue": "Proceedings of the Joint Workshop on Bibliometric-Enhanced Information Retrieval and Natural Language Processing for Digital Libraries", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ma, S., Xu, J., Wang, J., & Zhang, C. (2017). NJUST @ CLSciSumm-17. 
Proceedings of the Joint Workshop on Bibliometric-Enhanced Information Retrieval and Natural Language Processing for Digital Libraries (BIRNDL 2017).", "links": null }, "BIBREF14": { "ref_id": "b14", "title": "Automatic identification of cited text spans: A multi-classifier approach over imbalanced dataset", "authors": [ { "first": "S", "middle": [], "last": "Ma", "suffix": "" }, { "first": "J", "middle": [], "last": "Xu", "suffix": "" }, { "first": "C", "middle": [], "last": "Zhang", "suffix": "" } ], "year": 2018, "venue": "Scientometrics", "volume": "116", "issue": "2", "pages": "1303--1330", "other_ids": { "DOI": [ "10.1007/s11192-018-2754-2" ] }, "num": null, "urls": [], "raw_text": "Ma, S., Xu, J., & Zhang, C. (2018). Automatic identification of cited text spans: A multi-classifier approach over imbalanced dataset. Scientometrics, 116(2), 1303-1330. https://doi.org/10.1007/s11192- 018-2754-2", "links": null }, "BIBREF15": { "ref_id": "b15", "title": "NJUST @ CLSciSumm-18. BIRNDL@ SIGIR", "authors": [ { "first": "S", "middle": [], "last": "Ma", "suffix": "" }, { "first": "H", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "J", "middle": [], "last": "Xu", "suffix": "" }, { "first": "C", "middle": [], "last": "Zhang", "suffix": "" } ], "year": 2018, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ma, S., Zhang, H., Xu, J., & Zhang, C. (2018). NJUST @ CLSciSumm-18. BIRNDL@ SIGIR.", "links": null }, "BIBREF19": { "ref_id": "b19", "title": "A comparison of event models for naive bayes text classification", "authors": [ { "first": "A", "middle": [], "last": "Mccallum", "suffix": "" }, { "first": "K", "middle": [], "last": "Nigam", "suffix": "" } ], "year": 1998, "venue": "Proceedings of the AAAI-98", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "McCallum, A., Nigam, K., & others. (1998). A comparison of event models for naive bayes text classification. Proceedings of the AAAI-98", "links": null }, "BIBREF21": { "ref_id": "b21", "title": "Generating impact-based summaries for scientific literature", "authors": [ { "first": "Q", "middle": [], "last": "Mei", "suffix": "" }, { "first": "C", "middle": [], "last": "Zhai", "suffix": "" } ], "year": 2008, "venue": "Proceedings of ACL-08: HLT", "volume": "", "issue": "", "pages": "816--824", "other_ids": {}, "num": null, "urls": [], "raw_text": "Mei, Q., & Zhai, C. (2008). Generating impact-based summaries for scientific literature. Proceedings of ACL-08: HLT, 816-824.", "links": null }, "BIBREF22": { "ref_id": "b22", "title": "Efficient Estimation of Word Representations in Vector Space", "authors": [ { "first": "T", "middle": [], "last": "Mikolov", "suffix": "" }, { "first": "K", "middle": [], "last": "Chen", "suffix": "" }, { "first": "G", "middle": [], "last": "Corrado", "suffix": "" }, { "first": "J", "middle": [], "last": "Dean", "suffix": "" } ], "year": 2013, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Mikolov, T., Chen, K., Corrado, G., & Dean, J. (2013). Efficient Estimation of Word Representations in Vector Space. 
ArXiv:1301.3781 [Cs].", "links": null }, "BIBREF23": { "ref_id": "b23", "title": "University of Houston at CL-SciSumm 2016: SVMs with tree kernels and Sentence Similarity", "authors": [ { "first": "L", "middle": [], "last": "Moraes", "suffix": "" }, { "first": "S", "middle": [], "last": "Baki", "suffix": "" }, { "first": "R", "middle": [], "last": "Verma", "suffix": "" }, { "first": "D", "middle": [], "last": "Lee", "suffix": "" } ], "year": 2016, "venue": "Proceedings of the Joint Workshop on Bibliometric-Enhanced Information Retrieval and Natural Language Processing for Digital Libraries (BIRNDL)", "volume": "", "issue": "", "pages": "113--121", "other_ids": {}, "num": null, "urls": [], "raw_text": "Moraes, L., Baki, S., Verma, R., & Lee, D. (2016). University of Houston at CL-SciSumm 2016: SVMs with tree kernels and Sentence Similarity. Proceedings of the Joint Workshop on Bibliometric- Enhanced Information Retrieval and Natural Language Processing for Digital Libraries (BIRNDL), 113-121.", "links": null }, "BIBREF24": { "ref_id": "b24", "title": "Scientific paper summarization using citation summary networks", "authors": [ { "first": "V", "middle": [], "last": "Qazvinian", "suffix": "" }, { "first": "D", "middle": [ "R" ], "last": "Radev", "suffix": "" } ], "year": 2008, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Qazvinian, V., & Radev, D. R. (2008). Scientific paper summarization using citation summary networks. ArXiv Preprint ArXiv:0807.1560.", "links": null }, "BIBREF25": { "ref_id": "b25", "title": "Simplifying decision trees", "authors": [ { "first": "J", "middle": [ "R" ], "last": "Quinlan", "suffix": "" } ], "year": 1987, "venue": "International Journal of Man-Machine Studies", "volume": "27", "issue": "3", "pages": "221--234", "other_ids": {}, "num": null, "urls": [], "raw_text": "Quinlan, J. R. (1987). Simplifying decision trees. International Journal of Man-Machine Studies, 27(3), 221-234.", "links": null }, "BIBREF26": { "ref_id": "b26", "title": "Introduction to the special issue on summarization", "authors": [ { "first": "D", "middle": [ "R" ], "last": "Radev", "suffix": "" }, { "first": "E", "middle": [], "last": "Hovy", "suffix": "" }, { "first": "K", "middle": [], "last": "Mckeown", "suffix": "" } ], "year": 2002, "venue": "Computational Linguistics", "volume": "28", "issue": "4", "pages": "399--408", "other_ids": {}, "num": null, "urls": [], "raw_text": "Radev, D. R., Hovy, E., & McKeown, K. (2002). Introduction to the special issue on summarization. Computational Linguistics, 28(4), 399-408.", "links": null }, "BIBREF27": { "ref_id": "b27", "title": "Learning representations by backpropagating errors", "authors": [ { "first": "D", "middle": [ "E" ], "last": "Rumelhart", "suffix": "" }, { "first": "G", "middle": [ "E" ], "last": "Hinton", "suffix": "" }, { "first": "R", "middle": [ "J" ], "last": "Williams", "suffix": "" } ], "year": 1986, "venue": "Nature", "volume": "323", "issue": "6088", "pages": "533--536", "other_ids": { "DOI": [ "10.1038/323533a0" ] }, "num": null, "urls": [], "raw_text": "Rumelhart, D. E., Hinton, G. E., & Williams, R. J. (1986). Learning representations by back- propagating errors. Nature, 323(6088), 533-536. 
https://doi.org/10.1038/323533a0", "links": null }, "BIBREF28": { "ref_id": "b28", "title": "Discourse and citation analysis with conceptmatching", "authors": [ { "first": "\u00c1", "middle": [], "last": "S\u00e1ndor", "suffix": "" }, { "first": "A", "middle": [], "last": "Kaplan", "suffix": "" }, { "first": "G", "middle": [], "last": "Rondeau", "suffix": "" } ], "year": 2006, "venue": "International Symposium: Discourse and Document (ISDD)", "volume": "", "issue": "", "pages": "15--16", "other_ids": {}, "num": null, "urls": [], "raw_text": "S\u00e1ndor, \u00c1., Kaplan, A., & Rondeau, G. (2006). Discourse and citation analysis with concept- matching. International Symposium: Discourse and Document (ISDD), 15-16.", "links": null }, "BIBREF29": { "ref_id": "b29", "title": "Analysis of reference relationships among research papers, based on citation context", "authors": [ { "first": "W", "middle": [], "last": "Wang", "suffix": "" }, { "first": "P", "middle": [], "last": "Villavicencio", "suffix": "" }, { "first": "T", "middle": [], "last": "Watanabe", "suffix": "" } ], "year": 2012, "venue": "International Journal on Artificial Intelligence Tools", "volume": "21", "issue": "02", "pages": "", "other_ids": { "DOI": [ "10.1142/S0218213012400040" ] }, "num": null, "urls": [], "raw_text": "Wang, W., Villavicencio, P., & Watanabe, T. (2012). Analysis of reference relationships among research papers, based on citation context. International Journal on Artificial Intelligence Tools, 21(02), 1240004. https://doi.org/10.1142/S0218213012400040", "links": null }, "BIBREF30": { "ref_id": "b30", "title": "Reference Scope Identification for Citances by Classification with Text Similarity Measures", "authors": [ { "first": "J.-Y", "middle": [], "last": "Yeh", "suffix": "" }, { "first": "T.-Y", "middle": [], "last": "Hsu", "suffix": "" }, { "first": "C.-J", "middle": [], "last": "Tsai", "suffix": "" }, { "first": "P.-C", "middle": [], "last": "Cheng", "suffix": "" } ], "year": 2017, "venue": "Proceedings of the 6th International Conference on Software and Computer Applications", "volume": "", "issue": "", "pages": "87--91", "other_ids": { "DOI": [ "10.1145/3056662.3056692" ] }, "num": null, "urls": [], "raw_text": "Yeh, J.-Y., Hsu, T.-Y., Tsai, C.-J., & Cheng, P.-C. (2017). Reference Scope Identification for Citances by Classification with Text Similarity Measures. Proceedings of the 6th International Conference on Software and Computer Applications, 87-91. 
https://doi.org/10.1145/3056662.3056692", "links": null } }, "ref_entries": { "FIGREF0": { "num": null, "text": "Citation text in citing paper and reference text in reference paper", "type_str": "figure", "uris": null }, "FIGREF1": { "num": null, "text": "Framework of Task 1A", "type_str": "figure", "uris": null }, "FIGREF2": { "num": null, "text": "Framework of MLP model in Task 1A", "type_str": "figure", "uris": null }, "FIGREF3": { "num": null, "text": "Framework of Task 1B", "type_str": "figure", "uris": null }, "FIGREF4": { "num": null, "text": "Number of samples in each label 4 https://github.com/google-research/bert", "type_str": "figure", "uris": null }, "FIGREF5": { "num": null, "text": "Framework of MLP model in Task 1B 5 https://github.com/microsoft/", "type_str": "figure", "uris": null }, "FIGREF6": { "num": null, "text": "Evaluation results of models based on fastText", "type_str": "figure", "uris": null }, "FIGREF7": { "num": null, "text": "Evaluation results of models based on BERT", "type_str": "figure", "uris": null }, "FIGREF8": { "num": null, "text": "Evaluation results of deep learning models", "type_str": "figure", "uris": null }, "TABREF0": { "num": null, "text": "", "html": null, "type_str": "table", "content": "" }, "TABREF1": { "num": null, "text": "Nine similarities as features", "html": null, "type_str": "table", "content": "Similarity | Description
" }, "TABREF2": { "num": null, "text": "Evaluation results of models", "html": null, "type_str": "table", "content": "
Model | P | R | F1
Xgboost | 0.5124 | 0.5449 | 0.5280
Random Forest | 0.6732 | 0.4087 | 0.5084
Decision Tree | 0.4680 | 0.4442 | 0.4550
SVM | 0.6415 | 0.3168 | 0.4230
6 https://scikit-learn.org/stable/index.html
" }, "TABREF3": { "num": null, "text": "Parameters of MLP models", "html": null, "type_str": "table", "content": "
Model | MLP_FT | MLP_W2V
Word vector | fastText | Word2Vec
Optimizer | adam | RMSprop
Loss | binary_crossentropy | mse
Epoch | 20 | 20
Hidden layer | Relu (128), Relu (64) | Relu (128), Relu (64)
Threshold | 0.577 | 0.602
" }, "TABREF4": { "num": null, "text": "Evaluation results of MLP models", "html": null, "type_str": "table", "content": "
Model | P | R | F1
MLP_FT | 0.6486 | 0.6316 | 0.6400
MLP_W2V | 0.6428 | 0.5684 | 0.6034
" }, "TABREF5": { "num": null, "text": "Evaluation results of models based on One-hotFrom", "html": null, "type_str": "table", "content": "" }, "TABREF6": { "num": null, "text": "Evaluation results of models based on TF", "html": null, "type_str": "table", "content": "
" } } } }