|
{ |
|
"paper_id": "Y16-2009", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:47:34.727032Z" |
|
}, |
|
"title": "Supervised Word Sense Disambiguation with Sentences Similarities from Context Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Shoma", |
|
"middle": [], |
|
"last": "Yamaki", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Ibaraki University", |
|
"location": { |
|
"addrLine": "Sciences 4-12-1 Nakanarusawa", |
|
"postCode": "316-8511", |
|
"settlement": "Hitachi", |
|
"region": "Ibaraki JAPAN" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hiroyuki", |
|
"middle": [], |
|
"last": "Shinnou", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Ibaraki University", |
|
"location": { |
|
"addrLine": "Sciences 4-12-1 Nakanarusawa", |
|
"postCode": "316-8511", |
|
"settlement": "Hitachi", |
|
"region": "Ibaraki JAPAN" |
|
} |
|
}, |
|
"email": "hiroyuki.shinnou.0828@vc.ibaraki.ac.jp" |
|
}, |
|
{ |
|
"first": "Kanako", |
|
"middle": [], |
|
"last": "Komiya", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Ibaraki University", |
|
"location": { |
|
"addrLine": "Sciences 4-12-1 Nakanarusawa", |
|
"postCode": "316-8511", |
|
"settlement": "Hitachi", |
|
"region": "Ibaraki JAPAN" |
|
} |
|
}, |
|
"email": "kanako.komiya.nlp@vc.ibaraki.ac.jp" |
|
}, |
|
{ |
|
"first": "Minoru", |
|
"middle": [], |
|
"last": "Sasaki", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Ibaraki University", |
|
"location": { |
|
"addrLine": "Sciences 4-12-1 Nakanarusawa", |
|
"postCode": "316-8511", |
|
"settlement": "Hitachi", |
|
"region": "Ibaraki JAPAN" |
|
} |
|
}, |
|
"email": "minoru.sasaki.01@vc.ibaraki.ac.jp" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this paper, we propose a method that employs sentences similarities from context word embeddings for supervised word sense disambiguation. In particular, if N example sentences exist in training data, an N-dimensional vector with N similarities between each pair of example sentences is added to a basic feature vector. This new feature vector is used to train a classifier and identification. We evaluated the proposed method using the feature vectors based on Bag-of-Words, SemEval-2 baseline as basic feature vectors and SemEval-2 Japanese task. The experimental results suggest that the method is more effective than the method with only basic vectors.", |
|
"pdf_parse": { |
|
"paper_id": "Y16-2009", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this paper, we propose a method that employs sentences similarities from context word embeddings for supervised word sense disambiguation. In particular, if N example sentences exist in training data, an N-dimensional vector with N similarities between each pair of example sentences is added to a basic feature vector. This new feature vector is used to train a classifier and identification. We evaluated the proposed method using the feature vectors based on Bag-of-Words, SemEval-2 baseline as basic feature vectors and SemEval-2 Japanese task. The experimental results suggest that the method is more effective than the method with only basic vectors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Conventionally, the meaning of a word has been represented using a high-dimensional sparse Bag-of-Words (BoW) vector. Recently, there has been considerable interest in word embeddings, where words meanings are represented by low-dimensional and dense vectors using deep learning. With word embeddings, the distance between words can be measured more precisely than that provided by a vector based on the BoW model. Therefore, word embeddings has been used effectively for various natural language processing tasks. With regard to word sense disambiguation (WSD) tasks, some studies have considered that the word embeddings comprise embeddings of word senses (Chen et al., 2014) (Neelakantan et al., 2014) (Sakaizawa and Komachi, 2015) (Bhingardive et al., 2015) ;however, these studies only consider unsupervised WSD. To the best of our knowledge, the only study that addresses supervised WSD with word embeddings is by Sugawara(Sugawara et al., 2015) . In Sugawara's method, one BoW-based vector and one vector based on context word embeddings (CWE) are merged, and they are used for training a classifier and identification. The method proposed by Sugawara is more effective than the method that only uses a vector based on the BoW model. However, we have found two problems with this method. First, it restricts the position of the word in the context. Second, it includes function words. In this paper, we propose a method that addresses both problems. Specifically, if N example sentences exist in training data, an N-dimensional vector that consists of the similarities between each pair of example sentences is added to a basic feature vector. This new feature vector is used for training a classifier and identification. The similarity between sentences is calculated using CWE. This solves the first problem. In addition, the proposed method only uses content words to calculate similarities between example sentences, which solves the second problem. We used SemEval-2 Japanese task to compare Sugawara's method and the proposed method. We found that the proposed method demonstrated higher precision. Furthermore, we performed experiments with basic features used in SemEval-2 baseline system and determined that the proposed method gave better results.", |
|
"cite_spans": [ |
|
{ |
|
"start": 658, |
|
"end": 677, |
|
"text": "(Chen et al., 2014)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 678, |
|
"end": 704, |
|
"text": "(Neelakantan et al., 2014)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 705, |
|
"end": 734, |
|
"text": "(Sakaizawa and Komachi, 2015)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 735, |
|
"end": 761, |
|
"text": "(Bhingardive et al., 2015)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 920, |
|
"end": 951, |
|
"text": "Sugawara(Sugawara et al., 2015)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Feature vectors can be created using the words around a target word in a sentence. This method can present a context of the target word with the vector in a binary representation. Therefore, unknown words cannot be handled.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embedding for WSD", |
|
"sec_num": "2" |
|
}, |
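
{

"text": "As a minimal illustrative sketch (our own, not from the paper), the following Python code builds such a binary BoW context vector; because the vocabulary is fixed at training time, a test-time word outside it contributes nothing, which is exactly the unknown-word problem. All names here are ours.\n\nimport numpy as np\n\ndef bow_vector(context_words, vocab):\n    # vocab maps each word observed in training to an index\n    v = np.zeros(len(vocab))\n    for w in context_words:\n        if w in vocab:  # unknown words are silently dropped\n            v[vocab[w]] = 1.0\n    return v\n\nvocab = {'bank': 0, 'river': 1, 'money': 2}\nprint(bow_vector(['river', 'bank', 'loan'], vocab))  # 'loan' is unknown and ignored",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Embedding for WSD",

"sec_num": "2"

},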
|
{ |
|
"text": "To address this problem, superordinate concepts in a thesaurus are used because it provides the similarities between different words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embedding for WSD", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Thus, using a thesaurus is effective for WSD. In this paper, we propose to increase the accuracy of WSD using word embedding as a thesaurus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embedding for WSD", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Sugawara's supervised WSD method represents features using one vector based on the BoW model and another vector that consists of CWE (the context is five words before and after a target word). For example, when the five words before a target word are (w \u22125 , w \u22124 , w \u22123 , w \u22122 , w \u22121 ) and the five words after the target word are (w 1 , w 2 , w 3 , w 4 , w 5 ), the features vector comprises a binary vector based on the BoW model", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentences Similarities", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "(1, 0, 0, 1, 0, \u2022 \u2022 \u2022 , 1) and a vector with word embeddings (v w\u22121 , v w\u22122 , \u2022 \u2022 \u2022 , v w4 , v w5 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentences Similarities", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "as shown in Figure 1 . Sugawara's experimental results suggested that word embeddings useful for WSD. However, we found following 2 problems in his method;", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 20, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentences Similarities", |
|
"sec_num": "3" |
|
}, |
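
{

"text": "A minimal sketch of this feature construction (our own illustration, not code from Sugawara et al.): the binary BoW vector over the context is concatenated with the embeddings of the ten positional context words. Representing out-of-vocabulary positions with zero vectors is our assumption.\n\nimport numpy as np\n\ndef bow_cwe_features(window, vocab, embeddings, dim=200):\n    # window: the five words before and the five words after the target, in order\n    bow = np.zeros(len(vocab))\n    for w in window:\n        if w in vocab:\n            bow[vocab[w]] = 1.0\n    # CWE part: one embedding per context position, concatenated in order\n    cwe = np.concatenate([embeddings.get(w, np.zeros(dim)) for w in window])\n    return np.concatenate([bow, cwe])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Sentences Similarities",

"sec_num": "3"

},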
|
{ |
|
"text": "1. It restricts a position of a word in the context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentences Similarities", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Therefore, we propose a method that uses the similarities between example sentences from word embeddings to address these problems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "It includes function words.", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "The similarities between two sentences are defined as the average of the cosine of each word embedding in sentences, then i-th sentence (V i ) and jth sentence (V j ) in training data, and the similarities between V i and V j are expressed as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "It includes function words.", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "V i = (v wi\u22121 , v wi\u22122 , ..., v wi4 , v wi5 ) V j = (v wj\u22121 , v wj\u22122 , ..., v wj4 , v wj5 ) sim(i, j) = 8 i L iw 8 j L jw cos(v iw , v jw ) |V i | \u2022 |V j |", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "It includes function words.", |
|
"sec_num": "2." |
|
}, |
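
{

"text": "A direct transcription of this formula into Python (a sketch of our own; the names are ours):\n\nimport numpy as np\n\ndef cosine(a, b):\n    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))\n\ndef sim(V_i, V_j):\n    # V_i, V_j: lists of context word embeddings for sentences i and j\n    total = sum(cosine(v_iw, v_jw) for v_iw in V_i for v_jw in V_j)\n    return total / (len(V_i) * len(V_j))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Sentences Similarities",

"sec_num": "3"

},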
|
{ |
|
"text": "When only content words are used to calculate similarities, all function words are removed from", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "It includes function words.", |
|
"sec_num": "2." |
|
}, |
|
|
{ |
|
"text": "The proposed method employs a new features vector comprising the basic vector and a vector using the similarities between example sentences with word embedding. As mentioned previously, Sugawara's method employs a features vector comprising a vector based on the BoW model and a vector comprising CWE. However, the proposed method employs a new features vector comprising a vector based on the BoW model and a vector comprising the similarities between sentences from CWE ( Figure 2 )", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 474, |
|
"end": 482, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "4" |
|
}, |
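
{

"text": "A sketch of how the new feature vectors could be assembled under this description (our own illustration; sim() is the function sketched in Section 3):\n\nimport numpy as np\n\ndef sim(V_i, V_j):  # as in the Section 3 sketch\n    cos = lambda a, b: float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))\n    return sum(cos(a, b) for a in V_i for b in V_j) / (len(V_i) * len(V_j))\n\ndef proposed_features(basic_vectors, embedded_sentences):\n    # basic_vectors: N basic feature vectors (e.g., BoW, std-0, or std-1)\n    # embedded_sentences: N lists of context word embeddings, one per example\n    N = len(embedded_sentences)\n    merged = []\n    for i in range(N):\n        sims = np.array([sim(embedded_sentences[i], embedded_sentences[j]) for j in range(N)])\n        merged.append(np.concatenate([basic_vectors[i], sims]))\n    return merged",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Proposed Method",

"sec_num": "4"

},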
|
{ |
|
"text": "In our experiments, we denote the method that includes content words and function words in features words to calculate similarities as \"Proposed Method (1)\" and the method that does not include function words as \"Proposed Method (2).\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The grain size of thesaurus is the important problem in WSD (Shinnou et al., 2015) . On the other hand, concepts of words are continuance because distance between words can be calculated with word embeddings. Therefore, it is assumed that using word embedding instead of thesaurus can increase accuracy of WSD.", |
|
"cite_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 82, |
|
"text": "(Shinnou et al., 2015)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features with Thesaurus", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We implement the SemEval-2 baseline system as a general method using thesaurus. The training algorithm is linear SVM (Support Vector Machine) and features are following twenty things (PoS; Part of Speech, w i ; a word positioned in context) We use only the five character ID in thesaurus although both of the four and five character ID are used in the conventional baseline system. Moreover, the features vector for e17, e18, e19, e20 are multiple because there are several ID for one word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features with Thesaurus", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "5 4 3 2 1 1 2 3 4 5 w w w w w w w w w w w \u2212 \u2212 \u2212 \u2212 \u2212 5 4 3 2 1 1 2 3 4 5 v v v v v w v v v v v \u2212 \u2212 \u2212 \u2212 \u2212 : i v i w i v i w", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features with Thesaurus", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "This features can be divided into two features; non-thesaurus features from e1 to e16 (std-0) and thesaurus features from e1 to e20 (std-1). We use two vectors based on std-0 and std-1 as the basic vectors to create the new features vector that the each of basic vectors and the similarities vector are merged. The new features vector are used in the experiments to confirm whether it can increase accuracy of WSD using word embeddings instead of thesaurus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features with Thesaurus", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We used the SemEval-2 Japanese task in the experiments. This data consists of fifty multivocals. Fifty training data and fifty test data are for each multivocals. Both of training data and test data are adopted morpheme analysis and saved as XML format.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Set-up", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Word embeddings are 200-dimensional vectors calculated by word2vec 1 with Japanese articles in wikipedia.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Set-up", |
|
"sec_num": "6.1" |
|
}, |
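
{

"text": "The paper used the original word2vec tool; as an illustrative substitute, the same 200-dimensional training step can be sketched with gensim (our assumption, not the authors' pipeline; the toy corpus stands in for tokenized Japanese Wikipedia):\n\nfrom gensim.models import Word2Vec\n\ncorpus = [['word', 'sense', 'disambiguation'], ['word', 'embedding']]\nmodel = Word2Vec(corpus, vector_size=200, min_count=1)\nvec = model.wv['word']  # a 200-dimensional numpy array",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Set-up",

"sec_num": "6.1"

},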
|
{ |
|
"text": "We used the linearSVC of scikit-learn 2 to make the classifier and set its normalize parameter C to 1.0 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Set-up", |
|
"sec_num": "6.1" |
|
}, |
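
{

"text": "A minimal sketch of this classifier set-up (toy data; only the use of LinearSVC and C=1.0 comes from the paper):\n\nimport numpy as np\nfrom sklearn.svm import LinearSVC\n\nX_train = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]])\ny_train = np.array([0, 1, 1, 0])\n\nclf = LinearSVC(C=1.0)  # C is the regularization parameter\nclf.fit(X_train, y_train)\nprint(clf.predict(np.array([[0.9, 0.1]])))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Set-up",

"sec_num": "6.1"

},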
|
{ |
|
"text": "In addition, we defined content words to the words whose the part of speech is noun, verb, adjective or adverb.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Set-up", |
|
"sec_num": "6.1" |
|
}, |
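
{

"text": "This filtering step can be sketched as follows (our own illustration; a real Japanese morphological analyzer emits tags such as \u540d\u8a5e rather than these English labels):\n\nCONTENT_POS = {'noun', 'verb', 'adjective', 'adverb'}\n\ndef content_words(tagged_tokens):\n    # tagged_tokens: list of (surface, part-of-speech) pairs\n    return [w for w, pos in tagged_tokens if pos in CONTENT_POS]\n\nprint(content_words([('run', 'verb'), ('the', 'determiner'), ('fast', 'adverb')]))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Set-up",

"sec_num": "6.1"

},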
|
{ |
|
"text": "First, we performed an experiment to confirm that Sugawara's method is to determine whether it is valid for the SemEval-2 Japanese task. The accu- The result suggested that the method can obtain higher accuracy than the BoW.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "1 s 1 BoW 1 CWE 2 s 2 BoW 2 CWE i s i BoW i CWE N s N BoW N CWE i s i BoW ) 1 , (i sim ) 2 , (i sim ) , ( N i sim i s", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Second we performed an experiment to compare the method using the BoW+CWE and our proposed method. The result is shown in The result suggested that the proposed method can obtain better accuracy than the BoW+CWE method. It was found that the proposed method (2) has obtained higher accuracy than proposed method (1). The accuracy for each of the target words is summarized in Table 4 . The numbers in bold represents the maximum values for each of the target words, and the underlined numbers represents the number of the strictly larger by comparing the proposed method and the BoW+CWE.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 376, |
|
"end": 383, |
|
"text": "Table 4", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Likewise, the experimental result using std-0 and std-1 as the basic vectors are shown in Table 3 features accuracy std-0 0.757 std-1 0.769 std-0 + similarities 0.761 std-1 + similarities 0.771 Table 3 : Accuracy of std-0, std-1 and similarities", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 97, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 194, |
|
"end": 201, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "The result suggested that using the vectors comprising the each of basic vectors and the similarities vector can be obtained the higher accuracy than only using the basic vector. The accuracy for each of the target words is summarized in Table 5 7 Discussions", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 238, |
|
"end": 245, |
|
"text": "Table 5", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We performed the experiment using the vectors based on the BoW, std-0 and std-1 as the basic vec-tors, it was found that the vector merged the basic vectors and sentence similarities vectors can produce higher accuracy than only the basic vectors. By comparing the result of BoW+CWE and the proposed method for each of the target words, the proposed method got strictly higher accuracy than the BoW+CWE in sixteen words and got lower accuracy in twelve words. Furthermore, by comparing the result of the std-0 and the proposed method, the proposed method got strictly higher accuracy than std-0 in ten words and got lower accuracy in three words. Likewise, by comparing the result of std-1 and the proposed method, the proposed method got higher accuracy in five words and got lower accuracy in one word. Therefore, the proposed method is considered to be effective in improving accuracy of WSD.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "By comparing the result of the proposed method (1) and (2) in Table 4 , the proposed method (1) got higher accuracy than the proposed method (2) in three words and got lower accuracy in four words. The accuracy rate of the method (2) was higher than the method (1) by 0.001. Therefore, we found that the superiority of the proposed method (2) was very slight.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 62, |
|
"end": 69, |
|
"text": "Table 4", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "A purpose of this experiment is to confirm whether that using word embeddings instead of a thesaurus can improve the accuracy of WSD. According to the accuracy rate in Table 3 , the accuracy of the std-1 (0.769) is lower than the accuracy of the std-0 + similarities (0.761). This result suggested that the method using thesaurus is more effective for WSD than the method using the similarities between example sentences. However, it is assumed that the method using word embeddings instead of a thesaurus can improve the accuracy of WSD because of following reasons; there are a lot of methods other our proposing, and the quality of word embeddings depend on quality and quantity of text corpora.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 175, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "In this paper, we have proposed a method that uses sentences similarities from CWE for supervised WSD. Specifically, if N example sentences exist in training data, an N-dimensional vector with N similarities between each pair of example sentences is added to a basic feature vector. We performed ex-periments with basic features used in a SemEval-2 baseline system and determined that the proposed method gave more accurate results than a previous method with only the basic features vector. The results suggested that the proposed method improves the accuracy of WSD. In future, we plan to confirm whether the method can further improve WSD by using word embeddings trained from other text corpora. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
|
{ |
|
"text": "e1=2 previous word, e2=the PoS, e3=the sub PoS, e4=1 previous word, e5=the PoS, e6=the sub PoS, e7=target word, e8=the PoS, e9=the sub PoS, e10=1 following word, e11=the PoS, e12=the sub PoS, e13=2 following word, e14=the PoS, e15=the sub PoS, e16=relation, e17=ID of 2 previous word", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://code.google.com/p/word2vec/ 2 http://scikit-learn.org/stable/index.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Unsupervised Most Frequent Sense Detection using Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Sudha", |
|
"middle": [], |
|
"last": "Bhingardive", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhirendra", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanumant", |
|
"middle": [], |
|
"last": "V Redkar Murthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Redkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "HLT-NAACL-2015", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1238--1243", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sudha Bhingardive, Dhirendra Singh, V Redkar Murthy, Hanumant Redkar, and Pushpak Bhattacharyya. 2015. Unsupervised Most Frequent Sense Detection using Word Embeddings. In HLT-NAACL-2015, pages 1238-1243.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "A Unified Model for Word Sense Representation and Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Xinxiong", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "EMNLP-2014", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1025--1035", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xinxiong Chen, Zhiyuan Liu, and Maosong Sun. 2014. A Unified Model for Word Sense Representation and Disambiguation. In EMNLP-2014, pages 1025-1035.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Efficient Nonparametric Estimation of Multiple Embeddings per Word in Vector Space", |
|
"authors": [ |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Neelakantan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeevan", |
|
"middle": [], |
|
"last": "Shankar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Passos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "EMNLP-2014", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1059--1069", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arvind Neelakantan, Jeevan Shankar, Alexandre Pas- sos, and Andrew McCallum. 2014. Efficient Non- parametric Estimation of Multiple Embeddings per Word in Vector Space. In EMNLP-2014, pages 1059- 1069.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Pragraph vector wo mochiita kyoushi nashi gogi aimaisei kaishou no kousatsu (in japanese)", |
|
"authors": [ |
|
{ |
|
"first": "Yuya", |
|
"middle": [], |
|
"last": "Sakaizawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mamoru", |
|
"middle": [], |
|
"last": "Komachi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "NLP 2015", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--29", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuya Sakaizawa and Mamoru Komachi. 2015. Pra- graph vector wo mochiita kyoushi nashi gogi aimai- sei kaishou no kousatsu (in japanese). In NLP 2015, P1-29.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Gogi aimaisei kaishou ni okeru thesaurus riyou no mondai bunseki", |
|
"authors": [ |
|
{ |
|
"first": "Hiroyuki", |
|
"middle": [], |
|
"last": "Shinnou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minoru", |
|
"middle": [], |
|
"last": "Sasaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kanako", |
|
"middle": [], |
|
"last": "Komiya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--15", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hiroyuki Shinnou, Minoru Sasaki, and Kanako Komiya. 2015. Gogi aimaisei kaishou ni okeru thesaurus riyou no mondai bunseki (in japanese). In NLP 2015, P1-15.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Context Representation with Word Embeddings for WSD", |
|
"authors": [ |
|
{ |
|
"first": "Hiromu", |
|
"middle": [], |
|
"last": "Sugawara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroya", |
|
"middle": [], |
|
"last": "Takamura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryohei", |
|
"middle": [], |
|
"last": "Sasano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manabu", |
|
"middle": [], |
|
"last": "Okumura", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "PACLING-2015", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "149--155", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hiromu Sugawara, Hiroya Takamura, Ryohei Sasano, and Manabu Okumura. 2015. Context Representation with Word Embeddings for WSD. In PACLING-2015, pages 149-155.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Feature vector in Sugawara's method in thesaurus, e18=ID of 1 previous word in thesaurus, e19=ID of 1 following word in thesaurus, e20=ID of 2 following word in thesaurus" |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"content": "<table><tr><td>features</td><td>accuracy</td></tr><tr><td>BoW</td><td>0.716</td></tr><tr><td>BoW + CWE</td><td>0.745</td></tr></table>", |
|
"text": "Figure 2: Features vector of training data in the proposal method racy of the BoW features and the BoW+CWE features are shown inTable 1.", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"content": "<table><tr><td/><td>.</td></tr><tr><td>features</td><td>accuracy</td></tr><tr><td>BoW + CWE</td><td>0.745</td></tr><tr><td>Proposal method (1)</td><td>0.753</td></tr><tr><td>Proposal method (2)</td><td>0.754</td></tr></table>", |
|
"text": "", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "Result of the BoW+CWE and the proposed method", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"content": "<table><tr><td>target words</td><td colspan=\"4\">BoW BoW+CWE proposed method (1) proposed method(2)</td></tr><tr><td>(aite)</td><td>0.82</td><td>0.82</td><td>0.82</td><td>0.82</td></tr><tr><td>(au)</td><td>0.60</td><td>0.70</td><td>0.70</td><td>0.70</td></tr><tr><td>(ageru)</td><td>0.36</td><td>0.36</td><td>0.44</td><td>0.42</td></tr><tr><td>(ataeru)</td><td>0.64</td><td>0.64</td><td>0.66</td><td>0.68</td></tr><tr><td>(ikiru)</td><td>0.94</td><td>0.94</td><td>0.94</td><td>0.94</td></tr><tr><td>(imi)</td><td>0.38</td><td>0.52</td><td>0.64</td><td>0.68</td></tr><tr><td>(ireru)</td><td>0.72</td><td>0.74</td><td>0.74</td><td>0.74</td></tr><tr><td>(ookii)</td><td>0.94</td><td>0.94</td><td>0.94</td><td>0.94</td></tr><tr><td>(oshieru)</td><td>0.22</td><td>0.34</td><td>0.38</td><td>0.38</td></tr><tr><td>(kanou)</td><td>0.68</td><td>0.74</td><td>0.62</td><td>0.60</td></tr><tr><td>(kangaeru)</td><td>0.98</td><td>0.98</td><td>0.98</td><td>0.98</td></tr><tr><td>(kankei)</td><td>0.82</td><td>0.88</td><td>0.96</td><td>0.96</td></tr><tr><td>(gijutsu)</td><td>0.84</td><td>0.84</td><td>0.86</td><td>0.86</td></tr><tr><td>(keizai)</td><td>0.98</td><td>0.98</td><td>0.98</td><td>0.98</td></tr><tr><td>(genba)</td><td>0.74</td><td>0.74</td><td>0.74</td><td>0.74</td></tr><tr><td>(kodomo)</td><td>0.60</td><td>0.54</td><td>0.44</td><td>0.42</td></tr><tr><td>(jikan)</td><td>0.86</td><td>0.84</td><td>0.88</td><td>0.88</td></tr><tr><td>(shijou)</td><td>0.58</td><td>0.64</td><td>0.60</td><td>0.60</td></tr><tr><td>(shakai)</td><td>0.86</td><td>0.86</td><td>0.86</td><td>0.86</td></tr><tr><td>(jouhou)</td><td>0.70</td><td>0.76</td><td>0.82</td><td>0.82</td></tr><tr><td>(susumeru)</td><td>0.44</td><td>0.58</td><td>0.86</td><td>0.86</td></tr><tr><td>(suru)</td><td>0.54</td><td>0.66</td><td>0.72</td><td>0.72</td></tr><tr><td>(takai)</td><td>0.86</td><td>0.86</td><td>0.86</td><td>0.86</td></tr><tr><td>(dasu)</td><td>0.40</td><td>0.46</td><td>0.40</td><td>0.40</td></tr><tr><td>(tatsu)</td><td>0.46</td><td>0.50</td><td>0.58</td><td>0.60</td></tr><tr><td>(tsuyoi)</td><td>0.92</td><td>0.92</td><td>0.92</td><td>0.92</td></tr><tr><td>(te)</td><td>0.78</td><td>0.78</td><td>0.78</td><td>0.78</td></tr><tr><td>(deru)</td><td>0.62</td><td>0.66</td><td>0.58</td><td>0.58</td></tr><tr><td>(denwa)</td><td>0.78</td><td>0.78</td><td>0.78</td><td>0.78</td></tr><tr><td>(toru)</td><td>0.24</td><td>0.26</td><td>0.32</td><td>0.32</td></tr><tr><td>(noru)</td><td>0.56</td><td>0.58</td><td>0.60</td><td>0.60</td></tr><tr><td>(baai)</td><td>0.86</td><td>0.88</td><td>0.84</td><td>0.84</td></tr><tr><td>(hairu)</td><td>0.66</td><td>0.66</td><td>0.66</td><td>0.66</td></tr><tr><td>(hajime)</td><td>0.90</td><td>0.96</td><td>0.96</td><td>0.96</td></tr><tr><td>(hajimeru)</td><td>0.78</td><td>0.80</td><td>0.78</td><td>0.78</td></tr><tr><td>(basho)</td><td>0.94</td><td>0.96</td><td>0.96</td><td>0.96</td></tr><tr><td>(hayai)</td><td>0.58</td><td>0.66</td><td>0.62</td><td>0.62</td></tr><tr><td>(ichi)</td><td>0.92</td><td>0.92</td><td>0.92</td><td>0.92</td></tr><tr><td>(hiraku)</td><td>0.90</td><td>0.90</td><td>0.88</td><td>0.88</td></tr><tr><td>(bunka)</td><td>0.98</td><td>0.98</td><td>0.98</td><td>0.98</td></tr><tr><td>(hoka)</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>(mae)</td><td>0.66</td><td>0.76</td><td>0.78</td><td>0.78</td></tr><tr><td>(mieru)</td><td>0.60</td><td>0.60</td><td>0.58</td><td>0.58</td></tr><tr><td>(mitomeru)</td><td>0.80</td><td>0.80</td><td>0.78</td><td>0.78</td></tr><tr><td>(miru)</td><td>0.80</td><td>0.
80</td><td>0.80</td><td>0.80</td></tr><tr><td>(motsu)</td><td>0.64</td><td>0.74</td><td>0.76</td><td>0.76</td></tr><tr><td colspan=\"2\">(motomeru) 0.76</td><td>0.74</td><td>0.74</td><td>0.76</td></tr><tr><td>(mono)</td><td>0.88</td><td>0.88</td><td>0.88</td><td>0.88</td></tr><tr><td>(yaru)</td><td>0.94</td><td>0.96</td><td>0.96</td><td>0.96</td></tr><tr><td>(yoi)</td><td>0.36</td><td>0.40</td><td>0.38</td><td>0.38</td></tr><tr><td>average</td><td>0.716</td><td>0.745</td><td>0.753</td><td>0.754</td></tr></table>", |
|
"text": "Accuracy of the each target words (1)", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"content": "<table><tr><td/><td colspan=\"3\">: Accuracy of each target words (2)</td><td/></tr><tr><td>target words</td><td colspan=\"4\">std-0 std-1 std-0 + similarities std-1 + similarities</td></tr><tr><td>(aite)</td><td>0.78</td><td>0.80</td><td>0.78</td><td>0.80</td></tr><tr><td>(au)</td><td>0.88</td><td>0.92</td><td>0.90</td><td>0.92</td></tr><tr><td>(ageru)</td><td>0.44</td><td>0.52</td><td>0.48</td><td>0.56</td></tr><tr><td>(ataeru)</td><td>0.76</td><td>0.70</td><td>0.74</td><td>0.70</td></tr><tr><td>(ikiru)</td><td>0.94</td><td>0.94</td><td>0.94</td><td>0.94</td></tr><tr><td>(imi)</td><td>0.48</td><td>0.44</td><td>0.46</td><td>0.46</td></tr><tr><td>(ireru)</td><td>0.74</td><td>0.74</td><td>0.74</td><td>0.74</td></tr><tr><td>(ookii)</td><td>0.94</td><td>0.94</td><td>0.94</td><td>0.94</td></tr><tr><td>(oshieru)</td><td>0.36</td><td>0.52</td><td>0.40</td><td>0.52</td></tr><tr><td>(kanou)</td><td>0.68</td><td>0.64</td><td>0.68</td><td>0.64</td></tr><tr><td>(kangaeru)</td><td>0.98</td><td>0.98</td><td>0.98</td><td>0.98</td></tr><tr><td>(kankei)</td><td>0.96</td><td>0.96</td><td>0.96</td><td>0.96</td></tr><tr><td>(gijutsu)</td><td>0.84</td><td>0.82</td><td>0.84</td><td>0.82</td></tr><tr><td>(keizai)</td><td>0.98</td><td>0.98</td><td>0.98</td><td>0.98</td></tr><tr><td>(genba)</td><td>0.74</td><td>0.76</td><td>0.74</td><td>0.76</td></tr><tr><td>(kodomo)</td><td>0.60</td><td>0.62</td><td>0.60</td><td>0.60</td></tr><tr><td>(jikan)</td><td>0.86</td><td>0.84</td><td>0.86</td><td>0.86</td></tr><tr><td>(shijou)</td><td>0.52</td><td>0.56</td><td>0.52</td><td>0.56</td></tr><tr><td>(shakai)</td><td>0.86</td><td>0.86</td><td>0.86</td><td>0.86</td></tr><tr><td>(jouhou)</td><td>0.86</td><td>0.84</td><td>0.86</td><td>0.84</td></tr><tr><td>(susumeru)</td><td>0.92</td><td>0.92</td><td>0.92</td><td>0.92</td></tr><tr><td>(suru)</td><td>0.64</td><td>0.72</td><td>0.66</td><td>0.72</td></tr><tr><td>(takai)</td><td>0.86</td><td>0.88</td><td>0.86</td><td>0.88</td></tr><tr><td>(dasu)</td><td>0.40</td><td>0.50</td><td>0.42</td><td>0.50</td></tr><tr><td>(tatsu)</td><td>0.52</td><td>0.50</td><td>0.52</td><td>0.52</td></tr><tr><td>(tsuyoi)</td><td>0.92</td><td>0.90</td><td>0.92</td><td>0.90</td></tr><tr><td>(te)</td><td>0.78</td><td>0.78</td><td>0.78</td><td>0.78</td></tr><tr><td>(deru)</td><td>0.52</td><td>0.52</td><td>0.52</td><td>0.52</td></tr><tr><td>(denwa)</td><td>0.84</td><td>0.78</td><td>0.80</td><td>0.78</td></tr><tr><td>(toru)</td><td>0.26</td><td>0.28</td><td>0.26</td><td>0.28</td></tr><tr><td>(noru)</td><td>0.78</td><td>0.78</td><td>0.78</td><td>0.78</td></tr><tr><td>(baai)</td><td>0.84</td><td>0.84</td><td>0.84</td><td>0.84</td></tr><tr><td>(hairu)</td><td>0.54</td><td>0.56</td><td>0.54</td><td>0.56</td></tr><tr><td>(hajime)</td><td>0.88</td><td>0.88</td><td>0.88</td><td>0.88</td></tr><tr><td>(hajimeru)</td><td>0.88</td><td>0.86</td><td>0.88</td><td>0.86</td></tr><tr><td>(basho)</td><td>0.90</td><td>0.96</td><td>0.92</td><td>0.96</td></tr><tr><td>(hayai)</td><td>0.70</td><td>0.70</td><td>0.72</td><td>0.72</td></tr><tr><td>(ichi)</td><td>0.92</td><td>0.90</td><td>0.92</td><td>0.90</td></tr><tr><td>(hiraku)</td><td>0.78</td><td>0.84</td><td>0.80</td><td>0.84</td></tr><tr><td>(bunka)</td><td>0.98</td><td>0.98</td><td>0.98</td><td>0.98</td></tr><tr><td>(hoka)</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>(mae)</td><td>0.76</td><td>0.76</td><td>0.76</td><td>0.76</td></tr><tr><td>(mieru)</td><td>0.68</td><td>0.70</td><td>0.68</td><td>0.70</td></tr><tr><td>(mitomeru)</td><td>0.76</td><t
d>0.82</td><td>0.78</td><td>0.82</td></tr><tr><td>(miru)</td><td>0.78</td><td>0.78</td><td>0.78</td><td>0.78</td></tr><tr><td>(motsu)</td><td>0.78</td><td>0.80</td><td>0.78</td><td>0.80</td></tr><tr><td colspan=\"2\">(motomeru) 0.64</td><td>0.76</td><td>0.68</td><td>0.76</td></tr><tr><td>(mono)</td><td>0.88</td><td>0.88</td><td>0.88</td><td>0.88</td></tr><tr><td>(yaru)</td><td>0.96</td><td>0.96</td><td>0.96</td><td>0.96</td></tr><tr><td>(yoi)</td><td>0.56</td><td>0.54</td><td>0.56</td><td>0.54</td></tr><tr><td>average</td><td colspan=\"2\">0.757 0.769</td><td>0.761</td><td>0.771</td></tr></table>", |
|
"text": "", |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |