|
{ |
|
"paper_id": "2016", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:03:12.268268Z" |
|
}, |
|
"title": "Detecting Most Frequent Sense using Word Embeddings and BabelNet", |
|
"authors": [ |
|
{ |
|
"first": "Harpreet", |
|
"middle": [ |
|
"Singh" |
|
], |
|
"last": "Arora", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Academy of Technology", |
|
"location": { |
|
"settlement": "Hooghly", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "harpreet.singharora@aot.edu.in" |
|
}, |
|
{ |
|
"first": "Sudha", |
|
"middle": [], |
|
"last": "Bhingardive", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IIT Bombay", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IIT Bombay", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Since the inception of the SENSEVAL evaluation exercises there has been a great deal of recent research into Word Sense Disambiguation (WSD). Over the years, various supervised, unsupervised and knowledge based WSD systems have been proposed. Beating the first sense heuristics is a challenging task for these systems. In this paper, we present our work on Most Frequent Sense (MFS) detection using Word Embeddings and BabelNet features. The semantic features from BabelNet viz., synsets, gloss, relations, etc. are used for generating sense embeddings. We compare word embedding of a word with its sense embeddings to obtain the MFS with the highest similarity. The MFS is detected for six languages viz., English, Spanish, Russian, German, French and Italian. However, this approach can be applied to any language provided that word embeddings are available for that language.", |
|
"pdf_parse": { |
|
"paper_id": "2016", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Since the inception of the SENSEVAL evaluation exercises there has been a great deal of recent research into Word Sense Disambiguation (WSD). Over the years, various supervised, unsupervised and knowledge based WSD systems have been proposed. Beating the first sense heuristics is a challenging task for these systems. In this paper, we present our work on Most Frequent Sense (MFS) detection using Word Embeddings and BabelNet features. The semantic features from BabelNet viz., synsets, gloss, relations, etc. are used for generating sense embeddings. We compare word embedding of a word with its sense embeddings to obtain the MFS with the highest similarity. The MFS is detected for six languages viz., English, Spanish, Russian, German, French and Italian. However, this approach can be applied to any language provided that word embeddings are available for that language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Word Sense Disambiguation or WSD refers to the task of computationally identifying the sense of a word in a given context. It is one of the oldest and toughest problems in the area of Natural Language Processing (NLP). WSD is considered to be an AIcomplete problem (Navigli et al., 2009) i.e., it is one of the hardest problems in the field of Artificial Intelligence. Various approaches for word sense disambiguation have been explored in recent years. Two of the widely used approaches for WSD aredisambiguation using the annotated training data called as supervised WSD and disambiguation without the annotated training data called as unsupervised WSD.", |
|
"cite_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 287, |
|
"text": "(Navigli et al., 2009)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "MFS is considered to be a very powerful heuristics for word sense disambiguation. Even with sophisticated methods, it is difficult to outperform its baseline. The MFS baseline for English language is created with the help of a sense annotated corpus wherein the frequencies of individual senses are learnt. It is found that, only 5 out of 26 WSD systems submitted to SENSEVAL-3, were able to beat this baseline. The success of the MFS baseline is mainly due to the frequency distribution of senses, with the shape of the sense rank versus frequency graph being a Zipfian curve. Unsupervised approaches were found very difficult to beat the MFS baseline, while supervised approaches generally perform better than the MFS baseline.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In our paper, we have extended the work done by Bhingardive et al. (2015) . They used word embeddings along with features from WordNet for the detection of MFS. We used word embeddings and features from BabelNet for detecting MFS. Our approach works for all part-of-speech (POS) categories and is currently implemented for six different languages viz., English, Spanish, Russian, German, French and Italian. This approach can be easily extended to other languages if word embeddings for the specific language are available.", |
|
"cite_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 73, |
|
"text": "Bhingardive et al. (2015)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The paper is organized as follows: Section 2 briefs the related work. Section 3 explains BabelNet. Our approach is given in section 4. Experiments are presented in section 5 followed by conclusion.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "McCarthy et al. (2007) proposed an unsupervised approach for finding the predominant sense using an automatic thesaurus. They used WordNet similarity for identifying the predominant sense. This approach outperforms the SemCor baseline for words with SemCor frequency below five. Bhingardive et al. (2015) compared the word embedding of a word with all its sense embedding to obtain the predominant sense with the highest similarity. They created sense embeddings using various features of WordNet. Preiss et al. (2009) refine the most frequent sense baseline for word sense disambiguation using a number of novel word sense disambiguation techniques.", |
|
"cite_spans": [ |
|
{ |
|
"start": 16, |
|
"end": 22, |
|
"text": "(2007)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 304, |
|
"text": "Bhingardive et al. (2015)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 498, |
|
"end": 518, |
|
"text": "Preiss et al. (2009)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "BabelNet (Navigli et al., 2012 ) is a multilingual encyclopedic dictionary, with lexicographic and encyclopedic coverage of terms, and a semantic network. It connects concepts and named entities in a very large network of semantic relations, made up of more than 13 million entries, called Babel synsets. Each Babel synset represents a given meaning and contains all the synonyms which express that meaning in a range of different languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 30, |
|
"text": "(Navigli et al., 2012", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BabelNet", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "BabelNet v3.0 covers 271 languages and is obtained from the automatic integration of:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BabelNet", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 WordNet 1 -a popular computational lexicon of English.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BabelNet", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Open Multilingual WordNet 2 -a collection of WordNets available in different languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BabelNet", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Wikipedia 3 -the largest collaborative multilingual Web encyclopedia.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BabelNet", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 OmegaWiki 4 -a large collaborative multilingual dictionary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BabelNet", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Wiktionary 5 -a collaborative project to produce a free-content multilingual dictionary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BabelNet", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Wikidata 6 -a free knowledge base that can be read and edited by humans and machines alike.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BabelNet", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "BabelNet provides API for Java, Python, PHP, Javascript, Ruby and SPARQL.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BabelNet", |
|
"sec_num": "3" |
|
}, |
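{
"text": "The following minimal Python sketch illustrates how the Babel synsets for a lemma could be retrieved over HTTP. It is a sketch only: the getSynsetIds endpoint, the v5 path and the API key are assumptions about the BabelNet HTTP service rather than something specified in this paper, so the current BabelNet documentation should be checked for the exact interface.\n\nimport requests\n\nAPI_KEY = \"YOUR_BABELNET_KEY\"  # assumption: obtained by registering on the BabelNet site\n\ndef get_synset_ids(lemma, lang=\"EN\"):\n    # Assumed REST endpoint; verify the version and path against the official docs.\n    url = \"https://babelnet.io/v5/getSynsetIds\"\n    params = {\"lemma\": lemma, \"searchLang\": lang, \"key\": API_KEY}\n    resp = requests.get(url, params=params)\n    resp.raise_for_status()\n    return [entry[\"id\"] for entry in resp.json()]\n\nprint(get_synset_ids(\"cricket\"))  # a list of Babel synset ids of the form 'bn:...n'",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BabelNet",
"sec_num": "3"
},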
|
{ |
|
"text": "We propose an approach for detecting the MFS which is an extension of the work done by Bhingardive et al. (2015) . Our approach follows an iterative procedure to detect the MFS of any word given its POS and language. It works for six different languages viz., English, Spanish, Russian, German, French and Italian. We used BabelNet as a lexical resource, as it contains additional information as compared to WordNet. This approach uses pre-trained Google Word Embeddings 7 for English language, and for all other languages Polyglot 8 Word Embeddings are used.", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 112, |
|
"text": "Bhingardive et al. (2015)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Approach", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The steps followed by our approach as shown in figure 1 are as follows -1. The system takes a word, POS and language code as an input. 2. For every sense of a word, features such as synset members, gloss, hypernym, etc. are extracted from BabelNet. 3. Sense embeddings or sense vectors are calculated by using this feature set. 4. Cosine similarity is computed between word vector (word embedding) of an input word and its sense vectors. 5. Sense vector which has maximum cosine similarity with the input word vector is treated as the MFS for that word. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 1. Steps followed by our approach", |
|
"sec_num": null |
|
}, |
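{
"text": "A minimal sketch of steps 1-5, assuming pre-trained embeddings in word2vec format and sense BOWs already extracted from BabelNet; sense_vector, most_frequent_sense and the embedding file name are illustrative, not part of the original system.\n\nimport numpy as np\nfrom gensim.models import KeyedVectors\n\n# Assumption: pre-trained vectors in word2vec binary format (Google News for English).\nvectors = KeyedVectors.load_word2vec_format(\"GoogleNews-vectors-negative300.bin\", binary=True)\n\ndef sense_vector(bow):\n    # Sense embedding = average of the word embeddings of the BOW words.\n    return np.mean([vectors[w] for w in bow if w in vectors], axis=0)\n\ndef cosine(a, b):\n    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))\n\ndef most_frequent_sense(word, sense_bows):\n    # sense_bows: {sense_id: BOW built from the BabelNet features of that sense}.\n    wv = vectors[word]\n    scores = {s: cosine(wv, sense_vector(bow)) for s, bow in sense_bows.items()}\n    return max(scores, key=scores.get)  # the sense with the highest similarity is the MFS",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 1. Steps followed by our approach",
"sec_num": null
},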
|
{ |
|
"text": "Filtering of BOWs are done to reduce the noise.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Filtering BOW", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "The following procedure is used to filter BOWs: 1. Words for which word embeddings are not available are excluded from BOW. 2. From this BOW, the most relevant words are picked using following steps: a. Select a word from BOW b. The cosine similarity of that word with each of the remaining words in the BOW is computed. c. If the average cosine similarity lies between the threshold values 0.35 and 0.4, then we keep the word in the BOW else it is discarded. It is found that values above 0.4 were discarding many useful words while the values below 0.35 were accepting irrelevant words resulting in increasing the noise. Hence, the threshold range of 0.35 -0.4 was chosen by performing several experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Filtering BOW", |
|
"sec_num": "4.1.2" |
|
}, |
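{
"text": "A sketch of this filtering under the same assumptions as the earlier snippet (a gensim KeyedVectors object); the name filter_bow is illustrative.\n\ndef filter_bow(bow, vectors, low=0.35, high=0.4):\n    # Step 1: drop words that have no embedding.\n    bow = [w for w in bow if w in vectors]\n    kept = []\n    for w in bow:\n        sims = [vectors.similarity(w, o) for o in bow if o != w]\n        # Step 2: keep the word only if its average cosine similarity with the\n        # remaining BOW words falls inside the empirically chosen 0.35-0.4 band.\n        if sims and low <= sum(sims) / len(sims) <= high:\n            kept.append(w)\n    return kept",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Filtering BOW",
"sec_num": "4.1.2"
},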
|
{ |
|
"text": "For example, consider the input as -Word: cricket POS: NOUN Language code: EN Let BOWG1 be the BOW of a gloss feature for the sport sense (S1) of a word cricket.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Filtering BOW", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "After removing stop words and words for which word embeddings are not available, we get the updated BOWG1 as, BOWG1 = {bat ball game played two teams} Now, the cosine similarity of each word in BOWG1 with other words in BOWG1 is computed to get the most relevant words which can represent the sense S1. For instance, for a word game, the average cosine similarity was found to be 0.38 which falls in the selected threshold. Hence, the word game is not filtered from the BOWG1. Table 1 shows how the word game is selected based on the average cosine similarity score. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 477, |
|
"end": 484, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "BOWG1 = {Cricket is a bat and ball game played between two teams of 11 players each on a field at the center of which is a rectangular 22-yard long pitch}", |
|
"sec_num": null |
|
}, |
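{
"text": "The averaging behind Table 1 can be reproduced directly; the five scores come from the table, while their pairing with particular BOW words is an assumption.\n\n# Cosine similarities of 'game' with the other words of BOWG1 (scores from Table 1).\nsims = [0.51, 0.49, 0.30, 0.17, 0.44]\navg = sum(sims) / len(sims)\nprint(round(avg, 2))  # 0.38, inside the 0.35-0.4 band, so 'game' is kept",
"cite_spans": [],
"ref_spans": [
{
"start": 21,
"end": 28,
"text": "Table 1",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Filtering BOW",
"sec_num": "4.1.2"
},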
|
{ |
|
"text": "In our approach we are detecting MFS in an iterative fashion. In each iteration we are checking which type of BOWs (BOWS, BOWG, BOWHS, and BOWHG) are sufficient to detect the MFS. This can be observed in figure 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detecting MFS", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In figure 2, we can see how BOWs are used to create sense vectors in an iterative fashion to get the MFS. If synset members (S) are sufficient to get the MFS then our algorithm prints the MFS and stops, otherwise other BOWs of various features like gloss (G), synset members of the hypernym synsets (HS) and content words in the gloss of the hypernym synsets (HG) are used iteratively to get the MFS. The algorithm is as follows: 1. For each sense i of a word: a. VEC(i) = Create_sense_vector (BOWSi) Where, BOWSi is bag of words of synset members of sense Si b. SCORE(i) = cosine_similarity (VEC(i), VEC(W)) where, VEC(W) is the word vector of the input word 2. Arrange these SCORES in descending order according to the similarity score. 3. If (SCORE(0) -SCORE(1)) > threshold:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detecting MFS", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Goto step 6 Else:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detecting MFS", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Run Steps 1 to 2 by considering (BOWSi + BOWGi) for Create_sense_vector function 4. If (SCORE(0) -SCORE(1)) > threshold:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detecting MFS", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Goto step 6 Else:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detecting MFS", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Run Steps 1 to 2 by considering (BOWSi + BOWGi + BOWHSi) for Create_sense_vector function 5. If (SCORE(0) -SCORE(1)) > threshold:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detecting MFS", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Goto step 6 Else:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detecting MFS", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Run Steps 1 to 2 by considering (BOWSi + BOWGi + BOWHSi + BOWHGi) for Create_sense_vector function 6. MFS=Sense(SCORE(0)) 7. Print MFS 8. End Where,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detecting MFS", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 VEC(i) denotes sense vector of an input word. \u2022 SCORE (v1, v2) is cosine similarity between word vector v1 and sense vector v2. \u2022 SENSE (SCORE(i)) is the sense corresponding to SCORE(i). Ambiguity is resolved by comparing the score of most similar sense and second most similar sense, obtained after Step 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detecting MFS", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Step 3 checks if the difference between their score is above threshold \uf0e00.02 (This threshold was chosen after conducting various experiments with other threshold figures. The average difference between two most similar senses was found to be 0.02). There is a net speed-up in the procedure, as the computation time is significantly abridged as compared to Bhingardive et al. (2015) . As we are using an iterative procedure for detecting the MFS, our approach, most of the times gives a better result as compared to Bhingardive et al. (2015) which we have manually verified.", |
|
"cite_spans": [ |
|
{ |
|
"start": 356, |
|
"end": 381, |
|
"text": "Bhingardive et al. (2015)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 515, |
|
"end": 540, |
|
"text": "Bhingardive et al. (2015)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Detecting MFS", |
|
"sec_num": "4.2" |
|
}, |
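{
"text": "A runnable rendering of this iterative back-off, reusing vectors, sense_vector and cosine from the earlier sketch; bows maps each sense to its four BabelNet feature BOWs (assumed precomputed), and detect_mfs is an illustrative name.\n\ndef detect_mfs(word, bows, threshold=0.02):\n    # bows[sense_id] = {\"S\": [...], \"G\": [...], \"HS\": [...], \"HG\": [...]}\n    stages = [(\"S\",), (\"S\", \"G\"), (\"S\", \"G\", \"HS\"), (\"S\", \"G\", \"HS\", \"HG\")]\n    wv = vectors[word]\n    scores = []\n    for feats in stages:\n        scores = sorted(\n            ((cosine(wv, sense_vector([w for f in feats for w in fb[f]])), s)\n             for s, fb in bows.items()),\n            reverse=True)\n        # Stop as soon as the two top-ranked senses are separated by more than the threshold.\n        if len(scores) < 2 or scores[0][0] - scores[1][0] > threshold:\n            break\n    return scores[0][1]  # the MFS is the highest-scoring sense",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Detecting MFS",
"sec_num": "4.2"
},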
|
{ |
|
"text": "We used pre-trained Google's word vectors as word embedding for English language, for all other languages Polyglot's word embeddings are used. Due to lack of availability of gold data, we could not compare our results with MFS results obtained from BabelNet. Upon considering Princeton WordNet as gold data, we cannot equate our results with it because they might be semantically similar but not syntactically. Table 2 shows the MFS result using our approach for some selected words of English language.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 411, |
|
"end": 418, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiment and Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "MFS obtained using our approach analysis bn:00003795n: A form of literary criticism in which the structure of a piece of writing is analyzed data bn:00025314n: A collection of facts from which conclusions may be drawn law bn:00048655n:The collection of rules imposed by authority fact bn:00032655n: A statement or assertion of verified information about something that is the case or has happened theory bn:00045632n: A tentative insight into the natural world; a concept that is not yet verified but that if true would explain certain facts or phenomena ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "word", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We proposed an approach for detecting the most frequent sense for a word using BabelNet as a lexical resource. BabelNet is preferred as a resource since it incorporates data not only from", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "http://wordnet.princeton.edu/ 2 http://compling.hss.ntu.edu.sg/omw/ 3 http://www.wikipedia.org/ 4 http://www.omegawiki.org/ 5 http://www.wiktionary.org/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.wikidata.org/ 7 https://code.google.com/p/word2vec/ 8 http://polyglot.readthedocs.org/en/latest/Embed dings.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Princeton WordNet but also from sources. Hence the volume of ambiguity is reduced by a significant proportion. Our approach follows an iterative procedure until a suitable context is found to detect the MFS of a word. It is currently working for English, Russian, Italian, French, German, and Spanish languages. However, it can be easily ported across multiple languages. An API is developed for detecting MFS using BabelNet which can be publically made available in future.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "acknowledgement", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Finding the Most Frequent Sense of a Word by the Length of Its Definition. Human-Inspired Computing and its Applications", |
|
"authors": [ |
|
{ |
|
"first": "Calvo", |
|
"middle": [], |
|
"last": "Hiram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Gelbukh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Calvo Hiram and Alexander Gelbukh. 2014. Finding the Most Frequent Sense of a Word by the Length of Its Definition. Human-Inspired Computing and its Applications. Springer International Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Unsupervised Acquisition of Predominant Word Senses", |
|
"authors": [ |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Mccarthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Koeling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julie", |
|
"middle": [], |
|
"last": "Weeds", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Carroll", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Computational Linguistics", |
|
"volume": "33", |
|
"issue": "4", |
|
"pages": "553--590", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diana McCarthy, Rob Koeling, Julie Weeds and John Carroll. 2007. Unsupervised Acquisition of Predominant Word Senses. Computational Linguistics, 33 (4) pp 553-590.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "WordNet: A Lexical Database for English", |
|
"authors": [ |
|
{
"first": "George",
"middle": [
"A."
],
"last": "Miller",
"suffix": ""
}
|
], |
|
"year": 1995, |
|
"venue": "Communications of the ACM", |
|
"volume": "38", |
|
"issue": "11", |
|
"pages": "39--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George A. Miller. 1995. WordNet: A Lexical Database for English. Communications of the ACM Vol. 38, No. 11: 39-41.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Refining the most frequent sense baseline", |
|
"authors": [ |
|
{ |
|
"first": "Judita", |
|
"middle": [], |
|
"last": "Preiss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Workshop on Semantic Evaluations: Recent Achievements and Future Directions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Judita Preiss. 2009. Refining the most frequent sense baseline. Proceedings of the Workshop on Semantic Evaluations: Recent Achievements and Future Directions. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Word sense disambiguation: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "ACM Computing Surveys (CSUR)", |
|
"volume": "41", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Navigli. 2009. Word sense disambiguation: A survey. ACM Computing Surveys (CSUR) 41.2: 10.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "BabelNet: The automatic construction, evaluation and application of a wide-coverage multilingual semantic network", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [ |
|
"Paolo" |
|
], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Artificial Intelligence", |
|
"volume": "193", |
|
"issue": "", |
|
"pages": "217--250", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Navigli and Simone Paolo Ponzetto. 2012. BabelNet: The automatic construction, evaluation and application of a wide-coverage multilingual semantic network. Artificial Intelligence 193 (2012): 217-250", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Unsupervised Most Frequent Sense Detection using Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Sudha", |
|
"middle": [], |
|
"last": "Bhingardive", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhirendra", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Rudramurthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanumant", |
|
"middle": [], |
|
"last": "Redkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sudha Bhingardive, Dhirendra Singh, Rudramurthy V, Hanumant Redkar, and Pushpak Bhattacharyya. 2015. Unsupervised Most Frequent Sense Detection using Word Embeddings. North American Chapter of the Association for Computational Linguistics (NAACL), Denver, Colorado.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Iterative process of detecting MFS", |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"content": "<table><tr><td>Word Embeddings: Word embedding or word</td></tr><tr><td>vector is a low dimensional real valued vector</td></tr><tr><td>which captures semantic and syntactic features of</td></tr><tr><td>a word.</td></tr><tr><td>Sense Embeddings: Sense embedding or sense</td></tr><tr><td>vector is similar to word embedding which is also</td></tr><tr><td>a low dimensional real valued vector. It is created</td></tr><tr><td>by taking average of word embeddings of each</td></tr><tr><td>word in the BOW.</td></tr></table>", |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Bag of words for each sense of a word are created by extracting context words from each individual feature from BabelNet. BOWs obtained for each feature are, BOWS for synset members (S), BOWG for content words in the gloss (G), BOWHS for synset members of the hypernym synset (HS), BOWHG for content words in the gloss of hypernym synsets (HG).", |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"content": "<table><tr><td>Average Cosine Score (game) =</td></tr><tr><td>(0.51 + 0.49 + 0.30 + 0.17 + 0.44)/5 = 0.38</td></tr><tr><td>Similar process is carried out for each word of</td></tr><tr><td>BOW.</td></tr></table>", |
|
"num": null, |
|
"type_str": "table", |
|
"text": "", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table", |
|
"text": "", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |