|
{ |
|
"paper_id": "N10-1047", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:50:26.452179Z" |
|
}, |
|
"title": "Information Content Measures of Semantic Similarity Perform Better Without Sense-Tagged Text", |
|
"authors": [ |
|
{ |
|
"first": "Ted", |
|
"middle": [], |
|
"last": "Pedersen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Minnesota", |
|
"location": { |
|
"postCode": "55812", |
|
"settlement": "Duluth Duluth", |
|
"region": "MN" |
|
} |
|
}, |
|
"email": "tpederse@d.umn.edu" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper presents an empirical comparison of similarity measures for pairs of concepts based on Information Content. It shows that using modest amounts of untagged text to derive Information Content results in higher correlation with human similarity judgments than using the largest available corpus of manually annotated sense-tagged text.", |
|
"pdf_parse": { |
|
"paper_id": "N10-1047", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper presents an empirical comparison of similarity measures for pairs of concepts based on Information Content. It shows that using modest amounts of untagged text to derive Information Content results in higher correlation with human similarity judgments than using the largest available corpus of manually annotated sense-tagged text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Measures of semantic similarity based on WordNet have been widely used in Natural Language Processing. These measures rely on the structure of WordNet to produce a numeric score that quantifies the degree to which two concepts (represented by a sense or synset) are similar (or not). In their simplest form these measures use path length to identify concepts that are physically close to each other and therefore considered to be more similar than concepts that are further apart.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "While this is a reasonable first approximation to semantic similarity, there are some well known limitations. Most significant is that path lengths between very specific concepts imply much smaller distinctions in semantic similarity than do comparable path lengths between very general concepts. One proposed improvement is to augment concepts in Word-Net with Information Content values derived from sense-tagged corpora or from raw unannotated corpora (Resnik, 1995) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 455, |
|
"end": 469, |
|
"text": "(Resnik, 1995)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper shows that Information Content measures based on modest amounts of unannotated corpora have greater correlation with human similarity judgements than do those based on the largest corpus of sense-tagged text currently available. 1 The key to this success is not in the specific type of corpora used, but rather in increasing the number of concepts in WordNet that have counts associated with them. These results show that Information Content measures of semantic similarity can be significantly improved without requiring the creation of sensetagged corpora (which is very expensive).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Information Content (IC) is a measure of specificity for a concept. Higher values are associated with more specific concepts (e.g., pitch fork), while those with lower values are more general (e.g., idea). Information Content is computed based on frequency counts of concepts as found in a corpus of text. The frequency associated with a concept is incremented in WordNet each time that concept is observed, as are the counts of the ancestor concepts in the Word-Net hierarchy (for nouns and verbs). This is necessary because each occurrence of a more specific concept also implies the occurrence of the more general ancestor concepts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Information Content", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "When a corpus is sense-tagged, mapping occurrences of a word to a concept is straightforward (since each sense of a word corresponds with a concept or synset in WordNet). However, if the text has not been sense-tagged then all of the possible senses of a given word are incremented (as are their ancestors). For example, if tree (as a plant) occurs in a sense-tagged text, then only the concept associated with tree as a kind of plant would be incremented. If the text is untagged, then all of the possible senses of tree would be incremented (such as the mathematical sense of tree, a shoe tree, a plant, etc.) In this case the frequency of all the occurrences of a word are divided equally among the different possible senses. Thus, if a word occurs 42 times in a corpus and there are six possible senses (concepts), each sense and all of their ancestors would have their frequency incremented by seven. 2 For each concept (synset) c in WordNet, Information Content is defined as the negative log of the probability of that concept (based on the observed frequency counts):", |
|
"cite_spans": [ |
|
{ |
|
"start": 906, |
|
"end": 907, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Information Content", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "IC(c) = \u2212logP (c)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Information Content", |
|
"sec_num": "1.1" |
|
}, |
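
{

"text": "As a concrete illustration of the counting scheme and the formula above, consider the following minimal Python sketch. It operates over a hypothetical toy hierarchy rather than WordNet itself, and every name in it (parents, senses, tree#plant, and so on) is invented for the example; it is a sketch of the idea, not the WordNet::Similarity implementation:\n\nimport math\nfrom collections import defaultdict\n\n# Hypothetical toy hierarchy: concept -> parent (None marks the root).\nparents = {'entity': None, 'plant': 'entity', 'artifact': 'entity',\n           'tree#plant': 'plant', 'tree#shoe_tree': 'artifact'}\n\n# Candidate senses of each word when the text is untagged.\nsenses = {'tree': ['tree#plant', 'tree#shoe_tree']}\n\nfreq = defaultdict(float)\n\ndef ancestors(c):\n    # Yield c and every concept above it, up to the root.\n    while c is not None:\n        yield c\n        c = parents[c]\n\ndef count_untagged(word, n):\n    # Divide n occurrences evenly among the senses, then propagate upward.\n    share = n / len(senses[word])\n    for s in senses[word]:\n        for a in ancestors(s):\n            freq[a] += share\n\ncount_untagged('tree', 42)  # two senses in this toy, so each gets 21\n\ntotal = freq['entity']  # the root count serves as the corpus total\n\ndef ic(c):\n    # IC(c) = -log P(c); None when the concept was never observed.\n    return -math.log(freq[c] / total) if freq[c] > 0 else None\n\nprint(ic('tree#plant'))  # more specific than 'plant', so higher IC",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Information Content",

"sec_num": "1.1"

},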
|
{ |
|
"text": "Information Content can only be computed for nouns and verbs in WordNet, since these are the only parts of speech where concepts are organized in hierarchies. Since these hierarchies are separate, Information Content measures of similarity can only be applied to pairs of nouns or pairs of verbs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Information Content", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "There are three Information Content measures implemented in WordNet::Similarity: (res) (Resnik, 1995) , (jcn) (Jiang and Conrath, 1997) , and (lin) (Lin, 1998) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 101, |
|
"text": "(Resnik, 1995)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 110, |
|
"end": 135, |
|
"text": "(Jiang and Conrath, 1997)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 148, |
|
"end": 159, |
|
"text": "(Lin, 1998)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Similarity Measures", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "These measures take as input two concepts c 1 and c 2 (i.e., senses or synsets in WordNet) and output a numeric measure of similarity. These measures all rely to varying degrees on the idea of a least common subsumer (LCS); this is the most specific concept that is a shared ancestor of the two concepts. For example, the LCS of automobile and scooter is vehicle.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Similarity Measures", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The Resnik (res) measure simply uses the Information Content of the LCS as the similarity value:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Similarity Measures", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "res(c 1 , c 2 ) = IC(LCS(c 1 , c 2 ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Similarity Measures", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The Resnik measure is considered somewhat coarse, since many different pairs of concepts may share the same LCS. However, it is less likely to suffer from zero counts (and resulting undefined values) since in general the LCS of two concepts will not be a very specific concept (i.e., a leaf node in WordNet), but will instead be a somewhat more general concept that is more likely to have observed counts associated with it.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Similarity Measures", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Both the Lin and Jiang & Conrath measures attempt to refine the Resnik measure by augmenting it with the Information Content of the individual concepts being measured in two different ways:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Similarity Measures", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "lin(c 1 , c 2 ) = 2 * res(c 1 ,c 2 ) IC(c 1 )+IC(c 2 ) jcn(c 1 , c 2 ) = 1 IC(c 1 )+IC(c 2 )\u22122 * res(c 1 ,c 2 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Similarity Measures", |
|
"sec_num": "2" |
|
}, |
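
{

"text": "Given IC values and a way to find the least common subsumer, all three measures reduce to a few lines. The following Python sketch assumes a tree-shaped parent map and an ic function like those in the earlier toy example (both hypothetical); it is a hedged rendering of the definitions above, not the WordNet::Similarity code:\n\ndef ancestors(c, parents):\n    # Path from c up to the root, most specific concept first.\n    out = []\n    while c is not None:\n        out.append(c)\n        c = parents[c]\n    return out\n\ndef lcs(c1, c2, parents):\n    # Least common subsumer: the first ancestor of c1 that also lies on\n    # c2's path to the root (correct for a tree-shaped hierarchy).\n    on_path2 = set(ancestors(c2, parents))\n    for a in ancestors(c1, parents):\n        if a in on_path2:\n            return a\n    return None\n\ndef res(c1, c2, parents, ic):\n    return ic(lcs(c1, c2, parents))\n\ndef lin(c1, c2, parents, ic):\n    return 2 * res(c1, c2, parents, ic) / (ic(c1) + ic(c2))\n\ndef jcn(c1, c2, parents, ic):\n    # Undefined (division by zero) when c1 and c2 are the same concept.\n    return 1 / (ic(c1) + ic(c2) - 2 * res(c1, c2, parents, ic))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Semantic Similarity Measures",

"sec_num": "2"

},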
|
{ |
|
"text": "All three of these measures have been widely used in the NLP literature, and have tended to perform well in a wide range of applications such as word sense disambiguation, paraphrase detection, and Question Answering (c.f., (Resnik, 1999) ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 224, |
|
"end": 238, |
|
"text": "(Resnik, 1999)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Similarity Measures", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Information Content in WordNet::Similarity is (by default) derived from SemCor (Miller et al., 1993) , a manually sense-tagged subset of the Brown Corpus. It is made up of approximately 676,000 words, of which 226,000 are sense-tagged. SemCor was originally created using sense-tags from version 1.6 of WordNet, and has been mapped to subsequent versions to stay current. 3 This paper uses version 3.0 of WordNet and SemCor.", |
|
"cite_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 100, |
|
"text": "(Miller et al., 1993)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "WordNet::Similarity also includes a utility (raw-textFreq.pl) that allows a user to derive Information Content values from any corpus of plain text. This utility is used with the untagged version of SemCor and with various portions of the English GigaWord corpus (1st edition) to derive alternative Information Content values.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "English GigaWord contains more than 1.7 billion words of newspaper text from the 1990's and early 21st century, divided among four different sources: Agence France Press English Service (afe), Associated Press Worldstream English Service (apw), The New York Times Newswire Service (nyt), and The Xinhua News Agency English Service (xie).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "This paper compares the ranking of pairs of concepts according to Information Content measures in WordNet::Similarity with a number of manually created gold standards. These include the (RG) (Rubenstein and Goodenough, 1965) collection of 65 noun (Miller and Charles, 1991) collection of 30 noun pairs (a subset of RG), and the (WS) WordSimilarity-353 collection of 353 pairs (Finkelstein et al., 2002) . RG and MC have been scored for similarity, while WS is scored for relatedness, which is a more general and less well-defined notion than similarity. For example aspirin and headache are clearly related, but they aren't really similar. Table 1 shows the Spearman's rank correlation of several other measures of similarity and relatedness in WordNet::Similarity with the gold standards discussed above. The WordNet::Similarity vector relatedness measure achieves the highest correlation, followed closely by the adapted lesk measure. These results are consistent with previous findings (Patwardhan and Pedersen, 2006) . This table also shows results for several path-based measures. 4 Table 2 shows the correlation of jcn, res, and lin when Information Content is derived from 1) the sense-tagged version of SemCor (semcor), 2) Sem-Cor without sense tags (semcor-raw), and 3) steadily increasing subsets of the 133 million word xie portion of the English GigaWord corpus. These subsets start with the entire first month of xie (199501, from January 1995) and then two months (199501-02), three months (199501-03), up through all of 1995 . Thereafter the increments are annual, with two years of data (1995) (1996) , then three (1995) (1996) (1997) , and so on until the entire xie corpus is used (1995) (1996) (1997) (1998) (1999) (2000) (2001) . The afe, apw, and nyt portions of GigaWord are also used individually and then combined all together along with xie (all).", |
|
"cite_spans": [ |
|
{ |
|
"start": 247, |
|
"end": 273, |
|
"text": "(Miller and Charles, 1991)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 402, |
|
"text": "(Finkelstein et al., 2002)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1005, |
|
"end": 1020, |
|
"text": "Pedersen, 2006)", |
|
"ref_id": "BIBREF5" |
|
}
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 640, |
|
"end": 647, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 1088, |
|
"end": 1095, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Data", |
|
"sec_num": "3" |
|
}, |
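
{

"text": "The evaluation itself amounts to a single rank correlation between a measure's scores and the human judgments. A minimal Python sketch, where the word pairs and scores are illustrative placeholders and scipy is just one convenient way to compute Spearman's rho:\n\nfrom scipy.stats import spearmanr\n\n# Illustrative placeholder data: human judgments and system scores per pair.\ngold = {('car', 'automobile'): 3.92, ('gem', 'jewel'): 3.84, ('noon', 'string'): 0.04}\nsystem = {('car', 'automobile'): 0.91, ('gem', 'jewel'): 0.85, ('noon', 'string'): 0.10}\n\npairs = sorted(gold)\nrho, p_value = spearmanr([gold[p] for p in pairs], [system[p] for p in pairs])\nprint(round(rho, 2))  # 1.0 here, since the toy rankings agree exactly",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Data",

"sec_num": "3"

},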
|
{ |
|
"text": "The size (in tokens) of each corpus is shown in the second column of Table 2 (size), which is expressed in thousands (k), millions (m), and billions (b).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The third column (cover) shows what percentage of the 96,000 noun and verb synsets in WordNet receive a non-zero frequency count when Information Content is derived from the specified corpus. These values show that the 226,000 sense-tagged instances in SemCor cover about 24%, and the untagged version of SemCor covers 37%. As it happens the correlation results for semcor-raw are somewhat better than semcor, suggesting that coverage is at least as important (if not more so) to the performance of Information Content measures than accurate mapping of words to concepts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "A similar pattern can be seen with the xie results in Table 2 . This again shows that an increase in WordNet coverage is associated with increased performance of the Information Content measures. As coverage increases the correlation improves, and in fact the results are better than the path-based measures and approach those of lesk and vector (see Table 1 ). The one exception is with respect to the WS gold standard, where vector and lesk perform much better than the Information Content measures. However, this seems reasonable since they are relatedness measures, and the WS corpus is annotated for relatedness rather than similarity.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 61, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 351, |
|
"end": 358, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "As a final test of the hypothesis that coverage matters as much or more than accurate mapping of words to concepts, a simple baseline method was created that assigns each synset a count of 1, and then propagates that count up to the ancestor concepts. This is equivalent to doing add-1 smoothing without any text (add1only). This results in correlation nearly as high as the best results with xie and semcor-raw, and is significantly better than semcor.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4" |
|
}, |
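
{

"text": "Because the add1only baseline uses no text at all, it can be stated precisely in a few lines. A minimal Python sketch, reusing the hypothetical parent-map convention from the earlier examples (again not the WordNet::Similarity code):\n\nimport math\nfrom collections import defaultdict\n\ndef add1only_ic(parents):\n    # Give every concept a count of 1 and propagate it to all ancestors,\n    # so IC depends only on the shape of the hierarchy.\n    freq = defaultdict(float)\n    for c in parents:\n        node = c\n        while node is not None:\n            freq[node] += 1.0\n            node = parents[node]\n    root = next(c for c, p in parents.items() if p is None)\n    total = freq[root]\n    return {c: -math.log(freq[c] / total) for c in parents}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Results",

"sec_num": "4"

},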
|
{ |
|
"text": "This paper shows that semantic similarity measures based on Information Content can be significantly improved by increasing the coverage of the frequency counts used to derive Information Content. Increased coverage can come from unannotated text or simply assigning counts to every concept in WordNet and does not require sense-tagged text. 35 .35 .78 .57 .37 .75 .63 .37 .73 .68 199501-02 2.3 m .39 .31 .79 .65 .32 .75 .67 .36 .73 .68 199501-03 3.8 m .42 .34 .88 .69 .34 .81 .70 .37 .75 .69 199501-06 7.9 m .46 .36 .88 .69 .36 .81 .70 .37 .75 .69 199501-09 12 m .49 .36 .88 .69 .36 .81 .70 .37 .75 .69 199501-12 16 m .51 .37 .87 .73 .36 .81 .71 .37 .75 .69 1995-1996 34 m .56 .37 .88 .73 .36 .81 .72 .37 .75 .69 1995-1997 53 m .58 .37 .88 .73 .36 .81 .71 .37 .75 .69 1995-1998 73 m .60 .37 .89 .73 .36 .81 .72 .37 .75 .69 1995-1999 94 m .62 .36 .88 .73 .36 .81 .72 .37 .76 .69 1995-2000 115 m .63 .36 .89 .73 .36 .81 .71 .37 .76 .70 1995-2001 133 m .64 .36 .88 .73 .36 .81 .71 .37 .76 ", |
|
"cite_spans": [ |
|
{ |
|
"start": 342, |
|
"end": 986, |
|
"text": "35 .35 .78 .57 .37 .75 .63 .37 .73 .68 199501-02 2.3 m .39 .31 .79 .65 .32 .75 .67 .36 .73 .68 199501-03 3.8 m .42 .34 .88 .69 .34 .81 .70 .37 .75 .69 199501-06 7.9 m .46 .36 .88 .69 .36 .81 .70 .37 .75 .69 199501-09 12 m .49 .36 .88 .69 .36 .81 .70 .37 .75 .69 199501-12 16 m .51 .37 .87 .73 .36 .81 .71 .37 .75 .69 1995-1996 34 m .56 .37 .88 .73 .36 .81 .72 .37 .75 .69 1995-1997 53 m .58 .37 .88 .73 .36 .81 .71 .37 .75 .69 1995-1998 73 m .60 .37 .89 .73 .36 .81 .72 .37 .75 .69 1995-1999 94 m .62 .36 .88 .73 .36 .81 .72 .37 .76 .69 1995-2000 115 m .63 .36 .89 .73 .36 .81 .71 .37 .76 .70 1995-2001 133 m .64 .36 .88 .73 .36 .81 .71 .37 .76", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "These experiments were done with version 2.05 of Word-Net::Similarity(Pedersen et al., 2004).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This is the -resnik counting option in WordNet::Similarity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.cse.unt.edu/\u02dcrada/downloads.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "wup is the Wu & Palmer measure, lch is the Leacock & Chodorow measure, path relies on edge counting, and random provides a simple sanity check.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Many thanks to Siddharth Patwardhan and Jason Michelizzi for their exceptional work on Word-Net::Similarity over the years, which has made this and a great deal of other research possible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Placing search in context: The concept revisited", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Finkelstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Gabrilovich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Matias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Rivlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Solan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Wolfman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Ruppin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACM Transactions on Information Systems", |
|
"volume": "20", |
|
"issue": "1", |
|
"pages": "116--131", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L. Finkelstein, E. Gabrilovich, Y. Matias, E. Rivlin, Z. Solan, G. Wolfman, and E. Ruppin. 2002. Plac- ing search in context: The concept revisited. ACM Transactions on Information Systems, 20(1):116-131.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Semantic similarity based on corpus statistics and lexical taxonomy", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Conrath", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proceedings on International Conference on Research in Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--33", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Jiang and D. Conrath. 1997. Semantic similarity based on corpus statistics and lexical taxonomy. In Proceed- ings on International Conference on Research in Com- putational Linguistics, pages 19-33, Taiwan.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "An information-theoretic definition of similarity", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Lin. 1998. An information-theoretic definition of similarity. In Proceedings of the International Con- ference on Machine Learning, Madison, August.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Contextual correlates of semantic similarity", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Charles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "Language and Cognitive Processes", |
|
"volume": "6", |
|
"issue": "1", |
|
"pages": "1--28", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G.A. Miller and W.G. Charles. 1991. Contextual corre- lates of semantic similarity. Language and Cognitive Processes, 6(1):1-28.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A semantic concordance", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Leacock", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Tengi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Bunker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Proceedings of the Workshop on Human Language Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "303--308", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G.A. Miller, C. Leacock, R. Tengi, and R. Bunker. 1993. A semantic concordance. In Proceedings of the Work- shop on Human Language Technology, pages 303- 308.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Using WordNetbased Context Vectors to Estimate the Semantic Relatedness of Concepts", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Patwardhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Pedersen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the EACL 2006 Workshop on Making Sense of Sense: Bringing Computational Linguistics and Psycholinguistics Together", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Patwardhan and T. Pedersen. 2006. Using WordNet- based Context Vectors to Estimate the Semantic Relat- edness of Concepts. In Proceedings of the EACL 2006 Workshop on Making Sense of Sense: Bringing Com- putational Linguistics and Psycholinguistics Together, pages 1-8, Trento, Italy, April.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Wordnet::Similarity -Measuring the relatedness of concepts", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Pedersen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Patwardhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Michelizzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of Fifth Annual Meeting of the North American Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "38--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Pedersen, S. Patwardhan, and J. Michelizzi. 2004. Wordnet::Similarity -Measuring the relatedness of concepts. In Proceedings of Fifth Annual Meeting of the North American Chapter of the Association for Computational Linguistics, pages 38-41, Boston, MA.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Using information content to evaluate semantic similarity in a taxonomy", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Proceedings of the 14th International Joint Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "448--453", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Resnik. 1995. Using information content to evaluate semantic similarity in a taxonomy. In Proceedings of the 14th International Joint Conference on Artificial Intelligence, pages 448-453, Montreal, August.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Semantic similarity in a taxonomy: An information-based measure and its application to problems of ambiguity in natural language", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "11", |
|
"issue": "", |
|
"pages": "95--130", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Resnik. 1999. Semantic similarity in a taxonomy: An information-based measure and its application to prob- lems of ambiguity in natural language. Journal of Ar- tificial Intelligence Research, 11:95-130.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Contextual correlates of synonymy. Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Rubenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Goodenough", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1965, |
|
"venue": "", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "627--633", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Rubenstein and J.B. Goodenough. 1965. Contextual correlates of synonymy. Computational Linguistics, 8:627-633.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"text": "Rank Correlation of Existing Measures", |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"2\">measure WS MC RG</td></tr><tr><td>vector</td><td>.46 .89 .73</td></tr><tr><td>lesk</td><td>.42 .83 .68</td></tr><tr><td>wup</td><td>.34 .74 .69</td></tr><tr><td>lch</td><td>.28 .71 .70</td></tr><tr><td>path</td><td>.26 .68 .69</td></tr><tr><td colspan=\"2\">random -.20 -.16 .15</td></tr><tr><td>pairs, the (MC)</td><td/></tr></table>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"text": "Rank Correlation of Information Content Measures From Different Corpora jcn lin res corpus size cover WS MC RG WS MC RG WS MC RG semcor 226 k .24 .21 .72 .51 .30 .73 .58 .38 .74 .69 semcor-raw 670 k .37 .26 .82 .58 .32 .79 .65 .38 .76 .", |
|
"html": null, |
|
"content": "<table><tr><td>70</td></tr></table>", |
|
"num": null, |
|
"type_str": "table" |
|
}
|
} |
|
} |
|
} |