|
{ |
|
"paper_id": "Y13-1009", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:31:57.874102Z" |
|
}, |
|
"title": "A Novel Schema-Oriented Approach for Chinese New Word Identification", |
|
"authors": [ |
|
{ |
|
"first": "Zhao", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "East China Normal University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Zhixian", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Samsung Research America, Silicon Valley", |
|
"institution": "", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "zhixian.yan@samsung.com" |
|
}, |
|
{ |
|
"first": "Junzhong", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "jzgu@cs.ecnu.edu.cn" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "With the popularity of network applications, new words become more common and bring the poor performance of natural language processing related applications including web search. Identifying new words automatically from texts is still a very challenging problem, especially for Chinese. In this paper, we propose a novel schemaoriented approach for Chinese new word identification (named \"ChNWI\"). This approach has three main steps: (1) we suggest three composition schemas that cover nearly all two-character up to four-character Chinese word surfaces; (2) we employ support vector machine (SVM) to classify Chinese new words of three schemas using their unique linguistic characteristics; and (3) we design various rules to filter identified Chinese new words of three schemas. Our extensive evaluations with two corpora (Chinese news titles and CIPS-SIGHAN 2012 CSMB) show ChNWI's efficiency on Chinese new word identification.", |
|
"pdf_parse": { |
|
"paper_id": "Y13-1009", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "With the popularity of network applications, new words become more common and bring the poor performance of natural language processing related applications including web search. Identifying new words automatically from texts is still a very challenging problem, especially for Chinese. In this paper, we propose a novel schemaoriented approach for Chinese new word identification (named \"ChNWI\"). This approach has three main steps: (1) we suggest three composition schemas that cover nearly all two-character up to four-character Chinese word surfaces; (2) we employ support vector machine (SVM) to classify Chinese new words of three schemas using their unique linguistic characteristics; and (3) we design various rules to filter identified Chinese new words of three schemas. Our extensive evaluations with two corpora (Chinese news titles and CIPS-SIGHAN 2012 CSMB) show ChNWI's efficiency on Chinese new word identification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "With the rapid development of information technology, as well as the growth of social networks (e.g., Chinese Microblog, WeChat), Chinese new words are constantly being created and their usages have become an inevitable phenomenon. Automatic identification of new words plays an important role in a number of areas in Chinese language processing, such as automatic segmentation, information retrieval and machine translation (Zhang et al., 2010; Duan et al., 2012) . In the Chinese new word identification (NWI) task, new words refer to new composition words that are not registered in the dictionary of a Chinese segmenter.", |
|
"cite_spans": [ |
|
{ |
|
"start": 425, |
|
"end": 445, |
|
"text": "(Zhang et al., 2010;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 446, |
|
"end": 464, |
|
"text": "Duan et al., 2012)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Statistical approaches are the most widely used methods in NWI. The previous methods extract some linguistic features of new word compositions, i.e., word composition probability, cooccurrence probability, mutual information, and word frequency, while they assume above linguistic features playing the same impact on various word surfaces (Chang and Lee, 2003; Li et al., 2008; Zhang et al., 2010) . Some methods also have binary decision, either \"new words\" or \"not new words\". A SVM-based method (Li et al., 2008) aims at two word surfaces, NW11 and N-W21, and the method uses same linguistic features for the two surfaces. Other statistical models, for instance, a latent discriminative model (Pang et al., 2009) , a linear-time incremental model (Zhang et al., 2012) and conditional random fields (CRFs) model (Wang et al., 2012) , are designed for NWI.", |
|
"cite_spans": [ |
|
{ |
|
"start": 339, |
|
"end": 360, |
|
"text": "(Chang and Lee, 2003;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 361, |
|
"end": 377, |
|
"text": "Li et al., 2008;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 378, |
|
"end": 397, |
|
"text": "Zhang et al., 2010)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 498, |
|
"end": 515, |
|
"text": "(Li et al., 2008)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 696, |
|
"end": 715, |
|
"text": "(Pang et al., 2009)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 750, |
|
"end": 770, |
|
"text": "(Zhang et al., 2012)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 814, |
|
"end": 833, |
|
"text": "(Wang et al., 2012)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Recently, some hybrid methods have been suggested. These hybrid methods employ more or fewer rules for statistical methods to obtain an optimal efficiency of identification. However, the rules these methods used are created by the people, which cause these methods are not suitable for other new word composition schemas (Zhang et al., 2006; Jiang et al., 2011; Xi et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 321, |
|
"end": 341, |
|
"text": "(Zhang et al., 2006;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 361, |
|
"text": "Jiang et al., 2011;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 362, |
|
"end": 378, |
|
"text": "Xi et al., 2012)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Despite the wide studies of new word identifications, accurately identifying Chinese new words from texts automatically is still a very challenging task because of the following reasons:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Most existing studies focus on English and these methods are not suitable for Chinese. Chinese new words have less morphology variations than many other languages, and there is a lack of capital clues as in English. In Chinese, there are not special symbols implying boundaries between two words and any adjacent characters can form a word. This is one main reason of the difficulty to recognize new words from texts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 A survey of the literature indicates that there are eleven surfaces of four-tuple Chinese words, while those methods focus on two surfaces (i.e., NW11 and NW21). They use same linguistic characteristics and same filtering rules for the two surfaces. The two aspects cause the lower accurate rate and the problem of data sparseness.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To address these challenges, in this paper, we propose a schema-oriented Chinese new word identification approach which combining SVM and rules, it is called \"ChNWI\". The ChNWI approach has two main parts, i.e., (1) ChNWI training process, in which we first define three word composition schemas, their particular linguistic characteristics, and one basic feature model with other three feature models for three schemas;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(2) ChNWI testing process, in which we identifying new words of three schemas from segmented fragments using various filtering rules of three schemas. Concluded, this paper has the following three main contributions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We classify eight of eleven surfaces of fourtuple Chinese words into three composition schemas, i.e., single-character schema, affix schema and NW22 schema. We study their special linguistic characteristics of the three schemas.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We design a rich set of features models for the three schemas by analyzing their linguistic characterises. We hereinafter apply SVM as our basic classifier due to its robustness, efficiency and higher performance than other classifiers, for instance, Perceptron, Naive Bayes and kNN (Li et al., 2008) . Furthermore, we design filter rules for the three schemas to refine the NWI decision.", |
|
"cite_spans": [ |
|
{ |
|
"start": 285, |
|
"end": 302, |
|
"text": "(Li et al., 2008)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We evaluate ChNWI on two corpora, i.e., a collected Chinese news title dataset and a popular MicroBlog dataset. The experimental results show the efficiency of ChNWI on Chinese new word identification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The remaining sections of this paper are organized as follows. Section 2 presents the main framework of our ChNWI approach. Section 3 introduces three new word composition schemas, their linguistic characteristics and their feature models. Section 4 discusses the training process and the test process of ChNWI. We conduct several experiments and analyze experimental evaluations in section 5. Finally, we conclude this paper and discuss future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this section, we first formulate the task in this paper, then we present our approach in general.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our ChNWI appraoch", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The task of identifying Chinese new words in this paper is concluded as: after extracting strings of three kinds of schemas from segmented fragments, we compute the confidence degree of these strings using both their special linguistic characteristics, together with SVM; we select these strings with their confidence degree larger than a certain threshold as new word candidates.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our ChNWI appraoch", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The confidence degree of a Chinese new word with the feature set x belongs to the category y is defined as the co-occurrence probability p(x, y) of the category y and the feature set x. The category y refers to \"Chinese new words\" or \"not Chinese new words\", and x is the feature vectors of a new word. Formally, given a training sample set,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our ChNWI appraoch", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "T = {(x 1 , y 1 ), (x 2 , y 2 ), ..., (x n , y n )}, where x i \u2208 R n , y i \u2208 {\u22121, 1}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our ChNWI appraoch", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "x i refers to the feature vectors of new words, y i is the category of a new word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our ChNWI appraoch", |
|
"sec_num": "2" |
|
}, |
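To make the formulation concrete, here is a minimal sketch (ours, not from the paper) of training a binary SVM over such (x_i, y_i) pairs, using scikit-learn's SVC in place of LibSVM; the feature layout and all numbers are illustrative assumptions.

```python
# Minimal sketch (not the paper's code): binary SVM over candidate features.
# Feature layout [frequency, co-occurrence prob., mutual info, AV] and all
# numbers are illustrative assumptions.
import numpy as np
from sklearn.svm import SVC

# T = {(x_i, y_i)}: x_i in R^n, y_i in {-1, +1} (+1 = "new word")
X = np.array([
    [12, 0.83, 4.1, 5],
    [3,  0.12, 0.7, 1],
    [25, 0.91, 5.2, 8],
    [2,  0.05, 0.3, 1],
])
y = np.array([1, -1, 1, -1])

clf = SVC(kernel="rbf").fit(X, y)
# Signed distance to the separating hyperplane as a confidence score; the
# paper thresholds a confidence degree p(x, y) in the same spirit.
print(clf.decision_function([[10, 0.7, 3.5, 4]])[0])
```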
|
{ |
|
"text": "The framework of ChNWI is shown in Figure 1 . Two main parts of the suggested ChNWI approach are the training process and the testing process.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 35, |
|
"end": 43, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Our ChNWI appraoch", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The training process includes: (1) we first segment and POS tagging the training corpus using a Chinese word segmenter; (2) After extracting linguistic characteristics of three schemas, we generate three feature vectors for three schemas using their positive samples and negative samples;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our ChNWI appraoch", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "(3) Three feature models for the three schemas are generated using the SVM classifier.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our ChNWI appraoch", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The steps of the ChNWI testing process are: (1) we segment and POS tagging the test corpus and extract potential strings of three schemas using two suggested algorithms; (2) we extract three feature vectors for three schemas using the extracted linguistics characteristics during the ChNWI training process; (3) we identify new word candidates of three schemas using the three generated SVM models. Finally, we suggest various rules to filter all candidates. In this section, we present three schemas of Chinese new words and define their feature models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our ChNWI appraoch", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Literature (Jiang et al., 2011) shows that, all fourtuple Chinese new words can be classified into 11 compositions, i.e., 53 % new words of NW11, 31% new words of NW21, 5% new words of N-W12 and NW31, and 11% other schemas. Here, NW is the abbreviation of New Word, 1 refers to a single character, 2 refers to a binary word, 3 refers to a ternary word. After investigating the features of these compositions, we classify fourtuple Chinese new words into three schemas, i.e., single-character schema, affix schema and NW22 schema. The three schemas cover nearly above 11 compositions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 31, |
|
"text": "(Jiang et al., 2011)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Various surfaces of Chinese new words", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The new words of single-character schema are composed of up to four consecutive single characters. The single-character schema includes, N-W11, NW111 and NW1111. Some examples of single-character schema are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Single-character schema", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "(1) NW11, \u00ac/ng x/n (yi/ng zu/n) (2) NW111, \u00b2/n \u2022/n '/n (jing/n shi/n fang/n) (3) NW1111, \u2021/n \u00d5/d r/v \u00da/vi (fan/n du/d cuc/v tong/vi).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Single-character schema", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "There are less linguistic characteristics for new words of single-character schema mainly because of most of all single characters have no combined features with their neighboring ones, thus up to four adjacent characters can be viewed as a new word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Single-character schema", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "The second surface type is \"affix schema\". A new word of affix schema is composed by a single character and an existing word. Affix schema can be further classified as prefix schema and suffix schema. Prefix schema includes NW12 (a single character with an existing binary word) and NW13 (a single character with an existing ternary word), e.g., \u2021\u00cf\u00e4(Anti inflation). Suffix schema includes NW21 (an existing binary word with a single character) and NW31 (an existing ternary word with a single character), for example, I\u00e8 \u00dc(Ministry of Land and Resources).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Affix schema", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "Both prefix schema and suffix schema have strong linguistic characteristics. The first character is easy to combine with a binary word to compose a ternary new word, or with a ternary word to constitute a four tuple new word. These kinds of first characters are viewed as prefix letters, such as, \"(zero),^(soft) and \u2021(anti).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Affix schema", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "The last character (or the tail character) of suffix schema is easy to combine a binary word to form a ternary new word, or with a ternary word to form a four tuple new word. We view the kinds of tail characters as suffix letters, for instance, \u00dc(department), \u00c7(rate) and \u00aa(style).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Affix schema", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "New words of NW22 schema are mainly composed by two binary words. Some examples are < \u2022\u00ca (Census) and 9'\u00ba (Tropical storm). Unlike single-character schema and affix schema, this kind of new words have less special linguistic characteristics for the reason of two adjacent binary words can compose a new word of NW22. Since there are not significant characteristics of N-W22, it is difficult to identify these new words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NW22 schema", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "We first suggest a base feature model for three schemas, then we propose a special feature model for each schema.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature models for three schemas", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "For new words of three schemas, some linguist characteristics are important, i.e., co-occurrence, mutual information, word frequency and adjunct categories. The base feature model (Base F ) for three schemas is defined as follows,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic feature model", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Base F {F F , F COP , F M I , F AV } (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic feature model", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "here, F F refers to word frequency, F COP is cooccurrence probability or average co-occurrence probability, F M I is mutual information or average mutual information, F AV is adjacent categories.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic feature model", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Word frequency is a basic characteristic of new words, especially for NW22 schema. This characteristic is an important aspect of determining whether a string is a new word or not. We view a string S in a corpus is a new word candidate if its frequency is larger than a pre-defined threshold. In this paper, we set the threshold to 2 for the aim of covering mostly new word candidates.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic feature model", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Co-occurrence probability show the tightness degree of two Chinese characters or two words A and B. The higher their co-occurrence probability, the higher the tightness degree of A and B is. The greater the tightness degree is, the more easier A with B to compose a new word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic feature model", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Mutual information indicates the relevant degree of two continuous strings A and B. Mutual information not only reflects the possibility of the combination of two continuous strings to be a word, but also measures the internal relevant degree of a word. We use average mutual information to indicate the coupling degree of continuous characters or words in a string S (Luo and Sun, 2003) . The higher the average mutual information of S is, the higher its coupling degree is. Which means the higher possibility of S is to be a new word (Zhou, 2005) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 368, |
|
"end": 387, |
|
"text": "(Luo and Sun, 2003)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 536, |
|
"end": 548, |
|
"text": "(Zhou, 2005)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic feature model", |
|
"sec_num": "3.2.1" |
|
}, |
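As an illustration of the statistics above, the following sketch (our own, with hypothetical count inputs) computes pointwise mutual information and its average over a candidate string; it assumes counts gathered from a segmented corpus and nonzero denominators.

```python
# Sketch of co-occurrence/MI statistics; counts are assumed to come from a
# segmented corpus and to be nonzero.
import math

def pmi(n_ab, n_a, n_b, n_total):
    """Pointwise mutual information of adjacent units A, B:
    log( p(AB) / (p(A) p(B)) )."""
    return math.log((n_ab / n_total) / ((n_a / n_total) * (n_b / n_total)))

def avg_mi(units, pair_counts, unit_counts, n_total):
    """Average MI over all adjacent pairs in the string `units`."""
    scores = [pmi(pair_counts[(a, b)], unit_counts[a], unit_counts[b], n_total)
              for a, b in zip(units, units[1:])]
    return sum(scores) / len(scores)
```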
|
{ |
|
"text": "Adjacent category represents the relevant degree among a word (or a string) with its context. Adjacent category AV (S) can be further divided into left adjacent category (L AV ) and right adjacent category (R AV ). Given a Chinese string S, its adjacent category is defined as follows,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic feature model", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "AV (S) = min{L AV (S), R AV (S)} (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic feature model", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "here, L AV (S) and R AV (S) refer to the numbers of the words in which the string S appearing in the left or in the right of the words respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic feature model", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "In a sentence, a string is viewed as a word if it satisfies that, its cohesive degree is higher and its coupling degree with its context is lower. For a term, its various contexts cause its left adjacent category and its right adjacent category are large numbers. From this consideration, for a Chinese string S, if its left adjacent category value or its right adjacent categories are larger than a predetermined threshold, which means that the string S is loose with its context and it is higher possibility of being a Chinese new word. That is the reason we view the two adjacent categories with lower values to be the adjacent category of the string S in Equation (2).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic feature model", |
|
"sec_num": "3.2.1" |
|
}, |
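A minimal sketch of Equation (2) follows, under the simplifying assumption that candidates are single tokens in the segmented fragments (the paper's candidates may span several tokens).

```python
# Sketch of Equation (2): AV(S) = min{L_AV(S), R_AV(S)}.
# Simplifying assumption: the candidate S is a single token in each fragment.
def adjacent_variety(candidate, fragments):
    left, right = set(), set()
    for frag in fragments:                  # frag: list of segmented tokens
        for i, tok in enumerate(frag):
            if tok == candidate:
                if i > 0:
                    left.add(frag[i - 1])   # distinct left neighbours
                if i + 1 < len(frag):
                    right.add(frag[i + 1])  # distinct right neighbours
    return min(len(left), len(right))
```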
|
{ |
|
"text": "As a new word of single-character schema is a string of continuous characters in a segmented fragment, for single-character schema, we add independent word probability (IW P ) into the base feature model to get a new feature model, which is called as the feature model of single-character schema (F single ) as follows,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature model for single-character schema", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "F single {F IW P , F F , F COP , F M I , F AV }", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Feature model for single-character schema", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "Independent word probability of a string S (S = c 1 , c 2 , ..., c n ) is defined as the joint probability of all characters in the string. We assume that, the higher the independent word probability of a string S is, the higher the probability of S being a new word is. Based on the assumption, we take a string as a new word candidate if its IW P (S) is larger than a pre-defined threshold.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature model for single-character schema", |
|
"sec_num": "3.2.2" |
|
}, |
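A sketch of one plausible reading of IWP(S) as a product of per-character standalone-word probabilities; the count dictionaries are hypothetical inputs, not the paper's data structures.

```python
# Sketch of one plausible reading of IWP(S): the product of each character's
# probability of occurring as a standalone word. Both count dictionaries are
# hypothetical inputs.
def iwp(chars, standalone_count, total_count):
    """standalone_count[c]: occurrences of c as a one-character word;
    total_count[c]: all occurrences of c in the corpus."""
    p = 1.0
    for c in chars:
        p *= standalone_count.get(c, 0) / max(total_count.get(c, 1), 1)
    return p
```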
|
{ |
|
"text": "New words of affix schema have relatively significant linguistic features. That is the probability of the affix characters appearing in the head or the tail of a word is very high. That is to say, the affix characters are easy to compose new words together with existing words or other characters. From this observation, we can compute the head-character word probability IW P (C, f ) and the tail-character word probability IW P (C, l) for a word of affix schema. We further classify the feature model of affix schema into two categories, the prefix feature model (F pref ix ) and the suffix feature model (F suf f ix ), as follows,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature models for affix schema", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "F pref ix {F IW P (f ), F F , F COP , F M I , F AV } (4) F suf f ix {F IW P (l), F F , F COP , F M I , F AV } (5)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature models for affix schema", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "here, F IW P (f ) and F IW P (l) refer to the headcharacter word probability and the tail-character word probability respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature models for affix schema", |
|
"sec_num": "3.2.3" |
|
}, |
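A sketch of head-/tail-character word probabilities under the assumption that IWP(C, f) and IWP(C, l) are estimated from a dictionary; the estimator is our guess at the intended statistic, not the paper's code.

```python
# Sketch: head-/tail-character word probabilities estimated from a dictionary.
# The estimator is our guess at the intended statistic, not the paper's code.
def iwp_position(c, dictionary_words, position):
    """position 'f' (head) gives IWP(C, f); 'l' (tail) gives IWP(C, l)."""
    containing = [w for w in dictionary_words if c in w]
    if not containing:
        return 0.0
    idx = 0 if position == "f" else -1
    return sum(1 for w in containing if w[idx] == c) / len(containing)
```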
|
{ |
|
"text": "The third schema type is NW22 schema. The new word of NW22 schema is a combination of two existing words. It is obvious that, word probability, head-character word probability or tail-character word probability do not reflect the unique characteristics of NW22 schema. To NW22 schema, both the degree of combination between two existing words and the context of two words are important features. Therefore, we use the base feature model for NW22 schema only.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature model for NW22 schema", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "4 ChNWI training and testing process", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature model for NW22 schema", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "We first determine positive samples and negative samples for three schemas respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The training process of ChNWI", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For single-character schema, positive samples mainly refer to words up to four characters in the dictionary of a segmenter, i.e., ICTCLAS, and any substrings of these words are not words. For example, < \u00ba(escape), \u00f0 N(McDonald's) and n o(make irresponsible remarks) are words in the dictionary, while any sub-strings of the three words are not registered in the dictionary. Negative samples are the extracted continuous strings of NW11, NW111 and NW1111 in segmented fragments, while these strings are not registered as words in the segmenter.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The training process of ChNWI", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The positive samples of affix schema are ternary words or quaternary words in the dictionary of a segmenter, and parts of these words are words also. For example, the first two characters of the word r\u2022 (hukou ben) is a word, and the last three characters of the word \u00d5\u00dc \u00ba(he xibeifeng) is a word also. The negative samples of affix schema are these strings combined with a character and a word of NW12, NW13, NW21 and NW31, while they are not words in the dictionary. For NW22 schema, positive samples are quaternary words in the dictionary, and half of these words are words also. Such as { \u00a4 V \u00b9(historical record) and \u00c7 \u0160 \u00a9 \u00d1(Chinese pinyin), parts of the two words are binary words. Negative samples are these strings combined by two binary words while they are not words in the dictionary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The training process of ChNWI", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Then we use LibSVM (Chang and Lin, 2011) to gain three SVM models for three schemas using positive samples and negative samples. In order to improve the accuracy of the SVM training model, we manually choose some negative samples as positive sample for three schemas respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 40, |
|
"text": "(Chang and Lin, 2011)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The training process of ChNWI", |
|
"sec_num": "4.1" |
|
}, |
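A minimal sketch of the per-schema training step, substituting scikit-learn's LIBSVM-backed SVC and 10-fold cross-validation (as described in Section 5) for the LibSVM tools the paper uses; train_schema_model is our own helper name.

```python
# Sketch of the per-schema training step, with scikit-learn's LIBSVM-backed
# SVC standing in for the LibSVM tools; train_schema_model is our own helper.
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC

def train_schema_model(X, y):
    """X: feature vectors of one schema; y: +1/-1 labels.
    Uses 10-fold cross-validation, as described in Section 5."""
    model = SVC(kernel="rbf")
    scores = cross_val_score(model, X, y, cv=10)  # assumes enough samples
    model.fit(X, y)
    return model, scores.mean()
```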
|
{ |
|
"text": "We suggest three methods to extract new word candidates of three schemas respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extracting new word candidates of three schemas", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "(1) Extract new word candidates of singlecharacter schema As we discussed above, a new word of singlecharacter schema is made up of two or more continuous characters in segmented fragments. That is, given a segment fragment T = Given three strings of single-character schema, A, B and C, if there is A = B +C, and the lengths of B and C are smaller than or equal to 2, A is viewed as the parent string of B and C and both B and C are viewed as two sub-strings of A.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extracting new word candidates of three schemas", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "{X 1 X 2 ...X i ...X n }(1 \u2264 i \u2264 n),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extracting new word candidates of three schemas", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "We present the process of extracting new word candidates of single-character schema as follows: firstly, we extract the longest new word candidates from the segmented test corpus, and count their frequencies using Algorithm 1; then, for each longest new word candidates, it's all substrings are extracted and their frequencies are counted using Algorithm 2. We use Algorithm 1 to extract all longest new word candidates of single-character schema in a segmented text SST C.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extracting new word candidates of three schemas", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "Here, a = {w[0]w[1]...w[k]} is a segmented fragment in n SST C. w is a part of a, it is maybe a word, a Chinese character, a number or an English character. N (w) is the frequencies of w in the segmented fragments, length(w) is the length of w, slpuw is the longest new word candidate set of singlecharacter schema, spuw is the new word candidate set of affix schema.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extracting new word candidates of three schemas", |
|
"sec_num": "4.2.1" |
|
}, |
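Since the pseudocode of Algorithm 1 is not reproduced here, the following is a sketch in its spirit: scan the segmented fragments and collect maximal runs of single-character tokens as the longest candidates (slpuw), counting their frequencies.

```python
# Sketch in the spirit of Algorithm 1 (whose pseudocode is not shown here):
# collect maximal runs of single-character tokens as the longest candidates
# of the single-character schema, with frequencies.
from collections import Counter

def longest_candidates(fragments):
    slpuw = Counter()                      # candidate -> frequency
    for frag in fragments:                 # frag: list of segmented tokens
        run = []
        for w in frag + [""]:              # sentinel flushes the last run
            if len(w) == 1:
                run.append(w)
            else:
                if len(run) >= 2:          # the paper keeps runs of 2-4 chars
                    slpuw["".join(run)] += 1
                run = []
    return slpuw
```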
|
{ |
|
"text": "Algorithm 2 is a sliding window algorithm which is used to extract all sub-strings of each longest new word candidate and their frequencies. The input and output of Algorithm 2are spluw (the longest new word candidate set) and the new word candidate set. The main idea of Algorithm 2 is to traverse each longest new word candidate using a sliding window algorithm to extract all substrings with their lengths are larger than or equal to 2, and to count their frequencies. schema All new word candidates of two kinds of affix schema are collected using Algorithm 1 also. The main steps of extracting new word candidates of affix schema are: firstly, we traverse each segmented fragment, collect all strings of NW21 or NW31 as new word candidates and add them into the new word candidate set of suffix schema; then we collect all strings of NW12 or NW13 as new word candidates, and add them into the new word candidate set of prefix schema.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extracting new word candidates of three schemas", |
|
"sec_num": "4.2.1" |
|
}, |
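A sketch of Algorithm 2's sliding window, enumerating every sub-string of length >= 2 of each longest candidate and accumulating frequencies.

```python
# Sketch of Algorithm 2's sliding window: every sub-string of length >= 2 of
# each longest candidate, with accumulated frequencies.
from collections import Counter

def substrings(slpuw):
    counts = Counter()
    for cand, freq in slpuw.items():
        for width in range(2, len(cand) + 1):
            for start in range(len(cand) - width + 1):
                counts[cand[start:start + width]] += freq
    return counts
```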
|
{ |
|
"text": "(3) Extract new word candidates of NW22 For new words of NW22 schema, the extraction process is: collect all strings of NW22 schema as new word candidates, and add them into the new word candidate set of NW22 schema also.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extracting new word candidates of three schemas", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "After extracting all new word candidates of three schemas, we will further eliminate all new word candidates with their frequencies less than 2, and eliminate all redundant new word candidates. For all new word candidates of single-character schema, since we collect all longest new word candidates and their sub-strings as new word candidates, there are redundant candidates in the collection. The main steps of eliminating these redundant strings are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eliminating redundant new word candidates", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "given a parent string C i C i+1 ...C i+j+1 , its two substrings C i+1 C i+2 ...C i+j+1 and C i C i+1 ...C i+j , the differences between the frequency of N (C i C i+1 ...C i+j+1 ) and the frequencies of its substrings, N (C i+1 C i+2 ...C i+j+1 ) and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eliminating redundant new word candidates", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "N (C i C i+1 ...C i+j )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eliminating redundant new word candidates", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": ", is marked as a. If a is smaller than a predefined threshold b, then we view the two sub-strings are redundant. We remove the two strings and only keep the parent string. On the contrary, if the frequency N (C i C i+1 ...C i+j ) is larger than the frequency N (C i C i+1 ...C i+j+1 ), or the frequency N (C i+1 C i+2 ...C i+j+1 ) is larger than the frequency N (C i C i+1 ...C i+j+1 ), and the difference between them is larger than b, then we remove the parent string \u00a7and keep two sub-strings. In this paper, we set b = 2 for the minimum length of Chinese word is 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eliminating redundant new word candidates", |
|
"sec_num": "4.2.2" |
|
}, |
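A sketch of our reading of the pruning rule above with b = 2; the sign convention for the difference a is our interpretation of the text, not a confirmed detail.

```python
# Sketch of our reading of the pruning rule, with threshold b = 2.
def prune(parent, sub1, sub2, N, b=2):
    """N: candidate -> frequency; returns the candidates to keep."""
    a1, a2 = N[sub1] - N[parent], N[sub2] - N[parent]
    if a1 < b and a2 < b:
        return {parent}           # sub-strings are redundant
    if a1 > b or a2 > b:
        return {sub1, sub2}       # parent string is redundant
    return {parent, sub1, sub2}
```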
|
{ |
|
"text": "We design various filtering rules for the singlecharacter schema and the affix schema.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Filtering new word candidates", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "For single-character schema, we use stop words to filter new word candidates. For example, 3(zai), \u00f2(jiang), \u00a1(chen) are often used in texts, while they with other characters or words cannot compose new words. So, for all candidates of single-character schema, if a word starts or ends with these characters, we will eliminate these candidates.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Filtering new word candidates", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "For affix schema, we use a head-character list and a tail-character list for the aim of filtering new word candidates. Some prefix characters, examples including \"B, C, #\" (fu, jin, xin) are often used in prefix schema. During the training process, we have added the top N of characters with their IW P (f ) values are bigger into the head-character list. For suffix schema, some suffix characters, for example, \" \u20ac, 9, \u203a\" (men, re, kong) are used often. During the training process, we also add the top N characters with their IW P (l) values are bigger into the tail-character list. We design the filtering rules of affix schema as: if the first character in a new word candidate of prefix schema is found in the tail-character list, then we ignore the new word candidate. For example, the first character Y(an) of a new word candidate Y8\u00bc(an zhua huo) is in the tail-character list, so we remove the candidates. Similarly, if the tail character in a new word candidate of suffix schema is found in the head-character list, then we ignore the candidates.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Filtering new word candidates", |
|
"sec_num": "4.2.3" |
|
}, |
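A sketch of the filtering rules above; the three lists below are pinyin placeholders for the examples mentioned, not the paper's actual character lists.

```python
# Sketch of the filtering rules; the three lists are pinyin placeholders,
# not the paper's actual character lists.
STOP_CHARS = {"zai", "jiang", "chen"}   # stop characters
HEAD_CHARS = {"fu", "jin", "xin"}       # top-N characters by IWP(f)
TAIL_CHARS = {"men", "re", "kong"}      # top-N characters by IWP(l)

def keep_candidate(cand, schema):
    if schema == "single" and (cand[0] in STOP_CHARS or cand[-1] in STOP_CHARS):
        return False   # starts or ends with a stop character
    if schema == "prefix" and cand[0] in TAIL_CHARS:
        return False   # head position occupied by a tail character
    if schema == "suffix" and cand[-1] in HEAD_CHARS:
        return False   # tail position occupied by a head character
    return True
```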
|
{ |
|
"text": "For NW22 schema, there are not special rules to filter new word candidates of NW22 schema.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Filtering new word candidates", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "As we discussed above, new words studied in this paper related to the dictionary of the ICTCLAS segmenter 1 , a popular segmenter developed by the Chinese Academy of Sciences. To test the efficiency of our approach, we design three experiments on three corpora. The first corpus is set of the domestic news titles on Sina.com.cn from June 2010 to July 2012, which contains 0.12 Million news titles. We divide the corpus into two parts, one is the testing corpus and the other is the training corpus. The second one is the MicroBlog corpora of CIPS-SIGHAN CLP 2012 Chinese Segmentation on MicroBlog Bakeoff (CSMB) (Duan et al., 2012) , which contains 5,000 sentences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 613, |
|
"end": 632, |
|
"text": "(Duan et al., 2012)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental results and analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In each ChNWI training process, we use crossvalidation method to obtain the optimal training parameters. We divide the training corpus averagely into 10 parts, one is used to verify, the others are used for training. The numbers of features are: four features for single-character schema, six features for affix schema and four features for NW22 schema. The training time of every experiment for ChNWI models is not more than 4 minutes and the testing time is not more than 5 second using a laptop with an Intel(R) Core(TM) i3 CPU and 2.92G RAM.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental results and analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For evaluation, we adopt the same evaluation method defined in the CSMB bake-off task, precision (P ), recall(R) and F-measure. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental results and analysis", |
|
"sec_num": "5" |
|
}, |
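For reference, a minimal sketch of the precision/recall/F-measure computation over identified versus gold-standard new word sets.

```python
# Sketch of the CSMB metrics over identified vs. gold new word sets.
def prf(identified, gold):
    tp = len(set(identified) & set(gold))
    p = tp / len(identified) if identified else 0.0
    r = tp / len(gold) if gold else 0.0
    f = 2 * p * r / (p + r) if p + r else 0.0
    return p, r, f
```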
|
{ |
|
"text": "In the first experiment, we investigate the related contributions of each feature model of each schema of ChNWI. The experimental results are shown in Figure 2 . In Figure 2 , #1 and #2 refer to Base F and F single +Filtering rules of Single-character schema, #3 and #4 are Base F and F pref ix +Filtering rules of Prefix schema, #5 and #6 refer to Base F and F suf f ix +Filtering rules of Suffix schema, and #7 refers to Base F of NW22 schema respectively. Test on single-character schema In the feature model F single , independent word probability is a special linguistic characteristic. To show the effectiveness of independent word probability, we first use the base model, Base F , then we use the feature model F single of single-character schema, together with the corresponding filtering rules.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 151, |
|
"end": 159, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 165, |
|
"end": 173, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments on the first corpus", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "In Figure 2 , #1 and #2 are the experimental results of single-character schema. To some extent, our approach can identify new words of single-character schema effectively. Especially we add the feature F IW P and the relevant filtering rules to the base model, the precision rates improves 11.6% and F-value also increase 6.39%.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments on the first corpus", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Affix schema can be divided into prefix schema and suffix schema.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Test on Affix schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The feature model for prefix schema is F pref ix . In which, the first word probability is an important linguistic characteristics. To show the contribution of first word probability, we first use the base model, Base F , then, we use F pref ix with the corresponding filtering rules. The experimental results of prefix schema are shown as #3 and #4 of Figure 2 . Our approach has good effectiveness of identifying new words of prefix schema also. Using F IW P and filtering rules, the correct rate improves 7.27%, while F value improves 4.21%.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 353, |
|
"end": 362, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Test on Affix schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the feature model F suf f ix of suffix schema, the tail word probability is also an important feature. We first use the base model Base F , then we employ F suf f ix and the relevant filtering rule. The experimental results of suffix schema are #5 and %6 of Figure 2 . Similar to prefix schema, our method has better efficiency on identifying new words of suffix schema. After using F IW P (l) and filtering rules, the correct rate improves 7.18% and F-value improves 0.8%.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 261, |
|
"end": 269, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Test on Affix schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As we discussed above, there are less linguistic characteristics of NW22 schema, so we use the base model Base F as the feature model of NW22 schema. The experimental result of NW22 schema is #7 of Figure 2 . #7 shows that our method has better effectiveness on identifying new words of NW22 schema. The F-score of NW22 schema is more than 67%.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 206, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Test on NW22 schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We perform the second experiment to find how ChNWI improves the performance of a Chinese segmenter. We test ChNWI on the MicroBlog Corpora suggested by CIPS-SIGHAN-2012 CSM-B. The corpora includes 294 new words (14%) and 252 rule-based combination of words (12%). Both the two words are unregistered words to a segmenter. The performance of the two test points is, the max correct numbers of the two test points are 65 (22.1%) of new words and near 70 (27.8%) of rule-based combination of words (Duan et al., 2012) . Which shows that the systems submitted may not deal with unregistered words well.", |
|
"cite_spans": [ |
|
{ |
|
"start": 495, |
|
"end": 514, |
|
"text": "(Duan et al., 2012)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment on MicroBlog Corpora", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The CIPS-SIGHAN-2012 CSMB provides no training set, we train ChNWI on the training corpus used in the first experiment. In the second experiment, we select the ICTCLAS segmenter and the suggested ChNWI is used as post processing. The experimental results are shown in Figure 3 and Figure 4 . All data of the maximal (Max) and the average (Avg) performance of Figure 3 and Figure 4 are from the report (Duan et al., 2012) . Figure 4 shows the numbers (and percentages) of correct sentences segmented by ICTCLAS's and ICTCLAS's+ChNWI. The number and percentage of correct sentences are improved 485 and 9.7% respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 401, |
|
"end": 420, |
|
"text": "(Duan et al., 2012)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 268, |
|
"end": 276, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF4" |
|
}, |
|
{ |
|
"start": 281, |
|
"end": 289, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF5" |
|
}, |
|
{ |
|
"start": 359, |
|
"end": 367, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF4" |
|
}, |
|
{ |
|
"start": 372, |
|
"end": 380, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF5" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 431, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiment on MicroBlog Corpora", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In this paper, we propose the ChNWI approach to identify Chinese new words of three schemas. We first summarize three schemas based on eight surfaces, they are single-character schema (covers N-W11, NW111 and NW1111), affix schema (spans NW21, NW31, NW12 and NW13) and NW22 schema. Next, we represent that four linguistics features, i.e., word frequency, co-occurrence probability, mutual information and adjacent category, play same impacts on the three schemas, while independent word probability is important to singlecharacter schema, head-character word probability and tail-character word probability are key facts to prefix schema and suffix schema respectively. Our experimental results on two corpora show that, new words are categorized into three schemas and employing their unique features not only improve the accuracy score but also improve the recall rate of identification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and future work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We also test the ChNWI approach on the domain-related (Mobile Communication) corpus with 80 thousand sentences. All these sentences are collected from Baidubaike and Wikipedia. With the development of new business in the Mobile Communication domain, there are a considerable amount of new words which are not registered in the dictionary of a segmenter. We test our approach in identifying new words of the three schemas contained in the domain-related corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and future work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The ChNWI approach gets three accuracy rates, 80%, 68% and 71%, for single-character schema, affix schema and NW22 schema respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and future work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In future, we further improve the ChNWI approach from the following three aspects: (1) apply automatic feature selection and check the performance; (2) consider the combination of different schemas for other surfaces (i.e., NW211 and N-W112). (3) study additional schemas rather than the three suggested schemas.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and future work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "ICTCLAS, http://www.ictclas.org/ PACLIC-27", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work is sponsored by the grant from the Shanghai Science and Technology Foundation (No. 11511504002).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgement", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "F-scores of ICTCLAS's + ChNWI are improved 0", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F-scores of ICTCLAS's + ChNWI are improved 0.6% and near 6 % respectively. 1370(0.274)", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Ave Max ICTCLAS's ICTCLAS's+HNWI", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ave Max ICTCLAS's ICTCLAS's+HNWI", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "of correct sentences References Chih-Chung Chang, and Chih-Jen Lin", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Num", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "ACM Transactions on Intelligent Systems and Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Num. of correct sentences References Chih-Chung Chang, and Chih-Jen Lin. 2011. Libsvm: A library for support vector machines. ACM Trans- actions on Intelligent Systems and Technology.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A chunking strategy towards unknown word detection in Chinese word segmentation", |
|
"authors": [ |
|
{ |
|
"first": "Guodong", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Lecture Notes in Computer Science", |
|
"volume": "3651", |
|
"issue": "", |
|
"pages": "530--541", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guodong Zhou. 2005. A chunking strategy towards unknown word detection in Chinese word segmenta- tion. Lecture Notes in Computer Science, 3651:530- 541.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Survey of Chinese New Words Identification", |
|
"authors": [ |
|
{ |
|
"first": "Haijun", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shumin", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chaoyong", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heyan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Computer Science", |
|
"volume": "37", |
|
"issue": "3", |
|
"pages": "6--11", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haijun Zhang, Shumin Shi, Chaoyong Zhu, and Heyan Huang. 2010. Survey of Chinese New Words Iden- tification. Computer Science, 37(3):6-11.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "The use of SVM for Chinese new word identification", |
|
"authors": [ |
|
{ |
|
"first": "Hongqiao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chang-Ning", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaozhou", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "First international joint conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "723--732", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hongqiao Li, Chang-Ning Huang, Jianfeng Gao, and Xiaozhou Fan. 2005. The use of SVM for Chinese new word identification. First international joint conference on Natural Language Processing, 723- 732.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The CIPS-SIGHAN CLP 2012 ChineseWord Segmentation onMicroBlog Corpora Bakeoff. Second CIPS-SIGHAN Joint Conference on Chinese Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Huiming", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifang", |
|
"middle": [], |
|
"last": "Sui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ye", |
|
"middle": [], |
|
"last": "Tian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenjie", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "35--40", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huiming Duan, Zhifang Sui, Ye Tian, and Wenjie Li. 2012. The CIPS-SIGHAN CLP 2012 ChineseWord Segmentation onMicroBlog Corpora Bakeoff. Sec- ond CIPS-SIGHAN Joint Conference on Chinese Language Processing, 35-40.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A Study on Automatic Identification for Internet New Words According to word-building Rule", |
|
"authors": [ |
|
{ |
|
"first": "Jiahen", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenhua", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Journal of Shanxi University (Natural Science Edition)", |
|
"volume": "25", |
|
"issue": "2", |
|
"pages": "115--119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiahen Zheng, and Wenhua Li. 2002. A Study on Au- tomatic Identification for Internet New Words Ac- cording to word-building Rule. Journal of Shanx- i University (Natural Science Edition), 25(2):115- 119.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Segmentation on Chinese Mirco-Blog Data with a Linear-Time Incremental Model. Second CIPS-SIGHAN Joint Conference on Chinese Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Kaixu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changle", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--46", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaixu Zhang, Maosong Sun, and Changle Zhou. 2012. Word Segmentation on Chinese Mirco-Blog Data with a Linear-Time Incremental Model. Second CIPS-SIGHAN Joint Conference on Chinese Lan- guage Processing, 41-46.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "CRFs-Based Chinese Word Segmentation for Micro-Blog with Small-Scale Data", |
|
"authors": [ |
|
{ |
|
"first": "Longye", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Derek", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Wong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidia", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Chao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junwen", |
|
"middle": [], |
|
"last": "Xing", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Second CIPS-SIGHAN Joint Conference on Chinese Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "51--57", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Longye Wang, Derek F. Wong, Lidia S. Chao, and Jun- wen Xing. 2012. CRFs-Based Chinese Word Seg- mentation for Micro-Blog with Small-Scale Data. Second CIPS-SIGHAN Joint Conference on Chinese Language Processing, 51-57.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Adapting Conventional ChineseWord Segmenter for Segmenting Micro-blog Text", |
|
"authors": [ |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Second CIPS-SIGHAN Joint Conference on Chinese Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--68", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chen. 2012. Adapting Conventional ChineseWord Segmenter for Segmenting Micro-blog Text. Second CIPS-SIGHAN Joint Conference on Chinese Lan- guage Processing, 63-68.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Two character Chinese word extraction based on hybrid of internal and contextual measure", |
|
"authors": [ |
|
{ |
|
"first": "Shengfen", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Second SIGHAN Workshop on Chinese Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "24--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shengfen Luo, and Maosong Sun. 2003. Two char- acter Chinese word extraction based on hybrid of internal and contextual measure. Second SIGHAN Workshop on Chinese Language Processing, 24-30.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Automatic Chinese unknown word extraction using small-corpus-based methodmeasure", |
|
"authors": [ |
|
{ |
|
"first": "Tao-Hsing", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chia-Hoang", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "1st IEEE International Conference on Natural Language Processing and Knowledge Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "459--464", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tao-Hsing Chang, and Chia-Hoang Lee. 2003. Au- tomatic Chinese unknown word extraction using small-corpus-based methodmeasure. 1st IEEE In- ternational Conference on Natural Language Pro- cessing and Knowledge Engineering, 459-464.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Chinese Unknown Words Extraction Based on Word Level Characteristics. 9th International Conference on Hybrid Intelligent System", |
|
"authors": [ |
|
{ |
|
"first": "Wenbo", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaozhong", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yijun", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiangde", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "361--366", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenbo Pang, Xiaozhong Fan, Yijun Gu, and Jiangde Yu. 2009. Chinese Unknown Words Extraction Based on Word Level Characteristics. 9th Inter- national Conference on Hybrid Intelligent System, 361-366.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Chinese new word identification: a latent discriminative model with global features", |
|
"authors": [ |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Degen", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haiyu", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fuji", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Computer Science and Technology", |
|
"volume": "26", |
|
"issue": "1", |
|
"pages": "14--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiao Sun, Degen Huang, Haiyu Song, and Fuji Ren. 2011. Chinese new word identification: a latent dis- criminative model with global features. Journal of Computer Science and Technology, 26(1):14-24.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Automatic Recognition of Chinese Unknown Word for Single-Character and Affix Models", |
|
"authors": [ |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanjiao", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhao", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Sixth International Conference on Intelligent Systems and Knowledge Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "435--444", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xin Jiang, Yanjiao Cao, and Zhao Lu. 2011. Auto- matic Recognition of Chinese Unknown Word for Single-Character and Affix Models. Sixth Interna- tional Conference on Intelligent Systems and Knowl- edge Engineering, 435-444.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Chinese Unknown Word Recognition using improved Conditional Random Fields. 8th International Conference on Intelligent Systems Design and Applications", |
|
"authors": [ |
|
{ |
|
"first": "Yisu", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Buzhou", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaolong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "363--367", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yisu Xu, Xuan Wang, Buzhou Tang, and Xiaolong Wang. 2008. Chinese Unknown Word Recognition using improved Conditional Random Fields. 8th In- ternational Conference on Intelligent Systems De- sign and Applications, 363-367.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Chinese New Words Extraction Based on Machine Learning Approach. 2006 International Conference on Machine Learning and Cybernetics", |
|
"authors": [ |
|
{ |
|
"first": "Ziru", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiangjun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuedong", |
|
"middle": [], |
|
"last": "Tian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3380--3384", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ziru Zhang, Qiangjun Wang, and Xuedong Tian. 2006. Chinese New Words Extraction Based on Machine Learning Approach. 2006 International Conference on Machine Learning and Cybernetics, 3380-3384.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "a i in SST C do 3 get a i = w[0]w[1]...w[k]; 4 for each w[j]w[j + 1] in a i do 5 if the length of w[j] == 1 then 6 if the length of w[j + 1(w[j]w[j + 1]) + +; 10 set temp to null; 11 for each w[j] in a i do 12 if length(w[j]) == 1 then 13 w[j] appended to temp; 14 else 15 If length(temp) > 1 if temp not in slpuw then 16 add temp to slpuw; 17 set temp to null;", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "Extract new word candidates of affix Algorithm 2: The Candidate New word Detection Algorithm(CND) Input: slpuw Output: A set subset of substring 1 begin 2 for each c k in slpuw do 3 let s = c k , j = 2, substring is null; 4 for (; j < length(s); j + +) do 5 for (i = 0; i + j \u2212 1 < length(s); i + +) do 6 substring = s.sub(i, i + j);", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"text": "Experiments on contributions of various feature models", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"text": "Experimental results of ICTCLAS's with ChNWI on MicroBlog corpus Figure 3 shows that, compared with ICT-CLAS's, F-score of ICTCLAS's + ChNWI is improved near 5%. Compared with Avg and Max, PACLIC-27", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF5": { |
|
"uris": null, |
|
"text": "Numbers (and percentages) of correct sentences segmented by ICTCLAS's with ChNWI in Mi-croBlog corpus", |
|
"num": null, |
|
"type_str": "figure" |
|
} |
|
} |
|
} |
|
} |