|
{ |
|
"paper_id": "N13-1012", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:40:53.504238Z" |
|
}, |
|
"title": "Combining multiple information types in Bayesian word segmentation", |
|
"authors": [ |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Doyle", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of California", |
|
"location": { |
|
"addrLine": "San Diego La Jolla", |
|
"postCode": "92093", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "gdoyle@ucsd.edu" |
|
}, |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of California", |
|
"location": { |
|
"addrLine": "San Diego La Jolla", |
|
"postCode": "92093", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "rlevy@ucsd.edu" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Humans identify word boundaries in continuous speech by combining multiple cues; existing state-of-the-art models, though, look at a single cue. We extend the generative model of Goldwater et al (2006) to segment using syllable stress as well as phonemic form. Our new model treats identification of word boundaries and prevalent stress patterns in the language as a joint inference task. We show that this model improves segmentation accuracy over purely segmental input representations, and recovers the dominant stress pattern of the data. Additionally, our model retains high performance even without single-word utterances. We also demonstrate a discrepancy in the performance of our model and human infants on an artificial-language task in which stress cues and transition-probability information are pitted against one another. We argue that this discrepancy indicates a bound on rationality in the mechanisms of human segmentation.", |
|
"pdf_parse": { |
|
"paper_id": "N13-1012", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Humans identify word boundaries in continuous speech by combining multiple cues; existing state-of-the-art models, though, look at a single cue. We extend the generative model of Goldwater et al (2006) to segment using syllable stress as well as phonemic form. Our new model treats identification of word boundaries and prevalent stress patterns in the language as a joint inference task. We show that this model improves segmentation accuracy over purely segmental input representations, and recovers the dominant stress pattern of the data. Additionally, our model retains high performance even without single-word utterances. We also demonstrate a discrepancy in the performance of our model and human infants on an artificial-language task in which stress cues and transition-probability information are pitted against one another. We argue that this discrepancy indicates a bound on rationality in the mechanisms of human segmentation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "For an adult speaker of a language, word segmentation from fluid speech may seem so easy that it barely needed to be learned. However, pauses in speech and word boundaries are not well correlated (Cole & Jakimik, 1980) , word boundaries are marked by a conspiracy of partially-informative cues (Johnson & Jusczyk, 2001) , and different languages mark their boundaries differently (Cutler & Carter, 1987) . This makes the problem of unsupervised word segmentation acquisition, whether by a computational model or an infant, a daunting task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 196, |
|
"end": 218, |
|
"text": "(Cole & Jakimik, 1980)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 294, |
|
"end": 319, |
|
"text": "(Johnson & Jusczyk, 2001)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 380, |
|
"end": 403, |
|
"text": "(Cutler & Carter, 1987)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Effective segmentation relies on the flexible integration of multiple types of segmentation cues, among them statistical regularities in phonemes and prosody, coarticulation, and allophonic variation. Infants begin using multiple segmentation cues within their first year of life (Johnson & Jusczyk, 2001) . Despite this, many state-of-the-art models look at only one type of information: phonemes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 280, |
|
"end": 305, |
|
"text": "(Johnson & Jusczyk, 2001)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this study, we expand an existing model to incorporate multiple cues, leading to an improvement in segmentation performance and opening new ways of investigating human segmentation acquisition. On the latter point, we show that rational learners can learn to segment without encountering words in isolation, and that human learners deviate from rationality in certain segmentation tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The prevailing unsupervised word segmentation systems (e.g., Brent, 1999; Goldwater, Griffiths, & Johnson, 2006; Blanchard & Heinz, 2008) use only phonemic information to segment speech. However, human segmenters use additional information types, notably stress information, in their segmentation. We present an overview of these phonemic models here before discussing the prosodic model expansion. A more complete review is available in Goldwater (2007) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 73, |
|
"text": "Brent, 1999;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 74, |
|
"end": 112, |
|
"text": "Goldwater, Griffiths, & Johnson, 2006;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 113, |
|
"end": 137, |
|
"text": "Blanchard & Heinz, 2008)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 438, |
|
"end": 454, |
|
"text": "Goldwater (2007)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Previous work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The Goldwater et al model is related to Brent (1999) 's model, both of which use strictly phonemic information to segment. The model assumes that the corpus is generated by a Dirichlet process over word bigrams. 1 We present a basic overview here, based on Sect. 5.5 of Goldwater, 2007 . To generate the word w i given the preceding word w i\u22121 :", |
|
"cite_spans": [ |
|
{ |
|
"start": 40, |
|
"end": 52, |
|
"text": "Brent (1999)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 212, |
|
"end": 213, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 270, |
|
"end": 285, |
|
"text": "Goldwater, 2007", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Goldwater et al (2006)", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "1. Decide if bigram b i = w i\u22121 , w i is novel 2. If b i non-novel, draw b i from bigram lexicon 3. If b i novel, decide whether w i is novel a. If w i non-novel, draw w i from word lexicon b. If w i novel, draw w i from word-generating distribution P 0 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Goldwater et al (2006)", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The Dirichlet process first decides whether to draw a non-novel (\"nn\") bigram, with probability proportional to the number of times the previous word has appeared in the corpus:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Goldwater et al (2006)", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p( w i\u22121 , w i nn|w i\u22121 ) = n w i\u22121 ,\u2022 n w i\u22121 ,\u2022 + \u03b1 1 ,", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Goldwater et al (2006)", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where n x,y is the token count for bigram x, y . If the bigram is non-novel, word w i is drawn in proportion to the number of times it has appeared after w i\u22121 in the corpus:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Goldwater et al (2006)", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(w i = x| w i\u22121 , w i nn) = n w i\u22121 ,x n w i\u22121 ,\u2022", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Goldwater et al (2006)", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "If the bigram is novel, this could either be due to w i being a novel word or due to w i being an existing word that had not appeared with w i\u22121 before. The probability of w i being a non-novel word x is", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Goldwater et al (2006)", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(w i = x, w i nn| w i\u22121 , w i novel ) = b \u2022,w i (b \u2022,\u2022 + \u03b1 0 ) ,", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Goldwater et al (2006)", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where b .,. is the count of word bigram types. Finally, if w i is a new word, its phonemic form is generated from a distribution P 0 . In the Goldwater et al model, this distribution is simply the product of the unigram probabilities of the phonemes, P (\u03c3 j ), times the probability of a word boundary, p # , to end the word:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Goldwater et al (2006)", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(w i = \u03c3 1 \u2022 \u2022 \u2022 \u03c3 M | w i novel ) = p # (1 \u2212 p # ) M \u22121 P (\u03c3 j )", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Goldwater et al (2006)", |
|
"sec_num": "2.1" |
|
}, |
|
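{ |
|
"text": "To make the generative story above concrete, here is a minimal Python sketch (our illustration, not code from the paper) of one draw of $w_i$ given $w_{i-1}$ under Eqns. 1-4. The count tables (ngram, btypes, phonemes) and the helper sample_from_counts are hypothetical inputs.\n\nimport random\n\ndef sample_from_counts(counts):\n    # draw a key with probability proportional to its count\n    r = random.uniform(0, sum(counts.values()))\n    for key, c in counts.items():\n        r -= c\n        if r <= 0:\n            return key\n    return key\n\ndef generate_word(prev, ngram, btypes, phonemes, p_hash=0.8, a0=20.0, a1=100.0):\n    # ngram[prev][x]: token count of bigram <prev, x>; btypes[x]: bigram types ending in x\n    total = sum(ngram[prev].values())\n    if random.random() < total / (total + a1):   # Eqn. 1: reuse a bigram\n        return sample_from_counts(ngram[prev])   # Eqn. 2\n    btotal = sum(btypes.values())\n    if random.random() < btotal / (btotal + a0): # Eqn. 3: old word, new bigram\n        return sample_from_counts(btypes)\n    word = [sample_from_counts(phonemes)]        # Eqn. 4: novel word from P0\n    while random.random() > p_hash:              # geometric length via p_#\n        word.append(sample_from_counts(phonemes))\n    return tuple(word)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Goldwater et al (2006)", |
|
"sec_num": "2.1" |
|
}, |
|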
{ |
|
"text": "1 We will only discuss the bigram model here because it is more appropriate from both a cognitive perspective (it posits latent hierarchical structure) and engineering perspective (it segments more accurately) than the unigram model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Goldwater et al (2006)", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "To segment an observed corpus, the model Gibbs samples over the possible word boundaries (utterance boundaries are assumed to be word boundaries). 2 The exchangability of draws from a Dirichlet process allows for Gibbs sampling of each possible boundary given all the others.", |
|
"cite_spans": [ |
|
{ |
|
"start": 147, |
|
"end": 148, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Goldwater et al (2006)", |
|
"sec_num": "2.1" |
|
}, |
|
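{ |
|
"text": "As a schematic of one such Gibbs step (our sketch; a seg object and model with these methods are hypothetical), the boundary variable at a site is resampled by comparing the one-word and two-word analyses of the affected span, which exchangeability lets us treat as the final draws:\n\nimport math, random\n\ndef logsumexp2(a, b):\n    m = max(a, b)\n    return m + math.log(math.exp(a - m) + math.exp(b - m))\n\ndef gibbs_step(pos, seg, model):\n    # hypotheses: no boundary at pos (one merged word) vs. a boundary (two words)\n    w = seg.merge_at(pos)\n    w1, w2 = seg.split_at(pos)\n    prev, nxt = seg.neighbors(pos)\n    # score with counts that exclude the affected words (exchangeability)\n    lp_join = model.logp(w, prev) + model.logp(nxt, w)\n    lp_split = model.logp(w1, prev) + model.logp(w2, w1) + model.logp(nxt, w2)\n    p_split = math.exp(lp_split - logsumexp2(lp_join, lp_split))\n    seg.set_boundary(pos, random.random() < p_split)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Goldwater et al (2006)", |
|
"sec_num": "2.1" |
|
}, |
|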
{ |
|
"text": "Phillips and Pearl (2012) make these Bayesian segmentation models more cognitively plausible in two ways. The first is to move from phonemes to syllables as the base representational unit from which words are constructed, as infants learn to categorize syllables before phonemes (Eimas, 1999) . The second is to add memory and processing constraints on the learner. They find that syllable-based segmentation is better than phoneme-based segmentation in the bigram model (though worse in the unigram model), and that, counter-intuitively, the constrained learner outperforms the unconstrained learner. This improvement appears to be driven by better performance in segmenting more common words. In this work, we adopt the syllabified representation but retain the unconstrained rational learner assumption.", |
|
"cite_spans": [ |
|
{ |
|
"start": 279, |
|
"end": 292, |
|
"text": "(Eimas, 1999)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A cognitively-plausible variant", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Some previous models have incorporated multiple cues, specifically the phonemic and stress information that our model will use. Two prominent examples are Christiansen, Allen, and Seidenberg (1998)'s connectionist model and Gambell and Yang (2006) 's algebraic model. The connectionist model places word boundaries where the combination of phonemic and stress information predict likely utterance boundaries, but does not include an explicit sense of \"word\", and performs only modestly on the segmentation task (boundary F-scores of .40-.45). The algebraic model also underperforms the Bayesian model (Phillips & Pearl, 2012) unless it includes the heuristic that there is a word boundary between any two stressed syllables. Our model presents a more general and completely unsupervised approach to segmentation with multiple cuetypes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 224, |
|
"end": 247, |
|
"text": "Gambell and Yang (2006)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 601, |
|
"end": 625, |
|
"text": "(Phillips & Pearl, 2012)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Other multiple-cue models", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "In general, joint inference is becoming more common in language acquisition problems and has been shown to improve performance over single-feature inference. Examples include joint inference of a lexicon and phonetic categories (Feldman, Griffiths, & Morgan, 2009) , joint inference of syntactic word order and word reference (Maurits, Perfors, & Navarro, 2009) , and joint inference of word meanings and speaker intentions in child-directed speech (Frank, Goodman, & Tenenbaum, 2009) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 228, |
|
"end": 264, |
|
"text": "(Feldman, Griffiths, & Morgan, 2009)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 326, |
|
"end": 361, |
|
"text": "(Maurits, Perfors, & Navarro, 2009)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 449, |
|
"end": 484, |
|
"text": "(Frank, Goodman, & Tenenbaum, 2009)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Other multiple-cue models", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Our model changes P 0 from a single-cue distribution, generating only phonemes, to a multiple-cue distribution that generates a stress form as well. This can improve segmentation performance and allows the investigation of rational segmentation behavior in a multiple-cue world.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model design", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In the original model, $P_0(w_i = \\sigma_1 \\cdots \\sigma_M) \\propto \\prod_j P(\\sigma_j)$, where $P(\\sigma_j)$ is the frequency of the phoneme $\\sigma_j$. In the multiple-cue model, we first generate a phonemic form $w_i$, then assign a stress pattern $s_i$ to it.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model design", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "P 0 (w i , s i ) = P W (w i )P S (s i |M ) = p # (1 \u2212 p # ) M \u22121 M j P (\u03c3 j )P S (s i |M ) (5)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model design", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The phonemic form w i has the same product-ofsegments probability as the Goldwater et al model, but \u03c3 j are now syllables instead of phonemes. We discuss the rationale behind this change in the next section.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model design", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The phonemic form is generated first, and the stress form is then drawn as a multinomial over all possible stress patterns with the same number of syllables as w i . The stress distribution P S is a multinomial distribution over word-length stress templates. P S can be learned by the model based on a Dirichlet prior, but for simplicity in the present implementation, we estimate P S as the plus-one-smoothed frequency of the stress patterns in the current segmentation. There are two stress levels (stressed or unstressed), and 2 M possible stress templates for a word of length M . 3 3 We do not assume that each word has one and only one Unlike phonemic forms, stress patterns are drawn as a whole word. This allows the model to capture a wide range of stress biases, although it prevents the model from generalizing biases across different word lengths. A potential future change to P S that would allow for better generalization is discussed in Section 6.", |
|
"cite_spans": [ |
|
{ |
|
"start": 587, |
|
"end": 588, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model design", |
|
"sec_num": "3" |
|
}, |
|
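{ |
|
"text": "A small sketch (ours) of the extended base distribution in Eqn. 5 with the plus-one-smoothed stress multinomial just described; syllable_probs (relative syllable frequencies) and stress_counts (per-length template counts from the current segmentation) are assumed inputs.\n\ndef p0(sylls, stress, syllable_probs, stress_counts, p_hash=0.8):\n    # P0(w, s) = P_W(w) * P_S(s | M)  (Eqn. 5)\n    M = len(sylls)\n    p_w = p_hash * (1 - p_hash) ** (M - 1)\n    for s in sylls:\n        p_w *= syllable_probs[s]\n    counts = stress_counts[M]  # e.g. {\"SW\": 412, \"WS\": 61} for M = 2\n    p_s = (counts.get(stress, 0) + 1) / (sum(counts.values()) + 2 ** M)\n    return p_w * p_s", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model design", |
|
"sec_num": "3" |
|
}, |
|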
{ |
|
"text": "We change from segmenting on phonemes to segmenting on syllables in order to more easily implement stress information, which is a supersegmental feature most appropriately located on syllables. Syllabified data has been used in some previous models of segmentation, especially those using stress information or syllable-level transition probabilities (Christiansen et al., 1998; Swingley, 2005; Gambell & Yang, 2006; Phillips & Pearl, 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 351, |
|
"end": 378, |
|
"text": "(Christiansen et al., 1998;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 379, |
|
"end": 394, |
|
"text": "Swingley, 2005;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 395, |
|
"end": 416, |
|
"text": "Gambell & Yang, 2006;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 417, |
|
"end": 440, |
|
"text": "Phillips & Pearl, 2012)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "On syllabification and stress", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "For studying human word segmentation, Phillips and Pearl argue syllabified speech may be a more cognitively plausible testing ground. 3-monthold infants appear to have categorical representations of syllables (Eimas, 1999) , three months before word segmentation appears (Borfeld, Morgan, Golinkoff, & Rathbun, 2005) , and seven months before phoneme categorization (Werker & Tees, 1984) . In addition, syllabification is assumed in much work on human word segmentation, especially in artificial-language studies (e.g., Thiessen & Saffran, 2003) , which calculate statistical cues at the syllable level.", |
|
"cite_spans": [ |
|
{ |
|
"start": 209, |
|
"end": 222, |
|
"text": "(Eimas, 1999)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 316, |
|
"text": "(Borfeld, Morgan, Golinkoff, & Rathbun, 2005)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 366, |
|
"end": 387, |
|
"text": "(Werker & Tees, 1984)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 520, |
|
"end": 545, |
|
"text": "Thiessen & Saffran, 2003)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "On syllabification and stress", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The assumption that syllable boundaries are known affects the baseline performance of the model, as it reduces the number of possible word boundary locations (since a word boundary is necessarily a syllable boundary). As such performance over syllabified data cannot be directly compared to performance on non-syllabified data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "On syllabification and stress", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "It may seem that syllabification is so closely tied to word segmentation that including the former in a model of the latter leaves little to the model. However, the determinants of syllable boundaries are not the same as those for word boundaries. The probstressed syllable, which would reduce the number of possible stress templates to M , for two reasons. First, in the current corpus, some words have citation forms with multiple stressed syllables. Second, in actual speech this assumption will not hold (e.g., many function words go unstressed). lem of assigning syllable boundaries is a question of deciding where a boundary goes between two syllable nuclei, with the assumption that there must be a boundary there. The problem of assigning word boundaries is a question of deciding whether there is a boundary between two syllable nuclei, and if so, where it is. Knowing the syllable boundaries reduces the set of possible word boundaries, but does not directly address the question of how likely a boundary is. The difference in these tasks is supported by the three-month gap between syllable and word identification in infants.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "On syllabification and stress", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We use the Korman (1984) training corpus, as compiled by Christiansen et al. (1998) , in this study. This is a 24493-word corpus of English spoken by adults to infants aged 6-16 weeks. 4 Phonemes, stresses, and syllable boundaries are the same as those used by Christiansen et al, which were based on citation forms in the MRC Psycholinguistic Database. All monosyllabic words were coded as stressed. Only utterances for which all words had citation forms were included.", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 24, |
|
"text": "Korman (1984)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 57, |
|
"end": 83, |
|
"text": "Christiansen et al. (1998)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "This corpus is largely monosyllabic (87.3% of all word tokens), and heavily biased toward initial stress (89.2% of all multisyllable word tokens). No word is longer than three syllables, and most words have only one stressed syllable. A breakdown of the corpus by stress pattern is given in Table 1 . This monosyllabic bias is an inherent property of English, not idiosyncratic to this corpus. The Bernstein-Ratner child-directed corpus is also over 80% monosyllabic. We expect that the results of segmentation on child-directed data will extend to adult speech, as the adult-directed corpus used by Gambell and Yang (2006) has an average word length of 1.17 syllables. a task where the rational model deviates from human performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 600, |
|
"end": 623, |
|
"text": "Gambell and Yang (2006)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 291, |
|
"end": 298, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The model has four free parameters: \u03b1 0 and \u03b1 1 , which affect the likelihood of new words and bigrams, respectively, and p # and p $ , which affect the expected likelihood of word and utterance boundaries. Following Goldwater, Griffiths, and Johnson (2009) , we set \u03b1 0 = 20, \u03b1 1 = 100, p # = 0.8 and p $ = 0.5 in all experiments. 5 In all cases, the model performed five independent runs of 20000 iterations of Gibbs sampling the boundaries for the full corpus. Simulated annealing was performed during the burn-in period to improve convergence. All performance measures are reported as the mean of these five runs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 217, |
|
"end": 257, |
|
"text": "Goldwater, Griffiths, and Johnson (2009)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 332, |
|
"end": 333, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter setting", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Performance is measured as word, boundary, and lexicon precision, recall, and F-scores. A word is matched iff both of its true boundaries are marked as boundaries and no internal boundaries are marked as word boundaries. Boundary counts omit utterance boundaries, which are assumed to be word boundaries. Lexical counts are based on word type counts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter setting", |
|
"sec_num": "5.1" |
|
}, |
|
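{ |
|
"text": "The scoring just described reduces to a short computation; a sketch (ours), assuming each utterance is a tuple of word strings: words match iff their character spans match, boundary counts exclude utterance edges, and lexicon scores compare type sets.\n\ndef prf(tp, n_pred, n_gold):\n    p = tp / n_pred if n_pred else 0.0\n    r = tp / n_gold if n_gold else 0.0\n    return p, r, (2 * p * r / (p + r) if p + r else 0.0)\n\ndef spans(utt):\n    # (start, end) span of each word; a word is correct iff its span matches\n    out, pos = set(), 0\n    for w in utt:\n        out.add((pos, pos + len(w)))\n        pos += len(w)\n    return out\n\ndef boundaries(utt):\n    # internal boundary positions only (utterance edges are given)\n    out, pos = set(), 0\n    for w in utt[:-1]:\n        pos += len(w)\n        out.add(pos)\n    return out\n\ndef evaluate(gold, pred):\n    wt = wg = wp = bt = bg = bp = 0\n    for g, p in zip(gold, pred):\n        gw, pw = spans(g), spans(p)\n        gb, pb = boundaries(g), boundaries(p)\n        wt += len(gw & pw); wg += len(gw); wp += len(pw)\n        bt += len(gb & pb); bg += len(gb); bp += len(pb)\n    gl = {w for u in gold for w in u}\n    pl = {w for u in pred for w in u}\n    return prf(wt, wp, wg), prf(bt, bp, bg), prf(len(gl & pl), len(pl), len(gl))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter setting", |
|
"sec_num": "5.1" |
|
}, |
|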
{ |
|
"text": "We begin by showing that including a second cue type improves segmentation performance. We compare segmentation on a corpus with the attested stress patterns to that of a corpus without stress. With stress information included in the model, word/boundary/lexicon F-scores are .68/.82/.80. Without stress, performance drops to .67/.82/.77. 6 Full results are given in Table 2 . Stress information primarily improves lexicon performance, along with a small improvement in token segmentation. Accounting for stress reduces both false positives and negatives in the lexicon; the fact that the lexical improvement is greater than that for words or boundaries suggests that much of the improvement rests is on rare words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 339, |
|
"end": 340, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 367, |
|
"end": 374, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Stress improves performance", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "These effects are small but significant. For word token performance, we performed a paired t-test on utterance token F-scores between the with-and without-stress models. This difference was significant (t = 11.28, df = 8125, p < .001). We performed a similar utterance-by-utterance test on boundaries; again a small singificant improvement was found (t = 8.92, df = 6084, p < .001). To assess lexicon performance, we calculated for each word type in the gold-standard lexicon the proportion of the five trials in which that word appeared in the learned lexicon for the two models. We then examined the words where the proportions differed between the models. 89 true words appeared more often in the with-stress lexicons; 40 appeared more often in the without-stress lexicons. (683 appeared equally often in both.) By a sign test, this is significant at p < .001. We also tested lexicon performance with a binomial test on the two models' lexicon accuracy; this result was marginal (p = .06).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Stress improves performance", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The explicit tracking of stress information also improves the model's acquisition of the stress bias of the language. Acquisition of the stress bias is potentially useful for generalization; stress patterns can be used for an initial segmentation if few or none of the words are familiar. In practice, we see children use their stress biases to segment new words from English speech (Jusczyk, Houston, & Newsome, 1999) as well as artificial languages (Thiessen & Saffran, 2003) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 383, |
|
"end": 418, |
|
"text": "(Jusczyk, Houston, & Newsome, 1999)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 451, |
|
"end": 477, |
|
"text": "(Thiessen & Saffran, 2003)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Stress improves performance", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We assess the learned stress bias by dividing up the corpus as the model has segmented it, and count the number of tokens with SW versus WS stress patterns. 7 With stress representation, the learned stress bias is 6.77:1, and without stress representation, the stress bias is lower, at 6.33:1. Although these are both underestimates of the corpus's true stress bias (7.86:1), the stressed model is stronger and a better estimate of the true value.", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 158, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Stress improves performance", |
|
"sec_num": "5.2" |
|
}, |
|
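{ |
|
"text": "The bias estimate above is just a ratio of pattern counts over the model's segmented tokens; a one-function sketch (ours; stress_of is a hypothetical lookup from a token to its stress template):\n\nfrom collections import Counter\n\ndef sw_ws_bias(segmented_tokens, stress_of):\n    # ratio of strong-weak to weak-strong tokens in the current segmentation\n    c = Counter(stress_of(t) for t in segmented_tokens)\n    return c[\"SW\"] / c[\"WS\"]  # e.g. about 6.77 with stress, 6.33 without", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Stress improves performance", |
|
"sec_num": "5.2" |
|
}, |
|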
{ |
|
"text": "The model's performance can be compared to various baselines, but perhaps the strongest is one with every syllable boundary being a word boundary. This baseline represents a shift from boundary precision being at ceiling (as in the model) to boundary recall being at ceiling. In fact, due to the preponderance of monosyllabic words in English childdirected speech, this baseline outperforms the model on word and boundary F-scores (.68 and .82 in the model, .82 and .91 in the baseline). However, the baseline's lexicon is much worse than the model's (F=.80 in the with-stress model, F=.64 in the baseline), and the baseline fails to learn anything about the language's stress biases. In addition, the baseline oversegments, whereas both the model and infant segmenters undersegment (Peters, 1983) . This raises an important question about what the model should seek to optimize: though the baseline is more accurate by token, no structure is learned; type performance is more important if we want to learn the underlying structure.", |
|
"cite_spans": [ |
|
{ |
|
"start": 783, |
|
"end": 797, |
|
"text": "(Peters, 1983)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Stress improves performance", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We next use this model to test the necessity of isolated words in rational word segmentation. It is not immediately obvious how human learners begin to segment words from fluid speech. Stress biases and other phonological cues are dominant in all but the earliest of infant word segmentation (Johnson & Jusczyk, 2001 ). This raises a chicken-and-egg problem; if the cues infants favor to segment words, such as stress biases, are dependent on the words of the language, how do they learn enough words to determine the cues' biases?", |
|
"cite_spans": [ |
|
{ |
|
"start": 292, |
|
"end": 316, |
|
"text": "(Johnson & Jusczyk, 2001", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Are isolated words necessary?", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "One existing proposal is that human learners develop their stress biases based on words frequently heard in isolation (Jusczyk et al., 1999) . In English, these include names and common diminutives (e.g., mommy, kitty) that generally have initial stress. These single-word utterances could offer the segmenter an initial guess of the stress bias, by supposing that short utterances are single words and recording their stress patterns. The most common stress patterns in short utterances could then be used as an initial guess at the stress bias to bootstrap other words and thereby improve the learned stress bias.", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 140, |
|
"text": "(Jusczyk et al., 1999)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Are isolated words necessary?", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We test the rational learner's need for such explicit bootstrapping by learning to segment a corpus with all single-word utterances removed. The corpus is produced by excising all single-word utterances from the Korman corpus. This results in a 22081word corpus, 10% fewer tokens than in the original. However, it does not substantially change the lexicon; the number of distinct word types only drops from 811 to 806.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Are isolated words necessary?", |
|
"sec_num": "5.3" |
|
}, |
|
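{ |
|
"text": "The excision itself is a simple filter; a sketch (ours), assuming the corpus is a list of utterances, each a list of words:\n\ndef remove_isolated_words(corpus):\n    # drop single-word utterances (here: 24,493 tokens -> 22,081 tokens)\n    return [utt for utt in corpus if len(utt) > 1]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Are isolated words necessary?", |
|
"sec_num": "5.3" |
|
}, |
|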
{ |
|
"text": "We compare performance only on ambiguous boundaries and lexicon, as these are comparable between the corpora, and find that the model performs almost equally well. Without single-word utterances, boundary and lexical F-scores are .81 and .80, compared to .82 and .80 with single-word utterances. This shows that rational learners are able to segment even without the possibility of bootstrapping stress patterns from single-word utterances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Are isolated words necessary?", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Lastly, we use this model to examine rational performance in a multiple-cue segmentation task. We show that humans' segmentation does not adhere to these predictions, suggesting a bound on human rationality in word segmentation. We consider an artificial language study by Thiessen and Saffran (2003). In this study, infants are exposed to an artificial language consisting of four bisyllabic word types uttered repeatedly without pauses. Each syllable appears in only one word type, so within-word transition probabilities are always 1, while across-word transition probabilities are less than 0.5. Segmentation strategies that hypothesize word boundaries at low transition probabilities or that seek to minimize the lexicon size will segment out the four word types as expected. [Table 3: Examples of segmenting an artificial language according to transition probabilities (top) or stress bias (bottom), when the true words have weak-strong stress. Vertical lines represent word boundaries. Against bias, with TP: AB|CD|CD|AB (stress WS|WS|WS|WS). With bias, against TP: A|BC|DC|DA|B (stress W|SW|SW|SW|S). The top segmentation produces a smaller lexicon, but the bottom segmentation produces primarily words with the preferred stress pattern.]", |
|
"cite_spans": [ |
|
{ |
|
"start": 273, |
|
"end": 300, |
|
"text": "Thiessen and Saffran (2003)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 727, |
|
"end": 734, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Segmentation in the experiment is complicated by the presence of stress in the artificial language. Depending on the condition, the words are either all strong-weak or all weak-strong. In the first condition, segmenting according to transition probabilities, lexicon size, or English stress bias favors the same segmentation. In the second condition, though, segmenting by the English stress bias to yield a lexicon of strong-weak words requires boundaries in the middle of the words. The segmenter must decide whether transition probabilities or preferred stress patterns are more important in segmentation. This situation is illustrated in Table 3 , with a corpus consisting of two word types, AB and CD, each with weak-strong stress.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 642, |
|
"end": 649, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Thiessen and Saffran found that seven-monthold English-learning infants consistently segmented according to the transition probabilities, regardless of stress. However, nine-month-olds segmented according to the English stress bias, even if this meant going against the transition probabilities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Intuitively, this could be rational behavior according to our model. A child's increasing age means more exposure to data, potentially leading the child to develop more confidence in the stress bias. As confidence in the stress bias increases, the cost of segmenting against it increases as well. A sufficiently strong stress preference could lead the segmenter to accept a large lexicon, all of whose words have the preferred stress pattern, over a small lexi-con, all of whose words have the dispreferred stress pattern.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "To judge by the Korman corpus, English has a stress bias of approximately 7:1 in favor of SW bisyllabic stress over WS. 8 If human segmentation behavior follows the rational model, the model should predict segmentation to favor strong-weak words over the transition probabilities when the stress bias is approximately this strong.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "We test this rationality hypothesis with a smaller version of the Thiessen and Saffran artificial language, consisting of 48 tokens. 9 In one version, all tokens have the preferred SW pattern, and in the other all tokens have the dispreferred WS pattern. We then adjust the P S distribution such that P S (SW |M = 2) = b * P S (W S|M = 2), where b is the bias ratio. We run the model otherwise the same as in the previous experiments, except with 10 runs instead of 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
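{ |
|
"text": "A sketch (ours) of the miniature language and the biased stress distribution used here; the word labels are arbitrary placeholders.\n\nimport random\n\ndef mini_corpus(seed=0):\n    # 48 tokens over four bisyllabic types; two types appear 16 times, two appear 8\n    toks = [\"AB\"] * 16 + [\"CD\"] * 16 + [\"EF\"] * 8 + [\"GH\"] * 8\n    random.Random(seed).shuffle(toks)\n    return \"\".join(toks)  # one unbroken stream, no pauses\n\ndef biased_stress(b):\n    # P_S(SW | M=2) = b * P_S(WS | M=2), normalized\n    return {\"SW\": b / (b + 1.0), \"WS\": 1.0 / (b + 1.0)}", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|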
{ |
|
"text": "Contrary to this hypothesis, the model's segmentation with b = 7 was the same whether the true words were strong-weak or weak-strong. In all ten runs, transition probabilities dictated the segmentation. To switch to stress-based segmentation, the bias must be orders of magnitude greater than the English bias. Figure 1 shows the proportions of runs in the weak-strong condition that show segmentation according to the stress bias, as the bias increases by factors of 10. When b = 10000, three of the ten runs segmented according to the stress bias; below that, the stress bias did not affect the rational model's segmentation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 311, |
|
"end": 319, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Why is this? In the Bayesian model, the stress bias of a language affects only the P S (s i |M ) term in the P 0 distribution, so non-novel words are not penalized for their stress pattern. The model pays only once to create a word; once the word is generated, no matter how a priori implausible the word was, it may be cheaply drawn again as a non-novel word. This effect can be illustrated with a brief calculation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Figure 1: Percentage of runs segmented with the stress bias, against transition probabilities, as bias varies. At English-level biases, the rational model still overrules the stress bias when segmenting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Consider a corpus built from four bisyllabic word types (AB, CD, EF, GH), each appearing $N$ times. If the corpus is segmented against the transition probabilities, the resulting lexicon will have 16 bisyllabic word types (BA, BC, BE, BG, DA, etc.), each occurring approximately $N/4$ times. The probability of the against-bias corpus ($C_{WS}$) is proportional to the probability of generating the four word types, and then drawing them non-novelly from the lexicon. 10 (To simplify the calculations, we use the unigram version of the Goldwater et al. model.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(C W S ) \u221d P 4 W P S (W S) 4 (N !) 4 1 4N !", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "The first two terms are the probability of generating the four word types (Eqn. 5); 11 the second two terms are the Dirichlet process draws from the existing lexicon N times each (Eqn. 2). By comparison, the probability of the with-bias corpus C SW depends on generating the 16 word types, and drawing each non-novelly N 4 times.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(C SW ) \u221d P 16 W P S (SW ) 16 N 4 ! 16 1 4N !", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Given an SW bias b and a uniform distribution over syllables (so P W = 1 64 ), we find:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(C W S ) p(C SW ) = 64 12 (b + 1) 12 b 16 (N !) 4 ( N 4 !) 16", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "This equation shows that the rational model is heavily biased toward the segmentation that fits the transition probabilities. Increasing the stress bias b or decreasing the number of observed word tokens makes the rational model more likely to segment with the stress bias (against transition probabilities), but as we see in the experimental results, the stress bias must be very strong to overcome the efficient lexicon that the transition probability segmentation provides.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
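{ |
|
"text": "Eqn. 8 can be evaluated directly in log space; a sketch (ours), using lgamma(n + 1) for log n!. At an English-like bias of $b = 7$ the log ratio is large and positive for corpora of the sizes used here, matching the model's preference for the transition-probability segmentation.\n\nfrom math import lgamma, log\n\ndef log_ratio(b, N):\n    # log[p(C_WS) / p(C_SW)] from Eqn. 8; positive favors the TP segmentation\n    logfact = lambda n: lgamma(n + 1)\n    return (12 * log(64) + 12 * log(b + 1) - 16 * log(b)\n            + 4 * logfact(N) - 16 * logfact(N / 4))\n\nprint(log_ratio(7, 12))  # stays well above 0 at b = 7", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|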
{ |
|
"text": "Since humans do not show this same inherent bias (or quickly lose it as they acquire the stress bias), we can ask how humans deviate from rationality. One possibility is that humans simply do not segment in this Bayesian manner. However, previous work (Frank, Goldwater, Griffiths, & Tenenbaum, 2010) has shown that human word segmentation shows similar behavior to a resource-limited Bayesian model. Equation 8 suggests that human segmentation could deviate from rationality by having an effectively stronger bias than English would suggest (reducing the first fraction) 12 or, as with Phillips and Pearl's constrained learners, by having effectively less input than the model assumes (reducing the second fraction).", |
|
"cite_spans": [ |
|
{ |
|
"start": 252, |
|
"end": 300, |
|
"text": "(Frank, Goldwater, Griffiths, & Tenenbaum, 2010)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bounded rationality in human segmentation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Introducing stress into the Bayesian segmentation model suggests a few additional expansions. One possibility is to add other cues into the generative model via P 0 . Any cue that is based on the word itself can be added in this way, with little change to the general model structure. Phonotactics can be added using an n-gram distribution for P 0 (Blanchard & Heinz, 2008) . Coarticulation between adjacent phonemes is also used in human segmentation (Johnson & Jusczyk, 2001) , so the P 0 distribution could predict higher within-word coarticulation. Integrating additional cues used by human segmenters extends the investigation of the bounds on rationality in human segmentation and in balancing multiple conflicting cues. 12 A potential source of an inflated bias is infants' preference for strong-weak patterns. Jusczyk, Cutler, and Redanz (1993) found English-hearing infants listened longer to strong-weak patterns than weak-strong. This could lead to overestimation of the stress bias by making possible strong-weak segmentations more prominent in the segmenter's mind.", |
|
"cite_spans": [ |
|
{ |
|
"start": 348, |
|
"end": 373, |
|
"text": "(Blanchard & Heinz, 2008)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 477, |
|
"text": "(Johnson & Jusczyk, 2001)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 727, |
|
"end": 729, |
|
"text": "12", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 818, |
|
"end": 852, |
|
"text": "Jusczyk, Cutler, and Redanz (1993)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Future work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "A more complex view of the stress system of a language may also be useful. One possibility is to place a Dirichlet prior over the stress templates and allow P S to be learned as a latent variable in the model. Another possibility is to treat the stress templates more generally; in the present implementation, knowledge of the preferred stress patterns for word of one length tells the segmenter nothing about preferred stress patterns in another length. Crosslinguistically common stress rules (e.g., those that place stress a certain number of syllables from the left or right edge of a word) can be coded into P S to improve generalization. Each rule dictates a specific stress pattern for each word length. When a word is generated in the Dirichlet process, the generative model would decide whether to assign stress according to one of these rules or to assign lexical stress from a default multinomial distribution. (This \"default\" distribution would handle idiosyncratic stress assignments, as one might see with names or morphologically complex words, like Spanish reflexive verbs.) A sparse prior over these rules, asymmetrically weighted against the default category, will encourage the model to explain as much of the observed stress patterns as possible with a few dominant rules, improving the phonological structure that the segmenter learns.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Future work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Improving the realism of the data is also important. The corpora used in much of segmentation research are idealized representations of the true data, and the dictionary-based phoneme and stress patterns used in this study are no exception. This ideal setting may paint a skewed picture of the segmentation problem, by providing a more consistent and learnable data source than humans actually receive. Elsner, Goldwater, and Eisenstein (2012) 's model unifying lexical and phonetic acquisition takes a significant step in showing that a rational segmenter can handle noisy input by recognizing phonetic variants of a base form. In terms of stress representations, dictionary-based stress has been standard in previous work (Christiansen et al., 1998; Gambell & Yang, 2006; Rytting, Brew, & Fosler-Lussier, 2010) , but it is important to confirm such results against a (currently nonexistent) corpus with stresses based on the actual utterances. Effective use of stress in a less idealized setting may require a more complex representation of stress in the model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 403, |
|
"end": 443, |
|
"text": "Elsner, Goldwater, and Eisenstein (2012)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 724, |
|
"end": 751, |
|
"text": "(Christiansen et al., 1998;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 752, |
|
"end": 773, |
|
"text": "Gambell & Yang, 2006;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 774, |
|
"end": 812, |
|
"text": "Rytting, Brew, & Fosler-Lussier, 2010)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Future work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Effective word segmentation combines multiple factors to make predictions about word boundaries. We extended an existing Bayesian segmentation model to account for two factors, phonemes and stress, when segmenting. This improves segmentation performance and opens up new possibilities for comparing rational segmentation and human segmentation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The model assumes that utterance boundaries are generated just like other words, and includes an adjustable parameter p $ to account for their frequency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "ExperimentsWe test the model on three problems. First, we show that the addition of stress information improves segmentation performance compared to a stress-less model. Next, we apply the model to a question in human segmentation acquisition. Finally, we look at4 Approximately 150 word tokens from the original corpus were omitted in our version of the corpus due to a disparity between recorded number of syllables and number of stresses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Performance was similar for a range of settings between 1 and 100 for \u03b10 and between 10 and 200 for \u03b11.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Recall that due to the syllabified data, these results are not directly comparable to unsyllabified results in previous work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note this defines a stress bias for the stressless model as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The specific bias varies from corpus to corpus, but this appears to be a representative value.9 The 48 tokens come from four word types, with two types appearing 16 times and the other two appearing 8 times, mimicking the relative frequencies of Thiessen and Saffran's languages. Their test language had 270 tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "It is also possible to generate this corpus by re-drawing the words novelly, but this is much less likely than non-novel draws.11 Because all syllables have equal unigram probabilities, the probability of all words' phonemic forms are equal, and will be written as PW .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research was partially supported by an Alfred P. Sloan Fellowship to RL and by NSF award 0830535. We also appreciate the feedback of the reviewers and the members of the UCSD Computational Psycholinguistics Lab.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Improving word segmentation by simultaneously learning phonotactics", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Blanchard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Heinz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Blanchard, D., & Heinz, J. (2008). Improving word segmentation by simultaneously learn- ing phonotactics. In Proceedings of CoNLL (pp. 65-72).", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Mommy and me: familiar names help launch babies into speech-stream segmentation", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Borfeld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Morgan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Golinkoff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Rathbun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Psychological Science", |
|
"volume": "16", |
|
"issue": "4", |
|
"pages": "298--304", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Borfeld, H., Morgan, J., Golinkoff, R., & Rath- bun, K. (2005). Mommy and me: familiar names help launch babies into speech-stream segmentation. Psychological Science, 16(4), 298-304.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "An efficient, probabilistically sound algorithm for segmentation and word discovery", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Brent", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Machine Learning", |
|
"volume": "34", |
|
"issue": "", |
|
"pages": "71--105", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brent, M. R. (1999). An efficient, probabilistically sound algorithm for segmentation and word discovery. Machine Learning, 34, 71-105.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Learning to segment speech using multiple cues: A connectionist model. Language and Cognitive Processes", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Christiansen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Allen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Seidenberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "13", |
|
"issue": "", |
|
"pages": "221--268", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christiansen, M. H., Allen, J., & Seidenberg, M. S. (1998). Learning to segment speech using multiple cues: A connectionist model. Lan- guage and Cognitive Processes, 13, 221-268.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A model of speech perception. In Perception and production of fluent speech", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Cole", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Jakimik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1980, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "136--163", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cole, R., & Jakimik, J. (1980). A model of speech perception. In Perception and production of fluent speech (pp. 136-163). Hillsdale, NJ: Erlbaum.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "The predominance of strong initial syllables in the English vocabulary", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Cutler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Carter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1987, |
|
"venue": "Comp. Speech Lang", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "133--142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cutler, A., & Carter, D. (1987). The predominance of strong initial syllables in the English vocab- ulary. Comp. Speech Lang., 2, 133-142.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Segmental and syllabic representations in the perception of speech by young infants", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Eimas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Journal of the Acoustic Society of America", |
|
"volume": "105", |
|
"issue": "", |
|
"pages": "1901--1911", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eimas, P. (1999). Segmental and syllabic representa- tions in the perception of speech by young in- fants. Journal of the Acoustic Society of Amer- ica, 105, 1901-1911.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Bootstrapping a unified model of lexical and phonetic acquisition", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Elsner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Goldwater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th annual meeting of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elsner, M., Goldwater, S., & Eisenstein, J. (2012). Bootstrapping a unified model of lexical and phonetic acquisition. In Proceedings of the 50th annual meeting of the ACL.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Learning phonetic categories by learning a lexicon", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Feldman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Griffiths", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Morgan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 31st annual conference on cognitive science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Feldman, N., Griffiths, T., & Morgan, J. (2009). Learning phonetic categories by learning a lexicon. In Proceedings of the 31st annual conference on cognitive science.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Modeling human performance in statistical word segmentation", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Frank", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Goldwater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Griffiths", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tenenbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Frank, M., Goldwater, S., Griffiths, T., & Tenen- baum, J. (2010). Modeling human perfor- mance in statistical word segmentation. Cog- nition.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Using speakers' referential intentions to model early cross-situational word learning", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Frank", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tenenbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Psychological Science", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "579--585", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Frank, M., Goodman, N., & Tenenbaum, J. (2009). Using speakers' referential intentions to model early cross-situational word learning. Psychological Science, 20, 579-585.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Word segmentation: Quick but not dirty", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Gambell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gambell, T., & Yang, C. (2006). Word segmen- tation: Quick but not dirty. (Unpublished manuscript)", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Nonparametric Bayesian models of lexical acquisition. Unpublished doctoral dissertation", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Goldwater", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Goldwater, S. (2007). Nonparametric Bayesian models of lexical acquisition. Unpublished doctoral dissertation, Brown Univ.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Contextual dependencies in unsupervised word segmentation", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Goldwater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Griffiths", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of Coling/ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Goldwater, S., Griffiths, T., & Johnson, M. (2006). Contextual dependencies in unsupervised word segmentation. In Proceedings of Col- ing/ACL.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A Bayesian framework for word segmentation: Exploring the effects of context", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Goldwater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Griffiths", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Cognition", |
|
"volume": "112", |
|
"issue": "", |
|
"pages": "21--54", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Goldwater, S., Griffiths, T. L., & Johnson, M. (2009). A Bayesian framework for word seg- mentation: Exploring the effects of context. Cognition, 112, 21-54.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Word segmentation by 8-month-olds: When speech cues count more than statistics", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Jusczyk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "J. of Memory and Language", |
|
"volume": "44", |
|
"issue": "", |
|
"pages": "548--567", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johnson, E., & Jusczyk, P. (2001). Word segmen- tation by 8-month-olds: When speech cues count more than statistics. J. of Memory and Language, 44, 548-567.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Preference for predominant stress patterns of English words", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Jusczyk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Cutler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Redanz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Child Development", |
|
"volume": "64", |
|
"issue": "", |
|
"pages": "675--687", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jusczyk, P., Cutler, A., & Redanz, N. (1993). Pref- erence for predominant stress patterns of En- glish words. Child Development, 64, 675- 687.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "The beginnings of word segmentation in English-learning infants", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Jusczyk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Houston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Newsome", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Cognitive Psychology", |
|
"volume": "39", |
|
"issue": "", |
|
"pages": "159--207", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jusczyk, P., Houston, D., & Newsome, M. (1999). The beginnings of word segmentation in English-learning infants. Cognitive Psychol- ogy, 39, 159-207.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Adaptive aspects of maternal vocalizations in differing contexts at ten weeks. First language", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Korman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1984, |
|
"venue": "", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "44--45", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Korman, M. (1984). Adaptive aspects of mater- nal vocalizations in differing contexts at ten weeks. First language, 5, 44-45.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Joint acquisition of word order and word reference", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Maurits", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Perfors", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Navarro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of 31st annual conference of the Cognitive Science Society", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maurits, L., Perfors, A., & Navarro, D. (2009). Joint acquisition of word order and word reference. In Proceedings of 31st annual conference of the Cognitive Science Society.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "The units of language acquisition: Monographs in applied psycholinguistics", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1983, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peters, A. (1983). The units of language acqui- sition: Monographs in applied psycholinguis- tics. Cambridge Univ. Press.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "less is more\" in Bayesian word segmentation: When cognitively plausible learners outperform the ideal", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Phillips", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Pearl", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 34th annual conference of the cognitive science society", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Phillips, L., & Pearl, L. (2012). \"less is more\" in Bayesian word segmentation: When cogni- tively plausible learners outperform the ideal. In Proceedings of the 34th annual conference of the cognitive science society.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Segmenting words from natural speech: subsegmental variation in segmental cues", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Rytting", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Brew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Fosler-Lussier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Journal of Child Language", |
|
"volume": "37", |
|
"issue": "", |
|
"pages": "513--543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rytting, C. A., Brew, C., & Fosler-Lussier, E. (2010). Segmenting words from natural speech: subsegmental variation in segmental cues. Journal of Child Language, 37, 513- 543.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Statistical clustering and the contents of the infant vocabulary", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Swingley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Cognitive Psychology", |
|
"volume": "50", |
|
"issue": "", |
|
"pages": "86--132", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Swingley, D. (2005). Statistical clustering and the contents of the infant vocabulary. Cognitive Psychology, 50, 86-132.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "When cues collide: Use of stress and statistical cues to word boundaries by 7-to 9-month-old infants", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Thiessen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Saffran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Developmental Psychology", |
|
"volume": "39", |
|
"issue": "4", |
|
"pages": "706--716", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thiessen, E. D., & Saffran, J. R. (2003). When cues collide: Use of stress and statistical cues to word boundaries by 7-to 9-month-old infants. Developmental Psychology, 39(4), 706-716.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Cross-language speech perception: Evidence for perceptual reorganization during the first year of life", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Werker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Tees", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1984, |
|
"venue": "fant Behavior and Development", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "49--63", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Werker, J., & Tees, R. (1984). Cross-language speech perception: Evidence for perceptual reorganization during the first year of life. In- fant Behavior and Development, 7, 49-63.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Corpus stress patterns by types and tokens, showing an initial-stress bias in all lengths." |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Precision, recall, and F-score over corpora with and without stress information available. Stress information especially improves lexical performance." |
|
} |
|
} |
|
} |
|
} |