|
{ |
|
"paper_id": "N15-1018", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:35:45.359018Z" |
|
}, |
|
"title": "TopicCheck: Interactive Alignment for Assessing Topic Model Stability", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Chuang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Margaret", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Roberts", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "meroberts@ucsd.edu" |
|
}, |
|
{ |
|
"first": "Brandon", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Stewart", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "bstewart@fas.harvard.edu" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "rjweiss@stanford.edu" |
|
}, |
|
{ |
|
"first": "Dustin", |
|
"middle": [], |
|
"last": "Tingley", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "dtingley@gov.harvard.edu" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Grimmer", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "jgrimmer@stanford.edu" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Heer", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "jheer@uw.edu" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Content analysis, a widely-applied social science research method, is increasingly being supplemented by topic modeling. However, while the discourse on content analysis centers heavily on reproducibility, computer scientists often focus more on scalability and less on coding reliability, leading to growing skepticism on the usefulness of topic models for automated content analysis. In response, we introduce TopicCheck, an interactive tool for assessing topic model stability. Our contributions are threefold. First, from established guidelines on reproducible content analysis, we distill a set of design requirements on how to computationally assess the stability of an automated coding process. Second, we devise an interactive alignment algorithm for matching latent topics from multiple models, and enable sensitivity evaluation across a large number of models. Finally, we demonstrate that our tool enables social scientists to gain novel insights into three active research questions.", |
|
"pdf_parse": { |
|
"paper_id": "N15-1018", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Content analysis, a widely-applied social science research method, is increasingly being supplemented by topic modeling. However, while the discourse on content analysis centers heavily on reproducibility, computer scientists often focus more on scalability and less on coding reliability, leading to growing skepticism on the usefulness of topic models for automated content analysis. In response, we introduce TopicCheck, an interactive tool for assessing topic model stability. Our contributions are threefold. First, from established guidelines on reproducible content analysis, we distill a set of design requirements on how to computationally assess the stability of an automated coding process. Second, we devise an interactive alignment algorithm for matching latent topics from multiple models, and enable sensitivity evaluation across a large number of models. Finally, we demonstrate that our tool enables social scientists to gain novel insights into three active research questions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Content analysis -the examination and systematic categorization of written texts (Berelson, 1952) -is a fundamental and widely-applied research method in the social sciences and humanities (Krippendorff, 2004a) , found in one third of all articles published in major communication journals (Wimmer and Dominick, 2010) . Initial reading and coding, two labor-intensive steps in the analysis process, are increasingly replaced by computational approaches such as statistical topic modeling (Grimmer, 2013; McFarland et al., 2013; Roberts et al., 2014a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 97, |
|
"text": "(Berelson, 1952)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 210, |
|
"text": "(Krippendorff, 2004a)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 290, |
|
"end": 317, |
|
"text": "(Wimmer and Dominick, 2010)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 488, |
|
"end": 503, |
|
"text": "(Grimmer, 2013;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 504, |
|
"end": 527, |
|
"text": "McFarland et al., 2013;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 528, |
|
"end": 550, |
|
"text": "Roberts et al., 2014a)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, while the discourse on content analysis overwhelmingly centers around the reproducibility and generalizability of a coding scheme (Krippendorff, 2004b; Lombard et al., 2002) , computer scientists tend to focus more on increasing the scale of analysis and less on establishing coding reliability. Machine-generated latent topics are often taken on faith to be a truthful and consistent representation of the underlying corpus, but in practice exhibit significant variations among models or modeling runs. These unquantified uncertainties fuel growing skepticism (Schmidt, 2012) and hamper the continued adoption (Grimmer and Stewart, 2011) of topic models for automated content analysis.", |
|
"cite_spans": [ |
|
{ |
|
"start": 139, |
|
"end": 160, |
|
"text": "(Krippendorff, 2004b;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 182, |
|
"text": "Lombard et al., 2002)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 570, |
|
"end": 585, |
|
"text": "(Schmidt, 2012)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 620, |
|
"end": 647, |
|
"text": "(Grimmer and Stewart, 2011)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In response, we introduce TopicCheck, an interactive tool for assessing the stability of topic models. Our threefold contributions are as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "First, from established guidelines on reproducible content analysis, we distill a set of design requirements on how to computationally assess the stability of an automated coding process. We advocate for the use of multiple models for analysis, a user-driven approach to identify acceptable levels of coding uncertainty, and providing users with the capability to inspect model output at all levels of detail.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Second, we devise an interactive up-to-one alignment algorithm for assessing topic model stability. Through repeated applications of a topic model to generate multiple outputs, our tool allows users to inspect whether the model consistently uncover the same set of concepts. We allow users to interactively define groupings of matching topics, and present the aligned topics using an informative tabular layout, so that users can quickly identify stable topical groupings as well as any inconsistencies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Finally, in three case studies, we demonstrate that our tool allows social scientists to gain novel insights into active and ongoing research questions. We provide an in-depth look at the multi-modality of topic models. We document how text pre-processing alters topical compositions, causing shifts in definitions and the removal of select topics. We report on how TopicCheck supports the validity of newlyproposed communication research methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Manual approaches to extract information from textual data -reading the source documents and codifying notable concepts -do not scale. For example, Pew Research Center produces the News Coverage Index (2014) to measure the quality of news reporting in the United States. Intended to track 1,450 newspapers nationwide, their purely manual efforts only cover 20 stories per day. Researchers stand to lose rich details in their data when their attention is limited to a minuscule fraction of the available texts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Critical of approaches that \"[make] restrictive assumptions or [are] prohibitively costly,\" Quinn et al. (2010) discuss the use of topic models (Blei et al., 2003) to enable large-scale text analysis by using machine-generated latent topics to approximate previously manually-crafted codes. Automated content analysis has enabled groundbreaking massive studies (Grimmer, 2013; McFarland et al., 2013; Roberts et al., 2014a) . While this initial uptake of topic models is encouraging, an over-emphasis on scalability and the use of a single model for analysis invites skepticism and threatens continued adoption.", |
|
"cite_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 163, |
|
"text": "(Blei et al., 2003)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 361, |
|
"end": 376, |
|
"text": "(Grimmer, 2013;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 377, |
|
"end": 400, |
|
"text": "McFarland et al., 2013;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 401, |
|
"end": 423, |
|
"text": "Roberts et al., 2014a)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Coding reliability is critical to content analysis. When social scientists devise a coding scheme, they must clearly articulate the definition of their codes in such a way that any person can consistently apply the given codes to all documents in a corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coding Reliability & Growing Skepticism", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Despite high labor cost, content analysis is typically conducted with multiple coders in order to es-tablish coding reliability; the proper application of reliability measures is heavily discussed and debated in the literature (Krippendorff, 2004b; Lombard et al., 2002) . In contrast, software packages (McCallum, 2013; \u0158eh\u016f\u0159ek and Sojka, 2010) and graphical tools (Chaney and Blei, 2014; Chuang et al., 2012b) have made topic models accessible, cheap to compute, easy to deploy, but they almost always present users with a single model without any measure of uncertainty; we find few studies on topic model sensitivity and no existing tool to support such analyses.", |
|
"cite_spans": [ |
|
{ |
|
"start": 227, |
|
"end": 248, |
|
"text": "(Krippendorff, 2004b;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 270, |
|
"text": "Lombard et al., 2002)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 304, |
|
"end": 320, |
|
"text": "(McCallum, 2013;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 321, |
|
"end": 345, |
|
"text": "\u0158eh\u016f\u0159ek and Sojka, 2010)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 366, |
|
"end": 389, |
|
"text": "(Chaney and Blei, 2014;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 390, |
|
"end": 411, |
|
"text": "Chuang et al., 2012b)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coding Reliability & Growing Skepticism", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Schmidt (2012) summarizes the view among digital humanists, a group of early adopters of topic models, on the experience of working with uncertain modeling results: \"A poorly supervised machine learning algorithm is like a bad research assistant. It might produce some unexpected constellations that show flickers of deeper truths; but it will also produce tedious, inexplicable, or misleading results. . . .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coding Reliability & Growing Skepticism", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "[Excitement] about the use of topic models for discovery needs to be tempered with skepticism about how often the unexpected juxtapositions. . . will be helpful, and how often merely surprising.\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coding Reliability & Growing Skepticism", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Researchers increasingly voice skepticism about the validity of using single models for analysis. In a comprehensive survey of automatic content analysis methods, Grimmer et al. (2011) highlight the need to validate models through close reading and model comparison, and advise against the use of software that \"simply provide the researcher with output\" with no capability to ensure the output is conceptually valid and useful. Chuang et al. (2012a) report that findings from one-off modeling efforts may not sustain under scrutiny. Schmidt (2012) argues that computer-aided text analysis should incorporate competing models or \"humanists are better off applying zero computer programs.\"", |
|
"cite_spans": [ |
|
{ |
|
"start": 163, |
|
"end": 184, |
|
"text": "Grimmer et al. (2011)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 429, |
|
"end": 450, |
|
"text": "Chuang et al. (2012a)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coding Reliability & Growing Skepticism", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "While topic models remove some issues associated with human coding, they also introduce new sources of uncertainties. We review three factors related to our case studies: multi-modality, text preprocessing, and human judgment of topical quality. Roberts et al. (2014b) examine the multi-modal distributions of topic models that arise due to the non-convex nature of the underlying optimization. They characterize the various local solutions, and demonstrate that the spread of topics can lead to contradictory analysis outcomes. The authors note that optimal coding may not necessarily correspond to models that yield the highest value of the objective function, but there is currently a paucity of computational tools to inspect how the various modes differ, help researchers justify why one local mode might be preferred over another on the basis of their domain knowledge, or for an independent researcher to validate another's modeling choices. Fokkens et al. (2013) report widespread reproducibility failures in natural language processing when they replicate -and fail to reproduce -the results reported on two standard experiments. The authors find that minor decisions in the modeling process can impact evaluation results, including two factors highly relevant to topic modeling: differences in text pre-processing and corpus vocabulary.", |
|
"cite_spans": [ |
|
{ |
|
"start": 246, |
|
"end": 268, |
|
"text": "Roberts et al. (2014b)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 949, |
|
"end": 970, |
|
"text": "Fokkens et al. (2013)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Uncertainties in Topic Models", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The word intrusion test (Chang et al., 2009; Lau et al., 2014 ) is considered the current state-of-theart approach to assess topical quality, and captures human judgment more accurately than other topical coherence measures (Stevens et al., 2012; Wallach et al., 2009) . However, in this approach, users inspect only a single latent topic at a time without access to the overall set of topics. As a part of this paper, we investigate whether exposure to multiple competing models affects human judgment, and whether model consistency impacts topical coherence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 44, |
|
"text": "(Chang et al., 2009;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 45, |
|
"end": 61, |
|
"text": "Lau et al., 2014", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 224, |
|
"end": 246, |
|
"text": "(Stevens et al., 2012;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 268, |
|
"text": "Wallach et al., 2009)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Uncertainties in Topic Models", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "While no single definition exists for the process of content analysis, a frequently-cited and wideapplied template is provided by Krippendorff (1989; 2004b) who recommends four steps to safeguard the reproducibility of a coding process. Practitioners must demonstrate coder reliability, a decisive agreement coefficient, an acceptable level of agreement, and test individual variables.", |
|
"cite_spans": [ |
|
{ |
|
"start": 130, |
|
"end": 149, |
|
"text": "Krippendorff (1989;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 150, |
|
"end": 156, |
|
"text": "2004b)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reproducibility of a Coding Process", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "To the best of our knowledge, our paper is the first to convert guidelines on reproducible human coding into software design requirements on validating automated content analysis. Our interactive alignment algorithm is the first implementation of these guidelines. Our case studies represent the first reports on the impact of computationally quantifying topic model uncertainties, situated within the context of real-world ongoing social science research.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reproducibility of a Coding Process", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Much of the research on topic modeling focuses on model designs (Blei et al., 2004; Blei and Lafferty, 2006; Rosen-Zvi et al., 2004) or inference algorithms (Anandkumar et al., 2012) . Our tool is complementary to this large body of work, and supports real-world deployment of these techniques. Interactive topic modeling (Hu et al., 2014) can play a key role to help users not only verify model consistency but actively curate high-quality codes; its inclusion is beyond the scope of a single conference paper. While supervised learning (Settles, 2011) has been applied to content analysis, it represents the application of a pre-defined coding scheme to a text corpus, which is different from the task of devising a coding scheme and assessing its reliability.", |
|
"cite_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 83, |
|
"text": "(Blei et al., 2004;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 84, |
|
"end": 108, |
|
"text": "Blei and Lafferty, 2006;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 109, |
|
"end": 132, |
|
"text": "Rosen-Zvi et al., 2004)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 182, |
|
"text": "(Anandkumar et al., 2012)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 322, |
|
"end": 339, |
|
"text": "(Hu et al., 2014)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 538, |
|
"end": 553, |
|
"text": "(Settles, 2011)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reproducibility of a Coding Process", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "A measure of coding reproducibility is whether a topic model can consistently uncover the same set of latent topics. We assume that users have a large number of topic model outputs, presumed to be identical, and that the users wish to examine unexpected variations among the outputs. To guide tool development, we first identify software design requirements, to meet the standards social scientists need to demonstrate producible coding.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Validation Tool Design Requirements", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "A key difference exists between measuring intercoder agreement and assessing topic model variations. In a manual coding process, human coders are provided code identifiers; responses from different coders can be unambiguously mapped onto a common scheme. No such mapping exists among the output from repeated runs of a topic model. Validation tools must provide users with effective means to generate topical mapping.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topical Mapping & Up-to-One Alignment", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "However, the general alignment problem of optimally mapping multiple topics from one model to multiple topics in another model is both ill-defined and computationally intractable. Since our tool is to support the comparison of similar -and supposedly identical -model output, we impose the following constraint. A latent topic belonging to a model may align with up to one latent topic in another model. We avoid the more restrictive constraint of one-toone alignment. Forcing a topic to always map onto another topic may cause highly dissimilar topics to be grouped together, obscuring critical mismatches. Instead, up-to-one mapping allows for two potential outcomes, both of which correspond directly to the intended user task: recognize consistent patterns across the models (when alignment occurs) and identify any deviations (when alignment fails).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topical Mapping & Up-to-One Alignment", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We synthesize the following four requirements from Krippendorff's guidelines (2004b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 84, |
|
"text": "Krippendorff's guidelines (2004b)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Guidelines Adapted for Topic Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To calculate the equivalent of coder reliability, we advocate the use of multiple models to determine modeling consistency, which may be determined from the repeated applications of the same topic model, a search through the parameter space of a model, or the use of multiple models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Guidelines Adapted for Topic Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Selecting an appropriate agreement coefficient depends on the underlying data type, such as binary, multivariate, ordered, or continuous codes (Cohen, 1960; Holsti, 1969; Krippendorff, 1970; Osgood, 1959; Scott, 1995) . No widely-accepted similarity measure exists for aligning latent topics, which are probability distributions over a large vocabulary. We argue that validation tools must be sufficiently modular, in order to accept any user-defined topical similarity measure for aligning latent topics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 156, |
|
"text": "(Cohen, 1960;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 170, |
|
"text": "Holsti, 1969;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 171, |
|
"end": 190, |
|
"text": "Krippendorff, 1970;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 191, |
|
"end": 204, |
|
"text": "Osgood, 1959;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 217, |
|
"text": "Scott, 1995)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Guidelines Adapted for Topic Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Acceptable level of agreement depends on the purpose of the analysis, and should account for the costs of drawing incorrect conclusions from a coding scheme. For example, do \"human lives hang on the results of a content analysis?\" (Krippendorff, 2004b) . Validation tools must allow users to set the appropriate acceptable level of agreement, and help users determine -rather than dictate -when topic models match and what constitutes reasonable variations in the model output.", |
|
"cite_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 252, |
|
"text": "(Krippendorff, 2004b)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Guidelines Adapted for Topic Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Finally, Krippendorff points out that aggregated statistics can obscure critical reliability failures, and practitioners must test individual variables. We interpret this recommendation as the need to present users with not a single overall alignment score but details at all levels: models, topics, and constituent words within each latent topic.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Guidelines Adapted for Topic Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We introduce TopicCheck, an implementation of our design specifications. At the core of this tool is an interactive topical alignment algorithm.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interactive Topical Alignment", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our algorithm can be considered as hierarchical agglomerative clustering with up-to-one mapping constraints. As input, it takes in three arguments: a list of topic models, a topical similarity measure, and a matching criterion. As output, it generates a list of topical groups, where each group contains a list of topics with at most one topic from each model. At initialization, we create a topical group for every topic in every model. We then iteratively merge the two most similar groups based on the user-supplied topical similarity measure, provided that the groups satisfy the user-specified matching criterion and the mapping constraints. When no new groups can be formed, the algorithm terminates and returns a sorted list of final topical groups.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Clustering with Constraints", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "During the alignment process, the following two invariants are guaranteed: Every topic is always assigned to exactly one group; every group contains at most one topic from each model. A topic model m consists of a list of latent topics. A latent topic t is represented by a probability distribution over words. A topical group g also consists of a list of latent topics. Let |m|, |t|, and |g| denote the number of models, topics, and groups respectively. We create a total of |g| = |m| \u00d7 |t| initial topical groups. Although |g| decreases by 1 after each merge, |g| \u2265 |t| at all times. At the end of alignment, |g| = |t| if and only if perfect alignment occurs and every group contains exactly one topic from each model. Users may supply any topical similarity measure that best suits their analysis needs. We select cosine similarity for our three case studies, though our software is modular and accepts any input. As a first implementation, we apply single-linkage clustering criteria when comparing the similarity of two topical groups. Single-linkage clustering is computationally efficient (Sibson, 1973) , so that users may interact with the algorithm and receive feedback in real-time; our procedure generalizes to other linkage criteria such as complete-linkage or average-linkage.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1096, |
|
"end": 1110, |
|
"text": "(Sibson, 1973)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Clustering with Constraints", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "At each merge step, the most similar pair of topical groups are identified. If they meet the matching criteria and the mapping constraints, the pair is combined into a new group. Otherwise, the algorithm iteratively examines the next most similar pair until either a merge occurs or when all pairs are ex- (Roberts et al., 2013) . Latent topics are represented as rectangles; bar charts within the rectangles represent top terms in a topic. Topics belonging to the same model are arranged in a column; topics assigned to the same group are arranged in a row. This chart is completely filled with topics only if perfect alignment occurs. When topics in a model fail to align with topics in other models, empty cells appear in its column. Similarly, when topics in a group are not consistently uncovered by all models, empty cells appear in its row. Hovering over a term highlights all other occurrences of the same term. Top terms belonging to each topical group are shown on the right; they represent the most frequent words over all topics in the group, by summing their probability distributions. hausted, at which point the procedure terminates.", |
|
"cite_spans": [ |
|
{ |
|
"start": 306, |
|
"end": 328, |
|
"text": "(Roberts et al., 2013)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Clustering with Constraints", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Users can specify a similarity threshold, below which topical groups are considered to differ too much to be matched. Two groups are allowed to merge only if both of the following conditions are met: their similarity is above the user-defined sim-ilarity threshold and every topic in the combined group belongs to a different model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Clustering with Constraints", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We devise a tabular layout to present the alignment output at all levels of detail: groups, models, topics, and words. Users can interact with the algorithm, redefine matching criteria, and inspect the aligned models interactively in real-time.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tabular Layout and User Interactions", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We arrange topical groups as rows and topic models as columns as shown in Figure 1 . A topic assigned to group g i and belonging to model m j is placed at the intersection of row i and column j. Our up-to-one mapping ensures at most one topic per each cell. A table of size |g| \u00d7 |m| will only be completely filled with topics if perfect alignment occurs. When topics in model m j fail to align with topics in other models, empty cells appear in column j. Similarly, when topics in group g i are not consistently uncovered by all models, empty cells appear in row i. Within each topic, we show the probability distribution of its constituent words as a bar chart.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 82, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Tabular Layout and User Interactions", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Users define three parameters in our tool. First, they may set the matching criteria, and define how aggressively the topics are merged into groups. Second, users may alter the number of topical groups to reveal. Rather than displaying numerous sparse groups, the tool shows only the top groups as determined by their topical weight. Topics in all remaining groups are placed at the bottom of the table and marked as ungrouped. Third, users may adjust the number of top terms to show, as a trade-off between details vs. overview. Increasing the number of terms allows users to inspect the topics more carefully, but the cells take up more screen space, reducing the number of visible groups. Decreasing the number of terms reduces the size of each cell, allowing users to see more groups and observe high-level patterns.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tabular Layout and User Interactions", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The tabular layout enables rapid visual assessment of consistency within a model or a group. We further facilitate comparisons via brushing and linking (Becker and Cleveland, 1987) . When users hover over a word on the right hand side or over a bar within the bar charts, we highlight all other occurrences of the same word. For example, in Figure 1 , hovering over the term econom reveals that the word is common in three topical groups.", |
|
"cite_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 180, |
|
"text": "(Becker and Cleveland, 1987)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 341, |
|
"end": 349, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Tabular Layout and User Interactions", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We implemented our alignment algorithm and user interface in JavaScript, so they are easily accessible within a web browser; topical similarity is computed on a Python-backed web server. We report user responses and initial findings from deploying the tool on three social science research projects. Interactive versions of the projects are available at http://content-analysis.info/naacl.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deployment and Initial Findings", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We deployed TopicCheck on topic models generated by Roberts et al. (2014b) to examine how model output clusters into local modes. As the models are produced by 50 runs of an identical algorithm with all pre-processing, parameters, and hyper-parameters held constant, we expect minimal variations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 74, |
|
"text": "Roberts et al. (2014b)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Look at Multi-Modal Solutions", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "As shown in Figure 1 , we observe that the top two topical groups, about Barack Obama and John McCain respectively, are consistently uncovered across all runs. The third topical group, about the Iraqi and Afghani wars (defined by a broader set of terms) is also consistently generated by 49 of the 50 runs.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 20, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Look at Multi-Modal Solutions", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Toward the bottom of the chart, we observe signs of multi-modality. Topical groups #15 to #17 represent variations of topics about the economy. Whereas group #15 is about the broader economy, groups #16 and #17 focus on taxes and the financial crisis, respectively. Half of the runs produced the broader economy topic; the other runs generated only one or two of the specialized subtopics. No single model uncovered all three, suggesting that the inference algorithm converged to one of two distinct local optimal solutions. In Figure 2 , by lowering the matching criteria and revealing additional groups, we find that the model continues to produce interesting topics such as those related to global warming (group #24) or women's rights (group #25), but these topics are not stable across the multiple modes.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 528, |
|
"end": 536, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Look at Multi-Modal Solutions", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We conducted an experiment to investigate the effects of rare word removal using TopicCheck. As a part of our research, we had collected 12,000 news reports from five different international news sources over a period of ten years, to study systematic differences in news coverage on the rise of China, between western and Chinese media.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Pre-Processing & Replication Issues", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "While many modeling decisions are involved in our analysis, we choose rare word removal for two reasons. First, though the practice is standard, to the best of our knowledge, we find no systematic studies on how aggressively one should cull the vocabulary. While rare word removal is generally considered to have limited impact on topic model output, we find evidence to the contrary. By varying the removal threshold, for this corpus of international news reports on the rise of China, we observe that topics such as group #11 on the Beijing Olympics begin to disappear. Topics about Hong Kong appear sporadically. On top of the inconsistency issues, different pre-processing settings lead to drifts in topic definitions. For milder removal thresholds (toward the left), group #13 discusses Hong Kong within the context of Taiwan and Macau. With more aggressive filtering (toward the right), group #14 shifts into discussions about Hong Kong itself such as one country two systems and the special administrative region. Unchecked, these seemingly minor text pre-processing decisions may eventually lead researchers down different paths of analysis.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Pre-Processing & Replication Issues", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Second, as latent topics are typically defined through their top words, filtering words that occur only in a small fraction of the documents is generally considered to have limited impact on model output.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Pre-Processing & Replication Issues", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We trained structural topic models (Roberts et al., 2013) based on a subset of the corpus with 2,398 documents containing approximately 20,000 unique words. We applied 10 different settings where we progressively removed a greater number of rare terms beyond those already filtered by the default settings while holding all other parameters constant. The number of unique words retained by the models were 1,481 (default), 904, 634, 474, 365, . . ., down to 124 for the 10 settings. We generated 6 runs of the model at each setting, for a total of 60 runs. Removed words are assigned a value of 0 in the topic vector when computing cosine similarity.", |
|
"cite_spans": [ |
|
{ |
|
"start": 35, |
|
"end": 57, |
|
"text": "(Roberts et al., 2013)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Pre-Processing & Replication Issues", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We observe significant changes to the model output across the pre-processing settings, as shown in Figure 3 . The six models on the far left (columns 1 to 6) represent standard processing; rare word removal ranges from the mildest (columns 7 to 12) to the most aggressive (columns 55 to 60) as the columns move from left to right across the chart.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 107, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Text Pre-Processing & Replication Issues", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "While some topical groups (e.g., #1 on the communist party) are stable across all settings, many others fade in and out. Group #11 on the Beijing Olympics is consistent under standard processing and the mildest removal, but disappears completely afterward. We find two topical groups about Hong Kong that appear sporadically. On top of the instability issues, we observe that their content drifts across the settings. With milder thresholds, topical group #13 discusses Hong Kong within the context of Taiwan and Macau. With more aggressive filtering, topical group #14 shifts into discussions about Hong Kong itself such as one country two systems and the special administrative region. Unchecked, these minor text pre-processing decisions may lead researchers down different paths of analysis.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Pre-Processing & Replication Issues", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Agenda-setting refers to observations by McCombs et al. (1972) that the media play an important role in dictating issues of importance for voters, and by Iyengar et al. (1993) that news selection bias can determine how the public votes. Studying agendasetting requires assessing the amount of coverage paid to specific issues. Previous manual coding efforts are typically limited to either a single event or subsampled so thinly that they lose the ability to consistently track events over time. Large-scale analysis (e.g., for an entire federal election) remains beyond the reach of most communication scholars.", |
|
"cite_spans": [ |
|
{ |
|
"start": 41, |
|
"end": 62, |
|
"text": "McCombs et al. (1972)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 154, |
|
"end": 175, |
|
"text": "Iyengar et al. (1993)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "News Coverage & Topical Coherence", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "As part of our research, we apply topic modeling to closed-captioning data from over 200,000 hours of broadcasts on all mainstream news networks, to track the full spectrum of topics across all media out- Figure 4 : To enable large-scale studies of agenda-setting, we applied topic modeling to closed-captioning of over 200,000 hours of broadcasts, to estimate coverage in mainstream news networks. Through TopicCheck, the researchers find consistent topical groups that correspond to known major news categories. Group #9 represents topics about advertisements and valuable data to study the relationships between broadcasters and advertisers.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 213, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "News Coverage & Topical Coherence", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "lets. We conduct word intrusion tests (Chang et al., 2009) on Amazon Mechanical Turk, and obtain over 50,000 user ratings to identify high quality topics. However, to establish topic modeling as a valid research method, we must demonstrate the reliability of how we include or exclude topics in our analyses.", |
|
"cite_spans": [ |
|
{ |
|
"start": 38, |
|
"end": 58, |
|
"text": "(Chang et al., 2009)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "News Coverage & Topical Coherence", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "By applying TopicCheck to 32 runs of the same topic model, as shown in Figure 4 , we confirm that the consistent topical groupings capture at least four major known news categories: weather (such as group #5), finance (group #3), major events (group #7 on the Trayvon Martin shooting), and natural disasters (group #11 on Hurricane Katrina). We find additional evidence supporting the use of topic models, including the consistent appearance of advertising topics (group #9 on the sales of prescription medicine to senior citizens, a major demographic of the broadcast news audience). These topics may enable studies on the relationship between broadcasters and advertisers, an important but difficult question to address because few previous studies have the resources to codify advertisement content.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 79, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "News Coverage & Topical Coherence", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "However, event-specific topics tend to appear less consistently (such as group #24 on Russia, its conflict with Ukraine, and the Sochi Olympics). We note the lack of consistent topics on supreme court cases, an expected but missing news category, which warrants more in-depth investigations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "News Coverage & Topical Coherence", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We compare human judgment of topical quality when examining multiple models and those based on word intrusion tests. We calculate the aggregated topical coherence scores for each topical grouping. We find that consistent topical groups tend to receive higher coherence scores. However, topics about natural disasters receive low scores with a high variance (avg 0.5371; stdev 0.2497); many of them would have previously been excluded from analysis.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "News Coverage & Topical Coherence", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "To many social scientists, statistical models are measurement tools for inspecting social phenomena, such as probing recurring language use in a text corpus with topic models. In this light, instruments with known performance characteristics -including well-quantified uncertainties and proper coverage -are more valuable than potentially powerful but inconsistent modeling approaches.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Our initial findings suggest that a single topic model may not capture all perspectives on a dataset, as evident in the multiple local solutions about the economy, Hong Kong, and natural disasters in the three case studies respectively. By exposing model stability, our tool can help researchers validate modeling decisions, and caution against making too general a claim about any single modeling result.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We hypothesize that the low coherence scores for topics about natural disasters might derive from two causes. First, news media might cover an event differently (e.g., focusing on economic vs. humanitarian issues during Hurricane Katrina). Second, unfolding events may naturally have less stable vocabularies. In both cases, detecting and pinpointing reporting bias is central to the study of agenda-setting. These observations suggest that for certain applications, identifying consistent topics across multiple models may be equally critical as, if not more than, enforcing topical coherence within a single model.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Increasingly, text analysis relies on data-dependent modeling decisions. Rare word removal can substantively alter analysis outcomes, but selecting an appropriate threshold requires inspecting the content of a text corpus. TopicCheck can help archive the exact context of analysis, allowing researchers to justify -and readers to verify and challenge -modeling decisions through access to data.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Finally, topic modeling has dramatically lowered the costs associated with content analysis, allowing hundreds of models to be built in parallel. The current intended user task for TopicCheck is to validate the stability of presumably identical models. We plan to develop additional tools to help social scientists design better models, and actively explore the effects of alternative coding schemes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We present TopicCheck for assessing topic model stability. Through its development, we demonstrate that existing research on reproducible manual codification can be transferred and applied to computational approaches such as automated content analysis via topic modeling. We hope this work will help computer scientists and social scientists engage in deeper conversations about research reproducibility for large-scale computer-assisted text analysis.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research was supported in part by a grant from the Brown Institute for Media Innovation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A spectral algorithm for latent dirichlet allocation", |
|
"authors": [ |
|
{ |
|
"first": "Anima", |
|
"middle": [], |
|
"last": "Anandkumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [ |
|
"Kai" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Hsu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Sham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kakade", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "917--925", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anima Anandkumar, Yi kai Liu, Daniel J. Hsu, Dean P Foster, and Sham M Kakade. 2012. A spectral algo- rithm for latent dirichlet allocation. In Neural Infor- mation Processing Systems (NIPS), pages 917-925.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Brushing scatterplots", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Richard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Becker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Cleveland", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1987, |
|
"venue": "Technometrics", |
|
"volume": "29", |
|
"issue": "2", |
|
"pages": "127--142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard A. Becker and William S. Cleveland. 1987. Brushing scatterplots. Technometrics, 29(2):127-142.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Content analysis in communication research", |
|
"authors": [ |
|
{ |
|
"first": "Bernard", |
|
"middle": [], |
|
"last": "Berelson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1952, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bernard Berelson. 1952. Content analysis in communi- cation research. Free Press.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Dynamic topic models", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "David", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Blei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "International Conference on Machine Learning (ICML)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "113--120", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David M. Blei and John D. Lafferty. 2006. Dynamic topic models. In International Conference on Machine Learning (ICML), pages 113-120.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Latent Dirichlet allocation", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Blei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Jordan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "3", |
|
"issue": "1", |
|
"pages": "993--1022", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David M. Blei, Andrew Y. Ng, and Michael I. Jordan. 2003. Latent Dirichlet allocation. Journal of Machine Learning Research, 3(1):993-1022.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Hierarchical topic models and the nested chinese restaurant process", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Blei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Griffiths", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Jordan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Tenenbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David M. Blei, Thomas L. Griffiths, Michael I. Jordan, and Joshua B. Tenenbaum. 2004. Hierarchical topic models and the nested chinese restaurant process. In Neural Information Processing Systems (NIPS).", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Visualizing topic models", |
|
"authors": [ |
|
{ |
|
"first": "Allison June-Barlow", |
|
"middle": [], |
|
"last": "Chaney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Blei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Conference on Weblogs and Social Media (ICWSM)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "419--422", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Allison June-Barlow Chaney and David M. Blei. 2014. Visualizing topic models. In International Conference on Weblogs and Social Media (ICWSM), pages 419- 422.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Reading tea leaves: How humans interpret topic models", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordan", |
|
"middle": [], |
|
"last": "Boyd-Graber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sean", |
|
"middle": [], |
|
"last": "Gerrish", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Blei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "288--296", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Chang, Jordan Boyd-Graber, Chong Wang, Sean Gerrish, and David M. Blei. 2009. Reading tea leaves: How humans interpret topic models. In Neural Information Processing Systems (NIPS), pages 288-296.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Interpretation and trust: Designing model-driven visualizations for text analysis", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Chuang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Heer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Conference on Human Factors in Computing Systems (CHI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "443--452", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Chuang, Christopher D. Manning, and Jeffrey Heer. 2012a. Interpretation and trust: Design- ing model-driven visualizations for text analysis. In Conference on Human Factors in Computing Systems (CHI), pages 443-452.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Termite: Visualization techniques for assessing textual topic models", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Chuang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Heer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Advanced Visual Interfaces (AVI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Chuang, Christopher D. Manning, and Jeffrey Heer. 2012b. Termite: Visualization techniques for assessing textual topic models. In Advanced Visual In- terfaces (AVI).", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "A coefficient of agreement for nominal scales. Educational and Psychological Measurement", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1960, |
|
"venue": "", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "37--46", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Cohen. 1960. A coefficient of agreement for nom- inal scales. Educational and Psychological Measure- ment, 20:37-46.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "The CMU 2008 Political Blog Corpus", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Xing", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Eisenstein and Eric Xing. 2010. The CMU 2008 Political Blog Corpus. Carnegie Mellon University.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Offspring from reproduction problems: What replication failure teaches us", |
|
"authors": [ |
|
{ |
|
"first": "Antske", |
|
"middle": [], |
|
"last": "Fokkens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marten", |
|
"middle": [], |
|
"last": "Marieke Van Erp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ted", |
|
"middle": [], |
|
"last": "Postma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Pedersen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1691--1701", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antske Fokkens, Marieke van Erp, Marten Postma, Ted Pedersen, Piek Vossen, and Nuno Freire. 2013. Off- spring from reproduction problems: What replication failure teaches us. In Annual Meeting of the Asso- ciation for Computational Linguistics (ACL), pages 1691-1701.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Text as data: The promise and pitfalls of automatic content analysis methods for political texts", |
|
"authors": [ |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Grimmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brandon", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Stewart", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Political Analysis", |
|
"volume": "21", |
|
"issue": "3", |
|
"pages": "267--297", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Justin Grimmer and Brandon M. Stewart. 2011. Text as data: The promise and pitfalls of automatic content analysis methods for political texts. Political Analysis, 21(3):267-297.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Appropriators not position takers: The distorting effects of electoral incentives on congressional representation", |
|
"authors": [ |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Grimmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "American Journal of Political Science", |
|
"volume": "57", |
|
"issue": "3", |
|
"pages": "624--642", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Justin Grimmer. 2013. Appropriators not position takers: The distorting effects of electoral incentives on con- gressional representation. American Journal of Politi- cal Science, 57(3):624-642.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Content analysis for the social sciences and humanities", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Ole", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Holsti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1969, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ole R. Holsti. 1969. Content analysis for the social sciences and humanities. Addison-Wesley Publishing Company.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Interactive topic modeling. Machine Learning", |
|
"authors": [ |
|
{ |
|
"first": "Yuening", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordan", |
|
"middle": [], |
|
"last": "Boyd-Graber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brianna", |
|
"middle": [], |
|
"last": "Satinoff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alison", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "95", |
|
"issue": "", |
|
"pages": "423--469", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuening Hu, Jordan Boyd-Graber, Brianna Satinoff, and Alison Smith. 2014. Interactive topic modeling. Ma- chine Learning, 95(3):423-469.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "News coverage of the gulf crisis and public opinion: A study of agenda-setting, priming, and framing", |
|
"authors": [ |
|
{ |
|
"first": "Shanto", |
|
"middle": [], |
|
"last": "Iyengar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Simon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Communication Research", |
|
"volume": "20", |
|
"issue": "3", |
|
"pages": "365--383", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shanto Iyengar and Adam Simon. 1993. News cover- age of the gulf crisis and public opinion: A study of agenda-setting, priming, and framing. Communica- tion Research, 20(3):365-383.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Bivariate agreement coefficients for reliability of data", |
|
"authors": [ |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Krippendorff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1970, |
|
"venue": "Sociological methodology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "139--150", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Klaus Krippendorff. 1970. Bivariate agreement coef- ficients for reliability of data. In E. R. Borgatta and G. W. Bohrnstedt, editors, Sociological methodology, pages 139-150. John Wiley & Sons.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Content analysis", |
|
"authors": [ |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Krippendorff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1989, |
|
"venue": "International encyclopedia of communication", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Klaus Krippendorff. 1989. Content analysis. In E. Barnouw, G. Gerbner, W. Schramm, T. L. Worth, and L. Gross, editors, International encyclopedia of communication. Oxford University Press.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Content analysis: An introduction to its methodology", |
|
"authors": [ |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Krippendorff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Sage", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Klaus Krippendorff. 2004a. Content analysis: An intro- duction to its methodology. Sage, 2nd edition.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Reliability in content analysis: Some common misconceptions and recommendations", |
|
"authors": [ |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Krippendorff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Human Communication Research", |
|
"volume": "30", |
|
"issue": "3", |
|
"pages": "411--433", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Klaus Krippendorff. 2004b. Reliability in content analy- sis: Some common misconceptions and recommenda- tions. Human Communication Research, 30(3):411- 433.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Machine reading tea leaves: Automatically evaluating topic coherence and topic model quality", |
|
"authors": [ |
|
{ |
|
"first": "Jey Han", |
|
"middle": [], |
|
"last": "Lau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Newman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Conference of the European Chapter of the Association for Computational Linguistics (EACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "530--539", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jey Han Lau, David Newman, and Timothy Baldwin. 2014. Machine reading tea leaves: Automatically evaluating topic coherence and topic model quality. In Conference of the European Chapter of the Asso- ciation for Computational Linguistics (EACL), pages 530-539.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Content analysis in mass communication: Assessment and reporting of intercoder reliability", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Lombard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [], |
|
"last": "Snyder-Duch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cheryl", |
|
"middle": [ |
|
"Campanella" |
|
], |
|
"last": "Bracken", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Human Communication Research", |
|
"volume": "28", |
|
"issue": "4", |
|
"pages": "587--604", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Lombard, Jennifer Snyder-Duch, and Cheryl Campanella Bracken. 2002. Content analysis in mass communication: Assessment and reporting of intercoder reliability. Human Communication Research, 28(4):587-604.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "MALLET: A machine learning for language toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "McCallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew McCallum. 2013. MALLET: A machine learning for language toolkit. http://mallet.cs.umass.edu.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "The agenda-setting function of mass media", |
|
"authors": [ |
|
{ |
|
"first": "Maxwell", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "McCombs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donald", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Shaw", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1972, |
|
"venue": "Public Opinion Quarterly", |
|
"volume": "36", |
|
"issue": "5", |
|
"pages": "176--187", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maxwell E. McCombs and Donald L. Shaw. 1972. The agenda-setting function of mass media. Public Opin- ion Quarterly, 36(5):176-187.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Differentiating language usage through topic models. Poetics: Special Issue on Topic Models and the Cultural Sciences", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "McFarland", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Ramage", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Chuang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Heer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "41", |
|
"issue": "", |
|
"pages": "607--625", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel A. McFarland, Daniel Ramage, Jason Chuang, Jeffrey Heer, and Christopher D. Manning. 2013. Dif- ferentiating language usage through topic models. Po- etics: Special Issue on Topic Models and the Cultural Sciences, 41(6):607-625.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "The representational model and relevant research", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Osgood", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1959, |
|
"venue": "Trends in content analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "33--88", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. E. Osgood. 1959. The representational model and relevant research. In I. de Sola Pool, editor, Trends in content analysis, pages 33-88. University of Illinois Press.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Empiricism is not a matter of faith", |
|
"authors": [ |
|
{ |
|
"first": "Ted", |
|
"middle": [], |
|
"last": "Pedersen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Computational Linguistics", |
|
"volume": "34", |
|
"issue": "3", |
|
"pages": "465--470", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ted Pedersen. 2008. Empiricism is not a matter of faith. Computational Linguistics, 34(3):465-470.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "How to analyze political attention with minimal assumptions and costs", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Quinn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Burt", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Monroe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Colaresi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Crespin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dragomir", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Radev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "American Journal of Political Science", |
|
"volume": "54", |
|
"issue": "1", |
|
"pages": "209--228", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin M. Quinn, Burt L. Monroe, Michael Colaresi, Michael H. Crespin, and Dragomir R. Radev. 2010. How to analyze political attention with minimal as- sumptions and costs. American Journal of Political Science, 54(1):209-228.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Software framework for topic modelling with large corpora", |
|
"authors": [ |
|
{ |
|
"first": "Radim", |
|
"middle": [], |
|
"last": "\u0158eh\u016f\u0159ek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Petr", |
|
"middle": [], |
|
"last": "Sojka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "LREC Workshop on New Challenges for NLP Frameworks", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "45--50", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Radim\u0158eh\u016f\u0159ek and Petr Sojka. 2010. Software frame- work for topic modelling with large corpora. In LREC Workshop on New Challenges for NLP Frameworks, pages 45-50.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "The structural topic model and applied social science", |
|
"authors": [ |
|
{ |
|
"first": "Margaret", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brandon", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Stewart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dustin", |
|
"middle": [], |
|
"last": "Tingley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edoardo", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Airoldi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "NIPS Workshop on Topic Models", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Margaret E. Roberts, Brandon M. Stewart, Dustin Tin- gley, and Edoardo M. Airoldi. 2013. The structural topic model and applied social science. In NIPS Work- shop on Topic Models.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Topic models for open-ended survey responses with applications to experiments", |
|
"authors": [ |
|
{ |
|
"first": "Margaret", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brandon", |
|
"middle": [], |
|
"last": "Stewart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dustin", |
|
"middle": [], |
|
"last": "Tingley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Lucas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jetson", |
|
"middle": [], |
|
"last": "Leder-Luis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bethany", |
|
"middle": [], |
|
"last": "Albertson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shana", |
|
"middle": [], |
|
"last": "Gadarian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Rand", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "American Journal of Political Science. Forthcoming", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Margaret E. Roberts, Brandon Stewart, Dustin Tingley, Chris Lucas, Jetson Leder-Luis, Bethany Albertson, Shana Gadarian, and David Rand. 2014a. Topic mod- els for open-ended survey responses with applications to experiments. American Journal of Political Science. Forthcoming.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Navigating the local modes of big data: The case of topic models", |
|
"authors": [ |
|
{ |
|
"first": "Margaret", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brandon", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Stewart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dustin", |
|
"middle": [], |
|
"last": "Tingley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Data Science for Politics, Policy and Government", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Margaret E. Roberts, Brandon M. Stewart, and Dustin Tingley. 2014b. Navigating the local modes of big data: The case of topic models. In R. Michael Alvarez, editor, Data Science for Politics, Policy and Govern- ment. In Press.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "The author-topic model for authors and documents", |
|
"authors": [ |
|
{ |
|
"first": "Michal", |
|
"middle": [], |
|
"last": "Rosen-Zvi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Griffiths", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Steyvers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Padhraic", |
|
"middle": [], |
|
"last": "Smyth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Conference on Uncertainty in Artificial Intelligence (UAI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "487--494", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michal Rosen-Zvi, Thomas Griffiths, Mark Steyvers, and Padhraic Smyth. 2004. The author-topic model for authors and documents. In Conference on Uncertainty in Artificial Intelligence (UAI), pages 487-494.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Words alone: Dismantling topic models in the humanities", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Schmidt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Journal of Digital Humanities", |
|
"volume": "2", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin M. Schmidt. 2012. Words alone: Dismantling topic models in the humanities. Journal of Digital Hu- manities, 2(1).", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Reliability of content analysis: The case of nominal scale coding", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Scott", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1955, |
|
"venue": "Public Opinion Quarterly", |
|
"volume": "19", |
|
"issue": "3", |
|
"pages": "321--325", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William A. Scott. 1995. Reliability of content analy- sis:: The case of nominal scale coding. Public Opin- ion Quarterly, 19(3):321-325.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Closing the loop: Fast, interactive semi-supervised annotation with queries on features and instances", |
|
"authors": [ |
|
{ |
|
"first": "Burr", |
|
"middle": [], |
|
"last": "Settles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1467--1478", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Burr Settles. 2011. Closing the loop: Fast, interactive semi-supervised annotation with queries on features and instances. In Empirical Methods in Natural Lan- guage Processing (EMNLP), pages 1467-1478.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "SLINK: an optimally efficient algorithm for the single-link cluster method", |
|
"authors": [ |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Sibson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1973, |
|
"venue": "The Computer Journal", |
|
"volume": "16", |
|
"issue": "", |
|
"pages": "30--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robin Sibson. 1973. SLINK: an optimally efficient al- gorithm for the single-link cluster method. The Com- puter Journal, 16:30-34.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Exploring topic coherence over many models and many topics", |
|
"authors": [ |
|
{ |
|
"first": "Keith", |
|
"middle": [], |
|
"last": "Stevens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Kegelmeyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Andrzejewski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Buttler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Conference on Empirical Methods on Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "952--961", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keith Stevens, Philip Kegelmeyer, David Andrzejewski, and David Buttler. 2012. Exploring topic coherence over many models and many topics. In Conference on Empirical Methods on Natural Language Process- ing and Computational Natural Language Learning (EMNLP-CoNLL), pages 952-961.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Evaluation methods for topic models", |
|
"authors": [ |
|
{ |
|
"first": "Hanna", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Wallach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iain", |
|
"middle": [], |
|
"last": "Murray", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Mimno", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "International Conference on Machine Learning (ICML)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1105--1112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hanna M. Wallach, Iain Murray, Ruslan Salakhutdinov, and David Mimno. 2009. Evaluation methods for topic models. In International Conference on Machine Learning (ICML), pages 1105-1112.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Mass Media Research: An Introduction", |
|
"authors": [ |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "Wimmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Dominick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Cengage Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roger Wimmer and Joseph Dominick. 2010. Mass Me- dia Research: An Introduction. Cengage Learning.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "This chart shows topics uncovered from 13,250 political blogs(Eisenstein and Xing, 2010) by 50 structural topic models", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "Continued fromFigure 1, users may decrease the similarity threshold to generate additional groupings of topics that are less consistent, uncovered by as few as 3 of the 50 modeling runs.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"text": "Figure 3: While rare word removal is generally considered to have limited impact on topic model output, we find evidence to the contrary. By varying the removal threshold, for this corpus of international news reports on the rise of China, we observe that topics such as group #11 on the Beijing Olympics begin to disappear. Topics about Hong Kong appear sporadically. On top of the inconsistency issues, different pre-processing settings lead to drifts in topic definitions. For milder removal thresholds (toward the left), group #13 discusses Hong Kong within the context of Taiwan and Macau. With more aggressive filtering (toward the right), group #14 shifts into discussions about Hong Kong itself such as one country two systems and the special administrative region. Unchecked, these seemingly minor text pre-processing decisions may eventually lead researchers down different paths of analysis.", |
|
"type_str": "figure", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |