|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:02:58.469379Z" |
|
}, |
|
"title": "Uncertainty and Inclusivity in Gender Bias Annotation: An Annotation Taxonomy and Annotated Datasets of British English Text", |
|
"authors": [ |
|
{ |
|
"first": "Lucy", |
|
"middle": [], |
|
"last": "Havens", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Edinburgh", |
|
"location": {} |
|
}, |
|
"email": "lucy.havens@ed.ac.uk" |
|
}, |
|
{ |
|
"first": "Melissa", |
|
"middle": [], |
|
"last": "Terras", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Edinburgh", |
|
"location": {} |
|
}, |
|
"email": "m.terras@ed.ac.uk" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Bach", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Edinburgh", |
|
"location": {} |
|
}, |
|
"email": "bbach@inf.ed.ac.uk" |
|
}, |
|
{ |
|
"first": "Beatrice", |
|
"middle": [], |
|
"last": "Alex", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Edinburgh", |
|
"location": {} |
|
}, |
|
"email": "balex@ed.ac.uk" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Mitigating harms from gender biased language in Natural Language Processing (NLP) systems remains a challenge, and the situated nature of language means bias is inescapable in NLP data. Though efforts to mitigate gender bias in NLP are numerous, they often vaguely define gender and bias, only consider two genders, and do not incorporate uncertainty into models. To address these limitations, in this paper we present a taxonomy of gender biased language and apply it to create annotated datasets. We created the taxonomy and annotated data with the aim of making gender bias in language transparent. If biases are communicated clearly, varieties of biased language can be better identified and measured. Our taxonomy contains eleven types of gender biases inclusive of people whose gender expressions do not fit into the binary conceptions of woman and man, and whose gender differs from that they were assigned at birth, while also allowing annotators to document unknown gender information. The taxonomy and annotated data will, in future work, underpin analysis and more equitable language model development.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Mitigating harms from gender biased language in Natural Language Processing (NLP) systems remains a challenge, and the situated nature of language means bias is inescapable in NLP data. Though efforts to mitigate gender bias in NLP are numerous, they often vaguely define gender and bias, only consider two genders, and do not incorporate uncertainty into models. To address these limitations, in this paper we present a taxonomy of gender biased language and apply it to create annotated datasets. We created the taxonomy and annotated data with the aim of making gender bias in language transparent. If biases are communicated clearly, varieties of biased language can be better identified and measured. Our taxonomy contains eleven types of gender biases inclusive of people whose gender expressions do not fit into the binary conceptions of woman and man, and whose gender differs from that they were assigned at birth, while also allowing annotators to document unknown gender information. The taxonomy and annotated data will, in future work, underpin analysis and more equitable language model development.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The need to mitigate bias in data has become urgent as evidence of harms from such data grows (Birhane and Prabhu, 2021; O'Neill et al., 2021; Perez, 2019; Noble, 2018; Vainapel et al., 2015; Sweeney, 2013) . Due to the complexities of bias often overlooked in Machine Learning (ML) bias research, including Natural Language Processing (NLP) (Devinney et al., 2022; Sta\u0144czak and Augenstein, 2021) , Blodgett et al. (2020) , Leavy (2018) , and Crawford (2017) call for greater interdisciplinary engagement and stakeholder collaboration. The Gallery, Library, Archive, and Museum (GLAM) sector has made similar calls for interdisciplinary engagement, looking to applications of data science and ML to better understand and mitigate bias in GLAM collections (Padilla, 2017 (Padilla, , 2019 Geraci, 2019) . Supporting the NLP and GLAM communities' shared aim of mitigating the minoritization 1 of certain people that biased language causes, we provide a taxonomy of gender biased language and demonstrate its application in a case study with GLAM documentation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 120, |
|
"text": "(Birhane and Prabhu, 2021;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 142, |
|
"text": "O'Neill et al., 2021;", |
|
"ref_id": "BIBREF59" |
|
}, |
|
{ |
|
"start": 143, |
|
"end": 155, |
|
"text": "Perez, 2019;", |
|
"ref_id": "BIBREF62" |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 168, |
|
"text": "Noble, 2018;", |
|
"ref_id": "BIBREF58" |
|
}, |
|
{ |
|
"start": 169, |
|
"end": 191, |
|
"text": "Vainapel et al., 2015;", |
|
"ref_id": "BIBREF80" |
|
}, |
|
{ |
|
"start": 192, |
|
"end": 206, |
|
"text": "Sweeney, 2013)", |
|
"ref_id": "BIBREF76" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 365, |
|
"text": "(Devinney et al., 2022;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 366, |
|
"end": 396, |
|
"text": "Sta\u0144czak and Augenstein, 2021)", |
|
"ref_id": "BIBREF72" |
|
}, |
|
{ |
|
"start": 399, |
|
"end": 421, |
|
"text": "Blodgett et al. (2020)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 424, |
|
"end": 436, |
|
"text": "Leavy (2018)", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 443, |
|
"end": 458, |
|
"text": "Crawford (2017)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 755, |
|
"end": 769, |
|
"text": "(Padilla, 2017", |
|
"ref_id": "BIBREF60" |
|
}, |
|
{ |
|
"start": 770, |
|
"end": 786, |
|
"text": "(Padilla, , 2019", |
|
"ref_id": "BIBREF61" |
|
}, |
|
{ |
|
"start": 787, |
|
"end": 800, |
|
"text": "Geraci, 2019)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We use GLAM documentation to refer to the descriptions of heritage items written in GLAM catalogs. Adapting our previously published definition, we use gender biased language to refer to \"language that creates or reinforces inequitable power relations among people, harming certain people through simplified, dehumanizing, or judgmental words or phrases that restrict their [gender] identity; and privileging other people through words or phrases that favor their [gender] identity\" (Havens et al., 2020, 108) . We focus on gender bias due to the contextual nature of gender and bias (they vary across time, location, culture, and people), as well as the existing efforts of our partner institution, the Archives of the Centre for Research Collections at the University of Edinburgh, to mitigate gender bias in its documentation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 374, |
|
"end": 382, |
|
"text": "[gender]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 464, |
|
"end": 472, |
|
"text": "[gender]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 483, |
|
"end": 509, |
|
"text": "(Havens et al., 2020, 108)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "GLAM documentation provides a unique benefit compared to many text sources: it contains historical and contemporary language. GLAM continually acquire and describe heritage items to enable the items' discoverability. In archives, heritage items include photographs, handwritten documents, instruments, and tweets, among other materials. Heritage items and the language that describes them influence society's understanding of the past, the present, and the direction society is moving into the future (Benjamin, 2019; Welsh, 2016; Yale, 2015; Cook, 2011; Smith, 2006) . Through research with GLAM documentation, variations in biased language could be better understood. Should diachronic patterns emerge, the NLP community could train models to identify newly-emerging, previously unseen types of bias.", |
|
"cite_spans": [ |
|
{ |
|
"start": 501, |
|
"end": 517, |
|
"text": "(Benjamin, 2019;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 518, |
|
"end": 530, |
|
"text": "Welsh, 2016;", |
|
"ref_id": "BIBREF82" |
|
}, |
|
{ |
|
"start": 531, |
|
"end": 542, |
|
"text": "Yale, 2015;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 543, |
|
"end": 554, |
|
"text": "Cook, 2011;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 555, |
|
"end": 567, |
|
"text": "Smith, 2006)", |
|
"ref_id": "BIBREF70" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper presents an annotation taxonomy ( \u00a75) to label gender biased language inclusive of trans and gender diverse identities, 2 as well as a dataset of historical and contemporary language from British English archival documentation annotated according to the taxonomy. Linguistics, gender studies, information sciences, and NLP literature inform the taxonomy's categorization of gender biased language. As a result, the taxonomy holds relevance beyond the GLAM sector in which we situate our work. The taxonomy may be applied when creating NLP datasets or models, or when measuring varieties of gender bias in language, because the taxonomy's definitions of types of gender biases are rooted in the language of text, rather than an abstracted representation of text. Uniquely, our taxonomy includes labels that record uncertainty about a person's gender.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "As we situate our work in the GLAM sector, this paper provides a case study ( \u00a76) demonstrating how the annotation taxonomy was applied to create an annotated dataset of archival documentation. For future NLP work, the resulting dataset of historical and contemporary language annotated for gender biases provides a corpus to analyze gender biased language for diachronic patterns, to analyze correlations between types of gender biases, and to develop gender bias classification models. Specific to the GLAM sector, gender bias classification models could enhance reparative description practices. A model's ability to automatically identify descriptions of heritage items that contain gender biases would enable efficient prioritization of the additions and revisions needed on outdated, harmful descriptions in GLAM documentation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper adopts our previously published definition of biased language (Havens et al., 2020) , narrowing the focus to gender bias as written in \u00a71. Gender biased language may cause representational or allocative harms to a person of any gender (Blodgett et al., 2020; Crawford, 2017) . The taxonomy created in this paper considers a person's gender to be self-described and changeable, rather than being limited to the binary and static conceptualization of gender as either a man or woman since birth (Keyes, 2018; Scheuerman et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 94, |
|
"text": "(Havens et al., 2020)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 246, |
|
"end": 269, |
|
"text": "(Blodgett et al., 2020;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 270, |
|
"end": 285, |
|
"text": "Crawford, 2017)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 504, |
|
"end": 517, |
|
"text": "(Keyes, 2018;", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 518, |
|
"end": 542, |
|
"text": "Scheuerman et al., 2020)", |
|
"ref_id": "BIBREF67" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Statement", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Recognizing that a person's gender may be impossible to determine from the information available about them, the taxonomy also allows annotators to record uncertainty (Shopland, 2020) . Furthermore, the paper acknowledges that characteristics other than gender, such as racialized ethnicity and economic class, influence experiences of power and oppression (Crenshaw, 1991) . Drawing on archival science and feminist theories, the paper considers knowledge derived from language as situated in a particular perspective and, as a result, incomplete (Tanselle, 2002; Harding, 1995; Haraway, 1988) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 167, |
|
"end": 183, |
|
"text": "(Shopland, 2020)", |
|
"ref_id": "BIBREF69" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 373, |
|
"text": "(Crenshaw, 1991)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 548, |
|
"end": 564, |
|
"text": "(Tanselle, 2002;", |
|
"ref_id": "BIBREF78" |
|
}, |
|
{ |
|
"start": 565, |
|
"end": 579, |
|
"text": "Harding, 1995;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 580, |
|
"end": 594, |
|
"text": "Haraway, 1988)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Statement", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To communicate this paper's perspective, we as authors report our identification as three women and one man; and our nationalities, as American, German, and Scots. Annotators identify as women (one specifying queer woman and two, cis women); they are of American, British, Hungarian, and Scots nationalities. Though annotators do not represent great gender diversity, 3 the annotation process still contributes to the advancement of gender equity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Statement", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "As women, the annotators identify as a minoritized gender. The evolution of British English demonstrates the historical dominance of the perspective of the heteronormative man, and the pejoration of terms for women (Spencer, 2000; Schulz, 2000; Lakoff, 1989) . 4 Creating a women-produced dataset challenges the dominant gender perspective by explicitly labeling where minoritized genders' perspectives are missing (D'Ignazio and Klein, 2020; Smith, 2006; Fairclough, 2003) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 215, |
|
"end": 230, |
|
"text": "(Spencer, 2000;", |
|
"ref_id": "BIBREF71" |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 244, |
|
"text": "Schulz, 2000;", |
|
"ref_id": "BIBREF68" |
|
}, |
|
{ |
|
"start": 245, |
|
"end": 258, |
|
"text": "Lakoff, 1989)", |
|
"ref_id": "BIBREF51" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 262, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 415, |
|
"end": 442, |
|
"text": "(D'Ignazio and Klein, 2020;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 443, |
|
"end": 455, |
|
"text": "Smith, 2006;", |
|
"ref_id": "BIBREF70" |
|
}, |
|
{ |
|
"start": 456, |
|
"end": 473, |
|
"text": "Fairclough, 2003)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Statement", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Evidence of bias in ML data and models abound regarding gender (Kurita et al., 2019; Zhao et al., 2019) , disability (Hutchinson et al., 2020) , racial-ized ethnicities (Sap et al., 2019) , politics and economics (Elejalde et al., 2017) , and, for an intersectional approach (Crenshaw, 1991) , a combination of characteristics (Jiang and Fellbaum, 2020; Sweeney and Najafian, 2019; Tan and Celis, 2019) . Harms from such biases are also well documented (Birhane and Prabhu, 2021; Costanza-Chock and Philip, 2018; Noble, 2018; Vainapel et al., 2015; Sweeney, 2013) . Despite numerous bias mitigation approaches put forth (Cao and Daum\u00e9 III, 2020; Dinan et al., 2020a; Hube and Fetahu, 2019; Webster et al., 2018; Zhao et al., 2018) , many have limited efficacy, failing to address the complexity of biased language (Sta\u0144czak and Augenstein, 2021; Blodgett et al., 2021; Gonen and Goldberg, 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 84, |
|
"text": "(Kurita et al., 2019;", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 85, |
|
"end": 103, |
|
"text": "Zhao et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 117, |
|
"end": 142, |
|
"text": "(Hutchinson et al., 2020)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 169, |
|
"end": 187, |
|
"text": "(Sap et al., 2019)", |
|
"ref_id": "BIBREF65" |
|
}, |
|
{ |
|
"start": 213, |
|
"end": 236, |
|
"text": "(Elejalde et al., 2017)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 275, |
|
"end": 291, |
|
"text": "(Crenshaw, 1991)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 327, |
|
"end": 353, |
|
"text": "(Jiang and Fellbaum, 2020;", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 354, |
|
"end": 381, |
|
"text": "Sweeney and Najafian, 2019;", |
|
"ref_id": "BIBREF75" |
|
}, |
|
{ |
|
"start": 382, |
|
"end": 402, |
|
"text": "Tan and Celis, 2019)", |
|
"ref_id": "BIBREF77" |
|
}, |
|
{ |
|
"start": 453, |
|
"end": 479, |
|
"text": "(Birhane and Prabhu, 2021;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 480, |
|
"end": 512, |
|
"text": "Costanza-Chock and Philip, 2018;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 513, |
|
"end": 525, |
|
"text": "Noble, 2018;", |
|
"ref_id": "BIBREF58" |
|
}, |
|
{ |
|
"start": 526, |
|
"end": 548, |
|
"text": "Vainapel et al., 2015;", |
|
"ref_id": "BIBREF80" |
|
}, |
|
{ |
|
"start": 549, |
|
"end": 563, |
|
"text": "Sweeney, 2013)", |
|
"ref_id": "BIBREF76" |
|
}, |
|
{ |
|
"start": 620, |
|
"end": 645, |
|
"text": "(Cao and Daum\u00e9 III, 2020;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 646, |
|
"end": 666, |
|
"text": "Dinan et al., 2020a;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 667, |
|
"end": 689, |
|
"text": "Hube and Fetahu, 2019;", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 690, |
|
"end": 711, |
|
"text": "Webster et al., 2018;", |
|
"ref_id": "BIBREF81" |
|
}, |
|
{ |
|
"start": 712, |
|
"end": 730, |
|
"text": "Zhao et al., 2018)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 814, |
|
"end": 845, |
|
"text": "(Sta\u0144czak and Augenstein, 2021;", |
|
"ref_id": "BIBREF72" |
|
}, |
|
{ |
|
"start": 846, |
|
"end": 868, |
|
"text": "Blodgett et al., 2021;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 869, |
|
"end": 894, |
|
"text": "Gonen and Goldberg, 2019)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Methods of removing bias tend to be mathematically focused, such as Basta et al. (2020) and Borkan et al. (2019 ). As McCradden et al. (2020 state, typical ML bias mitigation approaches assume biases' harms can be mathematically represented, though no evidence of the relevance of proposed bias metrics to the real world exists. On the contrary, Goldfarb-Tarrant et al. (2021) found no correlation between a commonly used intrinsic bias metric, Word Embedding Association Test, and extrinsic metrics in the downstream tasks of coreference resolution and hate speech detection. Due to the misalignment between abstract representations of bias and the presence and impact of bias, this paper presents a taxonomy to measure biased language at its foundation: words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 87, |
|
"text": "Basta et al. (2020)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 92, |
|
"end": 111, |
|
"text": "Borkan et al. (2019", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 112, |
|
"end": 140, |
|
"text": "). As McCradden et al. (2020", |
|
"ref_id": "BIBREF56" |
|
}, |
|
{ |
|
"start": 346, |
|
"end": 376, |
|
"text": "Goldfarb-Tarrant et al. (2021)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Limitations to bias mitigation efforts also result from overly simplistic conceptualizations of bias (Devinney et al., 2022; Sta\u0144czak and Augenstein, 2021; Blodgett et al., 2020) . NLP gender bias work, for example, often uses a binary gender framework either in its conceptualization (such as Webster et al. (2018)) or application (such as Dinan et al. (2020b)) , and tends to focus on one variety of gender bias, stereotypes (Sta\u0144czak and Augenstein, 2021; Doughman et al., 2021; Bolukbasi et al., 2016) . NLP bias work more generally often asserts a single ground truth (Davani et al., 2022; Sang and Stanton, 2022; Basile et al., 2021) . Despite evidence that bias varies across domains (Basta et al., 2020) , approaches to mitigating bias have yet to address the contextual nature of biased language, such as how it varies across time, location, and culture (Bjorkman, 2017; Bucholtz, 1999; Corbett, 1990) . This paper adopts a data feminist (D'Ignazio and Klein, 2020) and perspectivist ap-proach (Basile, 2022) to situate identification and measurement of bias in a particular context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 124, |
|
"text": "(Devinney et al., 2022;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 125, |
|
"end": 155, |
|
"text": "Sta\u0144czak and Augenstein, 2021;", |
|
"ref_id": "BIBREF72" |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 178, |
|
"text": "Blodgett et al., 2020)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 294, |
|
"end": 316, |
|
"text": "Webster et al. (2018))", |
|
"ref_id": "BIBREF81" |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 362, |
|
"text": "Dinan et al. (2020b))", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 427, |
|
"end": 458, |
|
"text": "(Sta\u0144czak and Augenstein, 2021;", |
|
"ref_id": "BIBREF72" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 481, |
|
"text": "Doughman et al., 2021;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 482, |
|
"end": 505, |
|
"text": "Bolukbasi et al., 2016)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 573, |
|
"end": 594, |
|
"text": "(Davani et al., 2022;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 595, |
|
"end": 618, |
|
"text": "Sang and Stanton, 2022;", |
|
"ref_id": "BIBREF64" |
|
}, |
|
{ |
|
"start": 619, |
|
"end": 639, |
|
"text": "Basile et al., 2021)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 691, |
|
"end": 711, |
|
"text": "(Basta et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 863, |
|
"end": 879, |
|
"text": "(Bjorkman, 2017;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 880, |
|
"end": 895, |
|
"text": "Bucholtz, 1999;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 896, |
|
"end": 910, |
|
"text": "Corbett, 1990)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1003, |
|
"end": 1017, |
|
"text": "(Basile, 2022)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Data feminism views data as situated and partial, drawing on feminist theories' view of knowledge as particular to a time, place, and people (Harding, 1995; Crenshaw, 1991; Haraway, 1988) . Similarly, the Perspectivist Data Manifesto encourages disaggregated publication of annotated data, recognizing that conflicting annotations may all be valid (Basile, 2022) . Indigenous epistemologies, such as the Lakota's concept of wa\u021fk\u00e0N, further the notion of the impossibility of a universal truth. Translated as \"that which cannot be understood,\" wa\u021fk\u00e0N communicates that knowledge may come from a place beyond what we can imagine (Lewis et al., 2018) . Our taxonomy thus permits annotations to overlap and record uncertainty, and our aggregated dataset incorporates all annotators' perspectives.", |
|
"cite_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 156, |
|
"text": "(Harding, 1995;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 172, |
|
"text": "Crenshaw, 1991;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 173, |
|
"end": 187, |
|
"text": "Haraway, 1988)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 362, |
|
"text": "(Basile, 2022)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 627, |
|
"end": 647, |
|
"text": "(Lewis et al., 2018)", |
|
"ref_id": "BIBREF53" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Encouraging greater transparency in dataset creation, Bender et al. (2021) and Jo and Gebru (2020) caution against creating datasets too large to be adequately interrogated. Hutchinson et al. (2021) , Mitchell et al. (2019) , and Bender and Friedman (2018) propose new documentation methods to facilitate critical interrogation of data and the models trained on them. Our appendices include a data statement documenting the creation of the annotated data presented in this paper ( \u00a7B). To maximize the transparency of our data documentation, we will publish the data only after further interrogation of its gender bias annotations, including collaborative analysis with the Centre for Research Collections.", |
|
"cite_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 74, |
|
"text": "Bender et al. (2021)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 174, |
|
"end": 198, |
|
"text": "Hutchinson et al. (2021)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 201, |
|
"end": 223, |
|
"text": "Mitchell et al. (2019)", |
|
"ref_id": "BIBREF57" |
|
}, |
|
{ |
|
"start": 230, |
|
"end": 256, |
|
"text": "Bender and Friedman (2018)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To practically apply theories and approaches from NLP, data feminism, and indigenous epistemologies, we apply the case study method, common to social science and design research. Case studies use a combination of data and information gathering approaches to study particular phenomena in context (Martin and Hanington, 2012) , suitable for annotating gender biased language because gender and bias vary across time, location, and culture. Furthermore, case studies report and reflect upon outliers discovered in the research process (ibid.), supporting our effort to create space for the perspectives of people minoritized due to their gender identity. After first developing the annotation taxonomy through an interdisciplinary literature review and participatory action research with archivists ( \u00a75), we applied the taxonomy in a case study to create datasets annotated for gender bias ( \u00a76).", |
|
"cite_spans": [ |
|
{ |
|
"start": 296, |
|
"end": 324, |
|
"text": "(Martin and Hanington, 2012)", |
|
"ref_id": "BIBREF55" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Adopting our previously published bias-aware methodology (Havens et al., 2020) , we employed participatory action research (Swantz, 2008; Reid and Frisby, 2008) , collaborating with the institution that manages our data source: the Centre for Research Collections. Due to validity (Welty et al., 2019) and ethical concerns (Gleibs, 2017) with crowdsourcing, we hired annotators with expertise in archives (the domain area of the case study's data) and gender studies (the focus area of this paper's bias mitigation) to apply the taxonomy in a case study. Hiring a small number of annotators will enable us to publish disaggregated versions of the annotated data, implementing data perspectivism (Basile, 2022; Basile et al., 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 78, |
|
"text": "(Havens et al., 2020)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 137, |
|
"text": "(Swantz, 2008;", |
|
"ref_id": "BIBREF74" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 160, |
|
"text": "Reid and Frisby, 2008)", |
|
"ref_id": "BIBREF63" |
|
}, |
|
{ |
|
"start": 281, |
|
"end": 301, |
|
"text": "(Welty et al., 2019)", |
|
"ref_id": "BIBREF83" |
|
}, |
|
{ |
|
"start": 323, |
|
"end": 337, |
|
"text": "(Gleibs, 2017)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 695, |
|
"end": 709, |
|
"text": "(Basile, 2022;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 710, |
|
"end": 730, |
|
"text": "Basile et al., 2021)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Following the approach of Smith (2006) to heritage, we consider heritage to be a process of engaging with the past, present, and future. Annotators in this paper's case study visited, interpreted, and negotiated with heritage (Smith, 2006) in the form of archival documentation. Annotating archival documentation with labels that mark specific text spans as gender biased transforms the documentation, challenging the \"authorized heritage discourse\" (ibid., 29) of the heteronormative man. We aim such explicit labeling to recontextualize the archival documentation, transforming its language by placing it in a new social context (Fairclough, 2003) : the 21 st century United Kingdom, with gender conceptualized as a self-defined, changeable identity characteristic. We aim this negotiationthrough-annotation to guide the NLP models we will create with the data in the future towards more equitable representations of gender.", |
|
"cite_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 38, |
|
"text": "Smith (2006)", |
|
"ref_id": "BIBREF70" |
|
}, |
|
{ |
|
"start": 226, |
|
"end": 239, |
|
"text": "(Smith, 2006)", |
|
"ref_id": "BIBREF70" |
|
}, |
|
{ |
|
"start": 631, |
|
"end": 649, |
|
"text": "(Fairclough, 2003)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our annotation taxonomy organizes labels (lettered) into three categories (numbered). Category and label names are bolded. Each label's listing includes a definition and example. Examples are italicized; labeled text in each example is underlined. For every label, annotators could label a single word or multiple words. Examples come from the archival documentation summarized in \u00a76 except for 1(a), Non-binary, and 3(d), Empowering, because annotators did not find text relevant to their definitions (the \"Fonds ID,\" or collection identifier, indicates where in the documentation example descriptions may be found). \u00a77 further explains the rationale for the taxonomy's labels, and how they facilitate analysis and measurement of gender biased language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation Taxonomy", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "1. Person Name: the name of a person, including any pre-nominal titles (i.e., Professor, Mrs., Sir, Queen) , when the person is the primary entity being described (rather than a location named after a person, for example) We chose to build on the gender bias taxonomy of Hitti et al. (2019) because the authors grounded their definitions of types of gender bias in gender studies and linguistics, and focused on identifying gender bias at the word level, aligning with our approach. Though Dinan et al. (2020b) also provide a framework for defining types of gender bias, their framework focuses on relationships between people in a conversation, identifying \"bias when speaking ABOUT someone, bias when speaking TO someone, and bias from speaking AS someone\" (316). The nature of our corpus makes these gender bias dimensions irrelevant to our work: GLAM documentation contains descriptions that only contain text written about a person or people (or other topics); it does not contain text that provides gender information about who is speaking or who is being spoken to. Additionally, despite writing of four gender values (unknown, neutral, feminine, and masculine), the dataset and classifiers of Dinan et al. (2020b) are limited to \"masculine and feminine classes\" (317). The authors also do not explain how they define \"bias,\" limiting our ability to draw on their research. Doughman et al. (2021) provide another gender bias taxonomy that builds on that of Hitti et al. (2019) , resulting in overlaps between our taxonomies. However, Doughman et al. (2020) focus on gender stereotypes, while our taxonomy considers other types of gender biases. Though less explicit in the names of our taxonomy's labels, we also looked to the descriptions of gender and gender bias from Cao and Daum\u00e9 III (2021), who point out the limited gender information available in language. The aim of our dataset creation differs from Cao and Daum\u00e9 III (2021), though. They created data that represents trans and gender diverse identities in order to evaluate models' gender biases, specifically looking at where coreference resolution fails on trans and non-binary referents. By contrast, we aim to create a dataset that documents biased representations of gender, with the future aim of creating models that are able to identify types of gender bias in language.", |
|
"cite_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 106, |
|
"text": "Professor, Mrs., Sir, Queen)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 290, |
|
"text": "Hitti et al. (2019)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 510, |
|
"text": "Dinan et al. (2020b)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1381, |
|
"end": 1403, |
|
"text": "Doughman et al. (2021)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1464, |
|
"end": 1483, |
|
"text": "Hitti et al. (2019)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 1541, |
|
"end": 1563, |
|
"text": "Doughman et al. (2020)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation Taxonomy", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "To demonstrate the application of the taxonomy, we present a case study situated in the United Kingdom in the 21 st century, annotating archival documentation written in British English from the Centre for Research Collections at the University of Edinburgh (CRC Archives). This paper thus takes the first step in building a collection of case studies that situate NLP bias research in a specific context. (Loper and Bird, 2002) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 406, |
|
"end": 428, |
|
"text": "(Loper and Bird, 2002)", |
|
"ref_id": "BIBREF54" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "A collection of case studies would enable the NLP community to determine which aspects of bias mitigation approaches generalize across time, location, culture, people, and identity characteristics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The CRC's Archives' documentation served as a suitable data source because the documentation adheres to an international standard for organizing archival metadata (ISAD(G) (ICA, 2011)), the archivists at the institution had found gender bias in the documentation's language, and the archivists were already engaged in efforts to mitigate gender bias in the archival documentation. The documentation describes a variety of heritage collections and items, such as letters, journals, photographs, degree certificates, and drawings; on a variety of topics, such as religion, research, teaching, architecture, and town planning. Employees at the partner institution describe themselves as activists changing archival practices to more accurately represent the diverse groups of people that the archival collections are intended to serve.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The annotation corpus consists of 24,474 sentences and 399,957 words, selected from the first 20% of the entire corpus of archival documentation from the partner institution's catalog (see \u00a7B.9 for more on this corpus). Table 1 provides a breakdown of the size of the annotation corpus by metadata field. 90% of the annotation corpus (circa 22,027 sentences and 359,961 words) was doubly annotated with all labels, and 10% of the annotation corpus (circa 2,447 sentences and 39,996 words) was triply annotated with all labels. In total, the annotation process amounted to circa 400 hours of work and \u00a35,333.76, funded by a variety of internal institutional funds. Each of the four hired annotators worked for 72 hours over eight weeks at \u00a318.52 per hour (minimum wage is \u00a39.50 per hour (Gov.uk, 2022)). The hired annotators were PhD students selected for their experience in gender studies or archives, with three of the annotators having experience in both. The lead annotator worked for 86 hours over 16 weeks as part of their PhD research.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 227, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "6" |
|
}, |
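The corpus-size figures above (sentences and words per metadata field) can be recomputed with standard tokenizers; the case study cites NLTK (Loper and Bird, 2002). A minimal sketch follows, assuming NLTK's punkt models and a hypothetical directory of plain-text description files; it is not the authors' released code.

```python
# Illustrative sketch only: counting sentences and words in a corpus of archival
# metadata descriptions with NLTK. Paths and file layout are assumptions.
import glob
from nltk.tokenize import sent_tokenize, word_tokenize  # requires nltk.download("punkt")

sentence_total, word_total = 0, 0
for path in glob.glob("annotation_corpus/*.txt"):  # hypothetical corpus location
    with open(path, encoding="utf-8") as f:
        text = f.read()
    sentences = sent_tokenize(text)
    sentence_total += len(sentences)
    word_total += sum(len(word_tokenize(s)) for s in sentences)

print(f"{sentence_total} sentences, {word_total} words")
```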
|
{ |
|
"text": "The categories of labels in the annotation taxonomy were divided among annotators according to the textual relations the labels record. Hired annotators 1 and 2 (A1 and A2) labeled internal relations of the text with Person Name and Linguistic categories, hired annotators 3 and 4 (A3 and A4) labeled external relations of the text with the Contextual category, and the lead annotator (A0) labeled both relations with all categories. A1 and A3 labeled the same subset of archival documentation, and A2 and A4 labeled the same subset of archival documentation, ensuring every description had labels from all categories. The lead annotator labeled the same descriptions as A1 and A3, and a subset of the descriptions that A2 and A4 labeled (due to time constraints, A0 could not label all the same descriptions). Prior to beginning annotation, Gendered Pronoun, Gendered Role, and Occupation labels were automatically applied. The annotators corrected mistakes from this automated process during their manual annotation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "6" |
|
}, |
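The pre-annotation method is not specified beyond the fact that Gendered Pronoun, Gendered Role, and Occupation labels were applied automatically and then corrected by annotators. The sketch below is a hypothetical lexicon-lookup version of such a pre-annotation pass; the word lists and the (start, end, label) output format are assumptions, not the scripts used in the case study.

```python
# Hypothetical pre-annotation sketch: propose candidate spans for annotators to correct.
import re

GENDERED_PRONOUNS = {"he", "him", "his", "she", "her", "hers", "they", "them", "their"}
GENDERED_ROLES = {"wife", "husband", "son", "daughter", "mother", "father", "lady", "sir"}
OCCUPATIONS = {"professor", "archivist", "missionary", "architect", "lecturer"}  # assumed list

def pre_annotate(description: str):
    """Return candidate (start, end, label) spans for manual review."""
    spans = []
    for match in re.finditer(r"\b\w+\b", description):
        token = match.group().lower()
        if token in GENDERED_PRONOUNS:
            spans.append((match.start(), match.end(), "Gendered Pronoun"))
        elif token in GENDERED_ROLES:
            spans.append((match.start(), match.end(), "Gendered Role"))
        elif token in OCCUPATIONS:
            spans.append((match.start(), match.end(), "Occupation"))
    return spans

print(pre_annotate("Jewel took an active interest in her husband's work."))
```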
|
{ |
|
"text": "We produced three instances of the annotation corpus: one for A0, one for each pair of hired annotators (A1 and A3, and A2 and A4), and one aggregated dataset. The aggregated dataset combines annotations from all five annotators, totaling 76,543 annotations with duplicates and 55,260 annotations after deduplication. Manual reviews of each annotator's dataset informed the aggregation approach, which involved a combination of programmatic and manual steps. The data statement in \u00a7B details the aggregation approach. Figure 1 displays the number of annotations in the aggregated dataset by label ( \u00a7A contains additional annotation figures). In line with perspectivist NLP (Basile, 2022) , the individual annotator's datasets will be published alongside the aggregated dataset, enabling researchers to interrogate patterns of agreement and disagreement, and enabling future work to compare the performance of classifiers trained on disaggregated and aggregated datasets. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 674, |
|
"end": 688, |
|
"text": "(Basile, 2022)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 518, |
|
"end": 526, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Due to our aim to create a training dataset for document classification models, identifying strictly matching text spans that annotators labeled was deemed less important than the presence of a label in a description. Consequently, inter-annotator agreement (IAA) calculations consider annotations with the same label to agree if their text spans match or overlap. Figures 2 and 3 display the F 1 scores for each label, with the aggregated dataset's labels as predicted and the annotators' labels as expected. Tables 2 and 3 in the appendices list true and false positives, false negatives, precision, and recall, in addition to F 1 scores, for IAA among the annotators and with the aggregated dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 510, |
|
"end": 524, |
|
"text": "Tables 2 and 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Inter-Annotator Agreement", |
|
"sec_num": "6.1" |
|
}, |
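A minimal sketch of the overlap-based agreement rule described above, assuming each annotation is a (start, end, label) tuple: two annotations with the same label agree if their spans match or overlap. This illustrates the stated rule rather than reproducing the authors' evaluation code.

```python
# Overlap-based per-label F1, treating same-label spans that match or overlap as agreement.

def overlaps(a, b):
    """True if spans a and b (start, end, ...) share at least one character."""
    return a[0] < b[1] and b[0] < a[1]

def label_f1(expected, predicted, label):
    exp = [s for s in expected if s[2] == label]
    pred = [s for s in predicted if s[2] == label]
    tp = sum(any(overlaps(p, e) for e in exp) for p in pred)
    fp = len(pred) - tp
    fn = sum(not any(overlaps(e, p) for p in pred) for e in exp)
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    return 2 * precision * recall / (precision + recall) if precision + recall else 0.0

# Toy example: one annotator's labels as expected, the aggregated dataset as predicted.
annotator = [(0, 5, "Stereotype"), (10, 20, "Omission")]
aggregated = [(2, 7, "Stereotype"), (30, 40, "Omission")]
print(label_f1(annotator, aggregated, "Stereotype"))  # 1.0 (spans overlap)
print(label_f1(annotator, aggregated, "Omission"))    # 0.0 (no overlap)
```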
|
{ |
|
"text": "IAA calculations reflect the subjectivity of gender bias in language. F 1 scores for the gendered language labels Gendered Role and Gendered Pronoun fall between 0.71 and 0.99. F 1 scores for annotating gender biased language are relatively low, with the greatest agreement on the Generalization label at only 0.56, on the Omission label at 0.48, and on the Stereotype label at 0.57. For Person Name labels, A0 and A2 agree more than A1: A0 and A2's F 1 scores for all Person Name labels are between 0.82 and 0.86, while A1's scores with either A0 or A2 are between 0.42 and 0.64. A1 has a particularly high false negative rate for the Unknown label compared to A0.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inter-Annotator Agreement", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "After creating the aggregated dataset, we calculated IAA between each annotator and the aggregated dataset. F 1 scores for all Person Name and Linguistic labels except Generalization are similarly high (0.74 to 0.98). Generalization proved particularly difficult to label. Annotators used Generalization and Gendered Role inconsistently. As a result, during the aggregation process, we revised the definition of Generalization to more clearly distinguish it from Gendered Role. Consequently the IAA between annotators and the aggregated dataset for this label is particularly low (0.1 to 0.4).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inter-Annotator Agreement", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "For Contextual labels, F 1 scores with the aggregated dataset as \"expected\" and an annotator as \"predicted\" increased more dramatically than the Person Name and Linguistic labels' F 1 scores. Besides Omission with A3, all F 1 scores are between 0.76 and 0.91. For Stereotype, A3 agreed more strongly with the aggregated dataset than A0 and A4. The reverse is true for Omission and Occupation, with A0 and A4 agreeing more strongly with the aggregated dataset than A3. A3's notes explain that she did not annotate an incomplete version of a person's name as an omission if the complete version was provided elsewhere in the collection's descriptions, whereas A0 and A4 annotated incomplete versions of people's names as omission unless the complete version appeared in the same description. Two labels were not applied according to the taxonomy's definitions: Empowering and Non-binary. Empowering was used by A3 according to a different definition than that of the taxonomy (see \u00a7B). As only 80 instances of the label exist in A3's dataset, though, there are likely to be insufficient examples for effectively training classifiers on this label in future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inter-Annotator Agreement", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "The annotators did not use the Non-binary label. That being said, this does not mean there were not people who would identify as non-binary represented in the text of the annotation corpus. Additional linguistic and historical research may identify people who were likely to identify as nonbinary in the corpus of archival documentation, as well as more specific gender identities for people whose names were annotated as Masculine or Feminine. Metadata entries for people in the partner institution's catalog may also provide more information relevant to gender identities. Shopland (2020) finds that focusing on actions that people were described doing can help to locate people of minoritized genders (and sexualities) in historical texts. However, Shopland also cautions researchers against assuming too much: a full understanding of a person's gender often remains unattainable from the documentation that exists about them. As Figure 1 displays, Unknown is the most prevalent label in the Person Name category, because each annotation of a person's name was informed by words within the description in which that name appears. Consequently, for people named in more than one description, there may be different person name labels applied to their name across those descriptions. The rationale for this approach comes from the aim to train document classification models on the annotated data where each description serves as a document. Should a person change their gender during their lifetime, and archival documentation exists that describes them as different genders, the person may wish a model to use the most recent description of a person to determine their gender, or not use any gender information about the person, in case obviating their change of gender leads to safety concerns (Dunsire, 2018) . Furthermore, many GLAM content management systems do not have versioning control, so dates of descriptions may not exist to determine the most recent description of a person's gender. Person Name labels are thus based on the description in which a name appears to minimize the risk of misgendering (Scheuerman et al., 2020).", |
|
"cite_spans": [ |
|
{ |
|
"start": 1798, |
|
"end": 1813, |
|
"text": "(Dunsire, 2018)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 933, |
|
"end": 941, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Inter-Annotator Agreement", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "The paper's annotation taxonomy builds on biased language research from NLP, information sciences, gender studies, and linguistics literature. The gender bias taxonomy of Hitti et al. (2019) , which categorizes gender biases based on whether the bias comes from the sentence structure or the context (i.e. people, relationships, time period, location) of the language, served as a foundation. We adopted four labels from that taxonomy: Gendered Pronoun, Gendered Role, Generalization, and Stereotype (merging Hitti et al.'s Societal Stereotype and Behavioral Stereotype categories). Drawing on archival science and critical discourse analysis, and guided by participatory action research with archivists (e.g., interviews, workshops), we added to and restructured Hitti et al.'s taxonomy. The Person Name labels were added so that the representation of people of different genders in the archival documentation could be estimated. Annotators chose which label to apply to a person's name based on gendered pronouns or roles that refer to that person in the description in which their name appears. For example, \"they\" as singular for Non-binary, \"his\" for Masculine, and \"she\" for Feminine; or \"Mx.\" for Non-binary, \"Lady\" for Feminine, or \"son\" for Masculine. The Unknown, Feminine, and Masculine labels distinguish our approach from previous NLP gender bias work that has not allowed for uncertainty.", |
|
"cite_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 190, |
|
"text": "Hitti et al. (2019)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Limitations", |
|
"sec_num": "7" |
|
}, |
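As an illustration of how a Person Name label follows from gendered cues elsewhere in the same description, the hypothetical sketch below maps a few of the pronouns and roles listed above to Feminine, Masculine, or Non-binary, and falls back to Unknown when cues are absent or conflicting. The cue dictionary and function are assumptions for illustration, not the annotation tooling.

```python
# Hypothetical mapping from gendered cues in a description to a Person Name label.
CUES = {
    "they": "Non-binary", "mx.": "Non-binary",
    "she": "Feminine", "her": "Feminine", "lady": "Feminine",
    "he": "Masculine", "his": "Masculine", "son": "Masculine",
}

def person_name_label(description: str) -> str:
    tokens = description.lower().split()
    labels = {CUES[t] for t in tokens if t in CUES}
    return labels.pop() if len(labels) == 1 else "Unknown"  # uncertain or conflicting cues

print(person_name_label("Letters from Professor Smith to his brother"))  # Masculine
print(person_name_label("Papers of J. Smith, botanist"))                 # Unknown
```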
|
{ |
|
"text": "Guessing a person's gender risks misgendering (Scheuerman et al., 2020), a representational harm (Blodgett et al., 2020; Crawford, 2017) , and fails to acknowledge that sufficient information often is not available to determine a person's gender with certainty (Shopland, 2020) . This led us to replace the initial labels of Woman and Man with Feminine and Masculine, recognizing that pronouns and roles are insufficient for determining how people define their gender. Each Person Name label encompasses multiple genders. For instance, a person who identifies as a transwoman, as genderfluid, or as a cis woman may use feminine pronouns, such as \"she,\" or feminine roles, such as \"wife.\" Though we aimed to create a taxonomy inclusive of all genders, we acknowledge this may not have been achieved, and welcome feedback on how to represent any genders inadvertently excluded.", |
|
"cite_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 120, |
|
"text": "(Blodgett et al., 2020;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 136, |
|
"text": "Crawford, 2017)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 277, |
|
"text": "(Shopland, 2020)", |
|
"ref_id": "BIBREF69" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Limitations", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We also added three labels to the Contextual category: Occupation, Omission, and Empowering. Occupation was added because, when combined with historical employment statistics, Occupationlabeled text spans could inform estimates of the representation of particular genders within the collaborating archive's collections. Furthermore, Person Name annotations combined with their occupations could guide researchers to material beyond the archive that may provide information about those people's gender identity. Omission was added because, during group interviews, representatives from the collaborating archive described finding gender bias through the lack of information provided about women relative to the detail provided about men. Empowering was added to account for how communities reclaim certain derogatory terms, such as \"queer,\" in a positive, self-affirming manner (Bucholtz, 1999) . Figure 1 displays how prevalent Omission was in the annotated data: this label is the most commonly applied label from the Contextual category. Such prevalence demonstrates the value of interdisciplinary collaboration and stakeholder engagement, carried out in our participatory action research with domain experts. Had archivists at the partner institution not been consulted, we would not have known how relevant omitted information regarding gender identities would be to identifying and measuring gender bias in archival documentation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 877, |
|
"end": 893, |
|
"text": "(Bucholtz, 1999)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 896, |
|
"end": 904, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion and Limitations", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The final annotation taxonomy includes labels for gendered language (specifically, Gendered Role, Gendered Pronoun, and all labels in the Person Name category), rather than only explicitly gender biased language (specifically, Generalization, Stereotype, and Omission), because measuring the use of gendered words across an entire archives' collection provides information about gender bias at the overall collections' level. For example, using a gendered pronoun such as \"he\" is not inherently biased, but if the use of this masculine gendered pronoun far outnumbers the use of other gendered pronouns in our dataset, we can observe that the masculine is over-represented, indicating a masculine bias in the archives' collections overall. Labeling gender-biased language focuses on the individual description level. For example, the stereotype of a wife playing a supporting role to her husband comes through in this description: Jewel took an active interest in her husband's work, accompanying him when he travelled, sitting on charitable committees, looking after missionary furlough houses and much more.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Limitations", |
|
"sec_num": "7" |
|
}, |
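A toy example of the collection-level measurement described above: tallying Gendered Pronoun annotations by surface form to check whether masculine pronouns far outnumber others. The list of extracted pronoun spans is invented for illustration.

```python
# Toy tally of Gendered Pronoun annotations across a collection (invented data).
from collections import Counter

pronoun_spans = ["he", "his", "he", "she", "him", "his"]  # assumed extracted text spans
counts = Counter(pronoun_spans)
masculine = counts["he"] + counts["him"] + counts["his"]
feminine = counts["she"] + counts["her"] + counts["hers"]
print(f"masculine: {masculine}, feminine: {feminine}")  # masculine: 5, feminine: 1
```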
|
{ |
|
"text": "Instructions for applying the taxonomy permitted labels to overlap as each annotator saw fit, and asked annotators to annotate from their contemporary perspective. Approaching the archival metadata descriptions as discourse (meaning language as representations of the material, mental, and social worlds (Fairclough, 2003) ), the taxonomy of labels represents the \"internal relations\" and \"external relations\" of the descriptions (ibid., 37). The Person Name and Linguistic categories annotate in-ternal relations, meaning the \"vocabulary (or 'lexical') relations\" (ibid., 37) of the descriptions. To apply their labels, annotators looked for the presence of particular words and phrases (i.e., gendered pronouns, gendered titles, familial roles).", |
|
"cite_spans": [ |
|
{ |
|
"start": 304, |
|
"end": 322, |
|
"text": "(Fairclough, 2003)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Limitations", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The Contextual category annotates external relations: relations with \"social events ... social practices and social structures\" (Fairclough, 2003, 36) . To apply Contextual labels, annotators reflected on the production and reception of the language in the archival documentation. For instance, to apply the Stereotype label, annotators considered the relationship between a description's language with social hierarchies in 21 st century British society, determining whether the term or phase adequately represented the possible gender diversity of people being described.", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 150, |
|
"text": "(Fairclough, 2003, 36)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Limitations", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "This paper has presented a taxonomy of gender biased language with a case study to support clarity and alignment in NLP gender bias research. Recognizing the value of clearly defined metrics for advancing bias mitigation, the taxonomy provides a structure for identifying types of gender biased language at the level they originate (words and phrases), rather than at a level of abstraction (i.e., vector spaces). Still, the case study presented in this paper demonstrates the difficulty of determining people's gender with certainty. While recognizing the value of NLP systems for mitigating harms from gender biased language at large scale, we contend that conceptualizations of gender must extend to trans and gender diverse gender expressions if NLP systems are to empower minoritized gender communities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Future work will include the publication of the case study's datasets, analysis of the datasets, and document classification models trained on the datasets. The datasets will include each individual annotator's dataset and two aggregated datasets, one with duplicates across different annotators, and one deduplicated to exclude matching and overlapping annotations from different annotators. The analysis of the datasets and creation of models trained on them will be informed by participatory action research, incorporating perspectives from archivists, and from people of trans and gender diverse identities not represented in the research team. The dataset will be published in the same location as the code written to create the corpus of archival documentation and the annotated datasets. 5 The taxonomy and forthcoming datasets aim to guide NLP systems towards measurable and inclusive conceptualizations of gender.", |
|
"cite_spans": [ |
|
{ |
|
"start": 795, |
|
"end": 796, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Thank you to our collaborators, Rachel Hosker and her team at the Centre for Research Collections; our annotators, Suzanne Black, Ashlyn Cudney, Anna Kuslits, and Iona Walker; and Richard Tobin, who wrote the pre-annotation scripts for this paper's annotation process. We also extend our gratitude to the organizations who provided grants to support the research reported in this paper: Table 2 : Inter-annotator agreement measures for annotators who used the Person Name and Linguistic categories of labels to annotate archival documentation. The first two columns note the annotator whose labels were considered expected or predicted, respectively. The abbreviation \"pos\" is for \"positive;\" \"neg,\" for \"negative.\" The last column lists the number of files with annotations by both annotators for that row. No annotators applied the \"Non-binary\" label. 0 3 Empowering 0 80 0 ---485 0 4 Empowering 0 0 0 ---149 3 4 Empowering 0 0 80 ---57 Table 3 : Inter-annotator agreement measures for annotators who used the Contextual category of labels to annotate archival metadata descriptions. The first two columns note the annotator whose labels were considered expected or predicted, respectively. The abbreviation \"pos\" is for \"positive;\" \"neg,\" for \"negative.\" The last column lists the number of files with annotations by both annotators for that row. Only annotator 3 applied the \"Empowering\" label. Table 4 : Inter-annotator agreement between the aggregated dataset and annotators for the Person Name and Linguistic categories of labels to annotate archival documentation. The first two columns note the annotator whose labels were considered expected or predicted, respectively. The abbreviation \"pos\" is for \"positive;\" \"neg,\" for \"negative.\" The last column lists the number of files with annotations by both annotators for that row. No annotators applied the \"Non-binary\" label. Empowering 0 0 0 ---450 Table 5 : Inter-annotator agreement between the aggregated dataset and annotators for the Contextual category of labels to annotate archival metadata descriptions. The first two columns note the annotator whose labels were considered expected or predicted, respectively. The abbreviation \"pos\" is for \"positive;\" \"neg,\" for \"negative.\" The last column lists the number of files with annotations by both annotators for that row. Only annotator 3 applied the \"Empowering\" label. Figure 6: Confusion matrices normalized with a weighted average on the aggregated data's labels, so that class imbalances are taken into account. The top left confusion matrix displays intersections between the aggregated datasets labels, illustrating where the same text spans have more than one label. The remaining confusion matrices to display the agreement between an annotator's labels (Y axis) and the aggregated data's labels (X axis). The Y axis scale is the same for all matrices, ranging from zero to one. Figure 7: Disagreeing and Agreeing Label Counts Across All Annotators' Datasets. The bar chart displays counts of the occurrence of disagreements and agreements across annotators' labels. Annotations by two annotators with the same or overlapping text span but different labels are considered to be in disagreement. Annotations by two annotators with the same or overlapping text span and the same labels are considered to be in agreement. Agreements with the same text span are considered to be exact matches. Agreements with different but overlapping text spans are considered to be overlaps. 
Combined, the annotated datasets contain 198,520 annotations. These datasets were created from a corpus of 1,460 files of archival metadata descriptions totaling circa 15,419 sentences and 255,943 words. That corpus is the first 20% of text from the corpus described in the Provenance Appendix ( \u00a7B.9), annotated for gender bias according the the taxonomy in Other ( \u00a7B.8). 73 of files (10% of the text) were triply annotated; the remaining 1,387 files (90% of the text) were doubly annotated. There are six instances of the annotated corpus: one for each of the five annotators and one that aggregates all annotators' labels. Participatory action research with archivists led the project to choose four metadata fields were chosen in the archival catalog to extract for annotation: Title, Scope and Contents, Biographical / Historical, and Processing Information. The five annotated datasets were merged into a single aggregated dataset for classifier training and evaluation, so comparisons could be made on classifiers' performances after training on an individual annotator's dataset versus on the aggregated dataset. The merging process began with a onehour manual review of each annotator's labels to identify patterns and common mistakes in their labeling, which informed the subsequent steps for merging the five annotated datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 387, |
|
"end": 394, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 854, |
|
"end": 965, |
|
"text": "0 3 Empowering 0 80 0 ---485 0 4 Empowering 0 0 0 ---149 3 4 Empowering 0 0 80 ---57 Table 3", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 1418, |
|
"end": 1425, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1926, |
|
"end": 1933, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "8" |
|
}, |
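The agreement measures summarized in the table and figure captions above treat one annotator's labels as the expected values and another annotator's labels as the predicted values. The following is a minimal sketch of that framing, assuming token-level label sequences and scikit-learn's metrics; the label lists are illustrative and this is not the paper's evaluation code.

```python
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support

# Illustrative token-level labels from two annotators over the same text spans.
expected  = ["Feminine", "Masculine", "Unknown", "Masculine", "Feminine"]
predicted = ["Feminine", "Masculine", "Masculine", "Masculine", "Unknown"]
labels = ["Feminine", "Masculine", "Unknown"]

# Rows are the expected annotator's labels; columns are the predicted annotator's labels.
print(confusion_matrix(expected, predicted, labels=labels))

# Per-label precision, recall, and F-score for the same pairing.
print(precision_recall_fscore_support(expected, predicted, labels=labels, zero_division=0))
```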
|
{ |
|
"text": "The second step of the merging process was to manually review disagreeing labels for the same text span and add the correct label to the aggregated dataset. Disagreeing labels for the same text span were reviewed for all Person Name, Linguistic, and Contextual categories of labels. For Person Name and Linguistic labels, where three annotators labeled the same span of text, majority voting determined the correct label: if two out of the three annotators used one label and the other annotator used a different label, the label used by the two annotators was deemed correct and added to the aggregated dataset. For Contextual labels, unless an obvious mistake was made, the union of all three annotators' labels was included in the aggregated dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Additional Tables and Figures", |
|
"sec_num": null |
|
}, |
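A minimal sketch of the majority-voting rule described above, assuming the three annotators' labels for a given text span are held in memory as strings; the function below is illustrative and is not the project's merging code.

```python
from collections import Counter

def majority_vote(labels_for_span):
    """Return the label at least two of three annotators agree on, or None if all disagree."""
    label, count = Counter(labels_for_span).most_common(1)[0]
    return label if count >= 2 else None

print(majority_vote(["Feminine", "Feminine", "Unknown"]))   # -> Feminine
print(majority_vote(["Feminine", "Masculine", "Unknown"]))  # -> None (left for manual review)
```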
|
{ |
|
"text": "Thirdly, the \"Occupation\" and \"Gendered Pronoun\" labels were reviewed. A unique list of the text spans with these labels was generated and incorrect text spans were removed from this list. The \"Occupation\" and \"Gendered Pronoun\" labels in the annotated datasets with text spans in the unique lists of valid text spans were added to the aggregated dataset. Fourthly, the remaining Linguistic labels (\"Gendered Pronoun,\" \"Gendered Role,\" and \"Generalization\") not deemed incorrect in the annotated datasets were added to the aggregated dataset. Due to common mistakes in annotating Person Name labels with one annotator, only data from the other two annotators who annotated with Person Name labels was added to the aggregated dataset. Fifthly, for annotations with overlapping text spans and the same label, the annotation with the longer text span was added to the aggregated dataset. The sixth and final step to constructing the aggregated dataset was to take the union of the remaining Contextual labels (\"Stereotype,\" \"Omission,\" \"Occupation,\" and \"Empowering\") not deemed incorrect in the three annotated datasets with these labels and add them to the aggregated dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Additional Tables and Figures", |
|
"sec_num": null |
|
}, |
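The fifth step's rule, keeping the longer of two overlapping annotations that share a label, could look like the following sketch; the tuple representation (start, end, label) is an illustrative assumption, not the format of the published data.

```python
def same_label_overlap(a, b):
    """True if two (start, end, label) annotations share a label and overlap."""
    return a[2] == b[2] and a[0] < b[1] and b[0] < a[1]

def keep_longer_spans(annotations):
    """Among overlapping annotations with the same label, keep only the longest."""
    kept = []
    for ann in sorted(annotations, key=lambda a: a[1] - a[0], reverse=True):
        if not any(same_label_overlap(ann, k) for k in kept):
            kept.append(ann)
    return kept

anns = [(10, 17, "Occupation"), (10, 25, "Occupation"), (30, 33, "Gendered Pronoun")]
print(keep_longer_spans(anns))  # -> [(10, 25, 'Occupation'), (30, 33, 'Gendered Pronoun')]
```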
|
{ |
|
"text": "The metadata descriptions extracted from the Archive's catalog are written primarily in British English, with the occasional word in another language such as French or Latin.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.2 Language Variety", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The producing research team are of American, German, and Scots nationalities, and are three women and one man. We all work primarily as academic researchers in the disciplines of natural language processing, data science, data visualization, humancomputer interaction, digital humanities, and digital cultural heritage. Additionally, one of us is audited an online course on feminist and social justice studies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.3 Producer Demographic", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The five annotators are of American and European nationalities and identify as women. Four annotators were hired by the lead annotator for their experience in gender studies and archives. The four annotators worked 72 hours each over eight weeks in 2022, receiving \u00a31,333.44 each (\u00a318.52 per hour). The lead annotator completed the work for her PhD project, which totaled to 86 hours of work over 16 weeks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.4 Annotator Demographic", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The archival metadata descriptions describe material about a range of topics, such as teaching, research, town planning, music, and religion. The materials described also vary, from letters and journals to photographs and audio recordings. The descriptions in this project's dataset with a known date (which describe 38.5% of the archives' records) were written from 1896 through 2020.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.5 Speech or Publication Situation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The annotated dataset will be published with a forthcoming paper detailing the methodology and theoretical framework that guided the development of the annotation taxonomy and the annotation process, accompanied by analysis of patterns and outliers in the annotated dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.5 Speech or Publication Situation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The datasets were organized for annotation in a web-based annotation paltform, the brat rapid annotation tool (Stenetorp et al., 2012) . Consequently, the data formats conform to the brat formats: plain text files that end in '.txt' contain the original text and plain text files that end in '.ann' contain the annotations. The annotation files include the starting and ending text span of a label, the actual text contained in that span, the label name, and any notes annotators recorded about the rationale for applying the label they did. The names of all the files consist of the name of the fonds (the archival term for a collection) and a number indicating the starting line number of the descriptions. Descriptions from a single fonds were split across files so that no file contained more than 100 lines, because brat could not handle the extensive length of certian fonds' descriptions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 110, |
|
"end": 134, |
|
"text": "(Stenetorp et al., 2012)", |
|
"ref_id": "BIBREF73" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.6 Data Characteristics", |
|
"sec_num": null |
|
}, |
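For readers working with the published files, a minimal sketch of reading brat's standoff format is given below: text-bound annotations appear on lines beginning with \"T\" and annotator notes on lines beginning with \"#\". The file name in the usage comment is hypothetical, and discontinuous spans are not handled in this sketch.

```python
def read_brat_ann(path):
    """Parse a brat .ann file into text-bound spans and annotator notes."""
    spans, notes = {}, {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            if line.startswith("T"):                     # text-bound annotation
                ann_id, label_offsets, span_text = line.split("\t")
                label, start, end = label_offsets.split()[:3]
                spans[ann_id] = (label, int(start), int(end), span_text)
            elif line.startswith("#"):                   # annotator note
                _, note_ref, note_text = line.split("\t")
                notes[note_ref.split()[-1]] = note_text  # keyed by the T id it refers to
    return spans, notes

# spans, notes = read_brat_ann("Coll-1036_0.ann")  # hypothetical file name
```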
|
{ |
|
"text": "A subset of annotations were applied automatically with a grep script and then corrected during the manual annotation process. All three categories of the annotation taxonomy were manually applied by the annotators. The lead annotator then manually checked the labels for accuracy. That being said, due to time constraints, mistakes are likely to remain in the application of labels (for example, the starting letter may be missing from a labeled text span or a punctuation mark may have accidentally been included in a labeled text span).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.7 Data Quality", |
|
"sec_num": null |
|
}, |
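As an illustration of the automatic pre-annotation mentioned above, a pattern-matching pass over a description might look like the sketch below; the pronoun list is illustrative and the project's actual grep script is not reproduced here.

```python
import re

# Illustrative pronoun pattern; the project's actual pre-annotation patterns may differ.
GENDERED_PRONOUNS = re.compile(r"\b(he|him|his|himself|she|her|hers|herself)\b", re.IGNORECASE)

def pre_annotate(text):
    """Yield (start, end, matched text) for candidate Gendered Pronoun spans."""
    for match in GENDERED_PRONOUNS.finditer(text):
        yield match.start(), match.end(), match.group()

sentence = "He obtained surgical qualifications from Edinburgh University in 1873."
print(list(pre_annotate(sentence)))  # -> [(0, 2, 'He')]
```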
|
{ |
|
"text": "The detailed schema that guided the annotation process is listed below with examples for each label. In each example, the labeled text is underlined. All examples are taken from the dataset except for labels 1.1, \"Non-binary,\" and 3.4, \"Empowering,\" as the annotators did not find any text to which the provided label definitions applied. The annotation instructions permitted labels to overlap as each annotator saw fit, and asked annotators to read and annotate from their contemporary perspective. The categories of labels from the annotation taxonomy were divided among annotators: two hired annotators labeled with categories 1 and 2, two hired annotators labeled with category 3, and the lead annotator labeled with all categories.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The annotation taxonomy includes labels for gendered language, rather than only explicitly genderbiased language, because measuring the use of gendered words across an entire archives' collection provides information about gender bias at the overall collections' level. For example, using a gendered pronoun such as \"he\" is not inherently biased, but if the use of this masculine gendered pronoun far outnumbers the use of other gendered pronouns in our dataset, we can observe that the masculine is over-represented, indicating a masculine bias in the archives' collections overall. Labeling genderbiased language focuses on the individual description level. For example, the stereotype of a wife playing only or primarily a supporting role to her husband comes through in the following description:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Jewel took an active interest in her husband's work, accompanying him when he travelled, sitting on charitable committees, looking after missionary furlough houses and much more. She also wrote a preface to his Baptism and Conversion and a foreward [sic] to his A Reasoned Faith. (Fonds Identifier: Coll-1036) 1. Person Name: the name of a person, including any pre-nominal titles (i.e., Professor, Mrs., Sir, Queen), when the person is the primary entity being described (rather than a location named after a person, for example)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1.1 Non-binary:* the pronouns or roles of the named person within the descriptive field in which this instance of the name appears (either Title, Scope and Contents, Biographical / Historical, or Processing Information) are non-binary Example 1.1: Francis McDonald went to the University of Edinburgh where they studied law. Note: the annotation process did not find suitable text on which to apply this label in the dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1.2 Feminine: the pronouns, titles, or roles of the named person within the descriptive field in which this instance of the name appears (either Title, Scope and Contents, Biographical / Historical, or Processing Information) are feminine Example 1.2: \"Jewel took an active interest in her husband's work...\" (Fonds Identifier: Coll-1036) 1.3 Masculine: the pronouns, titles, or roles of the named person within the descriptive field in which this instance of the name appears (either Title, Scope and Contents, Biographical / Historical, or Processing Information) are masculine Example 1.3: \"Martin Luther, the man and his work.\" (Fonds Identifier: BAI) 1.4 Unknown: any pronouns, titles, or roles of the named person within the descriptive field in which this instance of the name appears (either Title, Scope and Contents, Biographical / Historical, or Processing Information) are gender neutral, or no such pronouns or roles are provided within the descriptive field Example 1.4: \"Testimonials and additional testimonials in favour of Niecks, candidacy for the Chair of Music, 1891\" (Fonds Identifier: Coll-1086) 2. Linguistic: gender marked in the way a word, phrase or sentence references a person or people, assigning them a specific gender that does not account for all genders possible for that person or people 2.1 Generalization: use of a genderspecific term (i.e. roles, titles) to refer to a group of people that could identify as more than the specified gender Example 2.1: \"His classes included Anatomy, Practical Anatomy, ... Midwifery and Diseases of Women, Therapeutics, Neurology, ... Public Health, and Diseases of the Skin.\" (Fonds Identifier: Coll-1118) 2.2 Gendered Role: use of a title or word denoting a person's role that marks either a non-binary, feminine, or masculine gender Example 2.2: \"New map of Scotland for Ladies Needlework, 1797 \" (Fonds Identifier: Coll-1111 2.3 Gendered Pronoun: explicitly marking the gender of a person or people through the use of pronouns (e.g., he, him, himself, his, her, herself, and she) Example 2.3: \"He obtained surgical qualifications from Edinburgh University in 1873 ([M.B.] ).\" (Fonds Identifier: Coll-1096) 3. Contextual: expectations about a gender or genders that comes from knowledge about the time and place in which language is used, rather than from linguistic patterns alone (i.e., sentence structure or word choice)", |
|
"cite_spans": [ |
|
{ |
|
"start": 1647, |
|
"end": 1676, |
|
"text": "(Fonds Identifier: Coll-1118)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1851, |
|
"end": 1867, |
|
"text": "Needlework, 1797", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1868, |
|
"end": 1898, |
|
"text": "\" (Fonds Identifier: Coll-1111", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2138, |
|
"end": 2145, |
|
"text": "([M.B.]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2150, |
|
"end": 2179, |
|
"text": "(Fonds Identifier: Coll-1096)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "3.1 Stereotype: a word, phrase, or sentence that communicates an expectation of a person or group of people's behaviors or preferences that does not reflect the reality of all their possible behaviors or preferences; or a word, phrase, or sentence that focuses on a particular aspect of a person that doesn't represent that person holistically Example 3.1: \"The engraving depicts a walking figure (female) set against sunlight, and holding/releasing a bird.\" (Fonds Identifier: Coll-1116) 3.2 Omission: focusing on the presence, responsibility, or contribution of a single gender in a situation in which more than one gender has a presence, responsibility or contribution; or defining one person's identity in terms of their relation to another person Example 3.2: \"This group portrait of Laurencin, Apollinaire, and Picasso and his mistress became the theme of a larger version in 1909 entitledApollinaire [sic] and his friends.\" (Fonds Identifier: Coll-1090).", |
|
"cite_spans": [ |
|
{ |
|
"start": 478, |
|
"end": 488, |
|
"text": "Coll-1116)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "3.3 Occupation: a word or phrase that refers to a person or people's job title (singular or plural) for which the person or people received payment; do not annotate occupations used as a pre-nominal title (for example, \"Colonel Sir Thomas Francis Fremantle\" should not have an occupation label) Example 3.3: \"He became a surgeon with the Indian Medical Service.\" (Fonds Identifier: Coll-1096).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "3.4 Empowering: reclaiming derogatory words or phrases to empower a minoritized person or people Example 3.4: a person describing themself as queer in a self-affirming, positive manner Note: the annotation process did not find enough text on which to apply this label in the dataset to include it when training a classifier. One annotator used the label according to a different definition.** *The \"Non-binary\" label was not used by the annotators. That being said, this does not mean there were not people who would identify as non-binary represented in the text of the annotation corpus. When relying only on descriptions written by people other than those represented in the descriptions, knowledge about people's gender identity remains incomplete (Shopland, 2020) . Additional linguistic research informed by a knowledge of terminology for the relevant time period may identify people who were likely to identify as non-binary in the corpus of archival metadata descriptions. For example, Shopland (2020) finds that focusing on actions that people were described doing can help to locate people of minoritized genders (and sexualities) in historical texts, but also cautions researchers against assuming too much. A full understanding of a person's gender often remains unattainable from the documentation that exists about them. **One annotator used the \"Empowering\" label in the following instances:", |
|
"cite_spans": [ |
|
{ |
|
"start": 752, |
|
"end": 768, |
|
"text": "(Shopland, 2020)", |
|
"ref_id": "BIBREF69" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 When a person referenced with feminine terms was described as the active party in marriage", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Honor or achievement held by a woman (as indicated in the text)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note: Honors and achievements held by men were labeled as stereotypes, as there was a consistent focus on this type of detail about people, which involved spheres of life historically dominated by men in the UK. Spheres of life historically dominated by women in the UK were described with greater vagueness, eliminating the possibility of honors or achievements in these spheres to be identified.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 The fate of a wife is mentioned in an entry predominantly about the life of a husband", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Family members referenced with feminine terms are prioritized (i.e., they are listed first, more detail is given about them than those referenced with masculine terms)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 A gender-neutral term is used instead of gendered term", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "All annotators were encouraged to use the annotation tool's notes field to record their rationale for particular label choices, especially for text labeled with \"Generalization,\" \"Stereotype,\" or \"Omission.\" The work intends these notes to lend transparency to the annotation process, providing anyone who wishes to use the data with insight onto the annotator's mindset when labeling the archival documentation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.8 Other: Annotation Schema", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Data Statement: Corpus of Archival Documentation B.9.1 Curation Rationale", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.9 Provenance Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We (the research team) will use the extracted metadata descriptions to create a gold standard dataset annotated for contextual gender bias. We adopt Hitti et al.'s definition of contextual gender bias in text: written language that connotes or implies an inclination or prejudice against a gender through the use of gender-marked keywords and their context (2019).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.9 Provenance Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A member of our research team has extracted text from four descriptive metadata fields for all collections, subcollections, and items in the Archive's online catalog. The first field is a title field. The second field provides information about the people, time period, and places associated with the collection, subcollection, or item to which the field belongs. The third field summarizes the contents of the collection, subcollection, or item to which the field belongs. The last field records the person who wrote the text for the collection, subcollection, or item's descriptive metadata fields, and the date the person wrote the text (although not all of this information is available in each description; some are empty). Using the dataset of extracted text, we will experiment with training a discriminative classification algorithm to identify types of contextual gender bias. Additionally, the dataset will serve as a source of annotated, historical text to complement datasets composed of contemporary texts (i.e. from social media, Wikipedia, news articles).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.9 Provenance Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We chose to use archival metadata descriptions as a data source because:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.9 Provenance Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1. Metadata descriptions in the Archive's catalog (and most GLAM catalogs) are freely, publicly available online 2. GLAM metadata descriptions have yet to be analyzed at large scale using natural language processing (NLP) methods and, as records of cultural heritage, the descriptions have the potential to provide historical insights on changes in language and society (Welsh, 2016) 3. GLAM metadata standards are freely, publicly available, often online, meaning we can use historical changes in metadata standards used in the Archive to guide large-scale text analysis of changes in the language of the metadata descriptions over time 4. The Archive's policy acknowledges its responsibility to address legacy descriptions in its catalogs that use language considered biased or otherwise inappropriate today 6", |
|
"cite_spans": [ |
|
{ |
|
"start": 370, |
|
"end": 383, |
|
"text": "(Welsh, 2016)", |
|
"ref_id": "BIBREF82" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.9 Provenance Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The metadata descriptions extracted from the Archive's catalog are written in British English.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.9.2 Language Variety", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We (the research team) are of American, German, and Scots nationalities, and are three females and one male. We all work primarily as academic researchers in the disciplines of natural language processing, data science, data visualization, humancomputer interaction, digital humanities, and digital cultural heritage. Additionally, one of us has been auditing a feminism and social justice course, and reading literature on feminist theories, queer theory, and indigenous epistemologies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.9.3 Producer Demographic", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Not applicable", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.9.4 Annotator Demographic", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The metadata descriptions extracted from the Archive's online catalog using Open Access Initiative -Protocol for Metadata Harvesting (OAI-PMH). For OAI-PMH, an institution (in this case, the Archive) provides a URL to its catalog that displays its catalog metadata in XML format. A member of our research team wrote scripts in Python to extract three descriptive metadata fields for every collection, subcollection, and item in the Archive's online catalog (the metadata is organized hierarchically). Using Python and its Natural Language Toolkit library (Loper and Bird, 2002) , the researcher removed duplicate sentences and calculated that the extracted metadata descriptions consist of a total of 966,763 words and 68,448 sentences across 1,231 collections. The minimum number of words in a collection is 7 and the maximum, 156,747, with an average of 1,306 words per collection and standard deviation of 7,784 words. The archival items described in resulting corpus consist of a variety of material, from photographs and manuscripts (letters, lecture notes, and other handwritten documents) to instruments and tweets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 555, |
|
"end": 577, |
|
"text": "(Loper and Bird, 2002)", |
|
"ref_id": "BIBREF54" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.9.5 Speech or Publication Situation", |
|
"sec_num": null |
|
}, |
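A minimal sketch of the sentence and word counts described above, using the Natural Language Toolkit's tokenizers; the example strings are illustrative, and the published extraction scripts may tokenize differently.

```python
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize

nltk.download("punkt", quiet=True)  # tokenizer models (newer NLTK may also need "punkt_tab")

def corpus_counts(descriptions):
    """Count sentences and words across a list of metadata description strings."""
    n_sentences = sum(len(sent_tokenize(d)) for d in descriptions)
    n_words = sum(len(word_tokenize(d)) for d in descriptions)
    return n_sentences, n_words

print(corpus_counts(["Letters and journals, 1831-1836.", "He studied medicine in Edinburgh."]))
```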
|
{ |
|
"text": "Upon extracting the metadata descriptions using OAI-PMH, the XML tags were removed so that the total words and sentences of the metadata descriptions could be calculated to ensure the text source provided a sufficiently large dataset. A member of our research team has grouped all the extracted metadata descriptions by their collection (the \"fonds\" level in the XML data), preserving the context in which the metadata descriptions were written and will be read by visitors to the Archive's online catalog.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.9.6 Data Characteristics", |
|
"sec_num": null |
|
}, |
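A sketch of grouping harvested records by fonds is shown below. The element and field names are hypothetical placeholders, since the Archive's OAI-PMH output follows its own metadata schema; only the overall shape of the grouping step is illustrated.

```python
import xml.etree.ElementTree as ET
from collections import defaultdict

def group_by_fonds(xml_path):
    """Group description text by fonds identifier (hypothetical element names)."""
    grouped = defaultdict(list)
    for record in ET.parse(xml_path).getroot().iter("record"):  # hypothetical element
        fonds_id = record.findtext("fonds_identifier")          # hypothetical field
        description = record.findtext("description")            # hypothetical field
        if fonds_id and description:
            grouped[fonds_id].append(description.strip())
    return grouped

# grouped = group_by_fonds("archive_catalogue.xml")  # hypothetical file name
```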
|
{ |
|
"text": "As a member of our research team extracts and filters metadata descriptions from the Archive's online catalog, they write assertions and tests to ensure as best as possible that metadata is not lost or unintentionally changed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.9.7 Data Quality", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The data can be freely accessed at: datashare. ed.ac.uk/handle/10283/3794. The data preparation code has been published at: github. com/thegoose20/annot-prep.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.9.8 Other", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The data described above was harvested from the University of Edinburgh's Centre for Research Collections' Archives catalog in 2020 (archives.collections.ed.ac.uk).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.9.9 Provenance Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The annotation instructions were written to guide annotators in applying the taxonomy of to the annotation corpus of archival metadata descriptions. Prior to beginning the annotation process, an annotation pilot was undertaken with three participants to test the clarity of the annotation taxonomy. The pilot led to revisions of the instructions: more examples were added and annotators were explicitly instructed to read and interpret the descriptions from their contemporary perspective. The annotation instructions below contain a slightly different annotation taxonomy than the final annotation taxonomy included above in the main body of the paper. This is due to the fact that during and after the annotation process, the taxonomy was revised based on the data that was being annotated. The definitions of Gendered Role and Generalization proved to be difficult to distinguish in practice, so the definitions were revised during the dataset aggregation process. Additionally, we realized during the annotation process that \"Woman\" and \"Man\" were inaccurate labels based on what we could learn about gender from text, so we changed these labels to \"Feminine\" and \"Masculine,\" respectively, for the final annotation taxonomy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C Annotation Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Step 1: As you read and label the archival metadata descriptions displayed on the screen, including text that quotes from source material, meaning text surrounded in quotation marks that reproduces something written in a letter, manuscript, or other text-based record from an archival collection. NOTE: If you are unsure about an annotation, please make a note the file name and your question so that we can discuss it and decide on the way to annotate that sort of language moving forward!", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Step 2: Please note that Gendered-Pronouns, Gendered-Roles, and Occupations have been preannotated. If any of these three categories of language have been annotated incorrectly, please correct them by clicking on the annotation label, deleting it, and making the correct annotation. If any of these three categories of language have been missed in the pre-annotation process, please annotate them yourself.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Step 3: Read the archival metadata descriptions displayed and while reading:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Use your mouse to highlight a selection of text or click on a word that uses gendered language according to the schema in the table on the next page.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Using the keyboard shortcuts (see the table) or your mouse, select the type of gendered language you've identified. Please select the most specific label possible (listed as i, ii, iii, or iv)! Please only select Person-Name, Linguistic or Contextual if you do not feel their subcategories are suitable to the gendered language you would like to annotate.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 46, |
|
"text": "the table)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 If you select a subcategory of Contextual gendered language, please write a brief note explaining what you've annotated as gendered in the \"Notes\" section of the \"New/Edit Annotation\" pop-up window.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 If you used your mouse to open the pop-up window, press the Enter/Return key or the \"OK\" button to make the annotation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 You may make overlapping annotations, meaning a single word or phrase may have multiple gendered language annotations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Please annotate all instances of a particular type of gendered language used for a specific person or people in the text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Please note that the labels to annotate with as defined below are intended to guide your interpretation of the text through a contemporary lens (not a historical lens).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The examples provided in the schema below are highlighted according to the words, phrases or sentences that should be highlighted or clicked in brat. If in doubt about how much to annotate, please annotate more words rather than less! 1. Person-Name: the name of a person including any pre-nominal titles they have (i.e., Professor, Mrs., Sir)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "NOTE 1: Please annotate every instance of a name in brat only (do not use a spreadsheet anymore). This means that each person may have multiple person-name labels annotating the same form of their name or different forms of their name.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "NOTE 2: Use the pronouns and roles that occur within the descriptive field in which the name appears (either \"Title,\" \"Scope and Contents,\" \"Biographical / Historical,\" or \"Processing Information\") to determine whether the annotation label should be Woman, Man, Nonbinary, or Unknown. Please do not use the occupation, name, or other information that implies a gender to determine the annotation label; only use explicit terms such as gender-marking pronouns (him, her, he, she, himself, herself, etc.) and gendermarking roles (mother, father, daughter, wife, husband, son, Mrs, Ms, Mr, etc.) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 527, |
|
"end": 592, |
|
"text": "(mother, father, daughter, wife, husband, son, Mrs, Ms, Mr, etc.)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(a) Woman: the pronouns (i.e., she, her) or roles (i.e., mother, wife, daughter, grandmother, Mrs., Ms., Queen, Lady, Baroness) or use of term nee [Last Name] indicating a maiden name within the descriptive field in which the name appears (either \"Title,\" \"Scope and Contents,\" \"Biographical / Historical,\" or \"Processing Information\") of the named person suggest they are a woman Example: Mrs. Jane Bennet went to Huntsford. (b) Men: the pronouns, roles, or titles of the named person suggest they are a man Example: Conrad Hal Waddington lived in Edinburgh and he published scientific papers. (c) Non-binary: the pronouns or roles of the named person within the descriptive field in which this instance of the name appears (either \"Title,\" \"Scope and Contents,\" \"Biographical / Historical,\" or \"Processing Information\") suggest they are non-binary NOTE: a preliminary search of the text returned no results for exclusively nonbinary pronouns such as Mx, so most likely any non-binary person would be indicated with \"they\"); if the gender of a person is named and it's not a woman or man, please note this gender in the \"Notes\" section of the annotation pop-up window Example: Francis McDonald went to the University of Edinburgh where they studied law. (d) Unknown: there are no pronouns or roles for the named person within the descriptive field in which this instance of the name appears (either \"Title,\" \"Scope and Contents,\" \"Biographical / Historical,\" or \"Processing Information\") that suggest their gender identity Example: Jo McMahon visited Edinburgh in 1900.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "2. Linguistic: gender marked in the way a sentence references a person or people, assigning them a specific gender that does not account for all genders possible for that person or group of people (Keyboard shortcut: L) (a) Generalization: use of a gender-specific term to refer to a group of people (including the job title of a person) that could identify as more than the specified gender (Keyboard shortcut: G) Example 1: The chairman of the university was born in 1980. Explanation: Chair would be the gender-neutral form of chairman Example 2: Readers, scholars, and workmen Explanation: readers and scholars are gender-neutral, while workpeople or workers would be the gender-neutral form of workmen Example 3: Housewife (b) Gendered Pronoun: explicitly marking the gender of a person or people through the use of the pronouns he, him, his, her, and she (Keyboard shortcut: P) Example 1: She studied at the University of Edinburgh. In 2000, she graduated with a degree in History. . Contextual: gender bias that comes from knowledge about the time and place in which language is used, rather than from linguistic patterns alone (i.e., sentence structure, word choice) (Keyboard shortcut: C) (a) Occupation: occupations, whether or not they explicitly communicate a gender, should be annotated, as statistics from external data sources can be used to estimate the number of people of different genders who held such occupations; please label words as occupations if they'd be a person's job title and are how the person would make money, but not if the words are used as a title (Keyboard shortcut: J) Example 1: minister Example 2: Sergeant-Major-General (b) Stereotype: language that communicates an expectation of a person or group of people's behaviors or preferences that does not reflect the reality of all possible behaviors/preferences that person or group of people may have, or language that focuses on a particular aspect of a person that doesn't represent that person holistically; for example, women described in relation to their family and home, and men in relation to their careers and workplace; men more associated with science and women more associated with liberal arts (Keyboard shortcut: S) NOTE: Please label whichever words, phrases, or sentences you feel communicate the stereotype. Three different examples are shown below for how this may look. Include names being turned into ways of thought (e.g., Bouldingism, Keynsian). Example 1: The event was sports-themed for all the fathers in attendance. Explanation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The assumption here is that all fathers and only fathers would enjoy a sports-themed event. A neutral alternative sentence could read: The event was sports-themed for all the former athletes in attendance Example 2: A programmer works from his computer most of the day. Explanation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The assumption here is that any programmer must be a man, since the indefinite article \"A\" is used with the pronoun \"his\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Example 3: A man with no doctorate degree being known as Dr. Jazz Explanation: Women often receive negative attention for using titles such as Dr (see the WSJ op-ed on Dr Jill Biden for a recent example) while men typically do not (c) Omission: focusing on the presence, responsibility, or contribution of a single gender in a situation in which more than one gender has a presence, responsibility or contribution; or defining a person's identity in terms of their relation to another person (Keyboard shortcut: O) NOTE: If initials are provided, consider that enough of a name that it doesn't need to be labeled as an omission! Explanation: Mrs. Oliphant is referred to by the last name she shares with her husband without including her given name (d) Empowering: use of gendered language to challenge stereotypes or norms that reclaims derogatory terms, empowering a minoritized person or people; for example, using the term queer in an empowering rather than a derogatory manner (Keyboard shortcut: E) Example: \"Queer\" being used in a selfaffirming, positive manner to describe oneself", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Step 4: If you would like to change an annotation you have made, double click the annotation label.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C.1 Instructions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This paper uses minoritization in the sense D'Ignazio and Klein (2020) use the term: as a descriptor to emphasize a group of people's experience of oppression, rather than using the noun minority, which defines people as oppressed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This paper uses gender diverse in the sense the Trans Metadata Collective (2022) uses the term: to include gender expressions that do not fit within binary conceptualizations of gender, that differ from one's gender assigned at birth, and that cannot be described with the culturally-specific term trans.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The availability of people who responded to the annotator application and the annotation timeline limited the gender diversity that could be achieved among annotators.4 In the 16 th century, grammarians instructed writers to write \"men\" or \"man\" before \"women\" or \"woman.\" In the 18 th century, \"man\" and \"he\" began to be employed as universal terms, rather than \"human\" and \"they\"(Spencer, 2000).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The Archive is not alone; across the GLAM sector, institutions acknowledge and are exploring ways to address legacy language in their catalogs' descriptions. The \"Note\" in We Are What We Steal provides one example: dxlab.sl.nsw. gov.au/we-are-what-we-steal/notes/.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "If you would like to remove the annotation, click the \"Delete\" button in the pop-up window. If you would like to change the annotation, click the label you would like to change to and then click the \"OK\" button.Step 5: Click the right arrow at the top left of the screen to navigate to the next archival metadata description (if you would like to return to a previous description, click the left arrow).Step 6: If the screen does not advance when you click the right arrow, you've reached the end of the folder you're currently in. To move onto the next file, please hover over the blue bar at the top of the screen and click the \"Collection\" button. Click the first list item in the pop-up window \"../\" to exit your current folder and then double click the next folder in the list. Double click the first file in this next folder to begin annotating its text.Step 7: Repeat from step 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "annex", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The Perspectivist Data Manifesto", |
|
"authors": [ |
|
{ |
|
"first": "Valerio", |
|
"middle": [], |
|
"last": "Basile", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Valerio Basile. 2022. The Perspectivist Data Manifesto. [Online; accessed March 21, 2022].", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Toward a Perspectivist Turn in Ground Truthing for Predictive Computing", |
|
"authors": [ |
|
{ |
|
"first": "Valerio", |
|
"middle": [], |
|
"last": "Basile", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Federico", |
|
"middle": [], |
|
"last": "Cabitza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Campagner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Fell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Valerio Basile, Federico Cabitza, Andrea Campagner, and Michael Fell. 2021. Toward a Perspectivist Turn in Ground Truthing for Predictive Computing. CoRR, abs/2109.04270.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Extensive Study on the Underlying Gender Bias in Contextualized Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Basta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marta", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Costa-Juss\u00e0", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noe", |
|
"middle": [], |
|
"last": "Casas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Neural Computing & Applications", |
|
"volume": "33", |
|
"issue": "8", |
|
"pages": "3371--3384", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christine Basta, Marta R Costa-juss\u00e0, and Noe Casas. 2020. Extensive Study on the Underlying Gender Bias in Contextualized Word Embeddings. Neural Computing & Applications, 33(8):3371-3384.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Data Statements for Natural Language Processing: Toward Mitigating System Bias and Enabling Better Science", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Bender", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Batya", |
|
"middle": [], |
|
"last": "Friedman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "587--604", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00041" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily M. Bender and Batya Friedman. 2018. Data Statements for Natural Language Processing: Toward Mitigating System Bias and Enabling Better Science. Transactions of the Association for Computational Linguistics, 6:587-604.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Bender", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timnit", |
|
"middle": [], |
|
"last": "Gebru", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angelina", |
|
"middle": [], |
|
"last": "Mcmillan-Major", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shmargaret", |
|
"middle": [], |
|
"last": "Shmitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency, FAccT '21", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "610--623", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3442188.3445922" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily M. Bender, Timnit Gebru, Angelina McMillan- Major, and Shmargaret Shmitchell. 2021. On the Dangers of Stochastic Parrots: Can Language Mod- els Be Too Big? In Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Trans- parency, FAccT '21, page 610-623, New York, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Race after technology : abolitionist tools for the new Jim code", |
|
"authors": [ |
|
{ |
|
"first": "Ruha", |
|
"middle": [], |
|
"last": "Benjamin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Polity", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruha Benjamin. 2019. Race after technology : aboli- tionist tools for the new Jim code. Polity, Cambridge, UK. 5 github.com/thegoose20/annot", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Large image datasets: A pyrrhic win for computer vision?", |
|
"authors": [ |
|
{ |
|
"first": "Abeba", |
|
"middle": [], |
|
"last": "Birhane", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Vinay Uday", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Prabhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "IEEE Winter Conference on Applications of Computer Vision (WACV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1536--1546", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abeba Birhane and Vinay Uday Prabhu. 2021. Large image datasets: A pyrrhic win for computer vision? 2021 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 1536-1546.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Singular They and the Syntactic Representation of Gender in English", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bronwyn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bjorkman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bronwyn M Bjorkman. 2017. Singular They and the Syntactic Representation of Gender in English. Glossa (London), 2(1):1.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Language (Technology) is Power: A Critical Survey of \"Bias\" in NLP", |
|
"authors": [ |
|
{ |
|
"first": "Su", |
|
"middle": [], |
|
"last": "Blodgett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Solon", |
|
"middle": [], |
|
"last": "Barocas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iii", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanna", |
|
"middle": [], |
|
"last": "Wallach", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5454--5476", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.485" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Su Blodgett, Solon Barocas, Hal Daum\u00e9 III, and Hanna Wallach. 2020. Language (Technology) is Power: A Critical Survey of \"Bias\" in NLP. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5454-5476.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Stereotyping Norwegian salmon: An inventory of pitfalls in fairness benchmark datasets", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Su Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gilsinia", |
|
"middle": [], |
|
"last": "Blodgett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Olteanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanna", |
|
"middle": [], |
|
"last": "Sim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wallach", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 59th", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.acl-long.81" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Su Lin Blodgett, Gilsinia Lopez, Alexandra Olteanu, Robert Sim, and Hanna Wallach. 2021. Stereotyping Norwegian salmon: An inventory of pitfalls in fair- ness benchmark datasets. In Proceedings of the 59th", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1004--1015", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Confer- ence on Natural Language Processing (Volume 1: Long Papers), pages 1004-1015, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Man is to Computer Programmer as Woman is to Homemaker? Debiasing Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Tolga", |
|
"middle": [], |
|
"last": "Bolukbasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Venkatesh", |
|
"middle": [], |
|
"last": "Saligrama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Kalai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 30th International Conference on Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4356--4364", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.485" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tolga Bolukbasi, Kai-Wei Chang, James Zou, Venkatesh Saligrama, and Adam Kalai. 2016. Man is to Computer Programmer as Woman is to Home- maker? Debiasing Word Embeddings. In Proceed- ings of the 30th International Conference on Neural Information Processing Systems, pages 4356-4364.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Nuanced Metrics for Measuring Unintended Bias with Real Data for Text Classification", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Borkan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucas", |
|
"middle": [], |
|
"last": "Dixon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Sorensen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nithum", |
|
"middle": [], |
|
"last": "Thain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucy", |
|
"middle": [], |
|
"last": "Vasserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "WWW '19: Companion Proceedings of The 2019 World Wide Web Conference, WWW '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "491--500", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3308560.3317593" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Borkan, Lucas Dixon, Jeffrey Sorensen, Nithum Thain, and Lucy Vasserman. 2019. Nuanced Met- rics for Measuring Unintended Bias with Real Data for Text Classification. In WWW '19: Companion Proceedings of The 2019 World Wide Web Confer- ence, WWW '19, page 491-500, New York, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Toward gender-inclusive coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trista", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iii", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4568--4595", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.418" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Trista Cao and Hal Daum\u00e9 III. 2020. Toward gender-inclusive coreference resolution. In Proceed- ings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4568-4595, On- line. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Toward gender-inclusive coreference resolution: An analysis of gender and bias throughout the machine learning lifecycle*", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trista", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iii", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Computational Linguistics", |
|
"volume": "47", |
|
"issue": "3", |
|
"pages": "615--661", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/coli_a_00413" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Trista Cao and Hal Daum\u00e9 III. 2021. Toward gender-inclusive coreference resolution: An analysis of gender and bias throughout the machine learning lifecycle*. Computational Linguistics, 47(3):615- 661.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "We Are What We Keep; We Keep What We Are': Archival Appraisal Past, Present and Future", |
|
"authors": [ |
|
{ |
|
"first": "Terry", |
|
"middle": [], |
|
"last": "Cook", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of the Society of Archivists", |
|
"volume": "32", |
|
"issue": "2", |
|
"pages": "173--189", |
|
"other_ids": { |
|
"DOI": [ |
|
"https://doi-org.ezproxy.is.ed.ac.uk/10.1080/00379816.2011.619688" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Terry Cook. 2011. 'We Are What We Keep; We Keep What We Are': Archival Appraisal Past, Present and Future. Journal of the Society of Archivists, 32(2):173-189.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Clearing the air: some thoughts on gender-neutral writing", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"Z" |
|
], |
|
"last": "Corbett", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "IEEE Transactions on Professional Communication", |
|
"volume": "33", |
|
"issue": "1", |
|
"pages": "2--6", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/47.49063" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M.Z. Corbett. 1990. Clearing the air: some thoughts on gender-neutral writing. IEEE Transactions on Professional Communication, 33(1):2-6.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Design Justice, A.I., and Escape from the Matrix of Domination", |
|
"authors": [ |
|
{ |
|
"first": "Sasha", |
|
"middle": [], |
|
"last": "Costanza-Chock", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Philip", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of Design and Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.21428/96c8d426" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sasha Costanza-Chock and Nick Philip. 2018. Design Justice, A.I., and Escape from the Matrix of Domina- tion. Journal of Design and Science.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "The Trouble with Bias", |
|
"authors": [ |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Crawford", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Neural Information Processing Systems Conference Keynote. [Online; accessed 10", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kate Crawford. 2017. The Trouble with Bias. In Neural Information Processing Systems Conference Keynote. [Online; accessed 10-July-2020].", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Collection: Papers and artwork of Yolanda Sonnabend relating to her collaboration with C.H. Waddington", |
|
"authors": [], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "CRC. 2018. Collection: Papers and artwork of Yolanda Sonnabend relating to her collaboration with C.H. Waddington. [Online; accessed 19 May 2022.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Mapping the Margins: Intersectionality, Identity Politics, and Violence against Women of Color", |
|
"authors": [ |
|
{ |
|
"first": "Kimberl\u00e9", |
|
"middle": [], |
|
"last": "Crenshaw", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "Stanford Law Review", |
|
"volume": "43", |
|
"issue": "6", |
|
"pages": "1241--1299", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.2307/1229039" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kimberl\u00e9 Crenshaw. 1991. Mapping the Margins: Inter- sectionality, Identity Politics, and Violence against Women of Color. Stanford Law Review, 43(6):1241- 1299.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Dealing with Disagreements: Looking Beyond the Majority Vote in Subjective Annotations", |
|
"authors": [ |
|
{ |
|
"first": "Aida", |
|
"middle": [ |
|
"Mostafazadeh" |
|
], |
|
"last": "Davani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "D\u00edaz", |
|
"suffix": "" |
|
}, |
|
{

"first": "Vinodkumar",

"middle": [],

"last": "Prabhakaran",

"suffix": ""

}
|
], |
|
"year": 2022, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "92--110", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aida Mostafazadeh Davani, Mark D\u00edaz, and Vinodku- mar Prabhakaran. 2022. Dealing with Disagreements: Looking Beyond the Majority Vote in Subjective An- notations. Transactions of the Association for Com- putational Linguistics, 10:92-110.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Theories of \"Gender\" in NLP Bias Research", |
|
"authors": [ |
|
{ |
|
"first": "Hannah", |
|
"middle": [], |
|
"last": "Devinney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jenny", |
|
"middle": [], |
|
"last": "Bj\u00f6rklund", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Henrik", |
|
"middle": [], |
|
"last": "Bj\u00f6rklund", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.48550/ARXIV.2205.02526" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hannah Devinney, Jenny Bj\u00f6rklund, and Henrik Bj\u00f6rk- lund. 2022. Theories of \"Gender\" in NLP Bias Re- search. Computing Research Repository.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Data Feminism. Strong ideas series", |
|
"authors": [ |
|
{ |
|
"first": "D'", |
|
"middle": [], |
|
"last": "Catherine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lauren", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Ignazio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Catherine D'Ignazio and Lauren F. Klein. 2020. Data Feminism. Strong ideas series. MIT Press, Cam- bridge, USA.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Queens are powerful too: Mitigating gender bias in dialogue generation", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Dinan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jack", |
|
"middle": [], |
|
"last": "Urbanek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8173--8188", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.656" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Dinan, Angela Fan, Adina Williams, Jack Ur- banek, Douwe Kiela, and Jason Weston. 2020a. Queens are powerful too: Mitigating gender bias in dialogue generation. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 8173-8188, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Multi-Dimensional Gender Bias Classification", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Dinan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ledell", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "314--331", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Dinan, Angela Fan, Ledell Wu, Jason Weston, Douwe Kiela, and Adina Williams. 2020b. Multi- Dimensional Gender Bias Classification. In Proceed- ings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 314-331, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Time-aware word embeddings for three Lebanese news archives", |
|
"authors": [ |
|
{ |
|
"first": "Jad", |
|
"middle": [], |
|
"last": "Doughman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fatima", |
|
"middle": [ |
|
"Abu" |
|
], |
|
"last": "Salem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shady", |
|
"middle": [], |
|
"last": "Elbassuoni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4717--4725", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jad Doughman, Fatima Abu Salem, and Shady El- bassuoni. 2020. Time-aware word embeddings for three Lebanese news archives. In Proceedings of the 12th Language Resources and Evaluation Confer- ence, pages 4717-4725, Marseille, France. European Language Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Gender bias in text: Origin, taxonomy, and implications", |
|
"authors": [ |
|
{ |
|
"first": "Jad", |
|
"middle": [], |
|
"last": "Doughman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wael", |
|
"middle": [], |
|
"last": "Khreich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maya", |
|
"middle": [ |
|
"El" |
|
], |
|
"last": "Gharib", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maha", |
|
"middle": [], |
|
"last": "Wiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zahraa", |
|
"middle": [], |
|
"last": "Berjawi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 3rd Workshop on Gender Bias in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "34--44", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.gebnlp-1.5" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jad Doughman, Wael Khreich, Maya El Gharib, Maha Wiss, and Zahraa Berjawi. 2021. Gender bias in text: Origin, taxonomy, and implications. In Proceedings of the 3rd Workshop on Gender Bias in Natural Lan- guage Processing, pages 34-44, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Ethical issues in catalogue content standards", |
|
"authors": [ |
|
{ |
|
"first": "Gordon", |
|
"middle": [], |
|
"last": "Dunsire", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Catalogue & Index", |
|
"volume": "191", |
|
"issue": "", |
|
"pages": "11--15", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gordon Dunsire. 2018. Ethical issues in catalogue con- tent standards. In Catalogue & Index, volume 191, pages 11-15.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "The Nature of Real and Perceived Bias in Chilean Media", |
|
"authors": [ |
|
{ |
|
"first": "Erick", |
|
"middle": [], |
|
"last": "Elejalde", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leo", |
|
"middle": [], |
|
"last": "Ferres", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eelco", |
|
"middle": [], |
|
"last": "Herder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 28th ACM Conference on Hypertext and Social Media, HT '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "95--104", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3078714.3078724" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erick Elejalde, Leo Ferres, and Eelco Herder. 2017. The Nature of Real and Perceived Bias in Chilean Media. In Proceedings of the 28th ACM Conference on Hy- pertext and Social Media, HT '17, page 95-104, New York, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Analysing Discourse: Textual Analysis for Social Research. Routledge", |
|
"authors": [ |
|
{ |
|
"first": "Norman", |
|
"middle": [], |
|
"last": "Fairclough", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Norman Fairclough. 2003. Analysing Discourse: Tex- tual Analysis for Social Research. Routledge, Lon- don, UK.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Programmatic approaches to bias in descriptive metadata", |
|
"authors": [ |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Geraci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Code4Lib Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Noah Geraci. 2019. Programmatic approaches to bias in descriptive metadata. In Code4Lib Conference 2019. [Online; accessed 28-May-2020].", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Are all \"research fields\" equal? Rethinking practice for the use of data from crowdsourcing market addresss", |
|
"authors": [ |
|
{ |
|
"first": "Ilka", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Gleibs", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "49", |
|
"issue": "", |
|
"pages": "1333--1342", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3758/s13428-016-0789-y" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilka H. Gleibs. 2017. Are all \"research fields\" equal? Rethinking practice for the use of data from crowd- sourcing market addresss. Behavior Research Meth- ods, 49(4):1333-1342.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Intrinsic bias metrics do not correlate with application bias", |
|
"authors": [ |
|
{ |
|
"first": "Seraphina", |
|
"middle": [], |
|
"last": "Goldfarb-Tarrant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Marchant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ricardo", |
|
"middle": [ |
|
"Mu\u00f1oz" |
|
], |
|
"last": "S\u00e1nchez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mugdha", |
|
"middle": [], |
|
"last": "Pandya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1926--1940", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.acl-long.150" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seraphina Goldfarb-Tarrant, Rebecca Marchant, Ri- cardo Mu\u00f1oz S\u00e1nchez, Mugdha Pandya, and Adam Lopez. 2021. Intrinsic bias metrics do not correlate with application bias. In Proceedings of the 59th An- nual Meeting of the Association for Computational Linguistics and the 11th International Joint Confer- ence on Natural Language Processing (Volume 1: Long Papers), pages 1926-1940, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Lipstick on a Pig: Debiasing Methods Cover up Systematic Gender Biases in Word Embeddings But do not Remove Them", |
|
"authors": [ |
|
{ |
|
"first": "Hila", |
|
"middle": [], |
|
"last": "Gonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1903.03862v2" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hila Gonen and Yoav Goldberg. 2019. Lipstick on a Pig: Debiasing Methods Cover up Systematic Gen- der Biases in Word Embeddings But do not Remove Them. NAACL 2019, arXiv:1903.03862v2.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Gov.uk. 2022. National Minimum Wage and National Living Wage rates", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gov.uk. 2022. National Minimum Wage and National Living Wage rates.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Situated Knowledges: The Science Question in Feminism and the Privilege of Partial Perspective", |
|
"authors": [ |
|
{ |
|
"first": "Donna", |
|
"middle": [], |
|
"last": "Haraway", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1988, |
|
"venue": "Feminist Studies", |
|
"volume": "14", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.2307/3178066" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Donna Haraway. 1988. Situated Knowledges: The Sci- ence Question in Feminism and the Privilege of Par- tial Perspective. Feminist Studies, 14(3):575.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Strong objectivity\": A response to the new objectivity question", |
|
"authors": [ |
|
{ |
|
"first": "Sandra", |
|
"middle": [], |
|
"last": "Harding", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Synthese", |
|
"volume": "104", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/BF01064504" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sandra Harding. 1995. \"Strong objectivity\": A response to the new objectivity question. Synthese, 104(3).", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Situated data, situated systems: A methodology to engage with power relations in natural language processing research", |
|
"authors": [ |
|
{ |
|
"first": "Lucy", |
|
"middle": [], |
|
"last": "Havens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melissa", |
|
"middle": [], |
|
"last": "Terras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Bach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beatrice", |
|
"middle": [], |
|
"last": "Alex", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Second Workshop on Gender Bias in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "107--124", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lucy Havens, Melissa Terras, Benjamin Bach, and Beat- rice Alex. 2020. Situated data, situated systems: A methodology to engage with power relations in natu- ral language processing research. In Proceedings of the Second Workshop on Gender Bias in Natural Lan- guage Processing, pages 107-124, Barcelona, Spain (Online). Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Proposed Taxonomy for Gender Bias in Text; A Filtering Methodology for the Gender Generalization Subtype", |
|
"authors": [ |
|
{ |
|
"first": "Yasmeen", |
|
"middle": [], |
|
"last": "Hitti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eunbee", |
|
"middle": [], |
|
"last": "Jang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ines", |
|
"middle": [], |
|
"last": "Moreno", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carolyne", |
|
"middle": [], |
|
"last": "Pelletier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the First Workshop on Gender Bias in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8--17", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-3802" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yasmeen Hitti, Eunbee Jang, Ines Moreno, and Car- olyne Pelletier. 2019. Proposed Taxonomy for Gen- der Bias in Text; A Filtering Methodology for the Gender Generalization Subtype. In Proceedings of the First Workshop on Gender Bias in Natural Lan- guage Processing, pages 8-17, Florence, IT. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Neural Based Statement Classification for Biased Language", |
|
"authors": [ |
|
{ |
|
"first": "Christoph", |
|
"middle": [], |
|
"last": "Hube", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Besnik", |
|
"middle": [], |
|
"last": "Fetahu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Twelfth ACM International Conference on Web Search and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "195--203", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3289600.3291018" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christoph Hube and Besnik Fetahu. 2019. Neural Based Statement Classification for Biased Language. In Proceedings of the Twelfth ACM International Con- ference on Web Search and Data Mining, pages 195- 203, Melbourne, AU. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Social biases in NLP models as barriers for persons with disabilities", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Hutchinson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vinodkumar", |
|
"middle": [], |
|
"last": "Prabhakaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Denton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kellie", |
|
"middle": [], |
|
"last": "Webster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Denuyl", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.487" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ben Hutchinson, Vinodkumar Prabhakaran, Emily Den- ton, Kellie Webster, Yu Zhong, and Stephen Denuyl. 2020. Social biases in NLP models as barriers for persons with disabilities. In Proceedings of the 58th", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Annual Meeting of the Association for Computational Linguistics", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5491--5501", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annual Meeting of the Association for Computational Linguistics, pages 5491-5501, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Towards Accountability for Machine Learning Datasets: Practices from Software Engineering and Infrastructure", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Hutchinson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Smart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Hanna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Denton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christina", |
|
"middle": [], |
|
"last": "Greer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oddur", |
|
"middle": [], |
|
"last": "Kjartansson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Parker", |
|
"middle": [], |
|
"last": "Barnes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency, FAccT '21", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "560--575", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3442188.3445918" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ben Hutchinson, Andrew Smart, Alex Hanna, Emily Denton, Christina Greer, Oddur Kjartansson, Parker Barnes, and Margaret Mitchell. 2021. Towards Ac- countability for Machine Learning Datasets: Prac- tices from Software Engineering and Infrastructure. In Proceedings of the 2021 ACM Conference on Fair- ness, Accountability, and Transparency, FAccT '21, page 560-575, New York, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "General International Standard Archival Description", |
|
"authors": [], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "ICA. 2011. ISAD(G): General International Standard Archival Description -Second edition.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Interdependencies of gender and race in contextualized word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "May", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christiane", |
|
"middle": [], |
|
"last": "Fellbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Second Workshop on Gender Bias in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "17--25", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "May Jiang and Christiane Fellbaum. 2020. Interdepen- dencies of gender and race in contextualized word embeddings. In Proceedings of the Second Workshop on Gender Bias in Natural Language Processing, pages 17-25, Barcelona, Spain (Online). Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Lessons from Archives: Strategies for Collecting Sociocultural Data in Machine Learning", |
|
"authors": [ |
|
{ |
|
"first": "Eun", |
|
"middle": [], |
|
"last": "Seo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jo", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timnit", |
|
"middle": [], |
|
"last": "Gebru", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency, FAT* '20", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "306--316", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3351095.3372829" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eun Seo Jo and Timnit Gebru. 2020. Lessons from Archives: Strategies for Collecting Sociocultural Data in Machine Learning. In Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency, FAT* '20, page 306-316, New York, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "The Misgendering Machines", |
|
"authors": [ |
|
{ |
|
"first": "Os", |
|
"middle": [], |
|
"last": "Keyes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3274357" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Os Keyes. 2018. The Misgendering Machines:", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Trans/HCI Implications of Automatic Gender Recognition", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Proceedings of the ACM on Human-Computer Interaction", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3274357" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Trans/HCI Implications of Automatic Gender Recog- nition. Proceedings of the ACM on Human- Computer Interaction, 2(CSCW).", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Measuring Bias in Contextualized Word Representations. CoRR", |
|
"authors": [ |
|
{ |
|
"first": "Keita", |
|
"middle": [], |
|
"last": "Kurita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nidhi", |
|
"middle": [], |
|
"last": "Vyas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ayush", |
|
"middle": [], |
|
"last": "Pareek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Black", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keita Kurita, Nidhi Vyas, Ayush Pareek, Alan W Black, and Yulia Tsvetkov. 2019. Measuring Bias in Contextualized Word Representations. CoRR, abs/1906.07337.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Language and Woman's Place. Harper & Row", |
|
"authors": [ |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Lakoff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1989, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robin Lakoff. 1989. Language and Woman's Place. Harper & Row, New York, USA.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Gender Bias in Artificial Intelligence: The Need for Diversity and Gender Theory in Machine Learning", |
|
"authors": [ |
|
{ |
|
"first": "Susan", |
|
"middle": [], |
|
"last": "Leavy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 1st International Workshop on Gender Equality in Software Engineering, GE '18", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "14--16", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3195570.3195580" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Susan Leavy. 2018. Gender Bias in Artificial Intelli- gence: The Need for Diversity and Gender Theory in Machine Learning. In Proceedings of the 1st Inter- national Workshop on Gender Equality in Software Engineering, GE '18, page 14-16, New York, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "Archer Pechawis, and Suzanne Kite", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [ |
|
"Edward" |
|
], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Philip", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noelani", |
|
"middle": [], |
|
"last": "Arista", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of Design and Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.21428/bfafd97b" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Edward Lewis, Nick Philip, Noelani Arista, Archer Pechawis, and Suzanne Kite. 2018. Mak- ing Kin with the Machines. Journal of Design and Science.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "NLTK: The Natural Language Toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Loper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the ACL-02 Workshop on Effective Tools and Methodologies for Teaching Natural Language Processing and Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "63--70", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1118108.1118117" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edward Loper and Steven Bird. 2002. NLTK: The Natu- ral Language Toolkit. In Proceedings of the ACL-02 Workshop on Effective Tools and Methodologies for Teaching Natural Language Processing and Com- putational Linguistics -Volume 1, ETMTNLP '02, pages 63-70, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "11 Case studies", |
|
"authors": [ |
|
{ |
|
"first": "Bella", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bruce", |
|
"middle": [], |
|
"last": "Hanington", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Universal Methods of Design: 100 Ways to Research Complex Problems, Develop Innovative Ideas, and Design Effective Solutions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bella Martin and Bruce Hanington. 2012. 11 Case studies. In Universal Methods of Design: 100 Ways to Research Complex Problems, Develop Innovative Ideas, and Design Effective Solutions, Beverly, USA. Rockport Publishers.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "When Your Only Tool Is A Hammer: Ethical Limitations of Algorithmic Fairness Solutions in Healthcare Machine Learning", |
|
"authors": [ |
|
{ |
|
"first": "Melissa", |
|
"middle": [], |
|
"last": "Mccradden", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mjaye", |
|
"middle": [], |
|
"last": "Mazwi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shalmali", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Anderson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3375627.3375824" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Melissa McCradden, Mjaye Mazwi, Shalmali Joshi, and James A. Anderson. 2020. When Your Only Tool Is A Hammer: Ethical Limitations of Algorithmic Fairness Solutions in Healthcare Machine Learning, page 109. Association for Computing Machinery, New York, USA.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "Model Cards for Model Reporting", |
|
"authors": [ |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Zaldivar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Parker", |
|
"middle": [], |
|
"last": "Barnes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucy", |
|
"middle": [], |
|
"last": "Vasserman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Hutchinson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Spitzer", |
|
"suffix": "" |
|
}, |
|
{

"first": "Inioluwa Deborah",

"middle": [],

"last": "Raji",

"suffix": ""

},

{

"first": "Timnit",

"middle": [],

"last": "Gebru",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Conference on Fairness, Accountability, and Transparency -FAT * '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3287560.3287596" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji, and Timnit Gebru. 2019. Model Cards for Model Reporting. Proceedings of the Conference on Fairness, Account- ability, and Transparency -FAT * '19.", |
|
"links": null |
|
}, |
|
"BIBREF58": { |
|
"ref_id": "b58", |
|
"title": "Algorithms of Oppression: How Search Engines Reinforce Racism", |
|
"authors": [ |
|
{ |
|
"first": "Noble", |
|
"middle": [], |
|
"last": "Safiya Umoja", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Safiya Umoja Noble. 2018. Algorithms of Oppression: How Search Engines Reinforce Racism. New York University Press, New York, USA.", |
|
"links": null |
|
}, |
|
"BIBREF59": { |
|
"ref_id": "b59", |
|
"title": "Text mining Mill: Computationally detecting influence in the writings of John Stuart Mill from library records", |
|
"authors": [ |
|
{ |
|
"first": "O'", |
|
"middle": [], |
|
"last": "Helen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anne", |
|
"middle": [], |
|
"last": "Neill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Welsh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "David", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Glenn", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melissa", |
|
"middle": [], |
|
"last": "Roe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Terras", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Digital Scholarship in the Humanities", |
|
"volume": "36", |
|
"issue": "4", |
|
"pages": "1013--1029", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Helen O'Neill, Anne Welsh, David A Smith, Glenn Roe, and Melissa Terras. 2021. Text mining Mill: Computationally detecting influence in the writings of John Stuart Mill from library records. Digital Scholarship in the Humanities, 36(4):1013-1029.", |
|
"links": null |
|
}, |
|
"BIBREF60": { |
|
"ref_id": "b60", |
|
"title": "On a Collections as Data Imperative", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Padilla", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Padilla. 2017. On a Collections as Data Im- perative. UC Santa Barbara Previously Published Works.", |
|
"links": null |
|
}, |
|
"BIBREF61": { |
|
"ref_id": "b61", |
|
"title": "Responsible Operations: Data Science", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Padilla", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Machine Learning, and AI in Libraries. OCLC Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.25333/xk7z-9g97" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Padilla. 2019. Responsible Operations: Data Science, Machine Learning, and AI in Libraries. OCLC Research.", |
|
"links": null |
|
}, |
|
"BIBREF62": { |
|
"ref_id": "b62", |
|
"title": "Invisible Women: Exposing Data Bias in a World Designed for Men", |
|
"authors": [ |
|
{ |
|
"first": "Caroline Criado", |
|
"middle": [], |
|
"last": "Perez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Caroline Criado Perez. 2019. Invisible Women: Expos- ing Data Bias in a World Designed for Men. Vintage, London, UK.", |
|
"links": null |
|
}, |
|
"BIBREF63": { |
|
"ref_id": "b63", |
|
"title": "Continuing the Journey: Articulating Dimensions of Feminist Participatory Action Research (FPAR)", |
|
"authors": [ |
|
{ |
|
"first": "Colleen", |
|
"middle": [], |
|
"last": "Reid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wendy", |
|
"middle": [], |
|
"last": "Frisby", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "The SAGE Handbook of Action Research", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "93--105", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.4135/9781848607934.n12" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colleen Reid and Wendy Frisby. 2008. 6 Continuing the Journey: Articulating Dimensions of Feminist Participatory Action Research (FPAR). In The SAGE Handbook of Action Research, pages 93-105. SAGE Publications Ltd.", |
|
"links": null |
|
}, |
|
"BIBREF64": { |
|
"ref_id": "b64", |
|
"title": "The Origin and Value of Disagreement Among Data Labelers: A Case Study of Individual Differences in Hate Speech Annotation", |
|
"authors": [ |
|
{ |
|
"first": "Yisi", |
|
"middle": [], |
|
"last": "Sang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Stanton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Information for a Better World: Shaping the Global Future", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "425--444", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yisi Sang and Jeffrey Stanton. 2022. The Origin and Value of Disagreement Among Data Labelers: A Case Study of Individual Differences in Hate Speech Annotation. In Information for a Better World: Shap- ing the Global Future, Lecture Notes in Computer Science, pages 425-444. Springer International Pub- lishing, Cham.", |
|
"links": null |
|
}, |
|
"BIBREF65": { |
|
"ref_id": "b65", |
|
"title": "The risk of racial bias in hate speech detection", |
|
"authors": [ |
|
{ |
|
"first": "Maarten", |
|
"middle": [], |
|
"last": "Sap", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dallas", |
|
"middle": [], |
|
"last": "Card", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saadia", |
|
"middle": [], |
|
"last": "Gabriel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1163" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maarten Sap, Dallas Card, Saadia Gabriel, Yejin Choi, and Noah A. Smith. 2019. The risk of racial bias in hate speech detection. In Proceedings of the 57th", |
|
"links": null |
|
}, |
|
"BIBREF66": { |
|
"ref_id": "b66", |
|
"title": "Annual Meeting of the Association for Computational Linguistics", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1668--1678", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annual Meeting of the Association for Computational Linguistics, pages 1668-1678, Florence, Italy. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF67": { |
|
"ref_id": "b67", |
|
"title": "HCI Guidelines for Gender Equity and Inclusion: Misgendering", |
|
"authors": [ |
|
{ |
|
"first": "Katta", |
|
"middle": [], |
|
"last": "Morgan Klaus Scheuerman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oliver", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Spiel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Foad", |
|
"middle": [], |
|
"last": "Haimson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stacy", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Hamidi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Branham", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Morgan Klaus Scheuerman, Katta Spiel, Oliver L. Haim- son, Foad Hamidi, and Stacy M. Branham. 2020. HCI Guidelines for Gender Equity and Inclusion: Misgendering.", |
|
"links": null |
|
}, |
|
"BIBREF68": { |
|
"ref_id": "b68", |
|
"title": "The Semantic Derogation of Women", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Muriel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Schulz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "The Routledge language and cultural theory reader. Routledge", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Muriel R. Schulz. 2000. The Semantic Derogation of Women. In Lucy Burke, Tony Crowley, and Alan Girvin, editors, The Routledge language and cultural theory reader. Routledge, London, UK.", |
|
"links": null |
|
}, |
|
"BIBREF69": { |
|
"ref_id": "b69", |
|
"title": "A Practical Guide to Searching LGBTQIA Historical Records", |
|
"authors": [ |
|
{ |
|
"first": "Norena", |
|
"middle": [], |
|
"last": "Shopland", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.4324/9781003006787" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Norena Shopland. 2020. A Practical Guide to Search- ing LGBTQIA Historical Records. Taylor & Francis Group, Milton.", |
|
"links": null |
|
}, |
|
"BIBREF70": { |
|
"ref_id": "b70", |
|
"title": "Uses of Heritage", |
|
"authors": [ |
|
{ |
|
"first": "Laurajane", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Routledge", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laurajane Smith. 2006. Uses of Heritage. Routledge, London, UK.", |
|
"links": null |
|
}, |
|
"BIBREF71": { |
|
"ref_id": "b71", |
|
"title": "Language and reality: Who made the world?", |
|
"authors": [ |
|
{ |
|
"first": "Dale", |
|
"middle": [], |
|
"last": "Spencer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1980, |
|
"venue": "The Routledge language and cultural theory reader. Routledge", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dale Spencer. 2000. Language and reality: Who made the world? (1980). In Lucy Burke, Tony Crowley, and Alan Girvin, editors, The Routledge language and cultural theory reader. Routledge, London, UK.", |
|
"links": null |
|
}, |
|
"BIBREF72": { |
|
"ref_id": "b72", |
|
"title": "A survey on gender bias in natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Karolina", |
|
"middle": [], |
|
"last": "Sta\u0144czak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabelle", |
|
"middle": [], |
|
"last": "Augenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karolina Sta\u0144czak and Isabelle Augenstein. 2021. A survey on gender bias in natural language processing. CoRR, abs/2112.14168.", |
|
"links": null |
|
}, |
|
"BIBREF73": { |
|
"ref_id": "b73", |
|
"title": "brat: a Web-based Tool for NLP-Assisted Text Annotation", |
|
"authors": [ |
|
{ |
|
"first": "Pontus", |
|
"middle": [], |
|
"last": "Stenetorp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Topi\u0107", |
|
"suffix": "" |
|
}, |
|
{

"first": "Tomoko",

"middle": [],

"last": "Ohta",

"suffix": ""

},

{

"first": "Sophia",

"middle": [],

"last": "Ananiadou",

"suffix": ""

},
|
{ |
|
"first": "Jun'ichi", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Demonstrations Session at EACL 2012. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pontus Stenetorp, Sampo Pyysalo, Goran Topi\u0107, Sophia Ananiadou Tomoko Ohta, and Jun'ichi Tsujii. 2012. brat: a Web-based Tool for NLP-Assisted Text Annotation. In Proceedings of the Demonstrations Session at EACL 2012. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF74": { |
|
"ref_id": "b74", |
|
"title": "2 Participatory Action Research as Practice", |
|
"authors": [ |
|
{ |
|
"first": "Marja Liisa", |
|
"middle": [], |
|
"last": "Swantz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "The SAGE Handbook of Action Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "31--48", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.4135/9781848607934.n8" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marja Liisa Swantz. 2008. 2 Participatory Action Re- search as Practice. In The SAGE Handbook of Action Research, pages 31-48. SAGE Publications Ltd.", |
|
"links": null |
|
}, |
|
"BIBREF75": { |
|
"ref_id": "b75", |
|
"title": "A transparent framework for evaluating unintended demographic bias in word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Sweeney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maryam", |
|
"middle": [], |
|
"last": "Najafian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1662--1667", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Sweeney and Maryam Najafian. 2019. A trans- parent framework for evaluating unintended demo- graphic bias in word embeddings. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 1662-1667, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF76": { |
|
"ref_id": "b76", |
|
"title": "Discrimination in online ad delivery", |
|
"authors": [ |
|
{ |
|
"first": "Latanya", |
|
"middle": [], |
|
"last": "Sweeney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Communications of the ACM", |
|
"volume": "56", |
|
"issue": "5", |
|
"pages": "44--54", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2447976.2447990" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Latanya Sweeney. 2013. Discrimination in online ad delivery. Communications of the ACM, 56(5):44-54.", |
|
"links": null |
|
}, |
|
"BIBREF77": { |
|
"ref_id": "b77", |
|
"title": "Assessing Social and Intersectional Biases in Contextualized Word Representations", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Chern Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"Elisa" |
|
], |
|
"last": "Celis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Chern Tan and L. Elisa Celis. 2019. Assessing Social and Intersectional Biases in Contextualized Word Representations. CoRR, abs/1911.01485.", |
|
"links": null |
|
}, |
|
"BIBREF78": { |
|
"ref_id": "b78", |
|
"title": "The World as Archive", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Tanselle", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Common Knowledge", |
|
"volume": "8", |
|
"issue": "2", |
|
"pages": "402--406", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Thomas Tanselle. 2002. The World as Archive. Com- mon Knowledge, 8(2):402-406.", |
|
"links": null |
|
}, |
|
"BIBREF80": { |
|
"ref_id": "b80", |
|
"title": "The dark side of gendered language: The masculine-generic form as a cause for self-report bias", |
|
"authors": [ |
|
{ |
|
"first": "Sigal", |
|
"middle": [], |
|
"last": "Vainapel", |
|
"suffix": "" |
|
}, |
|
{

"first": "Opher",

"middle": [

"Y"

],

"last": "Shamir",

"suffix": ""

},

{

"first": "Yulie",

"middle": [],

"last": "Tenenbaum",

"suffix": ""

},

{

"first": "Gadi",

"middle": [],

"last": "Gilam",

"suffix": ""

}
|
], |
|
"year": 2015, |
|
"venue": "Psychological Assessment", |
|
"volume": "27", |
|
"issue": "4", |
|
"pages": "1513--1519", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1037/pas0000156" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sigal Vainapel, Opher Y. Shamir, Yulie Tenenbaum, and Gadi Gilam. 2015. The dark side of gen- dered language: The masculine-generic form as a cause for self-report bias. Psychological Assessment, 27(4):1513-1519.", |
|
"links": null |
|
}, |
|
"BIBREF81": { |
|
"ref_id": "b81", |
|
"title": "Mind the GAP: A Balanced Corpus of Gendered Ambiguous Pronouns", |
|
"authors": [ |
|
{ |
|
"first": "Kellie", |
|
"middle": [], |
|
"last": "Webster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marta", |
|
"middle": [], |
|
"last": "Recasens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vera", |
|
"middle": [], |
|
"last": "Axelrod", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Baldridge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Computing Research Repository", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.05201" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kellie Webster, Marta Recasens, Vera Axelrod, and Ja- son Baldridge. 2018. Mind the GAP: A Balanced Corpus of Gendered Ambiguous Pronouns. Comput- ing Research Repository, arXiv:1810.05201.", |
|
"links": null |
|
}, |
|
"BIBREF82": { |
|
"ref_id": "b82", |
|
"title": "The Rare Books Catalog and the Scholarly Database. Cataloging & Classification Quarterly", |
|
"authors": [ |
|
{ |
|
"first": "Anne", |
|
"middle": [], |
|
"last": "Welsh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "54", |
|
"issue": "", |
|
"pages": "317--337", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1080/01639374.2016.1188433" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anne Welsh. 2016. The Rare Books Catalog and the Scholarly Database. Cataloging & Classification Quarterly, 54(5-6):317-337.", |
|
"links": null |
|
}, |
|
"BIBREF83": { |
|
"ref_id": "b83", |
|
"title": "Metrology for AI: From Benchmarks to Instruments. CoRR", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Welty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Praveen", |
|
"middle": [], |
|
"last": "Paritosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lora", |
|
"middle": [], |
|
"last": "Aroyo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Welty, Praveen Paritosh, and Lora Aroyo. 2019. Metrology for AI: From Benchmarks to Instruments. CoRR, abs/1911.01875.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Total Annotations Per Label in the Aggregated Dataset. The stacked bar chart groups annotation labels into bars by category. Across all three categories, there are 55,260 annotations in the aggregated dataset. Non-binary (a Person Name label) and Empowering (a Contextual label) both have a count of zero.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"text": "Total Annotations Per Label in the Aggregated Dataset. The stacked bar chart groups annotation labels into bars by category. Across all three categories, there are 55,260 annotations in the aggregated dataset. Non-binary (a Person Name label) and Empowering (a Contextual label) both have a count of zero.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"text": "Total Annotations Per Label in the Aggregated Dataset. The stacked bar chart groups annotation labels into bars by category. Across all three categories, there are 55,260 annotations in the aggregated dataset. Non-binary (a Person Name label) and Empowering (a Contextual label) both have a count of zero.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"text": "An example of GLAM documentation from the archival catalog of the Centre for Research Collections at the University of Edinburgh (2018). Metadata field names bolded in blue and their descriptions, regular, black text. The 'Title' field, however, is bolded in blue at the top of the page (\"Papers and artwork of...\").", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF4": { |
|
"text": "An example of a \"Biographical / Historical\" metadata field's description annotated with all labels from the taxonomy in the online annotation platform brat(Stenetorp et al., 2012).", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF6": { |
|
"text": "Total Annotations Per Annotator in the Aggregated Dataset. The bar chart displays the total annotations from each annotator included in the aggregated dataset, with colors indicating the category of labels each annotator used. For annotations that matched or overlapped, only one was added to the aggregated dataset, so the total number of annotations in the aggregated dataset (55,260) is 21,283 less than the sum of the annotators' annotations in this chart (76,543).", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"text": "Total counts, words and sentences for descriptive metadata fields in the aggregated dataset. Calculations were made using Punkt tokenizers in the Natural Language Toolkit Python library", |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"text": "the University of Edinburgh's Edinburgh Futures Institute, Centre for Data, Culture &Society, Institute for Language, Cognition and Computation, and School of Informatics; and the UK's Engineering and Physical Sciences Research Council. Additional thanks go to the organizers of the Fourth Workshop on Gender Bias in Natural Language Processing, for the opportunity to submit this paper, and to the reviewers who gave feedback on this paper. Elizabeth Yale. 2015. The History of Archives: The State of the Discipline. Book History, 18(1):332-359. Jieyu Zhao, Tianlu Wang, Mark Yatskar, Ryan Cotterell, Vicente Ordonez, and Kai-Wei Chang. 2019. Gender bias in contextualized word embeddings. In Proceedings of the 2019 Conference of the North American", |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td>Chapter of the Association for Computational Lin-guistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 629-634, Min-neapolis, Minnesota. Association for Computational Linguistics.</td></tr><tr><td>Jieyu Zhao, Tianlu Wang, Mark Yatskar, Vicente Or-donez, and Kai-Wei Chang. 2018. Gender Bias in Coreference Resolution: Evaluation and Debiasing Methods. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 15-20, New Orleans, USA. Association for Computational Linguistics.</td></tr></table>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |
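
The TABREF2 caption notes that word and sentence totals were computed with Punkt tokenizers from the Natural Language Toolkit (NLTK) Python library. The following is a minimal illustrative sketch of that kind of calculation, not the authors' own code: the descriptions list is an invented placeholder standing in for archival metadata field text, and the sketch assumes NLTK and its Punkt models are installed.

import nltk

# Fetch the Punkt sentence tokenizer models (no-op if already present).
# Note: newer NLTK releases may name this resource "punkt_tab".
nltk.download("punkt", quiet=True)

# Placeholder descriptions standing in for descriptive metadata field text.
descriptions = [
    "Papers and artwork of an Edinburgh collector.",
    "Correspondence relating to the estate. She managed the household accounts.",
]

# nltk.sent_tokenize uses the Punkt model to split sentences;
# nltk.word_tokenize splits each Punkt-detected sentence into word tokens.
total_words = sum(len(nltk.word_tokenize(text)) for text in descriptions)
total_sentences = sum(len(nltk.sent_tokenize(text)) for text in descriptions)

print("descriptions:", len(descriptions))
print("words:", total_words)
print("sentences:", total_sentences)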