|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:02:23.447163Z" |
|
}, |
|
"title": "An Empirical Study on the Fairness of Pre-trained Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Emeralda", |
|
"middle": [], |
|
"last": "Sesari", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University College London", |
|
"location": {} |
|
}, |
|
"email": "emeralda.sesari.20@alumni.ucl.ac.uk" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Hort", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University College London", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Federica", |
|
"middle": [], |
|
"last": "Sarro", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University College London", |
|
"location": {} |
|
}, |
|
"email": "f.sarro@ucl.ac.uk" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Pre-trained word embedding models are easily distributed and applied, as they alleviate users from the effort to train models themselves. With widely distributed models, it is important to ensure that they do not exhibit undesired behaviour, such as biases against population groups. For this purpose, we carry out an empirical study on evaluating the bias of 15 publicly available, pre-trained word embeddings model based on three training algorithms (GloVe, word2vec, and fastText) with regard to four bias metrics (WEAT, SEMBIAS, DIRECT BIAS, and ECT). The choice of word embedding models and bias metrics is motivated by a literature survey over 37 publications which quantified bias on pre-trained word embeddings. Our results indicate that fastText is the least biased model (in 8 out of 12 cases) and small vector lengths lead to a higher bias.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Pre-trained word embedding models are easily distributed and applied, as they alleviate users from the effort to train models themselves. With widely distributed models, it is important to ensure that they do not exhibit undesired behaviour, such as biases against population groups. For this purpose, we carry out an empirical study on evaluating the bias of 15 publicly available, pre-trained word embeddings model based on three training algorithms (GloVe, word2vec, and fastText) with regard to four bias metrics (WEAT, SEMBIAS, DIRECT BIAS, and ECT). The choice of word embedding models and bias metrics is motivated by a literature survey over 37 publications which quantified bias on pre-trained word embeddings. Our results indicate that fastText is the least biased model (in 8 out of 12 cases) and small vector lengths lead to a higher bias.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Word embeddings are a powerful tool and are applied in variety of Natural Language Processing tasks, such as text classification (Aydogan and Karci, 2020; Alwehaibi and Roy, 2018; Jo and Cinarel, 2019; Bailey and Chopra, 2018; Rescigno et al., 2020) and sentiment analysis (Araque et al., 2017; Rezaeinia et al., 2019; Fu et al., 2017; Ren et al., 2016; Tang et al., 2014) . However, analogies such as \"Man is to computer programmer as woman is to homemaker\" (Bolukbasi et al., 2016a) contain worrisome biases that are present in society and hence embedded in language. In recent years, numerous studies have attempted to examine the fairness of word embeddings by proposing different bias metrics (Caliskan et al., 2016; Garg et al., 2018; Sweeney and Najafian, 2019; Manzini et al., 2019; , and comparing them (Badilla et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 154, |
|
"text": "(Aydogan and Karci, 2020;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 179, |
|
"text": "Alwehaibi and Roy, 2018;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 201, |
|
"text": "Jo and Cinarel, 2019;", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 202, |
|
"end": 226, |
|
"text": "Bailey and Chopra, 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 227, |
|
"end": 249, |
|
"text": "Rescigno et al., 2020)", |
|
"ref_id": "BIBREF75" |
|
}, |
|
{ |
|
"start": 273, |
|
"end": 294, |
|
"text": "(Araque et al., 2017;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 295, |
|
"end": 318, |
|
"text": "Rezaeinia et al., 2019;", |
|
"ref_id": "BIBREF76" |
|
}, |
|
{ |
|
"start": 319, |
|
"end": 335, |
|
"text": "Fu et al., 2017;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 336, |
|
"end": 353, |
|
"text": "Ren et al., 2016;", |
|
"ref_id": "BIBREF74" |
|
}, |
|
{ |
|
"start": 354, |
|
"end": 372, |
|
"text": "Tang et al., 2014)", |
|
"ref_id": "BIBREF86" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 484, |
|
"text": "(Bolukbasi et al., 2016a)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 698, |
|
"end": 721, |
|
"text": "(Caliskan et al., 2016;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 722, |
|
"end": 740, |
|
"text": "Garg et al., 2018;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 741, |
|
"end": 768, |
|
"text": "Sweeney and Najafian, 2019;", |
|
"ref_id": "BIBREF83" |
|
}, |
|
{ |
|
"start": 769, |
|
"end": 790, |
|
"text": "Manzini et al., 2019;", |
|
"ref_id": "BIBREF64" |
|
}, |
|
{ |
|
"start": 812, |
|
"end": 834, |
|
"text": "(Badilla et al., 2020)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The quality of word embedding models differs depending on the task and training corpus used.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Due to the relatively expensive costs, constructing large-scale labelled datasets is a huge barrier for NLP applications, notably for syntax and semantically related tasks (Qiu et al., 2020) . Recent research has shown that by using pre-trained word embedding models, trained on a large corpus, considerable performance gains on various NLP tasks can be achieved (Qiu et al., 2020; Erhan et al., 2010) . A number of studies (Mikolov et al., 2013; Pennington et al., 2014; Bojanowski et al., 2017) have published these embeddings learned from large text corpora which are versatile enough to be used in a variety of NLP tasks (Li and Yang, 2018) . Despite their widespread use, many researchers use word embeddings without performing an indepth study on their characteristics; instead, they utilised default settings that come with ready-made word embedding toolkits (Patel and Bhattacharyya, 2017) . On top of that, these pre-trained models are susceptible to inheriting stereotyped social biases (e.g., ethnicity, gender and religion) from the text corpus they are trained on (Caliskan, 2017; Garg et al., 2018; Vidgen et al., 2021) and the researchers building these models (Field et al., 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 190, |
|
"text": "(Qiu et al., 2020)", |
|
"ref_id": "BIBREF73" |
|
}, |
|
{ |
|
"start": 363, |
|
"end": 381, |
|
"text": "(Qiu et al., 2020;", |
|
"ref_id": "BIBREF73" |
|
}, |
|
{ |
|
"start": 382, |
|
"end": 401, |
|
"text": "Erhan et al., 2010)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 424, |
|
"end": 446, |
|
"text": "(Mikolov et al., 2013;", |
|
"ref_id": "BIBREF67" |
|
}, |
|
{ |
|
"start": 447, |
|
"end": 471, |
|
"text": "Pennington et al., 2014;", |
|
"ref_id": "BIBREF70" |
|
}, |
|
{ |
|
"start": 472, |
|
"end": 496, |
|
"text": "Bojanowski et al., 2017)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 625, |
|
"end": 644, |
|
"text": "(Li and Yang, 2018)", |
|
"ref_id": "BIBREF63" |
|
}, |
|
{ |
|
"start": 866, |
|
"end": 897, |
|
"text": "(Patel and Bhattacharyya, 2017)", |
|
"ref_id": "BIBREF69" |
|
}, |
|
{ |
|
"start": 1077, |
|
"end": 1093, |
|
"text": "(Caliskan, 2017;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1094, |
|
"end": 1112, |
|
"text": "Garg et al., 2018;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1113, |
|
"end": 1133, |
|
"text": "Vidgen et al., 2021)", |
|
"ref_id": "BIBREF88" |
|
}, |
|
{ |
|
"start": 1176, |
|
"end": 1196, |
|
"text": "(Field et al., 2021)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Moreover, word embedding models are sensitive to a number of parameters, including corpus size, seeds for random number generation, vector dimensions, etc. (Borah et al., 2021) . According to Levy et al. (2015) changes in parameters, are responsible for the majority of empirical differences between embedding models. As a result, there has been an increasing interest among researchers to investigate the impact of parameters on word embedding model properties (e.g., consistency, stability, variety, and reliability) (Borah et al., 2021; Chugh et al., 2018; Dridi et al., 2018; Hellrich and Hahn, 2016; Pierrejean and Tanguy, 2018; Wendlandt et al., 2018; Antoniak and Mimno, 2018) . However, much uncertainty still exists about the relation between word embedding parameters and its fairness. With the in-depth investigation of fair-ness, we hope that this research will lead to a more directed and fairness-aware usage of pre-trained word embeddings. Therefore, this study investigates the performance of pre-trained word embedding models with respect to multiple bias metrics. Furthermore, the impact of each pre-trained word embedding model's vector length on the model's fairness is explored. We investigate 15 different scenarios in total as a combination of model, training corpus, and parameter settings. We make the scripts used to determine the fairness of pre-trained word embedding models publicly available. 1 Bias statement. Word embeddings are used to group words with similar meanings (i.e., generalise notions from language) (Goldberg and Hirst, 2017) . However, word embedding models are prone to inherit social biases from the corpus they are trained upon. The fundamental concern is that training a system on unbalanced data may lead to people using these systems to develop inaccurate, intrinsic word associations, thus propagating biases (Costa-juss\u00e0 and de Jorge, 2020). For example, stereotypes such as man : woman :: computer programmer : homemaker in word2vec trained on news text can be found (Bolukbasi et al., 2016a) . If such an embedding is used in an algorithm as part of its search for prospective programmers, documents with women's names may be wrongly downweighted (Jurafsky and Martin, 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 156, |
|
"end": 176, |
|
"text": "(Borah et al., 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 192, |
|
"end": 210, |
|
"text": "Levy et al. (2015)", |
|
"ref_id": "BIBREF62" |
|
}, |
|
{ |
|
"start": 519, |
|
"end": 539, |
|
"text": "(Borah et al., 2021;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 540, |
|
"end": 559, |
|
"text": "Chugh et al., 2018;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 560, |
|
"end": 579, |
|
"text": "Dridi et al., 2018;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 580, |
|
"end": 604, |
|
"text": "Hellrich and Hahn, 2016;", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 605, |
|
"end": 633, |
|
"text": "Pierrejean and Tanguy, 2018;", |
|
"ref_id": "BIBREF71" |
|
}, |
|
{ |
|
"start": 634, |
|
"end": 657, |
|
"text": "Wendlandt et al., 2018;", |
|
"ref_id": "BIBREF92" |
|
}, |
|
{ |
|
"start": 658, |
|
"end": 683, |
|
"text": "Antoniak and Mimno, 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1423, |
|
"end": 1424, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1544, |
|
"end": 1570, |
|
"text": "(Goldberg and Hirst, 2017)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 2022, |
|
"end": 2047, |
|
"text": "(Bolukbasi et al., 2016a)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 2203, |
|
"end": 2230, |
|
"text": "(Jurafsky and Martin, 2020)", |
|
"ref_id": "BIBREF50" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our research helps practitioners to make an informed choice of fair word embedding models, in particular pre-trained models, for their application with regards to intrinsic biases (i.e., gender, race, age).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "It has been discovered that word embeddings do not only reflect but also have the tendency to amplify the biases present in the data they are trained on (Wang and Russakovsky, 2021) which can lead to the spread of unfavourable stereotypes (Zhao et al., 2017) . The implicit associations which are a feature of human reasoning are also encoded by embeddings (Greenwald et al., 1998; Caliskan et al., 2016) . Using the Implicit Association Test (IAT), Greenwald et al. (1998) reported that people in the United States demonstrated to link African American names with bad connotations more than Eu-1 https://figshare.com/s/ 23f5b7164e521cf65fb5 ropean American names, female names with art related words and male names with math related words. In 2016, Caliskan et al. (2016) used GloVe vectors and cosine similarity to recreate IAT and discovered that African American names like Jamal and Tamika showed higher cosine similarity with unpleasant words like abuse and terrible. On the contrary, European American names such as Matthew and Ann had a greater cosine similarity with pleasant terms such as love and peace. These are an example of representational harm where a system causes harm that is demeaning some social groups (Blodgett et al., 2020; Crawford, 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 181, |
|
"text": "(Wang and Russakovsky, 2021)", |
|
"ref_id": "BIBREF90" |
|
}, |
|
{ |
|
"start": 239, |
|
"end": 258, |
|
"text": "(Zhao et al., 2017)", |
|
"ref_id": "BIBREF96" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 381, |
|
"text": "(Greenwald et al., 1998;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 382, |
|
"end": 404, |
|
"text": "Caliskan et al., 2016)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 450, |
|
"end": 473, |
|
"text": "Greenwald et al. (1998)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 750, |
|
"end": 772, |
|
"text": "Caliskan et al. (2016)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1225, |
|
"end": 1248, |
|
"text": "(Blodgett et al., 2020;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1249, |
|
"end": 1264, |
|
"text": "Crawford, 2017)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
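
{

"text": "To make the cosine-similarity comparison described above concrete, the following minimal Python sketch (our own illustration, not the authors' released scripts) loads pre-trained GloVe vectors through gensim and compares two example names against a pleasant and an unpleasant term; the model name and probe words are assumptions, and results depend on the terms being in the vocabulary.\n\nimport gensim.downloader as api  # assumes the gensim package is installed\n\n# Illustrative model choice; any pre-trained static embedding would do.\nmodel = api.load('glove-wiki-gigaword-100')\n\nnames = ['matthew', 'jamal']          # example names, as in the IAT recreation\nattributes = ['love', 'terrible']     # example pleasant / unpleasant terms\n\nfor name in names:\n    for attribute in attributes:\n        # cosine similarity between the two word vectors\n        print(name, attribute, round(float(model.similarity(name, attribute)), 3))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Background",

"sec_num": "2"

},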
|
{ |
|
"text": "In the context of word embeddings, it is not only of importance to show that bias exists, but also to determine the degree of bias. For this purpose, bias metrics can be used. Bias metrics can be applied either to a single word, a pair of words, or an entire list of words. Percent Male Neighbours (PMN) (Gonen and Goldberg, 2019 ) is a bias metric that operates on a single word, where one could see the percentage of how many male-gendered words surrounded a target word. For instance, Badilla et al. (2020) discovered that using PMN, 16% of the words around nurse are male-gendered words. However, when engineer is the target term, 78% of words surrounding it are male-gendered.", |
|
"cite_spans": [ |
|
{ |
|
"start": 304, |
|
"end": 329, |
|
"text": "(Gonen and Goldberg, 2019", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 488, |
|
"end": 509, |
|
"text": "Badilla et al. (2020)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
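
{

"text": "As an illustration of how a neighbourhood-based metric such as PMN can be computed, the sketch below is our own deliberately simplified reading (not the metric's reference implementation): it approximates the gender labelling of neighbours with a small, hypothetical list of male-gendered words and counts how many of a target word's nearest neighbours fall in that list.\n\nimport gensim.downloader as api  # assumes gensim is installed\n\nmodel = api.load('glove-wiki-gigaword-100')  # illustrative pre-trained model\n\n# Hypothetical stand-in for the neighbour gender labelling used by the metric.\nmale_words = {'he', 'him', 'his', 'man', 'men', 'male', 'boy', 'father', 'son'}\n\ndef percent_male_neighbours(target, k=100):\n    # most_similar returns the k nearest neighbours by cosine similarity\n    neighbours = [word for word, _ in model.most_similar(target, topn=k)]\n    return 100.0 * sum(word in male_words for word in neighbours) / k\n\nprint(percent_male_neighbours('nurse'))     # target words from the example above\nprint(percent_male_neighbours('engineer'))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Background",

"sec_num": "2"

},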
|
{ |
|
"text": "Moreover, Bolukbasi et al. (2016a) sought to measure bias by comparing the embeddings of a pair of gender-specific terms to a word embedding. The authors introduced DIRECT BIAS, in which a connection is calculated between a gender neutral word (e.g., nurse) and an obvious gender pair (e.g., brother \u2212 sister). They also took into account gender-neutral word connections that are clearly derived from gender (i.e., INDIRECT BIAS). For instance, female associations with both receptionist and softball may explain why the word receptionist is significantly closer to softball than football.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 34, |
|
"text": "Bolukbasi et al. (2016a)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Similarly, SEMBIAS (Zhao et al., 2018) also uses word pairs to evaluate the degree of gender bias in a word embedding. SEMBIAS identifies the correct analogy of he\u2212she in a word embedding according to four pairs of words: a gender definition word pair (e.g., waiter \u2212 waitress), a gender-stereotype word pair (e.g., doctor \u2212 nurse) and two other pairs of words that have similar meanings (e.g., dog \u2212 cat, cup \u2212 lid).", |
|
"cite_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 38, |
|
"text": "(Zhao et al., 2018)", |
|
"ref_id": "BIBREF97" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
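
{

"text": "The selection step at the heart of SEMBIAS can be sketched as follows; this is a minimal illustration under our own assumptions (a single instance, gensim vectors), not the original implementation: for one instance of four word pairs, the pair whose difference vector is most similar to the he \u2212 she direction is selected, and an unbiased embedding should select the gender-definition pair.\n\nimport numpy as np\nimport gensim.downloader as api  # assumes gensim is installed\n\nmodel = api.load('glove-wiki-gigaword-100')  # illustrative pre-trained model\n\ndef cos(u, v):\n    # cosine similarity between two vectors\n    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))\n\ngender_direction = model['he'] - model['she']\n\n# One illustrative SEMBIAS instance: definition, stereotype and two none pairs.\ninstance = {\n    'Definition': ('waiter', 'waitress'),\n    'Stereotype': ('doctor', 'nurse'),\n    'None_1': ('dog', 'cat'),\n    'None_2': ('cup', 'lid'),\n}\n\nscores = {label: cos(model[a] - model[b], gender_direction)\n          for label, (a, b) in instance.items()}\nprint(max(scores, key=scores.get))  # the pair type selected for this instance",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Background",

"sec_num": "2"

},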
|
{ |
|
"text": "In addition, Word Embedding Association Test (WEAT) (Caliskan et al., 2016; Sweeney and Najafian, 2019) determines the degree of association between lists of words (target and attribute words), to automatically assess biases emerging from word embeddings. A target word set is a collection of words that represent a specific social group and are used to assess fairness (e.g., Muslims, African American, men). While an attribute word set is a set of words denoting traits, characteristics, and other things that can be used to show a bias toward one of the targets (e.g., career vs family).", |
|
"cite_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 75, |
|
"text": "(Caliskan et al., 2016;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 76, |
|
"end": 103, |
|
"text": "Sweeney and Najafian, 2019)", |
|
"ref_id": "BIBREF83" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Another significant aspect of these metrics is that there is lack of a clear relationship between them (Badilla et al., 2020) . They function with diverse inputs, resulting in incompatibility between the outputs. As a result, a number of studies began to examine the use of word embedding fairness frameworks, such as Embeddings Fairness Evaluation Framework (WEFE) (Badilla et al., 2020) and Fair Embedding Engine (FEE) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 125, |
|
"text": "(Badilla et al., 2020)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 366, |
|
"end": 388, |
|
"text": "(Badilla et al., 2020)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The aim of paper selection is to gather published work that refers to word embedding models and metrics used to evaluate the fairness of word embeddings. Following that, we choose the most commonly used pre-trained word embedding models and bias metrics to support our experiments. Due to the scope and recent emergence of this topic, we conduct a comprehensive literature review according to guidelines by Kitchenham (2004) . The selection starts with searching for the relevant publications and then extracts pertinent information. Below, we discuss our search methodology in detail, starting with preliminary search, defining keywords, repository search, followed by selecting relevant papers based on the inclusion criteria and snowballing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 407, |
|
"end": 424, |
|
"text": "Kitchenham (2004)", |
|
"ref_id": "BIBREF54" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paper Selection", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "A preliminary search was carried out prior to systematically searching online repositories. This search is particularly useful in understanding the field and the extent to which fairness of word embeddings is covered in previous studies. The results were used to determine keywords (Table 1) which then guided the repository search.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 282, |
|
"end": 291, |
|
"text": "(Table 1)", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Preliminary Search", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "Following the preliminary search, a search on the online libraries of six widely known repositories, namely, ACM Digital Library, arXiv, IEEE Xplore, Google Scholar, ScienceDirect, and Scopus, was conducted. Notable, Google Scholar contains publications from the ACL Anthology. 2 The search took place on 8 June, 2021. Unlike Hort et al. (2021) , this search was not restricted by year. However, prior to commencing the search, an agreement was reached on the specific data field used in the search of each repository, thereby limiting it to the specific parts of a document record. Appendix A shows the data fields used during this search. In particular, the repository search investigates the combination of each keyword pair among the two categories (as shown in Table 1 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 278, |
|
"end": 279, |
|
"text": "2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 326, |
|
"end": 344, |
|
"text": "Hort et al. (2021)", |
|
"ref_id": "BIBREF47" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 766, |
|
"end": 773, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Repository Search", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "We evaluate the following inclusion criteria to ensure that the publications found during the search are relevant to the topic of fairness of pre-trained word embeddings:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Selection", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "\u2022 The publication investigates the fairness of pre-trained word embeddings; \u2022 The publication describes the specific metric or measurement of assessing the fairness of word embeddings; \u2022 The studied metrics are intrinsic, i.e., measuring bias directly in word embedding spaces (Goldfarb-Tarrant et al., 2021a); \u2022 The studied word embeddings are in English.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Selection", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "To determine if the publications met the inclusion criteria, we manually analysed each publication following the process of Martin et al. (Martin et al., 2017 3. Body: Publications that have passed the first two steps are then reviewed in full. In case the material does not meet the inclusion criterion or contribute to the survey, they are excluded.", |
|
"cite_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 158, |
|
"text": "(Martin et al., 2017", |
|
"ref_id": "BIBREF65" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Selection", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "The number of publications gathered from online repositories was reduced by removing the duplicates and applying both the aforesaid process and inclusion criteria. The first and second author participated in this process, and differences were discussed until an agreement was made. In the section 3.3, we investigate the set of relevant publications as the result of this paper selection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Selection", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "After selecting a set of relevant papers from the repository search, one level of backwards snowballing (Wohlin, 2014) was done to examine their references. It entails reviewing the bibliographies of selected publications, determining whether they are relevant, and adding them to the list.", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 118, |
|
"text": "(Wohlin, 2014)", |
|
"ref_id": "BIBREF93" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Snowballing", |
|
"sec_num": "3.1.4" |
|
}, |
|
{ |
|
"text": "The results of the repository search are shown in Table 2 . The first column contains the six online repositories mentioned in Section 3.1.2, in which Google Scholar is abbreviated with GS and Science Direct is abbreviated with SD. The overall number of publications found using the keywords (Table 1) and filters (Appendix A) provided is shown in the first row, while the number of relevant publications filtered based on the paper title, abstract, and body is shown in the last three rows. In addition to the 37 publications retrieved from the repository search, we considered 7 publications from a preliminary search and 1 additional from snowballing.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 57, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 292, |
|
"end": 301, |
|
"text": "(Table 1)", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Selected Publications", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Through a comprehensive search, this study looked at the current literature on the fairness of pretrained word embeddings. In total, we compiled a list of 23 distinct bias metrics that were used to evaluate the fairness of pre-trained word embeddings. It is worth noting that a publication might use multiple pre-trained models and bias metrics (Schlender and Spanakis, 2020; Splieth\u00f6ver and Wachsmuth, 2020; Friedrich et al., 2021; Wang et al., 2020; Vargas and Cotterell, 2020; May et al., 2019; Dev et al., 2020) . The more detailed explanation of the result is discussed in the following sections.", |
|
"cite_spans": [ |
|
{ |
|
"start": 345, |
|
"end": 375, |
|
"text": "(Schlender and Spanakis, 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 408, |
|
"text": "Splieth\u00f6ver and Wachsmuth, 2020;", |
|
"ref_id": "BIBREF80" |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 432, |
|
"text": "Friedrich et al., 2021;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 433, |
|
"end": 451, |
|
"text": "Wang et al., 2020;", |
|
"ref_id": "BIBREF91" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 479, |
|
"text": "Vargas and Cotterell, 2020;", |
|
"ref_id": "BIBREF87" |
|
}, |
|
{ |
|
"start": 480, |
|
"end": 497, |
|
"text": "May et al., 2019;", |
|
"ref_id": "BIBREF66" |
|
}, |
|
{ |
|
"start": 498, |
|
"end": 515, |
|
"text": "Dev et al., 2020)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "3.3.1 The most frequently used pre-trained static word embedding model", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "One of the goals of the paper selection was to extract the most relevant pre-trained word embedding models from the many that have been studied. While recent research on contextual embeddings has proven immensely beneficial, static embeddings remain crucial in many situations (Gupta and Jaggi, 2021) . Many NLP applications fundamentally depend on static word embeddings for metrics that are designed non-contextual (Shoemark et al., 2019), such as examining word vector spaces (Vulic et al., 2020) and bias study (Gonen and Goldberg, 2019; Kaneko and Bollegala, 2019; Manzini et al., 2019) . Furthermore, according to Strubell et al. (2019) , the computational cost of employing static word embeddings is often tens of millions of times lower than the cost of using contextual embedding models (Clark et al., 2020) , which is significant in terms of NLP models financial and environmental costs (Strubell et al., 2019) . Therefore, we focus our proceeding investigation to static models. The number of papers that have looked into fairness on a pre-trained static word embedding model is shown in Figure 1a . It is apparent from this chart that pre-trained model GloVe is the most popular in this research field. The second and third most frequently used models are word2vec and fastText, respectively. Appendix C Table 7 lists all seven distinct pre-trained word embedding models we found during our search.", |
|
"cite_spans": [ |
|
{ |
|
"start": 277, |
|
"end": 300, |
|
"text": "(Gupta and Jaggi, 2021)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 499, |
|
"text": "(Vulic et al., 2020)", |
|
"ref_id": "BIBREF89" |
|
}, |
|
{ |
|
"start": 515, |
|
"end": 541, |
|
"text": "(Gonen and Goldberg, 2019;", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 542, |
|
"end": 569, |
|
"text": "Kaneko and Bollegala, 2019;", |
|
"ref_id": "BIBREF51" |
|
}, |
|
{ |
|
"start": 570, |
|
"end": 591, |
|
"text": "Manzini et al., 2019)", |
|
"ref_id": "BIBREF64" |
|
}, |
|
{ |
|
"start": 620, |
|
"end": 642, |
|
"text": "Strubell et al. (2019)", |
|
"ref_id": "BIBREF81" |
|
}, |
|
{ |
|
"start": 796, |
|
"end": 816, |
|
"text": "(Clark et al., 2020)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 897, |
|
"end": 920, |
|
"text": "(Strubell et al., 2019)", |
|
"ref_id": "BIBREF81" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1099, |
|
"end": 1108, |
|
"text": "Figure 1a", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1316, |
|
"end": 1323, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The paper selection's next aim was to select the most commonly used bias metrics from among the numerous that have been used to examine the fairness of a pre-trained word embedding model. 23 metrics were gathered and sorted based on the number of papers that used them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The most frequently used bias metrics", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "To minimise space, bias metrics that have only been utilised in one study have been labelled as Others. As can be seen from Figure 1b , WEAT is by far the most prevalent bias metric, with 21 out of 32 of the publications using it to quantify bias 132 in pre-trained word embeddings. The second most used metric is SEMBIAS which was used by 4 out of 32 publications. In addition, we found 5 bias metrics which were used by 2 out of 32 publications: NEIGHBOURHOOD METRIC, DIRECT BIAS, DOUBLE BIND, ABW STEREOTYPE and ECT. Appendix C Table 8 lists the detailed information for these metrics including sixteen other metrics that were only utilised in one research.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 133, |
|
"text": "Figure 1b", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 531, |
|
"end": 538, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The most frequently used bias metrics", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "The answer to the following research questions is sought to raise awareness on biased behaviour in commonly used pre-trained word embedding models:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Research Questions", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "RQ1 How do pre-trained word embeddings perform with respect to multiple fairness measures?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Research Questions", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "A series of experiments were carried out to better understand how pre-trained word embeddings perform when subjected to different fairness measures. The most commonly used bias metrics (WEAT, SEMBIAS, DI-RECT BIAS, and ECT) were used to assess the fairness of the three most popular pretrained embeddings: GloVe, word2vec, and fastText (see Sections 3.3.1 and 3.3.2).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Research Questions", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Fairness here refers to the absence of bias in a word embedding model; if the bias is high, the degree of fairness is low, and vice versa. Hence, we examined the most fair embedding after the bias values were acquired.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Research Questions", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "RQ2 How does the vector affect word embedding fairness?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Research Questions", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "To investigate the effect of vector length on the fairness of pre-trained word embedding models, we compare embeddings trained on the same corpus. Therefore, we investigate GloVe Twitter and GloVe Wiki Gigaword to determine the effect.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Research Questions", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We performed experiments using publicly available pre-trained word embeddings. Please refer to Table 3 for the details about the embeddings. These embeddings are provided by the three most used embedding models described in Section 3.3.1. GloVe was trained under three different corpora, resulting in 10 pre-trained word embeddings: four embeddings from 2 billion tweets of Twitter corpus, four embeddings from 6 billion tokens of Wikipedia and Gigaword corpus, two embeddings each from 42 billion and 840 billion tokens of Common Crawl corpus. Pre-trained embeddings trained on Twitter and Wikipedia + Gigaword corpus have varying dimensionalities (i.e., vector length). We also investigated a pre-trained word2vec embedding model, which was trained on 3 billion tokens on a Google News corpus with a vector length of 300. Finally, we evaluated four pre-trained embeddings from fastText, each with and without subword information, on 16 billion tokens from Wikipedia + UMBCWeb Base + statmt.org News and 600 billion tokens from Common Crawl.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 102, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pre-Trained Embeddings", |
|
"sec_num": "4.2.1" |
|
}, |
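
{

"text": "For reference, several of the embeddings listed in Table 3 can be obtained directly through gensim's downloader; the sketch below is our own loading example and the dataset names are gensim's (embeddings not mirrored there, e.g. the GloVe and fastText Common Crawl vectors, have to be downloaded from their official release pages instead).\n\nimport gensim.downloader as api  # assumes gensim is installed\n\n# Dataset names follow gensim's downloader catalogue (see api.info()).\nglove_twitter = api.load('glove-twitter-25')                  # GloVe, Twitter, d=25\nglove_wiki = api.load('glove-wiki-gigaword-300')              # GloVe, Wiki+Gigaword, d=300\nword2vec_news = api.load('word2vec-google-news-300')          # word2vec, Google News, d=300\nfasttext_wiki = api.load('fasttext-wiki-news-subwords-300')   # fastText, Wiki+News, d=300\n\nprint(glove_twitter.vector_size, glove_wiki.vector_size, word2vec_news.vector_size)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pre-Trained Embeddings",

"sec_num": "4.2.1"

},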
|
{ |
|
"text": "We evaluated the fairness of pre-trained word embeddings stated in Section 4.2.1 by focusing on 4 most frequently used and publicly available bias metrics: WEAT, SEMBIAS, DIRECT BIAS, and ECT. To ensure that we measure bias correctly, we focus our evaluation on the metrics that have been used at least twice and are implemented by existing fairness frameworks (e.g., WEFE, FEE). We explain each of these measures below. In order to unveil bias, WEAT detects whether there is a difference in the strength of association between the two target sets (X, Y ) towards attribute sets (A, B):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Metrics", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "s(X, Y, A, B) = x\u03f5X sw(x, A, B) \u2212 y\u03f5Y sw(x, A, B) sw(w, A, B) = meana\u03f5Acos( \u2212 \u2192 w , \u2212 \u2192 a )\u2212mean b\u03f5B cos( \u2212 \u2192 w , \u2212 \u2192 b )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Metrics", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "A and B are attribute sets of identical size. s(X, Y, A, B) computes the test statistic and s w (w, A, B) calculates the difference in similarity of attribute sets to a word w. We focused only on the degree of bias (i.e., we do not consider the direction of bias) and thus only used absolute bias scores for metrics such as WEAT. We utilised WEFE for WEAT experiments and we applied 7 out of 10 WEAT tests provided by Caliskan et al. (2016) . We only selected tests that are concerned with protective attributes concerning human biases (i.e., race, gender, and age). We categorised 7 WEAT tests as: racial bias (T3, T4, and T5); gender bias (T6, T7, and T8); and age bias (T10). Please refer to Appendix B for more information about target and attribute sets. We also evaluated the degree of bias in pretrained word embeddings by using the SEMBIAS metric provided in FEE. Zhao et al. (2018) developed this analogous dataset with 20 genderstereotype word pairs and 22 gender-definitional word pairs, resulting in 440 instances using their Cartesian product. Each instance consists of fourword pairs: a gender definition word pair or Definition (e.g., waiter \u2212 waitress), a gender-stereotype word pair or Stereotype (e.g., doctor \u2212 nurse), and two none-type word pairs or None (e.g., dog \u2212 cat, cup \u2212 lid). The bias according to SEMBIAS is then measured by iterating over each instance and determining the distance vector of each of the four word pairs. The percentage of times that each word pair type achieves the highest similarity to he \u2212 she based on their distance vector is measured, with a \"Definition\" percentage close to 1 is desirable.", |
|
"cite_spans": [ |
|
{ |
|
"start": 418, |
|
"end": 440, |
|
"text": "Caliskan et al. (2016)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 872, |
|
"end": 890, |
|
"text": "Zhao et al. (2018)", |
|
"ref_id": "BIBREF97" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Metrics", |
|
"sec_num": "4.2.2" |
|
}, |
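
{

"text": "For concreteness, a minimal numpy sketch of the WEAT statistic defined above is given below; it is our own simplified illustration (absolute test statistic only, without the effect size normalisation or permutation test) rather than the WEFE implementation used in the experiments, and the word sets are placeholders rather than the sets listed in Appendix B.\n\nimport numpy as np\nimport gensim.downloader as api  # assumes gensim is installed\n\nmodel = api.load('glove-wiki-gigaword-100')  # illustrative pre-trained model\n\ndef cos(u, v):\n    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))\n\ndef s_w(w, A, B):\n    # difference in mean similarity of word w to the two attribute sets\n    return (np.mean([cos(model[w], model[a]) for a in A])\n            - np.mean([cos(model[w], model[b]) for b in B]))\n\ndef weat(X, Y, A, B):\n    # test statistic s(X, Y, A, B) from the formula above\n    return sum(s_w(x, A, B) for x in X) - sum(s_w(y, A, B) for y in Y)\n\nX = ['math', 'algebra', 'geometry']    # placeholder target set\nY = ['poetry', 'art', 'dance']         # placeholder target set\nA = ['male', 'man', 'boy']             # placeholder attribute set\nB = ['female', 'woman', 'girl']        # placeholder attribute set\nprint(abs(weat(X, Y, A, B)))           # absolute degree of bias, as used in the paper",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Bias Metrics",

"sec_num": "4.2.2"

},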
|
{ |
|
"text": "We applied DIRECT BIAS (Bolukbasi et al., 2016a) to measure bias with regards to a list gender neutral words N and the gender directions g:", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 48, |
|
"text": "(Bolukbasi et al., 2016a)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Metrics", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "DirectBias = 1 |N | w\u03f5N |cos( \u2212 \u2192 w , g)| c", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Metrics", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "The parameter c determines how strict the bias measurement is. We conducted the experiment by using DIRECT BIAS that has been implemented in FEE with a 320 profession word list 3 provided by Bolukbasi et al. (2016a) and c = 1. Lower DIRECT BIAS scores indicate that a word embeddings is less biased. The EMBEDDING COHERENCE TEST (ECT) (Dev and Phillips, 2019) computes gender bias based on the rank of the nearest neighbors of gendered word pairs \u03b5 (e.g., \"she\" -\"he\"). These gendered word pairs, consisting of female and male terms, are averaged, such that two mean embedding vectors m and s remain (one for female terms and one for male terms). Given a list of words affected with indirect bias P , in this case a list of professions proposed by Bolukbasi et al. (Bolukbasi et al., 2016a) , the similarity of each word to m and s is determined. The cosine similarities are then replaced by rank order, and given m and s, we receive two rank orders for the words in P . Next, the Spearman Coefficient is calculated once the ranks are compared. For each word pair, ECT is optimised with a Spearman Coefficient towards 1. Here, we experimented with ECT that has been implemented in WEFE using male and female names as target sets, and professions as attribute set. All word list are available in the ECT online repository. 4 The measures used in this paper only examine for particular bias types, not all of them. As a result, these measures can only be used to indicate the presence of these specific types of bias and cannot be used to establish the absence of all biases. Table 4 reports the bias score obtained from the experiment described in Section 4.1 together with pre-trained embeddings and bias metrics chosen in Section 4.2. Bold bias score indicates the best score of the corresponding measure while arrows next to the measure represent the interpretation of the score: downward arrow means the lower the value, the less biased an embedding is; upward arrow means the higher the score, the less biased an embedding is.", |
|
"cite_spans": [ |
|
{ |
|
"start": 191, |
|
"end": 215, |
|
"text": "Bolukbasi et al. (2016a)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 765, |
|
"end": 790, |
|
"text": "(Bolukbasi et al., 2016a)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1322, |
|
"end": 1323, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1574, |
|
"end": 1581, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Bias Metrics", |
|
"sec_num": "4.2.2" |
|
}, |
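
{

"text": "The remaining two metrics can likewise be sketched in a few lines; the code below is our own simplified reading of the definitions above (the FEE and WEFE implementations are what the experiments actually use), with the gender direction reduced to the he \u2212 she difference vector and tiny placeholder word lists in place of the 320-profession and name lists.\n\nimport numpy as np\nfrom scipy.stats import spearmanr\nimport gensim.downloader as api  # assumes gensim and scipy are installed\n\nmodel = api.load('glove-wiki-gigaword-100')  # illustrative pre-trained model\n\ndef cos(u, v):\n    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))\n\nprofessions = ['nurse', 'engineer', 'teacher', 'lawyer']  # placeholder neutral words\ng = model['he'] - model['she']                            # simplified gender direction\n\n# DIRECT BIAS with strictness c = 1: mean |cos| between the neutral words and g.\ndirect_bias = np.mean([abs(cos(model[w], g)) for w in professions])\nprint(direct_bias)\n\n# ECT: Spearman correlation between the professions' similarities to the mean\n# male-name vector and to the mean female-name vector (placeholder name lists).\nm = np.mean([model[w] for w in ['john', 'paul', 'mike']], axis=0)\ns = np.mean([model[w] for w in ['amy', 'lisa', 'sarah']], axis=0)\nsim_male = [cos(model[w], m) for w in professions]\nsim_female = [cos(model[w], s) for w in professions]\nprint(spearmanr(sim_male, sim_female).correlation)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Bias Metrics",

"sec_num": "4.2.2"

},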
|
{ |
|
"text": "The purpose of this experiment is to measure the degree of association between target and attribute words defined by Caliskan (2017) to assess biases emerging from the pre-trained word embeddings. From Table 4 , it can be seen that pretrained fastText models resulted in the lowest bias for tests concerned with racial bias, age bias, and gender bias with gendered names involved. fastText Wiki News scored the lowest on Test 3 and Test 4, whereas fastText Wiki News with subword information scored the lowest on Test 5. fastText Wiki News is also the least biased embedding in terms of age bias (Test 10). Interestingly, among all tests with respect to gender bias: Test 6, Test 7, and Test 8, fastText only outperforms other models on Test 6, particularly fastText that has been trained under Common Crawl corpus with subword information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 132, |
|
"text": "Caliskan (2017)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 209, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "WEAT", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "Turning now to WEAT tests with respect to gender bias which use male and female terms as the attribute words: Test 7 and Test 8. Closer inspection of the Table 4 reveals that pre-trained embeddings trained with GloVe model using Twitter corpus with vector lengths of 200 and 100, outperform other embeddings across the two tests, respectively. Taken together, these results acquired from WEAT tests suggest that fastText is the least biased model for 5 out of the 7 WEAT tests.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 161, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "WEAT", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "This experiment is aimed at identifying the correct analogy of he \u2212 she in various pre-trained word embeddings according to four pairs of words defined by Zhao et al. (2018) . The results obtained from the SEMBIAS experiment can be compared in Table 4 . It is expected to have a high accuracy for Definitions and low accuracy for Stereotypes and Nones.", |
|
"cite_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 173, |
|
"text": "Zhao et al. (2018)", |
|
"ref_id": "BIBREF97" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 244, |
|
"end": 251, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "SEMBIAS", |
|
"sec_num": "5.1.2" |
|
}, |
|
{ |
|
"text": "This table is quite revealing in several ways. First, all embeddings trained using fastText outperform the other pre-trained embeddings. fastText embeddings achieve high semantic, definition scores above 86.8% while keeping stereotypical and none loss to a minimum, below 1% and 3% respectively. Second, among the four embeddings trained with fastText, the one trained with Common Crawl is shown to be the least biased. The percentage of Definition, Stereotype, and None predictions achieved by this embeddings are 92.5%, 5% and 2.5%, respectively. Despite the fact that fastText Wiki News with subword information embeddings achieved the lowest percentage of None, the Stereotype prediction must not be forgotten. Compared to the Stereotype prediction of fastText Common Crawl, fastText Wiki News with subword information embeddings correctly classified 0.4% more words as a genderstereotype word pair, which makes it slightly more biased.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SEMBIAS", |
|
"sec_num": "5.1.2" |
|
}, |
|
{ |
|
"text": "Together, these results provide important insights into how most word pairs in fastText pre-trained embeddings are correctly classified as a gender-definition word pair but only few word pairs are correctly categorised as a gender-stereotype word pair and gender unrelated word pairs. Also according to these data, we can infer that fastText model trained on the Common Crawl corpus generates the least biased pre-trained word embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SEMBIAS", |
|
"sec_num": "5.1.2" |
|
}, |
|
{ |
|
"text": "DIRECT BIAS calculates the connection between gender neutral words and gender direction learned from word embeddings. One unanticipated finding is that the word embeddings generated from the GloVe model trained on Wiki Gigaword corpus with vector length 300, is found to be the Table 4 : Bias scores obtained after applying four metrics to several pre-trained word embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 278, |
|
"end": 285, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "DIRECT BIAS", |
|
"sec_num": "5.1.3" |
|
}, |
|
{ |
|
"text": "least biased pre-trained embeddings with a score of 0.004. This score confirms that the embeddings have the least gender direction when the gender neutral words being applied to it. Across all bias metrics, DIRECT BIAS is the first one that generates the best score for GloVe pre-trained embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DIRECT BIAS", |
|
"sec_num": "5.1.3" |
|
}, |
|
{ |
|
"text": "Similar to WEAT, ECT measures the degree of association between one attribute set and two target sets described in Section 4.2.2. In accordance with WEAT results, a pre-trained fastText model was found to be the least biased. Particularly, the fastText model that has been trained on the Common Crawl corpus without subword information, has the lowest bias score of 0.692. This score reflects the lack of correlation of the mean vectors distances between the male and female name sets and the occupation words, which result in the smallest presence of bias among all of the embeddings. This result supports evidence from previous experiment with SEMBIAS. The consistency may be due to how both metrics aim to identify a gender bias by utilising occupations as gender neutral words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ECT", |
|
"sec_num": "5.1.4" |
|
}, |
|
{ |
|
"text": "We can infer from these data that fastText pretrained word embeddings perform the best with respect to three of the four most used bias metrics. According to SEMBIAS and ECT scores, FastText Common Crawl is the least biased. Using the same corpus but with addition of subword information, the embeddings has the least biased according to WEAT Test 6. Furthermore, FastText Wiki News is least biased on WEAT Test 5. In addition, the embeddings has the least bias on WEAT Test 3, Test 4, and Test 10 while including subword information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall", |
|
"sec_num": "5.1.5" |
|
}, |
|
{ |
|
"text": "The second RQ investigates the impact of parameters on the fairness of pre-trained word embedding models. We conduct experiments to bias in regards to vector length. Figure 2a and Figure 2d present the results obtained from the analysis of WEAT scores with respect to the vector length. On four of the seven WEAT tests: Test 3, Test 4, Test 6, and Test 7 (after 50 dimension) there is a clear trend of decreasing bias in GloVe Twitter with the rise value of vector length (Figure 2a ). On the other hand, Figure 2d indicates that the bias in GloVe Wiki drops as the vector length increases in four WEAT tests: Test 3, Test 5 (after 100 dimension), Test 6, and Test 7. In summary, 8 from 14 WEAT's findings imply that the greater the GloVe Twitter and GloVe Wiki dimension, the less biased they are.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 175, |
|
"text": "Figure 2a", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 189, |
|
"text": "Figure 2d", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 472, |
|
"end": 482, |
|
"text": "(Figure 2a", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 505, |
|
"end": 514, |
|
"text": "Figure 2d", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "RQ2: Effect of Vector Length on Fairness", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Turning now to the analysis on SEMBIAS scores, it is apparent from Figure 2b and Figure 2e that the fairness improves with the increase in the number of dimensions. Note that in SEMBIAS, a high accuracy for Definitions and low accuracy for Stereotypes and Nones are expected. That is why as the dimension rises, the Definition's accuracy increases, but the Stereotype and None's accuracy decreases. Overall, this finding indicates that according to SEMBIAS, words in GloVe Twitter and GloVe Wiki embeddings are more likely to be correctly identified as gender-definition word pair but less likely to be correctly classified as a genderstereotype word pair and gender unrelated word pairs if they were trained with large vector lengths.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 76, |
|
"text": "Figure 2b", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 81, |
|
"end": 90, |
|
"text": "Figure 2e", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "RQ2: Effect of Vector Length on Fairness", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The next analysis of this experimental result is concerned with how the DIRECT BIAS scores would be affected by the vector length. Figure 2c shows that following the increase of vector length in GloVe Twitter, we observe a decrease in the bias score. In Figure 2f , bias score of GloVe Wiki Gigaword increases from lower dimensions 50 to 100 but decreases beyond dimension 100. These results show that from four vector lengths used in each of the two corpora, most of them support the hypothesis that the larger dimension used resulted in smaller presence of gender bias. The rise of bias score of GloVe trained in Wiki Gigaword corpus from 50 to 100 dimension is the only instance that counters our hypothesis.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 131, |
|
"end": 140, |
|
"text": "Figure 2c", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 254, |
|
"end": 263, |
|
"text": "Figure 2f", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "RQ2: Effect of Vector Length on Fairness", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Lastly, Figure 2c shows a decrease in ECT score as vector length increases in GloVe Twitter only within dimensions of 25, 50, and 100. However, between 100 and 200, the bias score increases by 0.016. In addition, Figure 2f illustrates that the discovery of GloVe Wiki Gigaword in ECT is similar to that in DIRECT BIAS, that the bias increases from lower dimensions 50 to 100 but rapidly declines beyond dimension 100. Six of the eight pre-trained embeddings examined in this investigation support the finding that fairness improves as the number of dimensions increases.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 17, |
|
"text": "Figure 2c", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 213, |
|
"end": 222, |
|
"text": "Figure 2f", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "RQ2: Effect of Vector Length on Fairness", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Finally, most observations from the WEAT, SEMBIAS, DIRECT BIAS, and ECT scores indicate evidence for improved fairness in pre-trained word embeddings when the number of dimensions is increased. This result implies that lower dimensionality word embeddings are not expressive enough to capture all word associations and analogies, and that when the bias metric is applied to them, they become more biased than embeddings with larger dimensions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "RQ2: Effect of Vector Length on Fairness", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "There has been a growing interest among researchers to tackle bias in word embeddings, herein we focus on previous work comparing different models and their characteristics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Lauscher and Glava\u0161 (2019) evaluated embedding space biases caused by four different models and found that GloVe embeddings are biased according to all 10 WEAT tests, while fastText exhibits significant biases only for a subset of tests. This finding broadly supports our finding where all smallest WEAT scores belong to GloVe pretrained embeddings. However, their focus is different from our as their approach aims at understanding the consistency of the bias effects across languages, corpora, and embedding models. Borah et al. (2021) compared the stability of the fairness results to those of the word embedding models used: fastText, GloVe, and word2vec, all of which were trained on Wikipedia. Among the three models, they discovered that fastText is the best stable word embedding model which results in the highest stability for its WEAT results. Badilla et al. (2020) implemented their proposed fairness framework, WEFE, by conducting case study where six publicly available pre-trained word embedding models are compared with respect to four bias metrics (e.g., WEAT, WEAT-ES, RND, RNSB). Consistent with our finding, they discovered that fastText rank first in WEAT. proposed a general debiasing framework Debiasing Embeddings Implicitly and Explicitly (DEBIE). They used two bias metrics: WEAT Test 8 and ECT to compare the bias of CBOW, GloVe, and fastText trained in Wikipedia. They observed that fastText is more biased than GloVe in both metrics. While this contradicts our observations, their study did not utilise pre-trained models but manually trained them on the same corpus. Popovi\u0107 et al. (2020) demonstrated the viability of their modified WEAT metric on three classes of biases (religion, gender and race) in three different publicly available word embeddings with vector length of 300: fastText, GloVe and word2vec. Their findings yielded that before debiasing, fastText has the least religion and race bias, while word2vec has the least gender bias. However, one of the study's discoveries opposes our findings where word2vec does not have the least gender bias. This difference may occur given the fact that the authors collected word sets from a number of different literature.", |
|
"cite_spans": [ |
|
{ |
|
"start": 518, |
|
"end": 537, |
|
"text": "Borah et al. (2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 855, |
|
"end": 876, |
|
"text": "Badilla et al. (2020)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1597, |
|
"end": 1618, |
|
"text": "Popovi\u0107 et al. (2020)", |
|
"ref_id": "BIBREF72" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Furthermore, previous work considers the impact of word embedding vector length on the performance and the relation to fairness. Borah et al. (2021) looked at how the length of the vectors used in training fastText, GloVe, and word2vec affected their stability. The models' stability improves as the vector dimensions grow larger. On the other hand, Goldberg and Hirst (2017) found that word embeddings with smaller vectors are better at grouping similar words. This generalisation means that word embeddings with shorter vector lengths have a higher tendency to be biased. The results of our empirical study, obtained using more data and metrics, corroborate the above findings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 148, |
|
"text": "Borah et al. (2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 350, |
|
"end": 375, |
|
"text": "Goldberg and Hirst (2017)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
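
{

"text": "As a rough illustration of how the relation between vector length and bias can be probed on pre-trained embeddings, the sketch below measures the mean absolute projection of a few gender-neutral occupation words onto a simple he-she direction for GloVe models of increasing dimensionality. This is a simplified proxy inspired by projection-based measures such as DIRECT BIAS, not the exact procedure used in our study; the gensim model names and the occupation list are assumptions chosen for brevity.

import numpy as np
import gensim.downloader as api

def gender_projection_bias(emb, neutral_words):
    # crude gender direction from a single definitional pair
    g = emb['he'] - emb['she']
    g = g / np.linalg.norm(g)
    # absolute projection of each (normalised) neutral word onto the gender direction
    scores = [abs(np.dot(emb[w] / np.linalg.norm(emb[w]), g))
              for w in neutral_words if w in emb]
    return float(np.mean(scores))

occupations = ['nurse', 'engineer', 'teacher', 'doctor', 'librarian', 'mechanic']
for name in ['glove-wiki-gigaword-50', 'glove-wiki-gigaword-100',
             'glove-wiki-gigaword-200', 'glove-wiki-gigaword-300']:
    emb = api.load(name)  # downloads the pre-trained vectors on first use
    print(name, round(gender_projection_bias(emb, occupations), 4))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Related Work",

"sec_num": "6"

},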
|
{ |
|
"text": "Much of the previous research has focused on proposing and evaluating debiasing techniques, modified metrics and fairness frameworks. Therefore, our study makes a major contribution to the research on fairness of word embeddings by empirically comparing the degree of bias of the most popular and easily accessible pre-trained word embeddings according to a variety of popular bias metrics, as well as the impact of vector length involved in the training process to its fairness.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The purpose of this study was to empirically assess the degree of fairness exhibited by different publicly available pre-trained word embeddings based on different bias metrics. To this end, we first analysed what are the most used pre-trained word embeddings and bias metrics by conducting a comprehensive literature survey. The results pointed out that the majority of the papers used three word embedding models (namely GloVe, word2vec, and fastText) and four bias metrics (namely WEAT, SEMBIAS, DIRECT BIAS, and ECT). Our results revealed that the most fair of the three pre-trained word embedding models evaluated is fastText. We also found that while using pre-trained embeddings, the influence of vector length on fairness must be carefully considered.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The scope of this study was limited in terms of selecting word list used to apply bias metrics to the word embeddings. We closely examined the earlier studies that may have influenced bias scores. In the future, we need a deeper analysis and explanation of the numerous fairness tendencies discovered in this study, such as the correlation with explicit gender gaps and survey data (Friedman et al., 2019a,b) , and the extent to which the embeddings reproduce bias . Moreover, the study could be replicated by not only using pre-trained word embeddings models, but manually training models with different parameters on an identical text corpus. Further study could also be conducted to explore the fairness of contextual word embeddings (e.g., ELMo, Bert), the application bias in word embeddings (Goldfarb-Tarrant et al., 2021b) , and bias in word embedding in languages with grammatical gender (Zhou et al., 2019) . : WEAT tests used in this study. Number 5, 7 and 9 next to the set refer to the sources (Caliskan et al., 2016) used to define the word list in their paper. The names in Test 3 differ from those in Test 4.", |
|
"cite_spans": [ |
|
{ |
|
"start": 382, |
|
"end": 408, |
|
"text": "(Friedman et al., 2019a,b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 797, |
|
"end": 829, |
|
"text": "(Goldfarb-Tarrant et al., 2021b)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 896, |
|
"end": 915, |
|
"text": "(Zhou et al., 2019)", |
|
"ref_id": "BIBREF98" |
|
}, |
|
{ |
|
"start": 1006, |
|
"end": 1029, |
|
"text": "(Caliskan et al., 2016)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "https://aclanthology.org/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/tolga-b/debiaswe", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/sunipa/ Attenuating-Bias-in-Word-Vec", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "M. Hort and F. Sarro are supported by the ERC grant 741278 (EPIC).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Data Fields ACM Publication title, abstract, keywords arXiv All Google Scholar In the title with exact phrase IEEE All metadata Science Direct Title, abstract or author-specified keywords Scopus TITLE-ABS-KEY", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Repository", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Year Venue GloVe (Bolukbasi et al., 2016a ) (Garg et al., 2018 ) (Sutton et al., 2018 ) ) (Yang and Feng, 2019 ) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 17, |
|
"end": 41, |
|
"text": "(Bolukbasi et al., 2016a", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 44, |
|
"end": 62, |
|
"text": "(Garg et al., 2018", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 65, |
|
"end": 85, |
|
"text": "(Sutton et al., 2018", |
|
"ref_id": "BIBREF82" |
|
}, |
|
{ |
|
"start": 90, |
|
"end": 110, |
|
"text": "(Yang and Feng, 2019", |
|
"ref_id": "BIBREF94" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Comparison of pre-trained word vectors for arabic text classification using deep learning approach", |
|
"authors": [ |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Alwehaibi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaushik", |
|
"middle": [], |
|
"last": "Roy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "17th IEEE International Conference on Machine Learning and Applications (ICMLA)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1471--1474", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICMLA.2018.00239" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ali Alwehaibi and Kaushik Roy. 2018. Comparison of pre-trained word vectors for arabic text classification using deep learning approach. In 2018 17th IEEE International Conference on Machine Learning and Applications (ICMLA), pages 1471-1474.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Evaluating the stability of embedding-based word similarities", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Antoniak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Mimno", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "107--119", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00008" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria Antoniak and David Mimno. 2018. Evaluating the stability of embedding-based word similarities. Transactions of the Association for Computational Linguistics, 6:107-119.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Enhancing deep learning sentiment analysis with ensemble techniques in social applications", |
|
"authors": [ |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "Araque", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ignacio", |
|
"middle": [], |
|
"last": "Corcuera-Platas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"Fernando" |
|
], |
|
"last": "S\u00e1nchez-Rada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Iglesias", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Expert Systems with Applications", |
|
"volume": "77", |
|
"issue": "", |
|
"pages": "236--246", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.eswa.2017.02.002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oscar Araque, Ignacio Corcuera-Platas, J. Fernando S\u00e1nchez-Rada, and Carlos A. Iglesias. 2017. Enhanc- ing deep learning sentiment analysis with ensemble techniques in social applications. Expert Systems with Applications, 77:236-246.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Improving the accuracy using pre-trained word embeddings on deep neural networks for turkish text classification", |
|
"authors": [ |
|
{ |
|
"first": "Murat", |
|
"middle": [], |
|
"last": "Aydogan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Karci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Physica A: Statistical Mechanics and its Applications", |
|
"volume": "541", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.physa.2019.123288" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Murat Aydogan and Ali Karci. 2020. Improving the accuracy using pre-trained word embeddings on deep neural networks for turkish text classification. Phys- ica A: Statistical Mechanics and its Applications, 541:123288.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Quantifying Gender Bias in Different Corpora", |
|
"authors": [ |
|
{ |
|
"first": "Marzieh", |
|
"middle": [], |
|
"last": "Babaeianjelodar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Lorenz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josh", |
|
"middle": [], |
|
"last": "Gordon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeanna", |
|
"middle": [], |
|
"last": "Matthews", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evan", |
|
"middle": [], |
|
"last": "Freitag", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Companion Proceedings of the Web Conference 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "752--759", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3366424.3383559" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marzieh Babaeianjelodar, Stephen Lorenz, Josh Gordon, Jeanna Matthews, and Evan Freitag. 2020. Quantify- ing Gender Bias in Different Corpora. In Companion Proceedings of the Web Conference 2020, pages 752- 759, New York, NY, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "WEFE: The word embeddings fairness evaluation framework", |
|
"authors": [ |
|
{ |
|
"first": "Pablo", |
|
"middle": [], |
|
"last": "Badilla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felipe", |
|
"middle": [], |
|
"last": "Bravo-Marquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jorge", |
|
"middle": [], |
|
"last": "P\u00e9rez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IJCAI International Joint Conference on Artificial Intelligence", |
|
"volume": "2021", |
|
"issue": "", |
|
"pages": "430--436", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.24963/ijcai.2020/60" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pablo Badilla, Felipe Bravo-Marquez, and Jorge P\u00e9rez. 2020. WEFE: The word embeddings fairness evalua- tion framework. In IJCAI International Joint Confer- ence on Artificial Intelligence, volume 2021-January, pages 430-436. International Joint Conferences on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Fewshot text classification with pre-trained word embeddings and a human in the loop", |
|
"authors": [ |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Bailey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunny", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.02063" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Katherine Bailey and Sunny Chopra. 2018. Few- shot text classification with pre-trained word em- beddings and a human in the loop. arXiv preprint arXiv:1804.02063.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Model choices influence attributive word associations: A semi-supervised analysis of static word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Geetanjali", |
|
"middle": [], |
|
"last": "Bihani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [ |
|
"Taylor" |
|
], |
|
"last": "Rayz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "2020 IEEE/WIC/ACM International Joint Conference on Web Intelligence and Intelligent Agent Technology (WI-IAT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "568--573", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Geetanjali Bihani and Julia Taylor Rayz. 2020. Model choices influence attributive word associations: A semi-supervised analysis of static word embeddings. In 2020 IEEE/WIC/ACM International Joint Con- ference on Web Intelligence and Intelligent Agent Technology (WI-IAT), pages 568-573. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Language (technology) is power: A critical survey of \"bias", |
|
"authors": [ |
|
{

"first": "Su",

"middle": [

"Lin"

],

"last": "Blodgett",

"suffix": ""

},

{

"first": "Solon",

"middle": [],

"last": "Barocas",

"suffix": ""

},

{

"first": "Hal",

"middle": [],

"last": "Daum\u00e9",

"suffix": "III"

},

{

"first": "Hanna",

"middle": [],

"last": "Wallach",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Su Lin Blodgett, Solon Barocas, Hal Daum\u00e9 III au2, and Hanna Wallach. 2020. Language (technology) is power: A critical survey of \"bias\" in nlp.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Stereotyping Norwegian Salmon: An Inventory of Pitfalls in Fairness Benchmark Datasets", |
|
"authors": [ |
|
{

"first": "Su",

"middle": [

"Lin"

],

"last": "Blodgett",

"suffix": ""

},

{

"first": "Gilsinia",

"middle": [],

"last": "Lopez",

"suffix": ""

},

{

"first": "Alexandra",

"middle": [],

"last": "Olteanu",

"suffix": ""

},

{

"first": "Robert",

"middle": [],

"last": "Sim",

"suffix": ""

},

{

"first": "Hanna",

"middle": [],

"last": "Wallach",

"suffix": ""

}
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/V1/2021.ACL-LONG.81" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Su Lin Blodgett, Gilsinia Lopez, Alexandra Olteanu, Robert Sim, and Hanna Wallach. 2021. Stereotyping Norwegian Salmon: An Inventory of Pitfalls in Fair- ness Benchmark Datasets. ACL-IJCNLP 2021 -59th", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, Proceedings of the Conference", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1004--1015", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Confer- ence on Natural Language Processing, Proceedings of the Conference, pages 1004-1015.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Enriching Word Vectors with Subword Information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "135--146", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00051" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching Word Vectors with Subword Information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Man is to computer programmer as woman is to homemaker? Debiasing word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Tolga", |
|
"middle": [], |
|
"last": "Bolukbasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [ |
|
"Wei" |
|
], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Venkatesh", |
|
"middle": [], |
|
"last": "Saligrama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Kalai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4356--4364", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tolga Bolukbasi, Kai Wei Chang, James Zou, Venkatesh Saligrama, and Adam Kalai. 2016a. Man is to com- puter programmer as woman is to homemaker? De- biasing word embeddings. In Advances in Neural Information Processing Systems, pages 4356-4364. Neural information processing systems foundation.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Quantifying and reducing stereotypes in word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Tolga", |
|
"middle": [], |
|
"last": "Bolukbasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Venkatesh", |
|
"middle": [], |
|
"last": "Saligrama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Kalai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.06121" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tolga Bolukbasi, Kai-Wei Chang, James Zou, Venkatesh Saligrama, and Adam Kalai. 2016b. Quan- tifying and reducing stereotypes in word embeddings. arXiv preprint arXiv:1606.06121.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Manash Pratim Barman, and Amit Awekar. 2021. Are Word Embedding Methods Stable and Should We Care About It?", |
|
"authors": [ |
|
{ |
|
"first": "Angana", |
|
"middle": [], |
|
"last": "Borah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Angana Borah, Manash Pratim Barman, and Amit Awekar. 2021. Are Word Embedding Methods Stable and Should We Care About It?", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Beyond Big Data: What Can We Learn from AI Models?", |
|
"authors": [ |
|
{ |
|
"first": "Aylin", |
|
"middle": [], |
|
"last": "Caliskan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 10th ACM Workshop on Artificial Intelligence and Security", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--1", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3128572.3140452" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aylin Caliskan. 2017. Beyond Big Data: What Can We Learn from AI Models? In Proceedings of the 10th ACM Workshop on Artificial Intelligence and Security, pages 1-1, New York, NY, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Semantics derived automatically from language corpora contain human-like biases", |
|
"authors": [ |
|
{ |
|
"first": "Aylin", |
|
"middle": [], |
|
"last": "Caliskan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joanna", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Bryson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Narayanan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Science", |
|
"volume": "356", |
|
"issue": "6334", |
|
"pages": "183--186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1126/science.aal4230" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aylin Caliskan, Joanna J. Bryson, and Arvind Narayanan. 2016. Semantics derived automatically from language corpora contain human-like biases. Science, 356(6334):183-186.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Stability of word embeddings using word2vec", |
|
"authors": [ |
|
{ |
|
"first": "Mansi", |
|
"middle": [], |
|
"last": "Chugh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Whigham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Grant", |
|
"middle": [], |
|
"last": "Dick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "AI 2018: Advances in Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "812--818", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mansi Chugh, Peter A. Whigham, and Grant Dick. 2018. Stability of word embeddings using word2vec. In AI 2018: Advances in Artificial Intelligence, pages 812-818, Cham. Springer International Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Assessing Bias Removal from Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Clare", |
|
"middle": [], |
|
"last": "Arrington", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Clare Arrington. 2019. Assessing Bias Removal from Word Embeddings. Student Research Submissions.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Electra: Pre-training text encoders as discriminators rather than generators", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Quoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2003.10555" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark, Minh-Thang Luong, Quoc V Le, and Christopher D Manning. 2020. Electra: Pre-training text encoders as discriminators rather than generators. arXiv preprint arXiv:2003.10555.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Finetuning neural machine translation on gender-balanced datasets", |
|
"authors": [ |
|
{ |
|
"first": "Marta", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Costa-Juss\u00e0", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adri\u00e0", |
|
"middle": [], |
|
"last": "De", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jorge", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Second Workshop on Gender Bias in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "26--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marta R. Costa-juss\u00e0 and Adri\u00e0 de Jorge. 2020. Fine- tuning neural machine translation on gender-balanced datasets. In Proceedings of the Second Workshop on Gender Bias in Natural Language Processing, pages 26-34, Barcelona, Spain (Online). Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "The trouble with bias -nips 2017 keynote -kate crawford nips2017", |
|
"authors": [ |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Crawford", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kate Crawford. 2017. The trouble with bias -nips 2017 keynote -kate crawford nips2017.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "On Measuring and Mitigating Biased Inferences of Word Embeddings. 34th AAAI Conference on Artificial Intelligence", |
|
"authors": [ |
|
{ |
|
"first": "Sunipa", |
|
"middle": [], |
|
"last": "Dev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Phillips", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Srikumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "7659--7666", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1609/aaai.v34i05.6267" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sunipa Dev, Tao Li, Jeff Phillips, and Vivek Srikumar. 2019. On Measuring and Mitigating Biased Infer- ences of Word Embeddings. 34th AAAI Conference on Artificial Intelligence, AAAI 2020, 34(05):7659- 7666.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Oscar: Orthogonal subspace correction and rectification of biases in word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Sunipa", |
|
"middle": [], |
|
"last": "Dev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Phillips", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Srikumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2007.00049" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sunipa Dev, Tao Li, Jeff M Phillips, and Vivek Srikumar. 2020. Oscar: Orthogonal subspace correction and rectification of biases in word embeddings. arXiv preprint arXiv:2007.00049.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Attenuating bias in word vectors", |
|
"authors": [ |
|
{ |
|
"first": "Sunipa", |
|
"middle": [], |
|
"last": "Dev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Phillips", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "CoRR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sunipa Dev and Jeff M. Phillips. 2019. Attenuating bias in word vectors. CoRR, abs/1901.07656.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "2018. k-nn embedding stability for word2vec hyper-parametrisation in scientific text", |
|
"authors": [ |
|
{ |
|
"first": "Amna", |
|
"middle": [], |
|
"last": "Dridi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [ |
|
"Medhat" |
|
], |
|
"last": "Gaber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Azad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jagdev", |
|
"middle": [], |
|
"last": "Bhogal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "International Conference on Discovery Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "328--343", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amna Dridi, Mohamed Medhat Gaber, R Azad, and Jagdev Bhogal. 2018. k-nn embedding stability for word2vec hyper-parametrisation in scientific text. In International Conference on Discovery Science, pages 328-343. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Exploring human gender stereotypes with word association test", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "2019 Conference on Empirical Methods in Natural Language Processing and 9th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6133--6143", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y Du, Y Wu, and M Lan. 2020. Exploring human gen- der stereotypes with word association test. In 2019 Conference on Empirical Methods in Natural Lan- guage Processing and 9th International Joint Con- ference on Natural Language Processing, EMNLP- IJCNLP 2019, pages 6133-6143, Department of Computer Science and Technology, East China Nor- mal University, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "MDR Cluster-Debias: A Nonlinear WordEmbedding Debiasing Pipeline. 13th International Conference on Social Computing, Behavioral-Cultural Modeling and Prediction and Behavior Representation in Modeling and Simulation", |
|
"authors": [ |
|
{ |
|
"first": "Yuhao", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Joseph", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "45--54", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-030-61255-9_5" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuhao Du and Kenneth Joseph. 2020. MDR Cluster- Debias: A Nonlinear WordEmbedding Debiasing Pipeline. 13th International Conference on Social Computing, Behavioral-Cultural Modeling and Pre- diction and Behavior Representation in Modeling and Simulation, SBP-BRiMS 2020, 12268 LNCS:45-54.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Why does unsupervised pre-training help deep learning", |
|
"authors": [ |
|
{ |
|
"first": "Dumitru", |
|
"middle": [], |
|
"last": "Erhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [], |
|
"last": "Courville", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierre-Antoine", |
|
"middle": [], |
|
"last": "Manzagol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Vincent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samy", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "11", |
|
"issue": "19", |
|
"pages": "625--660", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dumitru Erhan, Yoshua Bengio, Aaron Courville, Pierre-Antoine Manzagol, Pascal Vincent, and Samy Bengio. 2010. Why does unsupervised pre-training help deep learning? Journal of Machine Learning Research, 11(19):625-660.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Understanding Undesirable Word Embedding Associations", |
|
"authors": [ |
|
{ |
|
"first": "Kawin", |
|
"middle": [], |
|
"last": "Ethayarajh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Duvenaud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graeme", |
|
"middle": [], |
|
"last": "Hirst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "57th Annual Meeting of the Association for Computational Linguistics, ACL 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1696--1705", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/p19-1166" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kawin Ethayarajh, David Duvenaud, and Graeme Hirst. 2019. Understanding Undesirable Word Embedding Associations. 57th Annual Meeting of the Associa- tion for Computational Linguistics, ACL 2019, pages 1696-1705.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "A Survey of Race, Racism, and Anti-Racism in NLP", |
|
"authors": [ |
|
{ |
|
"first": "Anjalie", |
|
"middle": [], |
|
"last": "Field", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Su", |
|
"middle": [ |
|
"Lin" |
|
], |
|
"last": "Blodgett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeerak", |
|
"middle": [], |
|
"last": "Waseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.48550/arxiv.2106.11410" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anjalie Field, Su Lin Blodgett, Zeerak Waseem, and Yulia Tsvetkov. 2021. A Survey of Race, Racism, and Anti-Racism in NLP. ACL-IJCNLP 2021 -59th", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, Proceedings of the Conference", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1905--1925", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Confer- ence on Natural Language Processing, Proceedings of the Conference, pages 1905-1925.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Relating word embedding gender biases to gender gaps: A cross-cultural analysis", |
|
"authors": [ |
|
{ |
|
"first": "Scott", |
|
"middle": [], |
|
"last": "Friedman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sonja", |
|
"middle": [], |
|
"last": "Schmer-Galunder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Rye", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the First Workshop on Gender Bias in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "18--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Scott Friedman, Sonja Schmer-Galunder, Anthony Chen, and Jeffrey Rye. 2019a. Relating word embed- ding gender biases to gender gaps: A cross-cultural analysis. In Proceedings of the First Workshop on Gender Bias in Natural Language Processing, pages 18-24.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Relating Linguistic Gender Bias, Gender Values, and Gender Gaps: An International Analysis", |
|
"authors": [ |
|
{ |
|
"first": "Scott", |
|
"middle": [], |
|
"last": "Friedman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sonja", |
|
"middle": [], |
|
"last": "Schmer-Galunder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Rye", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Goldman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Scott Friedman, Sonja Schmer-Galunder, Jeffrey Rye, Robert Goldman, and Anthony Chen. 2019b. Re- lating Linguistic Gender Bias, Gender Values, and Gender Gaps: An International Analysis.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Debie: A platform for implicit and explicit debiasing of word embedding spaces", |
|
"authors": [ |
|
{ |
|
"first": "Niklas", |
|
"middle": [], |
|
"last": "Friedrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anne", |
|
"middle": [], |
|
"last": "Lauscher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [ |
|
"Paolo" |
|
], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Glava\u0161", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "91--98", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Niklas Friedrich, Anne Lauscher, Simone Paolo Ponzetto, and Goran Glava\u0161. 2021. Debie: A plat- form for implicit and explicit debiasing of word em- bedding spaces. In Proceedings of the 16th Confer- ence of the European Chapter of the Association for Computational Linguistics: System Demonstrations, pages 91-98.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Combine hownet lexicon to train phrase recursive autoencoder for sentence-level sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Xianghua", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wangwang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yingying", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laizhong", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Neurocomputing", |
|
"volume": "241", |
|
"issue": "", |
|
"pages": "18--27", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.neucom.2017.01.079" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xianghua Fu, Wangwang Liu, Yingying Xu, and Laizhong Cui. 2017. Combine hownet lexicon to train phrase recursive autoencoder for sentence-level sentiment analysis. Neurocomputing, 241:18-27.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Word embeddings quantify 100 years of gender and ethnic stereotypes", |
|
"authors": [ |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Garg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Londa", |
|
"middle": [], |
|
"last": "Schiebinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the National Academy of Sciences of the United States of America", |
|
"volume": "115", |
|
"issue": "", |
|
"pages": "3635--3644", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1073/pnas.1720347115" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikhil Garg, Londa Schiebinger, Dan Jurafsky, and James Zou. 2018. Word embeddings quantify 100 years of gender and ethnic stereotypes. Proceedings of the National Academy of Sciences of the United States of America, 115(16):E3635-E3644.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "WordBias: An Interactive Visual Tool for Discovering Intersectional Biases Encoded in Word Embeddings. 2021 CHI Conference on Human Factors in Computing Systems: Making Waves, Combining Strengths, CHI EA 2021", |
|
"authors": [ |
|
{ |
|
"first": "Bhavya", |
|
"middle": [], |
|
"last": "Ghai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Md Naimul Hoque", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mueller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3411763.3451587" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bhavya Ghai, Md Naimul Hoque, and Klaus Mueller. 2021. WordBias: An Interactive Visual Tool for Dis- covering Intersectional Biases Encoded in Word Em- beddings. 2021 CHI Conference on Human Factors in Computing Systems: Making Waves, Combining Strengths, CHI EA 2021.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Neural Network Methods in Natural Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graeme", |
|
"middle": [], |
|
"last": "Hirst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Goldberg and Graeme Hirst. 2017. Neural Net- work Methods in Natural Language Processing. Mor- gan;", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Intrinsic bias metrics do not correlate with application bias", |
|
"authors": [ |
|
{ |
|
"first": "Seraphina", |
|
"middle": [], |
|
"last": "Goldfarb-Tarrant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Marchant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ricardo", |
|
"middle": [ |
|
"Mu\u00f1oz" |
|
], |
|
"last": "S\u00e1nchez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mugdha", |
|
"middle": [], |
|
"last": "Pandya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1926--1940", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seraphina Goldfarb-Tarrant, Rebecca Marchant, Ri- cardo Mu\u00f1oz S\u00e1nchez, Mugdha Pandya, and Adam Lopez. 2021a. Intrinsic bias metrics do not correlate with application bias. In Proceedings of the 59th An- nual Meeting of the Association for Computational Linguistics and the 11th International Joint Confer- ence on Natural Language Processing (Volume 1: Long Papers), pages 1926-1940.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Intrinsic Bias Metrics Do Not Correlate with Application Bias", |
|
"authors": [ |
|
{ |
|
"first": "Seraphina", |
|
"middle": [], |
|
"last": "Goldfarb-Tarrant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Marchant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ricardo", |
|
"middle": [ |
|
"Mu\u00f1oz" |
|
], |
|
"last": "S\u00e1nchez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mugdha", |
|
"middle": [], |
|
"last": "Pandya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings ofthe 59th Annual Meeting ofthe Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1926--1940", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.48550/arxiv.2012.15859" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seraphina Goldfarb-Tarrant, Rebecca Marchant, Ri- cardo Mu\u00f1oz S\u00e1nchez, Mugdha Pandya, and Adam Lopez. 2021b. Intrinsic Bias Metrics Do Not Corre- late with Application Bias. Proceedings ofthe 59th Annual Meeting ofthe Association for Computational Linguistics and the 11th International Joint Confer- ence on Natural Language Processing, pages 1926- 1940.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Lipstick on a Pig: Debiasing Methods Cover up Systematic Gender Biases in Word Embeddings But do not Remove Them", |
|
"authors": [ |
|
{ |
|
"first": "Hila", |
|
"middle": [], |
|
"last": "Gonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL HLT 2019", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "609--614", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hila Gonen and Yoav Goldberg. 2019. Lipstick on a Pig: Debiasing Methods Cover up Systematic Gen- der Biases in Word Embeddings But do not Remove Them. 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, NAACL HLT 2019, 1:609-614.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Measuring individual differences in implicit cognition: The implicit association test", |
|
"authors": [ |
|
{ |
|
"first": "Anthony", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Greenwald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Debbie", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Mcghee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordan", |
|
"middle": [ |
|
"L K" |
|
], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Journal of Personality and Social Psychology", |
|
"volume": "74", |
|
"issue": "6", |
|
"pages": "1464--1480", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1037/0022-3514.74.6.1464" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anthony G. Greenwald, Debbie E. McGhee, and Jor- dan L.K. Schwartz. 1998. Measuring individual dif- ferences in implicit cognition: The implicit associa- tion test. Journal of Personality and Social Psychol- ogy, 74(6):1464-1480.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Detecting emergent intersectional biases: Contextualized word embeddings contain a distribution of human-like biases", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aylin", |
|
"middle": [], |
|
"last": "Caliskan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 AAAI/ACM Conference on AI, Ethics, and Society", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "122--133", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Guo and Aylin Caliskan. 2021. Detecting emergent intersectional biases: Contextualized word embed- dings contain a distribution of human-like biases. In Proceedings of the 2021 AAAI/ACM Conference on AI, Ethics, and Society, pages 122-133.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Obtaining Better Static Word Embeddings Using Contextual Embedding Models", |
|
"authors": [ |
|
{ |
|
"first": "Prakhar", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Jaggi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings ofthe 59th Annual Meeting ofthe Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5241--5253", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Prakhar Gupta and Martin Jaggi. 2021. Obtaining Better Static Word Embeddings Using Contextual Embed- ding Models. In Proceedings ofthe 59th Annual Meet- ing ofthe Association for Computational Linguistics, pages 5241-5253.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Deb2viz: Debiasing gender in word embedding data using subspace visualization", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "E O Gyamfi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Gou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Shao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "11th International Conference on Graphics and Image Processing", |
|
"volume": "2019", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1117/12.2557465" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "E O Gyamfi, Y Rao, M Gou, and Y Shao. 2020. Deb2viz: Debiasing gender in word embedding data using subspace visualization. In 11th Interna- tional Conference on Graphics and Image Processing, ICGIP 2019, volume 11373, School of Information and Software Engineering, University of Electronic Science and Technology of China Chengdu, Sichuan, 610054, China. SPIE.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Bad Company-Neighborhoods in neural embedding spaces considered harmful", |
|
"authors": [ |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Hellrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Udo", |
|
"middle": [], |
|
"last": "Hahn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of COL-ING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2785--2796", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johannes Hellrich and Udo Hahn. 2016. Bad Company-Neighborhoods in neural embedding spaces considered harmful. In Proceedings of COL- ING 2016, the 26th International Conference on Com- putational Linguistics: Technical Papers, pages 2785- 2796, Osaka, Japan. The COLING 2016 Organizing Committee.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "A survey of performance optimization for mobile applications", |
|
"authors": [ |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Hort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Kechagia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Federica", |
|
"middle": [], |
|
"last": "Sarro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Harman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "IEEE Transactions on Software Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--1", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TSE.2021.3071193" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Max Hort, Maria Kechagia, Federica Sarro, and Mark Harman. 2021. A survey of performance optimiza- tion for mobile applications. IEEE Transactions on Software Engineering, pages 1-1.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Delta-training: Simple semi-supervised text classification using pretrained word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Hwiyeol", |
|
"middle": [], |
|
"last": "Jo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ceyda", |
|
"middle": [], |
|
"last": "Cinarel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3458--3463", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hwiyeol Jo and Ceyda Cinarel. 2019. Delta-training: Simple semi-supervised text classification using pre- trained word embeddings. In Proceedings of the 2019 Conference on Empirical Methods in Natu- ral Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3458-3463.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "English colour terms carry gender and valence biases: A corpus study using word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Jonauskaite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sutton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Cristianini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mohr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "PLoS ONE", |
|
"volume": "16", |
|
"issue": "6", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1371/journal.pone.0251559" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D Jonauskaite, A Sutton, N Cristianini, and C Mohr. 2021. English colour terms carry gender and va- lence biases: A corpus study using word embeddings. PLoS ONE, 16(6 June).", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Speech and language processing: An introduction to natural language processing, computational linguistics, and speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Martin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Jurafsky and James H. Martin. 2020. Speech and language processing: An introduction to nat- ural language processing, computational linguis- tics, and speech recognition, 3rd edition draft. Https://web.stanford.edu/ jurafsky/slp3/.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Gender-preserving Debiasing for Pre-trained Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Masahiro", |
|
"middle": [], |
|
"last": "Kaneko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danushka", |
|
"middle": [], |
|
"last": "Bollegala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "57th Annual Meeting of the Association for Computational Linguistics, ACL 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1641--1650", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/p19-1160" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Masahiro Kaneko and Danushka Bollegala. 2019. Gender-preserving Debiasing for Pre-trained Word Embeddings. 57th Annual Meeting of the Associa- tion for Computational Linguistics, ACL 2019, pages 1641-1650.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Debiasing pre-trained contextualised embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Masahiro", |
|
"middle": [], |
|
"last": "Kaneko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danushka", |
|
"middle": [], |
|
"last": "Bollegala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1256--1266", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Masahiro Kaneko and Danushka Bollegala. 2021. De- biasing pre-trained contextualised embeddings. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Lin- guistics: Main Volume, pages 1256-1266.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "Conceptor debiasing of word representations evaluated on weat", |
|
"authors": [ |
|
{ |
|
"first": "Saket", |
|
"middle": [], |
|
"last": "Karve", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lyle", |
|
"middle": [], |
|
"last": "Ungar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jo\u00e3o", |
|
"middle": [], |
|
"last": "Sedoc", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the First Workshop on Gender Bias in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "40--48", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saket Karve, Lyle Ungar, and Jo\u00e3o Sedoc. 2019. Con- ceptor debiasing of word representations evaluated on weat. In Proceedings of the First Workshop on Gender Bias in Natural Language Processing, pages 40-48.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "Procedures for performing systematic reviews", |
|
"authors": [ |
|
{ |
|
"first": "Barbara", |
|
"middle": [], |
|
"last": "Kitchenham", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "1--26", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barbara Kitchenham. 2004. Procedures for performing systematic reviews. Keele, UK, Keele University, 33(2004):1-26.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "Fair embedding engine: A library for analyzing and mitigating gender bias in word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Vaibhav", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tenzin Singhay", |
|
"middle": [], |
|
"last": "Bhotia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.13168" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vaibhav Kumar and Tenzin Singhay Bhotia. 2020. Fair embedding engine: A library for analyzing and mit- igating gender bias in word embeddings. arXiv preprint arXiv:2010.13168.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "Nurse is Closer to Woman than Surgeon? Mitigating Gender-Biased Proximities in Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Vaibhav", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tenzin", |
|
"middle": [], |
|
"last": "Singhay Bhotia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vaibhav", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanmoy", |
|
"middle": [], |
|
"last": "Chakraborty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "486--503", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00327" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vaibhav Kumar, Tenzin Singhay Bhotia, Vaibhav Ku- mar, and Tanmoy Chakraborty. 2020. Nurse is Closer to Woman than Surgeon? Mitigating Gender-Biased Proximities in Word Embeddings. Transactions of the Association for Computational Linguistics, 8:486- 503.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "Measuring bias in contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Keita", |
|
"middle": [], |
|
"last": "Kurita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nidhi", |
|
"middle": [], |
|
"last": "Vyas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ayush", |
|
"middle": [], |
|
"last": "Pareek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Black", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the First Workshop on Gender Bias in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "166--172", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keita Kurita, Nidhi Vyas, Ayush Pareek, Alan W Black, and Yulia Tsvetkov. 2019. Measuring bias in contex- tualized word representations. In Proceedings of the First Workshop on Gender Bias in Natural Language Processing, pages 166-172.", |
|
"links": null |
|
}, |
|
"BIBREF58": { |
|
"ref_id": "b58", |
|
"title": "Are we consistently biased? multidimensional analysis of biases in distributional word vectors", |
|
"authors": [ |
|
{ |
|
"first": "Anne", |
|
"middle": [], |
|
"last": "Lauscher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Glava\u0161", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Eighth Joint Conference on Lexical and Computational Semantics (* SEM 2019)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "85--91", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anne Lauscher and Goran Glava\u0161. 2019. Are we con- sistently biased? multidimensional analysis of biases in distributional word vectors. In Proceedings of the Eighth Joint Conference on Lexical and Computa- tional Semantics (* SEM 2019), pages 85-91.", |
|
"links": null |
|
}, |
|
"BIBREF59": { |
|
"ref_id": "b59", |
|
"title": "A General Framework for Implicit and Explicit Debiasing of Distributional Word Vector Spaces", |
|
"authors": [ |
|
{ |
|
"first": "Anne", |
|
"middle": [], |
|
"last": "Lauscher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Glava\u0161", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [ |
|
"Paolo" |
|
], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "34th AAAI Conference on Artificial Intelligence", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "8131--8138", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1609/aaai.v34i05.6325" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anne Lauscher, Goran Glava\u0161, Simone Paolo Ponzetto, and Ivan Vuli\u0107. 2019. A General Framework for Im- plicit and Explicit Debiasing of Distributional Word Vector Spaces. 34th AAAI Conference on Artificial Intelligence, AAAI 2020, 34(05):8131-8138.", |
|
"links": null |
|
}, |
|
"BIBREF60": { |
|
"ref_id": "b60", |
|
"title": "Araweat: Multidimensional analysis of biases in arabic word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Anne", |
|
"middle": [], |
|
"last": "Lauscher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rafik", |
|
"middle": [], |
|
"last": "Takieddin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [ |
|
"Paolo" |
|
], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Glava\u0161", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Fifth Arabic Natural Language Processing Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "192--199", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anne Lauscher, Rafik Takieddin, Simone Paolo Ponzetto, and Goran Glava\u0161. 2020. Araweat: Mul- tidimensional analysis of biases in arabic word em- beddings. In Proceedings of the Fifth Arabic Natural Language Processing Workshop, pages 192-199.", |
|
"links": null |
|
}, |
|
"BIBREF61": { |
|
"ref_id": "b61", |
|
"title": "Gender bias in dictionary-derived word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edward Lee. 2020. Gender bias in dictionary-derived word embeddings. Technical report.", |
|
"links": null |
|
}, |
|
"BIBREF62": { |
|
"ref_id": "b62", |
|
"title": "Improving distributional similarity with lessons learned from word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "211--225", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00134" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omer Levy, Yoav Goldberg, and Ido Dagan. 2015. Im- proving distributional similarity with lessons learned from word embeddings. Transactions of the Associa- tion for Computational Linguistics, 3:211-225.", |
|
"links": null |
|
}, |
|
"BIBREF63": { |
|
"ref_id": "b63", |
|
"title": "Word Embedding for Understanding Natural Language: A Survey", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "83--104", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-319-53817-4_4" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Li and Tao Yang. 2018. Word Embedding for Understanding Natural Language: A Survey, pages 83-104. Springer International Publishing, Cham.", |
|
"links": null |
|
}, |
|
"BIBREF64": { |
|
"ref_id": "b64", |
|
"title": "Black is to Criminal as Caucasian is to Police: Detecting and Removing Multiclass Bias in Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Manzini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chong", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Lim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Black", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL HLT 2019", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "615--621", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/n19-1062" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Manzini, Yao Chong Lim, Yulia Tsvetkov, and Alan W. Black. 2019. Black is to Criminal as Cau- casian is to Police: Detecting and Removing Multi- class Bias in Word Embeddings. 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL HLT 2019, 1:615-621.", |
|
"links": null |
|
}, |
|
"BIBREF65": { |
|
"ref_id": "b65", |
|
"title": "A survey of app store analysis for software engineering", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Federica", |
|
"middle": [], |
|
"last": "Sarro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuanyuan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Harman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "IEEE Transactions on Software Engineering", |
|
"volume": "43", |
|
"issue": "9", |
|
"pages": "817--847", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TSE.2016.2630689" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Martin, Federica Sarro, Yue Jia, Yuanyuan Zhang, and Mark Harman. 2017. A survey of app store analysis for software engineering. IEEE Trans- actions on Software Engineering, 43(9):817-847.", |
|
"links": null |
|
}, |
|
"BIBREF66": { |
|
"ref_id": "b66", |
|
"title": "On Measuring Social Biases in Sentence Encoders", |
|
"authors": [ |
|
{ |
|
"first": "Chandler", |
|
"middle": [], |
|
"last": "May", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shikha", |
|
"middle": [], |
|
"last": "Bordia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL HLT 2019", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "622--628", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/n19-1063" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chandler May, Alex Wang, Shikha Bordia, Samuel R. Bowman, and Rachel Rudinger. 2019. On Measuring Social Biases in Sentence Encoders. 2019 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, NAACL HLT 2019, 1:622-628.", |
|
"links": null |
|
}, |
|
"BIBREF67": { |
|
"ref_id": "b67", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "1st International Conference on Learning Representations, ICLR 2013 -Workshop Track Proceedings. International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient estimation of word representa- tions in vector space. In 1st International Conference on Learning Representations, ICLR 2013 -Work- shop Track Proceedings. International Conference on Learning Representations, ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF68": { |
|
"ref_id": "b68", |
|
"title": "Reducing Word Embedding Bias Using Learned Latent Structure", |
|
"authors": [ |
|
{ |
|
"first": "Harshit", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "AI for Social Good Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Harshit Mishra. 2020. Reducing Word Embedding Bias Using Learned Latent Structure. In AI for Social Good Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF69": { |
|
"ref_id": "b69", |
|
"title": "Towards lower bounds on number of dimensions for word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Patel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "31--36", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Patel and Pushpak Bhattacharyya. 2017. Towards lower bounds on number of dimensions for word em- beddings. In Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 31-36, Taipei, Tai- wan. Asian Federation of Natural Language Process- ing.", |
|
"links": null |
|
}, |
|
"BIBREF70": { |
|
"ref_id": "b70", |
|
"title": "GloVe: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "EMNLP 2014 -2014 Conference on Empirical Methods in Natural Language Processing, Proceedings of the Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/d14-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. GloVe: Global vectors for word rep- resentation. In EMNLP 2014 -2014 Conference on Empirical Methods in Natural Language Processing, Proceedings of the Conference, pages 1532-1543. Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF71": { |
|
"ref_id": "b71", |
|
"title": "Towards qualitative word embeddings evaluation: Measuring neighbors variation", |
|
"authors": [ |
|
{ |
|
"first": "B\u00e9n\u00e9dicte", |
|
"middle": [], |
|
"last": "Pierrejean", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ludovic", |
|
"middle": [], |
|
"last": "Tanguy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Student Research Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "32--39", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-4005" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B\u00e9n\u00e9dicte Pierrejean and Ludovic Tanguy. 2018. To- wards qualitative word embeddings evaluation: Mea- suring neighbors variation. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Stu- dent Research Workshop, pages 32-39, New Orleans, Louisiana, USA. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF72": { |
|
"ref_id": "b72", |
|
"title": "Joint Multiclass Debiasing of Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Radomir", |
|
"middle": [], |
|
"last": "Popovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Lemmerich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Strohmaier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "25th International Symposium on Methodologies for Intelligent Systems", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "79--89", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-030-59491-6_8" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Radomir Popovi\u0107, Florian Lemmerich, and Markus Strohmaier. 2020. Joint Multiclass Debiasing of Word Embeddings. 25th International Symposium on Methodologies for Intelligent Systems, ISMIS 2020, 12117 LNAI:79-89.", |
|
"links": null |
|
}, |
|
"BIBREF73": { |
|
"ref_id": "b73", |
|
"title": "Pretrained models for natural language processing: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Xipeng", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianxiang", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yige", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunfan", |
|
"middle": [], |
|
"last": "Shao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ning", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuanjing", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Science China Technological Sciences", |
|
"volume": "63", |
|
"issue": "10", |
|
"pages": "1872--1897", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s11431-020-1647-3" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "XiPeng Qiu, TianXiang Sun, YiGe Xu, YunFan Shao, Ning Dai, and XuanJing Huang. 2020. Pre- trained models for natural language processing: A survey. Science China Technological Sciences, 63(10):1872-1897.", |
|
"links": null |
|
}, |
|
"BIBREF74": { |
|
"ref_id": "b74", |
|
"title": "A topic-enhanced word embedding for twitter sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "Yafeng", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruimin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donghong", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Information Sciences", |
|
"volume": "369", |
|
"issue": "", |
|
"pages": "188--198", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.ins.2016.06.040" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yafeng Ren, Ruimin Wang, and Donghong Ji. 2016. A topic-enhanced word embedding for twitter sentiment classification. Information Sciences, 369:188-198.", |
|
"links": null |
|
}, |
|
"BIBREF75": { |
|
"ref_id": "b75", |
|
"title": "A case study of natural gender phenomena in translation. a comparison of google translate, bing microsoft translator and deepl for english to italian, french and spanish", |
|
"authors": [ |
|
{ |
|
"first": "Argentina", |
|
"middle": [ |
|
"Anna" |
|
], |
|
"last": "Rescigno", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Vanmassenhove", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johanna", |
|
"middle": [], |
|
"last": "Monti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Way", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "CLiC-it", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Argentina Anna Rescigno, Eva Vanmassenhove, Jo- hanna Monti, and Andy Way. 2020. A case study of natural gender phenomena in translation. a compari- son of google translate, bing microsoft translator and deepl for english to italian, french and spanish. In CLiC-it.", |
|
"links": null |
|
}, |
|
"BIBREF76": { |
|
"ref_id": "b76", |
|
"title": "Sentiment analysis based on improved pre-trained word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Rouhollah", |
|
"middle": [], |
|
"last": "Seyed Mahdi Rezaeinia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Rahmani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hadi", |
|
"middle": [], |
|
"last": "Ghodsi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Veisi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Expert Systems with Applications", |
|
"volume": "117", |
|
"issue": "", |
|
"pages": "139--147", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.eswa.2018.08.044" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seyed Mahdi Rezaeinia, Rouhollah Rahmani, Ali Gh- odsi, and Hadi Veisi. 2019. Sentiment analysis based on improved pre-trained word embeddings. Expert Systems with Applications, 117:139-147.", |
|
"links": null |
|
}, |
|
"BIBREF77": { |
|
"ref_id": "b77", |
|
"title": "2020. 'thy algorithm shalt not bear false witness': An evaluation of multiclass debiasing methods on word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Thalea", |
|
"middle": [], |
|
"last": "Schlender", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerasimos", |
|
"middle": [], |
|
"last": "Spanakis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Benelux Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "141--156", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thalea Schlender and Gerasimos Spanakis. 2020. 'thy algorithm shalt not bear false witness': An evalua- tion of multiclass debiasing methods on word embed- dings. In Benelux Conference on Artificial Intelli- gence, pages 141-156. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF78": { |
|
"ref_id": "b78", |
|
"title": "Neutralizing gender bias in word embeddings with latent disentanglement and counterfactual generation", |
|
"authors": [ |
|
{ |
|
"first": "Seungjae", |
|
"middle": [], |
|
"last": "Shin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyungwoo", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joonho", |
|
"middle": [], |
|
"last": "Jang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hyemi", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weonyoung", |
|
"middle": [], |
|
"last": "Joo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Il-Chul", |
|
"middle": [], |
|
"last": "Moon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3126--3140", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seungjae Shin, Kyungwoo Song, JoonHo Jang, Hyemi Kim, Weonyoung Joo, and Il-Chul Moon. 2020. Neu- tralizing gender bias in word embeddings with latent disentanglement and counterfactual generation. In Findings of the Association for Computational Lin- guistics: EMNLP 2020, pages 3126-3140.", |
|
"links": null |
|
}, |
|
"BIBREF79": { |
|
"ref_id": "b79", |
|
"title": "Room to Glo: A systematic comparison of semantic change detection approaches with word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Philippa", |
|
"middle": [], |
|
"last": "Shoemark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ferdousi", |
|
"middle": [], |
|
"last": "Farhana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Liza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Scott", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barbara", |
|
"middle": [], |
|
"last": "Hale", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mcgillivray", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "66--76", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1007" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philippa Shoemark, Farhana Ferdousi Liza, Dong Nguyen, Scott Hale, and Barbara McGillivray. 2019. Room to Glo: A systematic comparison of semantic change detection approaches with word embeddings. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 66-76, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF80": { |
|
"ref_id": "b80", |
|
"title": "Argument from old man's view: Assessing social bias in argumentation", |
|
"authors": [ |
|
{ |
|
"first": "Maximilian", |
|
"middle": [], |
|
"last": "Splieth\u00f6ver", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Henning", |
|
"middle": [], |
|
"last": "Wachsmuth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 7th Workshop on Argument Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "76--87", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maximilian Splieth\u00f6ver and Henning Wachsmuth. 2020. Argument from old man's view: Assessing social bias in argumentation. In Proceedings of the 7th Workshop on Argument Mining, pages 76-87.", |
|
"links": null |
|
}, |
|
"BIBREF81": { |
|
"ref_id": "b81", |
|
"title": "Energy and policy considerations for deep learning in NLP", |
|
"authors": [ |
|
{ |
|
"first": "Emma", |
|
"middle": [], |
|
"last": "Strubell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ananya", |
|
"middle": [], |
|
"last": "Ganesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emma Strubell, Ananya Ganesh, and Andrew McCal- lum. 2019. Energy and policy considerations for deep learning in NLP. CoRR, abs/1906.02243.", |
|
"links": null |
|
}, |
|
"BIBREF82": { |
|
"ref_id": "b82", |
|
"title": "Biased embeddings from wild data: Measuring, understanding and removing", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Sutton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Lansdall-Welfare", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nello", |
|
"middle": [], |
|
"last": "Cristianini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Symposium on Intelligent Data Analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "328--339", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Sutton, Thomas Lansdall-Welfare, and Nello Cristianini. 2018. Biased embeddings from wild data: Measuring, understanding and removing. In Interna- tional Symposium on Intelligent Data Analysis, pages 328-339. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF83": { |
|
"ref_id": "b83", |
|
"title": "A transparent framework for evaluating unintended demographic bias in word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Sweeney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maryam", |
|
"middle": [], |
|
"last": "Najafian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1662--1667", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Sweeney and Maryam Najafian. 2019. A trans- parent framework for evaluating unintended demo- graphic bias in word embeddings. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 1662-1667, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF84": { |
|
"ref_id": "b84", |
|
"title": "Reducing Sentiment Polarity for Demographic Attributes in Word Embeddings using Adversarial Learning", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Sweeney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maryam", |
|
"middle": [], |
|
"last": "Najafian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "3rd ACM Conference on Fairness, Accountability, and Transparency, FAT* 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "359--368", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3351095.3372837" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Sweeney and Maryam Najafian. 2020. Reduc- ing Sentiment Polarity for Demographic Attributes in Word Embeddings using Adversarial Learning. In 3rd ACM Conference on Fairness, Accountabil- ity, and Transparency, FAT* 2020, pages 359-368, MIT, Cambridge, MA, United States. Association for Computing Machinery, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF85": { |
|
"ref_id": "b85", |
|
"title": "Assessing Social and Intersectional Biases in Contextualized Word Representations. 33rd Annual Conference on Neural Information Processing Systems", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Chern Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"Elisa" |
|
], |
|
"last": "Celis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Chern Tan and L. Elisa Celis. 2019. Assessing Social and Intersectional Biases in Contextualized Word Representations. 33rd Annual Conference on Neural Information Processing Systems, NeurIPS 2019, 32.", |
|
"links": null |
|
}, |
|
"BIBREF86": { |
|
"ref_id": "b86", |
|
"title": "Learning sentiment-specific word embedding for Twitter sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "Duyu", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1555--1565", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P14-1146" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Duyu Tang, Furu Wei, Nan Yang, Ming Zhou, Ting Liu, and Bing Qin. 2014. Learning sentiment-specific word embedding for Twitter sentiment classification. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1555-1565, Baltimore, Mary- land. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF87": { |
|
"ref_id": "b87", |
|
"title": "Exploring the Linear Subspace Hypothesis in Gender Bias Mitigation", |
|
"authors": [ |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Vargas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2902--2913", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.232" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Francisco Vargas and Ryan Cotterell. 2020. Explor- ing the Linear Subspace Hypothesis in Gender Bias Mitigation. Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2902-2913.", |
|
"links": null |
|
}, |
|
"BIBREF88": { |
|
"ref_id": "b88", |
|
"title": "Introducing cad: the contextual abuse dataset", |
|
"authors": [ |
|
{ |
|
"first": "Bertie", |
|
"middle": [], |
|
"last": "Vidgen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Helen", |
|
"middle": [], |
|
"last": "Margetts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patricia", |
|
"middle": [], |
|
"last": "Rossini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebekah", |
|
"middle": [], |
|
"last": "Tromble", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2289--2303", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bertie Vidgen, Dong Nguyen, Helen Margetts, Patri- cia Rossini, Rebekah Tromble, Kristina Toutanova, Anna Rumshisky, Luke Zettlemoyer, Dilek Hakkani- Tur, Iz Beltagy, et al. 2021. Introducing cad: the contextual abuse dataset. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2289-2303. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF89": { |
|
"ref_id": "b89", |
|
"title": "Are all good word vector spaces isomorphic? CoRR", |
|
"authors": [ |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vulic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ivan Vulic, Sebastian Ruder, and Anders S\u00f8gaard. 2020. Are all good word vector spaces isomorphic? CoRR, abs/2004.04070.", |
|
"links": null |
|
}, |
|
"BIBREF90": { |
|
"ref_id": "b90", |
|
"title": "Directional bias amplification", |
|
"authors": [ |
|
{ |
|
"first": "Angelina", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Russakovsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Angelina Wang and Olga Russakovsky. 2021. Direc- tional bias amplification. CoRR, abs/2102.12594.", |
|
"links": null |
|
}, |
|
"BIBREF91": { |
|
"ref_id": "b91", |
|
"title": "Double-Hard Debias: Tailoring Word Embeddings for Gender Bias Mitigation", |
|
"authors": [ |
|
{ |
|
"first": "Tianlu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xi", |
|
"middle": [], |
|
"last": "Victoria Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazneen", |
|
"middle": [], |
|
"last": "Fatema Rajani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Mccann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vicente", |
|
"middle": [], |
|
"last": "Ordonez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5443--5453", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.484" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianlu Wang, Xi Victoria Lin, Nazneen Fatema Ra- jani, Bryan McCann, Vicente Ordonez, and Caim- ing Xiong. 2020. Double-Hard Debias: Tailoring Word Embeddings for Gender Bias Mitigation. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 5443- 5453, Stroudsburg, PA, USA. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF92": { |
|
"ref_id": "b92", |
|
"title": "Factors influencing the surprising instability of word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Wendlandt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Kummerfeld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/n18-1190" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laura Wendlandt, Jonathan K. Kummerfeld, and Rada Mihalcea. 2018. Factors influencing the surprising instability of word embeddings. Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 1 (Long Pa- pers).", |
|
"links": null |
|
}, |
|
"BIBREF93": { |
|
"ref_id": "b93", |
|
"title": "Guidelines for snowballing in systematic literature studies and a replication in software engineering", |
|
"authors": [ |
|
{ |
|
"first": "Claes", |
|
"middle": [], |
|
"last": "Wohlin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 18th International Conference on Evaluation and Assessment in Software Engineering, EASE '14", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2601248.2601268" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Claes Wohlin. 2014. Guidelines for snowballing in sys- tematic literature studies and a replication in software engineering. In Proceedings of the 18th International Conference on Evaluation and Assessment in Soft- ware Engineering, EASE '14, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF94": { |
|
"ref_id": "b94", |
|
"title": "A Causal Inference Method for Reducing Gender Bias in Word Embedding Relations", |
|
"authors": [ |
|
{ |
|
"first": "Zekun", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "34th AAAI Conference on Artificial Intelligence", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "9434--9441", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1609/aaai.v34i05.6486" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zekun Yang and Juan Feng. 2019. A Causal Inference Method for Reducing Gender Bias in Word Embed- ding Relations. 34th AAAI Conference on Artificial Intelligence, AAAI 2020, 34(05):9434-9441.", |
|
"links": null |
|
}, |
|
"BIBREF95": { |
|
"ref_id": "b95", |
|
"title": "Robustness and reliability of gender bias assessment in word embeddings: The role of base pairs", |
|
"authors": [ |
|
{ |
|
"first": "Haiyang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alison", |
|
"middle": [], |
|
"last": "Sneyd", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Stevenson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 1st Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 10th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "759--769", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haiyang Zhang, Alison Sneyd, and Mark Stevenson. 2020. Robustness and reliability of gender bias as- sessment in word embeddings: The role of base pairs. In Proceedings of the 1st Conference of the Asia- Pacific Chapter of the Association for Computational Linguistics and the 10th International Joint Confer- ence on Natural Language Processing, pages 759- 769.", |
|
"links": null |
|
}, |
|
"BIBREF96": { |
|
"ref_id": "b96", |
|
"title": "Men also like shopping: Reducing gender bias amplification using corpus-level constraints", |
|
"authors": [ |
|
{ |
|
"first": "Jieyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianlu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vicente", |
|
"middle": [], |
|
"last": "Ordonez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jieyu Zhao, Tianlu Wang, Mark Yatskar, Vicente Or- donez, and Kai-Wei Chang. 2017. Men also like shopping: Reducing gender bias amplification us- ing corpus-level constraints. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF97": { |
|
"ref_id": "b97", |
|
"title": "Learning Gender-Neutral Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Jieyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yichao", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeyu", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4847--4853", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jieyu Zhao, Yichao Zhou, Zeyu Li, Wei Wang, and Kai- Wei Chang. 2018. Learning Gender-Neutral Word Embeddings. Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, EMNLP 2018, pages 4847-4853.", |
|
"links": null |
|
}, |
|
"BIBREF98": { |
|
"ref_id": "b98", |
|
"title": "Examining Gender Bias in Languages with Grammatical Gender", |
|
"authors": [ |
|
{ |
|
"first": "Pei", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weijia", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jieyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kuan-Hao", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Muhao", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.48550/arxiv.1909.02224" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pei Zhou, Weijia Shi, Jieyu Zhao, Kuan-Hao Huang, Muhao Chen, Ryan Cotterell, and Kai-Wei Chang. 2019. Examining Gender Bias in Languages with Grammatical Gender. EMNLP-IJCNLP 2019 -2019", |
|
"links": null |
|
}, |
|
"BIBREF99": { |
|
"ref_id": "b99", |
|
"title": "Conference on Empirical Methods in Natural Language Processing and 9th International Joint Conference on Natural Language Processing", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5276--5284", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Conference on Empirical Methods in Natural Lan- guage Processing and 9th International Joint Confer- ence on Natural Language Processing, pages 5276- 5284.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "(a) Collected pre-trained static word embedding models. (b) Collected bias metrics." |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Publications investigating fairness on pretrained static word embedding model" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "(a) WEAT scores for GloVe Twitter (b) SEMBIAS scores for GloVe Twitter (c) DIRECT BIAS and ECT scores for GloVe Twitter (d) WEAT scores for GloVe Wiki Gigaword (e) SEMBIAS scores for GloVe Wiki Gigaword (f) DIRECT BIAS and ECT scores for GloVe Wiki Gigaword Figure 2: Bias scores with respect to the vector length." |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Keywords defined from the preliminary search." |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Repository search results." |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Pre-trained word embeddings learned on different sources provided by GloVe, word2vec, and fastText." |
|
}, |
|
"TABREF7": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>Test</td><td>Target Sets</td><td>Attribute Sets</td></tr><tr><td>3</td><td>European American names vs African American names (5)</td><td>Pleasant vs Unpleasant (5)</td></tr><tr><td>4</td><td>European American names vs African American names (7)</td><td>Pleasant vs Unpleasant (5)</td></tr><tr><td>5</td><td>European American names vs African American names (7)</td><td>Pleasant vs Unpleasant (9)</td></tr><tr><td>6</td><td colspan=\"2\">Male names vs Female names Career vs Family</td></tr><tr><td>7</td><td>Math vs Arts</td><td>Male terms vs Female Terms</td></tr><tr><td>8</td><td>Science vs Arts</td><td>Male terms vs Female Terms</td></tr><tr><td>10</td><td>Young people's names vs Old people's names</td><td>Pleasant vs Unpleasant (9)</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Data Fields Used during Repository Search B WEAT Target and Attribute Sets" |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "" |
|
} |
|
} |
|
} |
|
} |