|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:35:24.916928Z" |
|
}, |
|
"title": "Context-sensitive evaluation of automatic speech recognition: considering user experience & language variation", |
|
"authors": [ |
|
{ |
|
"first": "Nina", |
|
"middle": [], |
|
"last": "Markl", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Edinburgh", |
|
"location": {} |
|
}, |
|
"email": "nina.markl@ed.ac.uk" |
|
}, |
|
{ |
|
"first": "Catherine", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Centre for Speech Technology Research Linguistics and English Language", |
|
"institution": "The University of Edinburgh", |
|
"location": {} |
|
}, |
|
"email": "c.lai@ed.ac.uk" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Commercial Automatic Speech Recognition (ASR) systems tend to show systemic predictive bias for marginalised speaker/user groups. We highlight the need for an interdisciplinary and context-sensitive approach to documenting this bias incorporating perspectives and methods from sociolinguistics, speech & language technology and human-computer interaction in the context of a case study. We argue evaluation of ASR systems should be disaggregated by speaker group, include qualitative error analysis, and consider user experience in a broader sociolinguistic and social context.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Commercial Automatic Speech Recognition (ASR) systems tend to show systemic predictive bias for marginalised speaker/user groups. We highlight the need for an interdisciplinary and context-sensitive approach to documenting this bias incorporating perspectives and methods from sociolinguistics, speech & language technology and human-computer interaction in the context of a case study. We argue evaluation of ASR systems should be disaggregated by speaker group, include qualitative error analysis, and consider user experience in a broader sociolinguistic and social context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Automatic Speech Recognition (ASR) has become a common tool in human-computer interaction, enabling, for example, voice user interfaces and (imperfect) automatic captioning of multimedia content. As with other language technologies (e.g. Sap et al., 2019; Blodgett and O'Connor, 2017) , rapid improvements in performance have not been equal for different user groups. As Blodgett et al. (2020) show, discussions of this \"bias\" are often poorly defined, not grounded in explicit normative judgments and divorced from socio-historical contexts, origins and harms of the system behaviours. In this paper, we argue that researchers at the intersection of speech and language technologies (SLT), humancomputer interaction (HCI), and sociolinguistics are well-placed to consider the experiences and social context of different speaker/user groups in critical quantitative and qualitative evaluations of ASR systems. Knowledge about language variation and its relation to society coupled with expertise from HCI allows us to understand how predictive biases reflect larger social structures and ideologies about language, and how they affect users.", |
|
"cite_spans": [ |
|
{ |
|
"start": 238, |
|
"end": 255, |
|
"text": "Sap et al., 2019;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 256, |
|
"end": 284, |
|
"text": "Blodgett and O'Connor, 2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 371, |
|
"end": 393, |
|
"text": "Blodgett et al. (2020)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "After presenting prior work on language variation and ASR, a case study of self-recorded audio diaries collected for the Lothian Diary Project 1 highlights the need for a context-sensitive approach to ASR evaluation which we outline.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Blodgett et al. (2020)'s critique notwithstanding, predictive bias, defined here as error and outcome disparities for different user groups (Shah et al., 2020) , has become a research focus in SLT and other machine learning fields as applications are extended to high-stakes contexts such as hiring, policing and banking where they have been shown to (re)produce structural inequalities (see e.g. Benjamin, 2019). Predictive bias also appears to be prevalent in commercial ASR systems for English 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 159, |
|
"text": "(Shah et al., 2020)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Language variation, bias and ASR", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Recent work describes stark racial bias in commercial American English ASR systems (including Google's Cloud Speech) (Koenecke et al., 2020) , with much higher word error rates (WER) 3 for speakers of African American English (AAE) than white speakers of (Californian) American English. Notably, these types of error disparities appear to be driven by under-representation of AAE training data both for the acoustic modelling (Koenecke et al., 2020) and the language model used to decode sequences of phones into utterances (Martin and Tang, 2020) . \"Regional\" variation has also been reported as a source of unequal performance, with particularly high error rates reported on YouTube's captions for speakers from Scotland and (the US state) Georgia (Tatman, 2017) . Similar to more recent work, YouTube captions have been found to perform worse for African American speakers (Tatman and Kasten, 2017) . These problems are not limited to proprietary systems, as Mozilla's open source system DeepSpeech performs significantly worse for speakers of Indian English than \"American English\" 4 (Meyer et al., 2020) , and also fails to transcribe AAE morpho-syntactic variation correctly (Martin and Tang, 2020) . While some early research has suggested ASR performance differences based on (binary) speaker gender (Adda-Decker and Lamel, 2005; Benzeghiba et al., 2007; Tatman, 2017) , it is unclear that gender by itself is a significant factor in recent systems (Tatman and Kasten, 2017; Meyer et al., 2020) . Koenecke et al. (2020) suggest that the interaction of gender and race is significant, with differences between Black men and Black women being more significant than between white men and white women or men and women across race 5 . These results appear to be linked to speaker's speech styles (e.g. \nin Adda-Decker and Lamel, 2005 ) and use of dialect features (Koenecke et al., 2020) , both of which have long been documented to pattern with gender (see Labov, 1990 , for a classic paper) and could be correlated with gender in training and test sets. Other work in this space has focused on the potential of ASR to improve accessibility of audio media and digital technologies, looking at experiences of Deaf and hard of hearing users (Glasser, 2019) and dysarthric speakers (De Russis and Corno, 2019; Young and Mihailidis, 2010). For both groups commercial ASR systems perform quite poorly, though the severity and amount of errors varies by speaker. Research on predictive bias in commercial ASR for regional varieties of English beyond the United States and in the context of systems not exclusively trained on American English, as well as experiences of second language learners of English, and other groups who are potentially particularly reliant on ASR to access computing technologies such as elderly people, is sparse.",
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 140, |
|
"text": "(Koenecke et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 426, |
|
"end": 449, |
|
"text": "(Koenecke et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 524, |
|
"end": 547, |
|
"text": "(Martin and Tang, 2020)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 750, |
|
"end": 764, |
|
"text": "(Tatman, 2017)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 876, |
|
"end": 901, |
|
"text": "(Tatman and Kasten, 2017)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 1088, |
|
"end": 1108, |
|
"text": "(Meyer et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1181, |
|
"end": 1204, |
|
"text": "(Martin and Tang, 2020)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1308, |
|
"end": 1337, |
|
"text": "(Adda-Decker and Lamel, 2005;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1338, |
|
"end": 1362, |
|
"text": "Benzeghiba et al., 2007;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1363, |
|
"end": 1376, |
|
"text": "Tatman, 2017)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 1457, |
|
"end": 1482, |
|
"text": "(Tatman and Kasten, 2017;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 1483, |
|
"end": 1502, |
|
"text": "Meyer et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1505, |
|
"end": 1527, |
|
"text": "Koenecke et al. (2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1808, |
|
"end": 1835, |
|
"text": "Adda-Decker and Lamel, 2005", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1866, |
|
"end": 1889, |
|
"text": "(Koenecke et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1960, |
|
"end": 1971, |
|
"text": "Labov, 1990", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 2242, |
|
"end": 2257, |
|
"text": "(Glasser, 2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Language variation, bias and ASR", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "From a linguistic perspective, no language variety or speech style is inherently more difficult, incorrect, or inappropriate than any other. There are, however, powerful ideologies regarding the relative status of different varieties and styles which are rooted in broader socio-historical contexts and reflect the social status of the groups who speak them (Woolard and Schieffelin, 1994) . In addition to being stigmatised in \"traditional\" contexts of power in society, varieties spoken by marginalised communities appear to be (not coincidentally) underrepresented in the data we use to build and evaluate speech technologies, leading to substantial predictive biases making speech technologies less accessible to already marginalised groups.", |
|
"cite_spans": [ |
|
{ |
|
"start": 358, |
|
"end": 389, |
|
"text": "(Woolard and Schieffelin, 1994)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Language variation, bias and ASR", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The Lothian Diary project is an ongoing interdisciplinary research project inviting residents of the Lothians region of Scotland to contribute self-recorded audio and video diaries about their experiences of the COVID-19 pandemic. The more than 120 diaries collected so far are highly variable in recording quality, number of speakers and topics discussed, and participants are diverse 6 in terms of age, gender, linguistic background, ethnicity, socio-economic class and level of education. Edinburgh and the surrounding Lothians region are of particular interest for sociolinguistic research because of the capital region's status as a centre for higher education, finance and tourism. In addition to the variation within Scottish English 7 between different areas and different socio-economic groups within the city, there is also a wide range of other first and second language varieties of English, as well as other languages. The Lothian Diary project also includes many of these other varieties of English, rather than focusing on speakers with long residential histories in a particular area (as is often the case in sociolinguistic work) or first language speakers (as is usually the case in SLT evaluation). The recordings form a highly naturalistic and exceptionally varied data set. ASR is used here to facilitate social science research which requires accurate and complete transcriptions (achieved through manual correction).",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lothian Diaries: A case study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "So far, 13 diaries submitted by participants who agreed to have them made public, have been processed with the Google Cloud Speech-to-Text API 8 (GC STT). Diaries (16 kHz FLAC files) were processed in their entirety using the model used for long audio files which uses asynchronous speech recognition. WER was computed separately for each speaker using sclite 9 . In the following section, we present a brief qualitative error analysis. WER for individual speakers varies dramatically (see Table 1 ). Some of these errors appear to be related to accent differences. For example, Scottish speakers' pronunciations of I or I've are frequently mistranscribed as ah or of and other accent-based errors include: cat [kaP] > car, living > leaving, hating our > heating are. However, there is also significant variation within each accent group. GC STT fails to transcribe filled pauses (uh, um) and word fragments and occasionally deletes false starts and repetitions. Furthermore, errors appear to be more prevalent in the vicinity of hesitations and repetitions. As a result speakers who produce more hesitations and repetitions tend to have higher error rates, while people who appear to read from prepared notes tend to be more fluent and have lower error rates. The highest WER in this sample derives from a recording by a Scottish English speaker who produces many false starts, word fragments and a number of Scots words (which the system likely would not recognise under any circumstances). Words are also often substituted by a wrong (but often grammatically appropriate) inflectional form (e.g. past tense > present tense).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 490, |
|
"end": 497, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Lothian Diaries: A case study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "All of these errors are particularly challenging for the accurate and complete transcription of spontaneous and conversational speech, especially for social science research where researchers (users) might consider hesitations, false starts and filled pauses important as they convey pragmatic information. Considering impacts of this predictive bias, transcripts of speakers who produce more \"fluent\" speech are much more easily interpretable. Retrieving speech content and speech style of less fluent speakers as well as some second language speakers, on the other hand, requires more labour and time, potentially negating any benefits of ASR.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lothian Diaries: A case study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To document predictive bias in ASR in a way that is mindful of 1) user experience, 2) socio-historical and (socio)linguistic context, 3) (potential) harms (re)produced by the system, and 4) technical aspects of ASR, we need to draw on methodologies and knowledge from HCI, sociolinguistics, research on fairness in AI, and SLT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed methods", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "ASR systems are usually evaluated in terms of their WER, for one or more unseen test sets (often including well-established benchmark sets). As seen in the case study above, word error rates vary strongly across individual recordings and speakers, and (benchmark) test sets (e.g. Barker et al., 2017) are becoming increasingly naturalistic and (potentially) diverse; a recent state-of-the-art system by Google (Chiu et al., 2018 ) was trained and tested on \"representative\" data drawn from Google's voicesearch traffic. However, even assuming that the test sets are representative of the developer's users, it is 1) not clear that the intended or current user base is reflective of all use cases or potential users (especially if the system is sold to third parties as with GC STT), and 2) possible or even likely that significant variation in performance between user groups is hidden by reporting an average across all tested recordings. Importantly, as Black feminist scholarship has pointed out, multiple demographic axes linked to interlocking structures of oppression (e.g. race and gender) cannot be considered separately (Crenshaw, 1991) . It is thus important that in addition to disaggregating by language variety to also consider, for example, gender to create an \"intersectional\" benchmark (see also Costanza-Chock, 2020) . This approach has been successful in highlighting disproportionate predictive bias for particular subgroups in other ML domains (e.g. darkerskinned women in facial analysis: Buolamwini and Gebru, 2018; Raji and Buolamwini, 2019) , and SLT (Jiang and Fellbaum, 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 280, |
|
"end": 300, |
|
"text": "Barker et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 410, |
|
"end": 428, |
|
"text": "(Chiu et al., 2018", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1129, |
|
"end": 1145, |
|
"text": "(Crenshaw, 1991)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1312, |
|
"end": 1333, |
|
"text": "Costanza-Chock, 2020)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1510, |
|
"end": 1537, |
|
"text": "Buolamwini and Gebru, 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1538, |
|
"end": 1564, |
|
"text": "Raji and Buolamwini, 2019)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1575, |
|
"end": 1601, |
|
"text": "(Jiang and Fellbaum, 2020)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Intersectional benchmarks", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "To apply an intersectional benchmark to a larger sample of the Lothian Diaries, we intend to match short audio snippets with the same reference transcript produced by different speaker groups to isolate pronunciation effects, and look systematically at potential differences in content and speech style (following Koenecke et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 336, |
|
"text": "Koenecke et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Intersectional benchmarks", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Intersectional benchmarks alone are not enough however, as WER does not account for the context or effect of an error. Understanding the context of errors is useful since errors are both more likely to occur and to be severe in particular phonetic, prosodic and lexical contexts. Like us (though working with a very different system and data), Goldwater et al. (2010) find that words before or after hesitations, repetitions and word fragments, turn-initial words and infrequent words are more likely to be misrecognised and that erroneous substitutions are often different forms of the same lexeme (e.g. ask/asked). While some of these errors can be easily disambiguated through context, others (e.g. can/can't) could be quite disruptive to communication. Word errors can also lead to domino effects, where one wrongly decoded word feeds into further erroneous predictions (Martin and Tang, 2020) . While metrics which are more sensitive to the type and context of the error or directly model human evaluations have been proposed (Nanjo and Kawahara, 2005; Morris et al., 2004; Mishra et al., 2011; Kafle and Huenerfauth, 2020) they are not widely adopted and extensive qualitative error analysis is rare. A context-sensitive approach would be particularly interested in the type of error and its effect given the linguistic context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 344, |
|
"end": 367, |
|
"text": "Goldwater et al. (2010)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 874, |
|
"end": 897, |
|
"text": "(Martin and Tang, 2020)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1031, |
|
"end": 1057, |
|
"text": "(Nanjo and Kawahara, 2005;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 1058, |
|
"end": 1078, |
|
"text": "Morris et al., 2004;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1079, |
|
"end": 1099, |
|
"text": "Mishra et al., 2011;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 1100, |
|
"end": 1128, |
|
"text": "Kafle and Huenerfauth, 2020)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative error analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Evaluations of SLT systems rarely reflect explicitly on how users interact with them 10 . However, because both (perceived) severity and impact as well as prevalence of errors depends on recording and task, understanding how people use ASR-based technologies in their daily life is important. Future work concerning predictive bias in ASR would benefit from incorporating HCI methodologies like interviews, ethnography and qualitative surveys to gain a deeper understanding of users' experiences. So far, researchers in HCI have been particularly interested in how people interact with voice user interfaces (e.g. Porcheron et al., 2018; Luger and Sellen, 2016) , though little attention has been paid to the role of accent and dialect. Furthermore, especially given the context of the recent shift to increased remote work and education, applications of cloud-based speech recognition for personal or business use extend beyond voice user interfaces to automatic captioning of audio and video lectures and meetings. Domain-general and naturalistic recordings of continuous spontaneous speech pose a particular challenge to ASR systems, and insights into what types of errors users perceive to be particularly disruptive and common depending on their linguistic and demographic background should inform development and evaluation of ASR systems. For example, in the context of the Lothian Diary Project the goal of ASR is to produce transcriptions which can be used by linguists and other social science researchers to analyse both what participants are saying and how they are saying it. Every aspect of their speech, including disfluencies and repetitions as well as specific lexical choices (e.g. past tense vs present tense) are relevant to this analysis and should as such be preserved in a transcript. Furthermore, because most speech in this context is largely unplanned, higher error rates around disfluent or informal speech are particularly disruptive. \nWhen applying the proposed methodology to other use cases (e.g. automatic captioning of video lectures or business meetings) interviews with stakeholders can clarify what types of errors are particularly disruptive.",
|
"cite_spans": [ |
|
{ |
|
"start": 614, |
|
"end": 637, |
|
"text": "Porcheron et al., 2018;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 638, |
|
"end": 661, |
|
"text": "Luger and Sellen, 2016)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User experience", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Considering the broader societal context in which an ASR system is developed and implemented allows us to identify the specific harms it could inflict on users and (sometimes at least) see the underlying societal structures giving rise to predictive bias. Identifying risk and causes in turn allows us to mitigate harms (and, in future systems, bias). In the case of commercial ASR (in English), research suggests that predictive bias is a result of under-representation of varieties of marginalised speaker groups in proprietary training and test sets. For many open source and licensed corpora used to train and benchmark ASR systems, incomplete documentation makes it difficult to es-timate representation; the commonly used Switchboard (Godfrey and Holliman, 1993) and TIMIT corpora (Garofolo et al., 1993 ) (both US English) and Mozilla's recent open-source Common Voice corpus 11 , for example, do not record speaker race. The speaker characteristics of training sets depends on the broader societal context. For example, use of commercial speech recognition (e.g. in the case of Google's system) and participation in scientific studies (e.g. the licensed corpora) or crowd-source tasks (e.g. Mozilla Common Voice) differs across demographic groups (for example based on income and education). Imbalanced corpora are also tied to ideologies around whose ways of speaking are considered \"legitimate\", \"correct\" or \"native\".", |
|
"cite_spans": [ |
|
{ |
|
"start": 740, |
|
"end": 768, |
|
"text": "(Godfrey and Holliman, 1993)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 787, |
|
"end": 809, |
|
"text": "(Garofolo et al., 1993", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Considering context and impacts", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Some of the more obvious specific harms of predictive bias include difficulties using voice user interfaces, which for some users are crucial assistive technology. As ASR spreads into high-stakes contexts such as hiring, substantial harms could be incurred if systems perform worse for already marginalised groups, effectively encoding \"accentism\" and linguistic prejudice in automatic systems. Even assuming no prediction bias across different speaker groups, the use of ASR in automatic analysis of video interviews to recommend or rank applicants (e.g. HireVue 12 ) risks real harm in the case of even small recognition errors and potentially entrenches existing language ideologies around \"professional\", \"fluent\" or \"competent\" speech patterns. For example, HireNet (Hemamou et al., 2019) extracts information about prosody and speech fluency to predict \"hireability\" (as annotated by recruiters). Other harms include less usable automatic captions and potential downstream effects as described in our case study.", |
|
"cite_spans": [ |
|
{ |
|
"start": 771, |
|
"end": 793, |
|
"text": "(Hemamou et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Considering context and impacts", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "We have proposed an approach to ASR evaluation which considers the experiences of different user/speaker groups, sociolinguistic context and potential impacts of predictive bias. We argue that this interdisciplinary approach is necessary to significantly advance our understanding of ASR usability. We particularly invite perspectives from the fields of human-computer interaction in order to evaluate speech and language technologies as systems situated in specific sociolinguistic and socio-technical contexts which perform specific tasks for specific (language) users.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://lothianlockdown.org/ 2 The focus here is on English, but predictive bias is likely to affect stigmatised and unstandardised varieties vis-a-vis standardised varieties of other languages too.3 WER is an edit-distance measure capturing the number of deletions, substitutions and insertions required per word to match a reference transcript.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Meyer et al. (2020)/Mozilla do not specify speaker race or region within the US.5 A finding which echoes work in other ML domains and other areas of SLT highlighting the way that multiple demographic axes linked to interacting structures of oppression (e.g. gender and race) cannot be considered separately(Buolamwini and Gebru, 2018;Jiang and Fellbaum, 2020)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "though not representative of the Scottish population 7 \"Scottish English\" is used here as a broad term including the continuum between Scots and Scottish Standard English (seeStuart-Smith, 2004) 8 https://cloud.google.com/ speech-to-text", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/usnistgov/SCTK", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Though intended use is sometimes implicit in the choice of training and test data: e.g. Google's use of voice search data(Chiu et al., 2018)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "available here: https://commonvoice.mozilla. org/en/datasets 12 https://www.hirevue.com/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported in part by the UKRI Centre for Doctoral Training in Natural Language Processing, funded by the UKRI (grant EP/S022481/1) and the University of Edinburgh, School of Informatics and School of Philosophy, Psychology & Language Sciences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Do speech recognizers prefer female speakers?", |
|
"authors": [ |
|
{ |
|
"first": "Martine", |
|
"middle": [], |
|
"last": "Adda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-", |
|
"middle": [], |
|
"last": "Decker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lori", |
|
"middle": [], |
|
"last": "Lamel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "9th European Conference on Speech Communication and Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2205--2208", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martine Adda-Decker and Lori Lamel. 2005. Do speech recognizers prefer female speakers? 9th Eu- ropean Conference on Speech Communication and Technology, (January 2005):2205-2208.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "The CHiME Challenges: Robust Speech Recognition in Everyday Environments", |
|
"authors": [ |
|
{ |
|
"first": "Jon", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Barker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ricard", |
|
"middle": [], |
|
"last": "Marxer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emmanuel", |
|
"middle": [], |
|
"last": "Vincent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shinji", |
|
"middle": [], |
|
"last": "Watanabe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "327--344", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-319-64680-0_14" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jon P. Barker, Ricard Marxer, Emmanuel Vincent, and Shinji Watanabe. 2017. The CHiME Challenges: Robust Speech Recognition in Everyday Environ- ments, pages 327-344. Springer International Pub- lishing, Cham.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Race after technology : abolitionist tools for the New Jim Code", |
|
"authors": [ |
|
{ |
|
"first": "Ruha", |
|
"middle": [], |
|
"last": "Benjamin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruha Benjamin. 2019. Race after technology : abo- litionist tools for the New Jim Code. Polity Press, Newark.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Automatic speech recognition and speech variability: A review", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Benzeghiba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"De" |
|
], |
|
"last": "Mori", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Deroo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Dupont", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Erbes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Jouvet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Fissore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Laface", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Mertins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Ris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Rose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Tyagi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Wellekens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Speech Communication", |
|
"volume": "49", |
|
"issue": "", |
|
"pages": "763--786", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.specom.2007.02.006" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Benzeghiba, R. De Mori, O. Deroo, S. Dupont, T. Erbes, D. Jouvet, L. Fissore, P. Laface, A. Mertins, C. Ris, R. Rose, V. Tyagi, and C. Wellekens. 2007. Automatic speech recognition and speech variability: A review. Speech Communication, 49(10-11):763- 786.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Language (technology) is power: A critical survey of \"bias\" in NLP", |
|
"authors": [ |
|
{ |
|
"first": "Su Lin", |
|
"middle": [], |
|
"last": "Blodgett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Solon", |
|
"middle": [], |
|
"last": "Barocas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "III" |
|
}, |
|
{ |
|
"first": "Hanna", |
|
"middle": [], |
|
"last": "Wallach", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5454--5476", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.485" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Su Lin Blodgett, Solon Barocas, Hal Daum\u00e9 III, and Hanna Wallach. 2020. Language (technology) is power: A critical survey of \"bias\" in NLP. In Pro- ceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 5454- 5476, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Racial disparity in natural language processing: A case study of social media african-american english", |
|
"authors": [ |
|
{ |
|
"first": "Su Lin", |
|
"middle": [], |
|
"last": "Blodgett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brendan", |
|
"middle": [], |
|
"last": "O'Connor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Su Lin Blodgett and Brendan O'Connor. 2017. Racial disparity in natural language processing: A case study of social media african-american english.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Gender shades: Intersectional accuracy disparities in commercial gender classification", |
|
"authors": [ |
|
{ |
|
"first": "Joy", |
|
"middle": [], |
|
"last": "Buolamwini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timnit", |
|
"middle": [], |
|
"last": "Gebru", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 1st Conference on Fairness, Accountability and Transparency", |
|
"volume": "81", |
|
"issue": "", |
|
"pages": "77--91", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joy Buolamwini and Timnit Gebru. 2018. Gender shades: Intersectional accuracy disparities in com- mercial gender classification. In Proceedings of the 1st Conference on Fairness, Accountability and Transparency, volume 81 of Proceedings of Ma- chine Learning Research, pages 77-91, New York, NY, USA. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "State-of-the-art speech recognition with sequence-to-sequence models", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Chiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Sainath", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Prabhavalkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Kannan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Gonina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Jaitly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Chorowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bacchiani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4774--4778", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICASSP.2018.8462105" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Chiu, T. N. Sainath, Y. Wu, R. Prabhavalkar, P. Nguyen, Z. Chen, A. Kannan, R. J. Weiss, K. Rao, E. Gonina, N. Jaitly, B. Li, J. Chorowski, and M. Bacchiani. 2018. State-of-the-art speech recognition with sequence-to-sequence models. In 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 4774-4778.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Design values: Hardcoding liberation?", |
|
"authors": [ |
|
{ |
|
"first": "Sasha", |
|
"middle": [], |
|
"last": "Costanza-Chock", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Design Justice", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sasha Costanza-Chock. 2020. Design values: Hard- coding liberation? In Design Justice. MIT Press. Https://design-justice.pubpub.org/pub/3h2zq86d.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Mapping the Margins: Intersectionality, Identity Politics, and Violence against Women of Color", |
|
"authors": [ |
|
{ |
|
"first": "Kimberle", |
|
"middle": [], |
|
"last": "Crenshaw", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "Stanford Law Review", |
|
"volume": "43", |
|
"issue": "6", |
|
"pages": "1241--1299", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.2307/1229039" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kimberle Crenshaw. 1991. Mapping the Margins: Intersectionality, Identity Politics, and Violence against Women of Color. Stanford Law Review, 43(6):1241-1299.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "On the impact of dysarthric speech on contemporary ASR cloud platforms", |
|
"authors": [ |
|
{ |
|
"first": "Luigi", |
|
"middle": [], |
|
"last": "De Russis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fulvio", |
|
"middle": [], |
|
"last": "Corno", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Journal of Reliable Intelligent Environments", |
|
"volume": "5", |
|
"issue": "3", |
|
"pages": "163--172", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s40860-019-00085-y" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luigi De Russis and Fulvio Corno. 2019. On the impact of dysarthric speech on contemporary ASR cloud platforms. Journal of Reliable Intelligent En- vironments, 5(3):163-172.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "TIMIT Acoustic-Phonetic Continuous Speech Corpus LDC93S1", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Garofolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lori", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Lamel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Fisher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Fiscus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Pallett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nancy", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Dahlgren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Zue", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.35111/17gk-bn40" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John S. Garofolo, Lori F. Lamel, William M. Fisher, Jonathan G. Fiscus, David S. Pallett, Nancy L. Dahlgren, and Victor Zue. 1993. TIMIT Acoustic- Phonetic Continuous Speech Corpus LDC93S1.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Automatic speech recognition services: Deaf and hard-of-hearing usability", |
|
"authors": [ |
|
{ |
|
"first": "Abraham", |
|
"middle": [], |
|
"last": "Glasser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3290607.3308461" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abraham Glasser. 2019. Automatic speech recognition services: Deaf and hard-of-hearing usability. In Ex- tended Abstracts of the 2019 CHI Conference on Hu- man Factors in Computing Systems, page 1-6, New York, NY, USA. Association for Computing Machin- ery.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Switchboard-1 Release 2 LDC97S62", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Godfrey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Holliman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.35111/sw3h-rw02" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John J. Godfrey and Edward Holliman. 1993. Switchboard-1 Release 2 LDC97S62.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Which words are hard to recognize? prosodic, lexical, and disfluency factors that increase speech recognition error rates", |
|
"authors": [ |
|
{ |
|
"first": "Sharon", |
|
"middle": [], |
|
"last": "Goldwater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Speech Communication", |
|
"volume": "52", |
|
"issue": "3", |
|
"pages": "181--200", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.specom.2009.10.001" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sharon Goldwater, Dan Jurafsky, and Christopher D. Manning. 2010. Which words are hard to recognize? prosodic, lexical, and disfluency factors that increase speech recognition error rates. Speech Communica- tion, 52(3):181 -200.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Hirenet: A hierarchical attention model for the automatic analysis of asynchronous video job interviews", |
|
"authors": [ |
|
{ |
|
"first": "L\u00e9o", |
|
"middle": [], |
|
"last": "Hemamou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ghazi", |
|
"middle": [], |
|
"last": "Felhi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Vandenbussche", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean-Claude", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chlo\u00e9", |
|
"middle": [], |
|
"last": "Clavel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "573--581", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1609/aaai.v33i01.3301573" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L\u00e9o Hemamou, Ghazi Felhi, Vincent Vandenbuss- che, Jean-Claude Martin, and Chlo\u00e9 Clavel. 2019. Hirenet: A hierarchical attention model for the auto- matic analysis of asynchronous video job interviews. Proceedings of the AAAI Conference on Artificial In- telligence, 33(01):573-581.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Interdependencies of gender and race in contextualized word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "May", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christiane", |
|
"middle": [], |
|
"last": "Fellbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Second Workshop on Gender Bias in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "17--25", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "May Jiang and Christiane Fellbaum. 2020. Interdepen- dencies of gender and race in contextualized word embeddings. In Proceedings of the Second Work- shop on Gender Bias in Natural Language Process- ing, pages 17-25, Barcelona, Spain (Online). Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Usability evaluation of captions for people who are deaf or hard of hearing", |
|
"authors": [ |
|
{ |
|
"first": "Sushant", |
|
"middle": [], |
|
"last": "Kafle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Huenerfauth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ACM SIGACCESS Accessibility and Computing", |
|
"volume": "", |
|
"issue": "122", |
|
"pages": "1--1", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3386410.3386411" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sushant Kafle and Matt Huenerfauth. 2020. Usabil- ity evaluation of captions for people who are deaf or hard of hearing. ACM SIGACCESS Accessibility and Computing, (122):1-1.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Racial disparities in automated speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "Allison", |
|
"middle": [], |
|
"last": "Koenecke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Nam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Lake", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Nudell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minnie", |
|
"middle": [], |
|
"last": "Quartey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zion", |
|
"middle": [], |
|
"last": "Mengesha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Connor", |
|
"middle": [], |
|
"last": "Toups", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Rickford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharad", |
|
"middle": [], |
|
"last": "Goel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the National Academy of Sciences", |
|
"volume": "117", |
|
"issue": "14", |
|
"pages": "7684--7689", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1073/pnas.1915768117" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Allison Koenecke, Andrew Nam, Emily Lake, Joe Nudell, Minnie Quartey, Zion Mengesha, Connor Toups, John R. Rickford, Dan Jurafsky, and Sharad Goel. 2020. Racial disparities in automated speech recognition. Proceedings of the National Academy of Sciences, 117(14):7684-7689.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "The intersection of sex and social class in the course of linguistic change", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Labov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Language Variation and Change", |
|
"volume": "2", |
|
"issue": "2", |
|
"pages": "205--254", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1017/S0954394500000338" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Labov. 1990. The intersection of sex and so- cial class in the course of linguistic change. Lan- guage Variation and Change, 2(2):205-254.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "\"like having a really bad pa\": The gulf between user expectation and experience of conversational agents", |
|
"authors": [ |
|
{ |
|
"first": "Ewa", |
|
"middle": [], |
|
"last": "Luger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abigail", |
|
"middle": [], |
|
"last": "Sellen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5286--5297", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2858036.2858288" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ewa Luger and Abigail Sellen. 2016. \"like having a really bad pa\": The gulf between user expectation and experience of conversational agents. In Proceed- ings of the 2016 CHI Conference on Human Factors in Computing Systems, page 5286-5297, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Understanding Racial Disparities in Automatic Speech Recognition: The Case of Habitual \"be", |
|
"authors": [ |
|
{ |
|
"first": "Joshua", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proc. Interspeech 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "626--630", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.21437/Interspeech.2020-2893" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joshua L. Martin and Kevin Tang. 2020. Understand- ing Racial Disparities in Automatic Speech Recog- nition: The Case of Habitual \"be\". In Proc. Inter- speech 2020, pages 626-630.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Artie bias corpus: An open dataset for detecting demographic bias in speech applications", |
|
"authors": [ |
|
{ |
|
"first": "Josh", |
|
"middle": [], |
|
"last": "Meyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lindy", |
|
"middle": [], |
|
"last": "Rauchenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Eisenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Howell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6462--6468", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Josh Meyer, Lindy Rauchenstein, Joshua D. Eisen- berg, and Nicholas Howell. 2020. Artie bias corpus: An open dataset for detecting demographic bias in speech applications. In Proceedings of the 12th Lan- guage Resources and Evaluation Conference, pages 6462-6468, Marseille, France. European Language Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Predicting Human Perceived Accuracy of ASR Systems", |
|
"authors": [ |
|
{ |
|
"first": "Taniya", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrej", |
|
"middle": [], |
|
"last": "Ljolje", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mazin", |
|
"middle": [], |
|
"last": "Gilbert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "INTERSPEECH-2011", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1945--1948", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taniya Mishra, Andrej Ljolje, Mazin Gilbert, Park Avenue, and Florham Park. 2011. Predicting Hu- man Perceived Accuracy of ASR Systems. In INTERSPEECH-2011, August, pages 1945-1948.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "From WER and RIL to MER and WIL : improved evaluation measures for connected speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Morris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Viktoria", |
|
"middle": [], |
|
"last": "Maier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Green", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "INTERSPEECH-2004", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2765--2768", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew C Morris, Viktoria Maier, and Phil Green. 2004. From WER and RIL to MER and WIL : improved evaluation measures for connected speech recognition. In INTERSPEECH-2004, pages 2765- 2768.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "A new asr evaluation measure and minimum bayes-risk decoding for open-domain speech understanding", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Nanjo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kawahara", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICASSP.2005.1415298" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Nanjo and T. Kawahara. 2005. A new asr eval- uation measure and minimum bayes-risk decoding for open-domain speech understanding. In Proceed- ings. (ICASSP '05). IEEE International Conference on Acoustics, Speech, and Signal Processing, 2005., volume 1, pages I/1053-I/1056 Vol. 1.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Voice Interfaces in Everyday Life", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Porcheron", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Fischer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stuart", |
|
"middle": [], |
|
"last": "Reeves", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Sharples", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Association for Computing Machinery", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--12", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3173574.3174214" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Porcheron, Joel E. Fischer, Stuart Reeves, and Sarah Sharples. 2018. Voice Interfaces in Everyday Life, page 1-12. Association for Computing Machin- ery, New York, NY, USA.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Actionable Auditing", |
|
"authors": [ |
|
{ |
|
"first": "Inioluwa", |
|
"middle": [ |
|
"Deborah" |
|
], |
|
"last": "Raji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joy", |
|
"middle": [], |
|
"last": "Buolamwini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "AIES '19: Proceedings of the 2019 AAAI/ACM Conference on AI, Ethics, and Society", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "429--435", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3306618.3314244" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Inioluwa Deborah Raji and Joy Buolamwini. 2019. Ac- tionable Auditing. In AIES '19: Proceedings of the 2019 AAAI/ACM Conference on AI, Ethics, and So- ciety, pages 429-435.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "The risk of racial bias in hate speech detection", |
|
"authors": [ |
|
{ |
|
"first": "Maarten", |
|
"middle": [], |
|
"last": "Sap", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dallas", |
|
"middle": [], |
|
"last": "Card", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saadia", |
|
"middle": [], |
|
"last": "Gabriel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1668--1678", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1163" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maarten Sap, Dallas Card, Saadia Gabriel, Yejin Choi, and Noah A. Smith. 2019. The risk of racial bias in hate speech detection. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 1668-1678, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Predictive biases in natural language processing models: A conceptual framework and overview", |
|
"authors": [ |
|
{ |
|
"first": "Deven", |
|
"middle": [ |
|
"Santosh" |
|
], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H.", |
|
"middle": [ |
|
"Andrew" |
|
], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5248--5264", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.468" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deven Santosh Shah, H. Andrew Schwartz, and Dirk Hovy. 2020. Predictive biases in natural language processing models: A conceptual framework and overview. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 5248-5264, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Scottish English", |
|
"authors": [ |
|
{ |
|
"first": "Jane", |
|
"middle": [], |
|
"last": "Stuart-Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "A Handbook of Varieties of English", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "47--67", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jane Stuart-Smith. 2004. Scottish English. In Bernd Kortmann, Kate Burridge, Rajend Mesthrie, Edgar W. Schneider, and Clive Upton, editors, A Handbook of Varieties of English, pages 47-67.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Gender and dialect bias in YouTube's automatic captions", |
|
"authors": [ |
|
{ |
|
"first": "Rachael", |
|
"middle": [], |
|
"last": "Tatman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the First ACL Workshop on Ethics in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "53--59", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-1606" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rachael Tatman. 2017. Gender and dialect bias in YouTube's automatic captions. In Proceedings of the First ACL Workshop on Ethics in Natural Lan- guage Processing, pages 53-59, Valencia, Spain. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Effects of talker dialect, gender race on accuracy of bing speech and youtube automatic captions", |
|
"authors": [ |
|
{ |
|
"first": "Rachael", |
|
"middle": [], |
|
"last": "Tatman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Conner", |
|
"middle": [], |
|
"last": "Kasten", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proc. Interspeech 2017", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "934--938", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.21437/Interspeech.2017-1746" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rachael Tatman and Conner Kasten. 2017. Effects of talker dialect, gender race on accuracy of bing speech and youtube automatic captions. In Proc. In- terspeech 2017, pages 934-938.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Language ideology", |
|
"authors": [ |
|
{ |
|
"first": "Kathryn", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Woolard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bambi", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Schieffelin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Annual Review of Anthropology", |
|
"volume": "23", |
|
"issue": "", |
|
"pages": "55--82", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kathryn A. Woolard and Bambi B. Schieffelin. 1994. Language ideology. Annual Review of Anthropol- ogy, 23:55-82.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Difficulties in automatic speech recognition of dysarthric speakers and implications for speech-based applications used by the elderly: A literature review", |
|
"authors": [ |
|
{ |
|
"first": "Victoria", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Mihailidis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Assistive Technology", |
|
"volume": "22", |
|
"issue": "2", |
|
"pages": "99--112", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1080/10400435.2010.483646" |
|
], |
|
"PMID": [ |
|
"20698428" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victoria Young and Alex Mihailidis. 2010. Difficulties in automatic speech recognition of dysarthric speak- ers and implications for speech-based applications used by the elderly: A literature review. Assistive Technology, 22(2):99-112. PMID: 20698428.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"text": "Word Error Rates for different participants vary widely both across and within groups (lower is better).", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |