|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:32:09.346549Z" |
|
}, |
|
"title": "Improving Hate Speech Type and Target Detection with Hateful Metaphor Features", |
|
"authors": [ |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Lemmens", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Antwerp Lange", |
|
"location": { |
|
"addrLine": "Winkelstraat 40 2000", |
|
"settlement": "Antwerp", |
|
"country": "Belgium" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ilia", |
|
"middle": [], |
|
"last": "Markov", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Antwerp Lange", |
|
"location": { |
|
"addrLine": "Winkelstraat 40 2000", |
|
"settlement": "Antwerp", |
|
"country": "Belgium" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Daelemans", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Antwerp Lange", |
|
"location": { |
|
"addrLine": "Winkelstraat 40 2000", |
|
"settlement": "Antwerp", |
|
"country": "Belgium" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We study the usefulness of hateful metaphors as features for the identification of the type and target of hate speech in Dutch Facebook comments. For this purpose, all hateful metaphors in the Dutch LiLaH corpus were annotated and interpreted in line with Conceptual Metaphor Theory and Critical Metaphor Analysis. We provide SVM and BERT/RoBERTa results, and investigate the effect of different metaphor information encoding methods on hate speech type and target detection accuracy. The results of the conducted experiments show that hateful metaphor features improve model performance for the both tasks. To our knowledge, it is the first time that the effectiveness of hateful metaphors as an information source for hate speech classification is investigated.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We study the usefulness of hateful metaphors as features for the identification of the type and target of hate speech in Dutch Facebook comments. For this purpose, all hateful metaphors in the Dutch LiLaH corpus were annotated and interpreted in line with Conceptual Metaphor Theory and Critical Metaphor Analysis. We provide SVM and BERT/RoBERTa results, and investigate the effect of different metaphor information encoding methods on hate speech type and target detection accuracy. The results of the conducted experiments show that hateful metaphor features improve model performance for the both tasks. To our knowledge, it is the first time that the effectiveness of hateful metaphors as an information source for hate speech classification is investigated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In this paper, the usefulness of hateful metaphors used as features for detecting the type and target of Dutch online hate speech comments is investigated. Although both hate speech and metaphor detection have been researched widely (e.g., MacAvaney et al., 2019; Basile et al., 2019; Leong et al., 2018 Leong et al., , 2020 , and figurative language used in hateful content has been identified as one of the main challenges in (implicit) hate speech detection (MacAvaney et al., 2019; van Aken et al., 2018) , the question whether detecting (hateful) metaphors and using them as features improves hate speech detection models has remained unstudied in previous research. Therefore, it is the goal of the present paper to address this question.", |
|
"cite_spans": [ |
|
{ |
|
"start": 240, |
|
"end": 263, |
|
"text": "MacAvaney et al., 2019;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 284, |
|
"text": "Basile et al., 2019;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 285, |
|
"end": 303, |
|
"text": "Leong et al., 2018", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 304, |
|
"end": 324, |
|
"text": "Leong et al., , 2020", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 461, |
|
"end": 485, |
|
"text": "(MacAvaney et al., 2019;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 486, |
|
"end": 508, |
|
"text": "van Aken et al., 2018)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In order to achieve this goal, we used the Dutch LiLaH 1 corpus which consists Facebook comments on online newspaper articles related to either migrants or the LGBT community. The comments were annotated for the type of hate speech and the target of hate speech, and for \"hateful metaphors\", 1 https://lilah.eu/ i.e., metaphors that express hate towards a specific target (e.g., \"het parlement is een circus!\"; the parliament is a circus). We investigate whether features based on these manual annotations can improve Natural Language Processing (NLP) models that predict the type (e.g., violence, offense) and target (e.g., migrants, LGBT, journalist) of hateful content. Our experimental setup is therefore different from the commonly-used one in the sense that we are focusing only on the fine-grained hate speech categories and not on classification of hateful and non-hateful content. We hypothesize that hateful metaphors contain valuable information for type and target classification, especially in cases of implicit hate speech, and can therefore improve classification accuracy when used as features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Prior to the classification experiments, a linguistic analysis of the annotated metaphors is conducted in the framework of Conceptual Metaphor Theory and Critical Metaphor Analysis. We would like to warn that for clarity of exposition, randomly chosen examples of hate speech from our corpus will be provided in this paper, and that some readers could find those offensive.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Hate speech detection Hate speech -frequently defined as a form of communication that disparages a person or a group on the basis of some characteristic such as race, color, ethnicity, gender, sexual orientation, nationality, religion, or other (Nockleby, 2000) -has been extensively researched in the field of NLP. Pretrained language models such as Bidirectional Encoder Representations from Transformers (BERT) and Robustly Optimized BERT Pretraining Approach (RoBERTa) (Devlin et al., 2019; Liu et al., 2019) provide the best results for hate speech detection, including type and target classification (Basile et al., 2019; Zampieri et al., 2019b Zampieri et al., , 2020 , while shallow machine learning models (e.g., Support Vector Ma-chines (SVM)) can achieve a near state-of-the-art performance (MacAvaney et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 245, |
|
"end": 261, |
|
"text": "(Nockleby, 2000)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 473, |
|
"end": 494, |
|
"text": "(Devlin et al., 2019;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 495, |
|
"end": 512, |
|
"text": "Liu et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 606, |
|
"end": 627, |
|
"text": "(Basile et al., 2019;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 628, |
|
"end": 650, |
|
"text": "Zampieri et al., 2019b", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 651, |
|
"end": 674, |
|
"text": "Zampieri et al., , 2020", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 802, |
|
"end": 826, |
|
"text": "(MacAvaney et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related research", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Examples of successful machine learning models include the winning teams of both subtasks A (binary hate speech detection) and B (binary target classification) of task 5 of SemEval 2019: multilingual detection of hate speech against women and immigrants on Twitter (Basile et al., 2019) . These teams all used SVM-based approaches for both languages provided (English and Spanish) with the exception of the winner of task B for Spanish, who used various other classifiers and combined them by means of majority voting. For English, the winning teams obtained an F1-score of 65% for task A and an EMR score of 57% for task B.", |
|
"cite_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 286, |
|
"text": "(Basile et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related research", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Examples of effective neural approaches can be found in OffensEval 2020 (Zampieri et al., 2020) . This shared task consisted of three subtasks: (A) offensive language identification, (B) categorization of offensive types and (C) target identification for multiple languages. For English, each of the top 10 teams for all three tasks used pretrained language models such as BERT and RoBERTa. The highest macro F1-scores obtained for task A, B, and C were 92%, 75% and 71%, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 72, |
|
"end": 95, |
|
"text": "(Zampieri et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related research", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In their hate speech detection survey, MacAvaney et al. (2019) highlight current challenges in hate speech detection. One of the main challenges mentioned is the use of figurative and implicit language such as sarcasm and metaphors, which can lead to classification errors, as evidenced by their experiments. An SVM classifier with TF-IDF weighted character n-gram features was used to perform hate speech detection on the Stormfront, TRAC, HatEval and HatebaseTwitter datasets (de Gibert et al., 2018; Kumar et al., 2018; Basile et al., 2019; Davidson et al., 2017) . An error analysis of the misclassified instances showed that sarcastic and metaphorical posts were the main causes of misclassifications, next to too little context (posts containing fewer than 6 tokens) and aggressive statements occurring in posts that were not annotated as \"hateful\".", |
|
"cite_spans": [ |
|
{ |
|
"start": 478, |
|
"end": 502, |
|
"text": "(de Gibert et al., 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 503, |
|
"end": 522, |
|
"text": "Kumar et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 523, |
|
"end": 543, |
|
"text": "Basile et al., 2019;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 544, |
|
"end": 566, |
|
"text": "Davidson et al., 2017)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figurative and implicit language in hate speech", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Similar findings were observed by van Aken et al. (2018) . An ensemble of machine learning and deep learning models was used for multi-class classification of toxic online comments and an error analysis of the incorrect predictions showed that metaphors can lead to classification errors because the models require significant world knowledge to process them.", |
|
"cite_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 56, |
|
"text": "(2018)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figurative and implicit language in hate speech", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To address the problem of implicit language in hate speech, more recent studies have used datasets that distinguish between implicit and explicit hate speech, such as AbuseEval v1.0 (Caselli et al., 2020) . This dataset was created by annotating the OLID/OffensEval dataset (Zampieri et al., 2019a) for implicitness/explicitness. The authors of AbuseEval v1.0 provide results with BERT for binary classification (abusive, non-abusive) and multi-class classification (non-abusive, implicit abuse, explicit abuse) for the same train/test split and show that the binary classification task (71.6% macro F1-score) becomes substantially more complex when distinguishing between implicit and explicit abusive language (61.4% macro F1-score). Additionally, they show that the results for implicit hate speech detection (24% precision, 23% recall) are substantially lower than for explicit hate speech detection (64% precision, 51% recall).", |
|
"cite_spans": [ |
|
{ |
|
"start": 182, |
|
"end": 204, |
|
"text": "(Caselli et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 274, |
|
"end": 298, |
|
"text": "(Zampieri et al., 2019a)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figurative and implicit language in hate speech", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Metaphors The foundations of the state-of-theart way of thinking about metaphors is presented in \"Metaphors We Live By\" (Lakoff and Johnson, 1980) , in which metaphors are defined as utterances that describe a target concept in terms of a source concept that is semantically distinct from the target concept, this includes idiomatic expressions and dead metaphors such as \"the body of a paper\" and \"the foot of a mountain\". The authors argue that specific metaphorical expressions can be traced back to more abstract metaphor schemes that overarch similar metaphors. This is what they call \"Conceptual Metaphor Theory\" (CMT). Examples are utterances such as \"he attacked my arguments\" and \"I destroyed him during our discussion\" which can be traced back to the conceptual metaphor argument is war.", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 146, |
|
"text": "(Lakoff and Johnson, 1980)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figurative and implicit language in hate speech", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In Charteris-Black (2004) , Critical Metaphor Analysis (CMA), an integration of various linguistic disciplines such as cognitive linguistics, corpus linguistics and discourse analysis, is applied to CMT. According to CMA, metaphors highlight certain aspects of the target concept while hiding other aspects. At the same time, they uncover the speaker's thought patterns and ideological views. Therefore, metaphors -this includes dead metaphors used subconsciously -provide insights into how a speaker or community perceives the target domain. In short, metaphors reveal speaker bias. This is particularly valuable in the present study, since the toxicity that is spread through hateful metaphors resides in the source domains, more precisely in the aspect of the source domain that is highlighted by the metaphor.", |
|
"cite_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 25, |
|
"text": "Charteris-Black (2004)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figurative and implicit language in hate speech", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Metaphor detection in NLP Recent advances in NLP-related metaphor studies can be found in the 2020 VUA and TOEFL metaphor detection shared task (Leong et al., 2020) . The participating models showed substantial improvements compared to previous research, such as the 2018 VUA metaphor detection shared task (Leong et al., 2018) , due to the effectiveness of (pretrained) transformer and language models. More than half of the participants used BERT (or related) models and all participating teams obtained higher F1-scores on the VUA metaphor corpus than the best-performing approach that participated in the 2018 shared task (65.1% F1-score). Further, the 2020 winning model, which consists of transformer stacks with linguistic features such as part-of-speech (PoS) tags, outperformed its predecessor of 2018 by more than 10% (76.9% F1-score, Su et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 164, |
|
"text": "(Leong et al., 2020)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 307, |
|
"end": 327, |
|
"text": "(Leong et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 845, |
|
"end": 861, |
|
"text": "Su et al., 2020)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figurative and implicit language in hate speech", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Contributions To our knowledge, we are the first to use hateful metaphor features for hate speech detection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figurative and implicit language in hate speech", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We provide SVM and BERT/RoBERTa results and show the impact of using hateful metaphors as features on predicting the type and target of hateful content. In addition, the qualitative analysis of the annotated metaphors provide insights into what linguistic strategies are used to convey hate towards specific target groups.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figurative and implicit language in hate speech", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The Dutch LiLaH corpus consists of approximately 36,000 Facebook comments on online news articles related to migrants or the LGBT community mined from three popular Flemish newspaper pages (HLN, Het Nieuwsblad and VRT) 2 . The corpus, which has been used in several recent studies on hate speech detection in Dutch, e.g., (Markov et al., 2021; Ljube\u0161i\u0107 et al., 2020) , was annotated for the type and target of hateful comments following the same procedure and annotation guidelines as presented in (Ljube\u0161i\u0107 et al., 2019) , that is, with respect to the type of hate speech, the possible classes were violent speech and offensive speech (either triggered by the target's personal background, e.g., religion, gender, sexual orientation, nationality, etc., or on the basis of individual characteristics), inappropriate speech (without a specific target), and appropriate speech. The targets, on the other hand, were divided into migrants and the LGBT community, people related to either of these communities (e.g., people who support them), the journalist who wrote or medium that provided the article, another commenter, other targets and no target. The comments were labeled by two trained annotators (both Master's students and native speakers of Dutch) and the final labels were determined by a single expert annotator (PhD student and native speaker of Dutch).", |
|
"cite_spans": [ |
|
{ |
|
"start": 322, |
|
"end": 343, |
|
"text": "(Markov et al., 2021;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 366, |
|
"text": "Ljube\u0161i\u0107 et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 498, |
|
"end": 521, |
|
"text": "(Ljube\u0161i\u0107 et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus description", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "As mentioned, our analysis deviates from the more \"standard\" experimental setup in hate speech research, namely classifying comments into hate speech or non-hate speech. In contrast, we consider only the fine-grained hate speech categories, i.e., discarding the non-hate speech classes (i.e., \"inappropriate speech\" and \"appropriate speech\" for the type class; \"no target\" for the target class) and focusing only the type and target of hateful content. Additionally, the four hate speech type categories (violent-background, violent-other, offensive-background, offensive-other) were converted to binary classes (violent, offensive).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus description", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The statistics of the hate speech comments used for our metaphor analyses are shown in Table 1 . For the machine learning experiments, we selected a balanced subset in terms of the number of comments per class and the number of literal and non-literal comments per class (whenever possible). The statistics of the train/test partitions used for these machine learning experiments are shown in Table 2 . In the subsets used, Cohen's Kappa equals 0.46 for the target classes and 0.54 for the type classes, indicating a \"moderate\" agreement between the two annotators for both the type and target annotations.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 95, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 394, |
|
"end": 401, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Corpus description", |
|
"sec_num": "3.1" |
|
}, |
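
{

"text": "For reference, a minimal sketch of how such agreement scores can be computed with scikit-learn; this is our illustration under stated assumptions, and annotator1_targets, annotator2_targets, annotator1_types, and annotator2_types are hypothetical lists of per-comment labels, one per annotator:\nfrom sklearn.metrics import cohen_kappa_score\n\n# Hypothetical per-comment label lists from the two annotators.\nkappa_target = cohen_kappa_score(annotator1_targets, annotator2_targets)  # 0.46 on our subset\nkappa_type = cohen_kappa_score(annotator1_types, annotator2_types)  # 0.54 on our subset",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Corpus description",

"sec_num": "3.1"

},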
|
{ |
|
"text": "All hateful metaphors in our corpus were annotated by the same expert annotator mentioned above. For this task, the definition of a metaphor presented in Lakoff and Johnson (1980) , described in Section 2, was adopted. More specifically, we define hateful metaphors as metaphorical utterances (including similes) that express hate towards a specific target, and therefore occur in hate speech comments, that are not used to refer to someone else's opinion or previous comments, and that are written in Dutch.", |
|
"cite_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 179, |
|
"text": "Lakoff and Johnson (1980)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hateful metaphor annotations", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We found that 2,758 (14.7%) out of all 18,770 hateful comments in our corpus contain at least one hateful metaphor. In those comments, 282 were LGBT-related, whereas all other 2,476 nonliteral comments were related to migrants. In other words, 15.7% of all hate speech comments on LGBT-related news articles (1,797 in total) contain one or more hateful metaphor(s), whereas 14.6% of all hate speech comments on migrants-related news articles (16,973 in total) contain one or more hateful metaphor(s). See Table 1 for more finegrained information on (non-)literal comments per type/target.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 505, |
|
"end": 512, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hateful metaphor annotations", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "A qualitative analysis showed that many similar metaphors occurred in the corpus (in line with CMT). Therefore, we manually determined the source domains of the metaphors in a bottom-up fashion. If only one variation of a metaphor occurred for a certain source domain, it was added to the category \"other\". A list of the source domains, the number of comments in our corpus that contain them, a Dutch example, and its English translation can be found below together with a linguistic analysis in line with CMT and CMA.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hateful metaphor annotations", |
|
"sec_num": "3.2" |
|
}, |
|
{

"text": "\u2022 Animals (646), e.g., \"migranten zijn bruine apen\" (migrants are brown apes) \u2022 Dirt and personal hygiene (529), e.g., \"de EU is een beerput\" (the EU is a cesspool) \u2022 Body parts (299), e.g., \"bij jouw geboorte hebben ze de baby weggegooid en de moederkoek gehouden\" (when you were born, they threw away the baby and kept the placenta) \u2022 Disease and illness (228), e.g., \"jij bent vergif\" (you're poison) \u2022 History (192), e.g., \"die minister is Hitler\" (that minister is Hitler) \u2022 Food (147), e.g., \"bootvluchtelingen zijn vissoep\" (boat refugees are fish soup) \u2022 Fiction (139), e.g., \"de Bijbel is een sprookjesboek\" (the Bible is a collection of fairy tales) \u2022 Mental conditions (119), e.g., \"ik dacht dat het internetuurtje in het gekkenhuis al voorbij was\" (I thought that internet time in the madhouse was already over) \u2022 Products (107), e.g., \"migranten zijn importbelgen\" (migrants are imported Belgians) \u2022 Children (80), e.g., \"politici zijn kleuters\" (politicians are toddlers) \u2022 Carnival and circus (75), e.g., \"politici zijn clowns\" (politicians are clowns) \u2022 Home and kitchen linen (68), e.g., \"hoofddoeken zijn keukenhanddoeken\" (head scarves are kitchen towels) \u2022 Sight (65), e.g., \"je draagt paardenkleppen\" (you're wearing horse blinkers) \u2022 Religious mythology (44), e.g., \"het paard van Troje is al binnen\" (the Trojan horse is already inside, referring to migrants) \u2022 Sand (24), e.g., \"die migranten moeten terug naar hun zandbak\" (those migrants should return to their sand boxes) \u2022 Tourism (19), e.g., \"oorlogsvluchtelingen zijn gewoon citytrippers\" (war refugees are just on a city trip) \u2022 Machines (14), e.g., \"IS strijders zijn moordmachines\" (IS warriors are murder machines) \u2022 Physical conditions (7), e.g., \"trans-atleten zijn paralympi\u00ebrs\" (trans athletes are paralympians) \u2022 Lottery (4), e.g., \"die migranten denken dat ze de Euromillions gewonnen hebben zeker?\" (those migrants must think that they've won Euromillions) \u2022 Other (349), e.g., \"migranten zijn geleide projectielen\" (migrants are guided missiles)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Hateful metaphor annotations",

"sec_num": "3.2"

},
|
{ |
|
"text": "In our corpus, the source domains in metaphors that express hate towards migrants frequently refer to animals, especially pests (e.g., \"parasites\", \"cockroaches\") and primates (e.g. \"apes\"), commodities (e.g., \"import Belgians/criminality\") and food (e.g., \"rotten apples\", \"boat refugees are fish soup\"). These findings are in line with previous work on English and cross-lingual hate speech (Demjen and Hardaker, 2017; Dervinyt\u00e9, 2009) . Given the persuasive, ideological nature of metaphors (cf. CMA), the usage of these metaphors suggests that the speaker wishes for migrants and their \"species\" to be \"exterminated\", \"kept in the zoo\", \"returned to sender\", \"thrown in the bin\", and to stop \"breeding\".", |
|
"cite_spans": [ |
|
{ |
|
"start": 393, |
|
"end": 420, |
|
"text": "(Demjen and Hardaker, 2017;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 421, |
|
"end": 437, |
|
"text": "Dervinyt\u00e9, 2009)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hateful metaphor annotations", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Conversely, the source domains that were found in hateful metaphors that target the LGBT community often refer to diseases, and mental and physical conditions. This indicates that the user of these metaphors believes that the LGBT community should be \"cured\", \"hospitalized\"or \"internalized\". Other hateful metaphors that target the LGBT community highlight aspects such as appearance and therefore refer to carnival or the circus, such as \"de Antwerp Pride is een carnavalsstoet\" (the Antwerp Pride is a carnival parade).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hateful metaphor annotations", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Journalists or newspapers, on the other hand, are often described as \"linkse\" (left-wing) or \"rechtse\" (right-wing) \"ratten\" (rats) that need to be \"uitgeroeid\" (exteriminated). Other metaphors often refer to dirt and personal hygiene such as \"strontgazet\" (literally \"excrement newspaper\"), \"rioolgazet\" (literally \"sewer newspaper\"), and \"riooljournalist\" (literally \"sewer journalist\") highlighting the quality of journalism.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hateful metaphor annotations", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Other social media users and commenters are metaphorized in a variety of ways in our corpus, depending on the context and on what aspect the speaker wants to highlight. Examples are \"vuile hond\" (dirty dog), \"domme geit\" (stupid goat), \"schaap\" (sheep), \"mongool\" (person with Down syndrome), \"kleuters\" (toddlers), and \"middeleeuwers\" (people who live in the middle ages).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hateful metaphor annotations", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Finally, the \"other\" category is complex, due to its variety of target groups that it contains. Politicians, for example, are often metaphorized as leftwing or right-wing \"rats\", similar to how journalists, newspapers, other social media users, and the followers of those political parties are occasionally metaphorized as well. Further, religious institutions are often characterized as a circus or a hos-pital for the mentally ill, whereas religion itself is described as a fairytale or a disease.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hateful metaphor annotations", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "An SVM model was established with Sklearn (version 0.23.1, Pedregosa et al., 2011 ) by using token 1-and 2-grams with TF-IDF weights fed into a linear SVM, henceforth referred to as \"SVM\". Grid search under 10-fold cross-validation was conducted to determine the optimal settings for the \"C\", \"loss\", and \"penalty\" parameters 3 . Then, the following methods were used to integrate the hateful metaphor features: Generic metaphor features which do not take into account the source domains of the metaphors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 81, |
|
"text": "Pedregosa et al., 2011", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SVM", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 N tokens -the number of hateful metaphorical tokens was counted and appended to the feature vectors. \u2022 N expressions -the number of hateful metaphorical expressions was counted and appended to the feature vectors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SVM", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Suffix -a suffix in the form of the placeholder 4 \"MET\" was added at the end of all hateful metaphorical tokens before vectorization, e.g., \"You're a pigMET.\" This way, the model distinguishes between a hateful, nonliteral token and the same token used literally and in a non-hateful way (e.g., \"That farmer bought a pig\"). \u2022 Tokens -the token \"MET\" was added after all metaphorical tokens before vetorization, e.g., \"You're a pig MET\". This allows the model to see similarities between a word form used literally and the same word form used figuratively, yet distinguish between them because of the placeholder that follows. \u2022 Tags -all subsequent metaphorical tokens were enclosed in tags, such as in \"You're a MET dirty pig MET\". This method allows the model to focus on the on-and offset tokens of the metaphorical expressions. \u2022 All features -the combination of all feature sets described above. For example, this encoding method would transform the utterance \"migrants are a Trojan Horse\" into \"migrants are a MET trojanMET MET horseMET MET\" and append the numerical features (\"2\" and \"1\" in this case) to its feature vector after vectorization to represent the number of hateful metaphorical tokens and expressions in the text, respectively. Source domain metaphor features Since the source domains of the hateful metaphors could contain useful information for the predictions of the type and target of hate speech, because they highlight certain aspects of the target domain and reflect the way that the speaker perceives it (as described in Section 2), all methods described above were also used to encode hateful metaphor information while considering the source domains of the metaphors. More specifically, when using intext metaphor information encoding methods, the \"MET\" placeholder was replaced with the first three characters of the names of the source domain of the metaphor (e.g., \"ANI\" for animal, \"HIS\" for history, etc.). For the numerical features, on the other hand, 20-dimensional vectors were used to count the number of metaphorical tokens/expressions in each comment (each dimension representing one of the 20 source domains Table 4 : 10-fold cross-validation and test set performances (%) on the type prediction task with generic metaphor features (best results in bold).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 2154, |
|
"end": 2161, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "SVM", |
|
"sec_num": "4.1" |
|
}, |
|
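
{

"text": "A minimal sketch of this pipeline and of the generic in-text encodings, under stated assumptions: it is our illustration rather than the authors' released code, 'comments' and 'labels' are hypothetical training lists (holding the optionally encoded comment strings and their labels), and gold metaphor annotations are assumed to be available as token-level boolean masks; for the source domain variant, 'MET' would be replaced by the first three characters of the source domain (e.g., 'ANI'):\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\n\ndef encode_suffix(tokens, is_met):\n    # 'you are a pig' -> 'you are a pigMET' for metaphorical tokens.\n    return ' '.join(t + 'MET' if m else t for t, m in zip(tokens, is_met))\n\ndef encode_tokens(tokens, is_met):\n    # Insert a standalone 'MET' placeholder after each metaphorical token.\n    out = []\n    for t, m in zip(tokens, is_met):\n        out.append(t)\n        if m:\n            out.append('MET')\n    return ' '.join(out)\n\ndef encode_tags(tokens, is_met):\n    # Enclose each contiguous metaphorical span in 'MET ... MET' tags.\n    out, inside = [], False\n    for t, m in zip(tokens, is_met):\n        if m and not inside:\n            out.append('MET')\n            inside = True\n        elif not m and inside:\n            out.append('MET')\n            inside = False\n        out.append(t)\n    if inside:\n        out.append('MET')\n    return ' '.join(out)\n\n# Token 1- and 2-grams with TF-IDF weights fed into a linear SVM; grid search\n# over the 'C', 'loss', and 'penalty' parameters under 10-fold cross-validation.\npipeline = Pipeline([('tfidf', TfidfVectorizer(ngram_range=(1, 2), lowercase=True)),\n                     ('svm', LinearSVC())])\nparam_grid = {'svm__C': [0.5, 1, 10], 'svm__loss': ['hinge', 'squared_hinge'], 'svm__penalty': ['l2']}\nsearch = GridSearchCV(pipeline, param_grid, cv=10, scoring='f1_macro')\nsearch.fit(comments, labels)\n# The numerical features (counts of metaphorical tokens/expressions) can be\n# appended to the TF-IDF vectors with, e.g., scipy.sparse.hstack.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "SVM",

"sec_num": "4.1"

},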
|
{ |
|
"text": "Predictions for both tasks were made with BERTje and RobBERT (de Vries et al., 2019; Delobelle et al., 2020; the Dutch versions of BERT and RobBERTa) using HuggingFace 4.0.0 (Wolf et al., 2020) . In an attempt to improve these models, the \"tags\" method described above was used, but with the \"<met>\" (onset) and \"</met>\" (offset) placeholders for generic features and the same more fine-grained placeholders as described above when using source domain features. This tagging method is frequently used to highlight textual features or external knowledge in sequence-to-sequence tasks such as machine translation and named entity recognition (e.g., Chatterjee et al., 2017; Li et al., 2018) . Four epochs were used for training and all other parameters were set to default. The experiments were conducted five times with different seeds and we report the median of these runs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 84, |
|
"text": "RobBERT (de Vries et al., 2019;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 85, |
|
"end": 108, |
|
"text": "Delobelle et al., 2020;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 109, |
|
"end": 109, |
|
"text": "", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 175, |
|
"end": 194, |
|
"text": "(Wolf et al., 2020)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 648, |
|
"end": 672, |
|
"text": "Chatterjee et al., 2017;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 673, |
|
"end": 689, |
|
"text": "Li et al., 2018)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BERTje and RobBERT", |
|
"sec_num": "4.2" |
|
}, |
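
{

"text": "A minimal sketch of this fine-tuning setup, under stated assumptions: it is our illustration rather than the authors' exact code, 'GroNLP/bert-base-dutch-cased' is BERTje's identifier on the Hugging Face hub, num_labels=5 assumes the five target classes, and train_ds/test_ds are hypothetical tokenized datasets:\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments\n\ntokenizer = AutoTokenizer.from_pretrained('GroNLP/bert-base-dutch-cased')\n# Register the metaphor tags as special tokens so the tokenizer does not split them.\ntokenizer.add_special_tokens({'additional_special_tokens': ['<met>', '</met>']})\n\nmodel = AutoModelForSequenceClassification.from_pretrained('GroNLP/bert-base-dutch-cased', num_labels=5)\nmodel.resize_token_embeddings(len(tokenizer))  # account for the added special tokens\n\n# Four training epochs; all other parameters left at their defaults.\nargs = TrainingArguments(output_dir='out', num_train_epochs=4)\ntrainer = Trainer(model=model, args=args, train_dataset=train_ds, eval_dataset=test_ds)\ntrainer.train()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "BERTje and RobBERT",

"sec_num": "4.2"

},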
|
{ |
|
"text": "The 10-fold cross-validation and test results of the SVM model 5 , BERTje and RobBERT without additional features, with generic features or with source domain features for both tasks can be found in Table 3 , 4, 5 and 6, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 199, |
|
"end": 206, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Quantitative results", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "No additional features Without using additional features, it can be observed that BERTje performed best for both the target and type prediction tasks, closely followed by RobBERT and finally the SVM classifier. It can also be observed that target prediction accuracy is substantially lower than type prediction accuracy for all the models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative results", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Generic features Regarding the SVM model, all proposed feature implementation methods improved the performance of the SVM classifier, with the exceptions of the token labels and number of metaphorical expressions for the target prediction task, and the suffix labels for the type prediction task. The best SVM-based approach for target predictions used the combination of all features, which showed a 2.4% F1-score improvement over the SVM classifier without additional features. For the type prediction task, the number of hateful metaphorical tokens used as feature improved the SVM baseline by 4.6% F1-score. Further, the performance of both BERTje and RobBERT improved by 1.9% when adding metaphor features to the text data for the type prediction task. Adding these labels before training on the target prediction task, however, did not improve the performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative results", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Source domain features With respect to the SVM approach, all feature implementation methods improved its performance for both the type and target prediction tasks, with the exception of the suffix features used for the type prediction task. Amongst the different types of source domain features, both numerical features (number of metaphorical tokens and number of metaphorical expressions) improved the SVM approach the most for type predictions (4% in F1-score). Conversely, adding the source domains after all hateful metaphors as tokens improved target prediction with SVM the most (1.6% in F1-score). On 5 The optimal SVM parameter settings for the target prediction task were {\"C\": 1, \"loss\": \"squared_hinge\", \"penalty\": \"l2\"} and {\"C\": 0.5, \"loss\": \"hinge\", \"penalty\": \"l2\", \"class_weight\": \"balanced\"} for the type prediction task. Table 6 : 10-fold cross-validation and test set performances (%) on the type prediction task with source domain metaphor features (best results in bold).", |
|
"cite_spans": [ |
|
{ |
|
"start": 609, |
|
"end": 610, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 840, |
|
"end": 847, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Quantitative results", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "the other hand, the performance of the language models could only be improved marginally: when adding in-text features before training RobBERT on the type prediction task, its performance increased by 0.1% in F1-score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CV", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Overall Substantial improvements up to 4.6% and 2.4% could be observed in the type and target classification tasks, respectively. These results indicate that hateful metaphor features contribute to type and target classification of hate speech comments in the current experimental setting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CV", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this section, individual instances that were classified correctly only after adding hateful metaphor features are discussed. We focus on two comparisons, namely between the model that showed the highest increase in performance after adding metaphor information and the same model without additional features (per task). For the target prediction task, these are SVM and SVM to which all generic features have been added. For the type prediction task, on the other hand, these are the baseline SVM classifier and the SVM classifier enriched with numerical features based on the number of hateful metaphorical tokens (regardless of their source domains). The confusion matrices of these models are provided in Figures 1, 2, 3 and 4, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 711, |
|
"end": 726, |
|
"text": "Figures 1, 2, 3", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Qualitative results", |
|
"sec_num": "5.2" |
|
}, |
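
{

"text": "A short sketch of how such confusion matrices can be produced, assuming scikit-learn and hypothetical fitted models and held-out data (baseline_svm, enriched_svm, X_test, X_test_enriched, y_test):\nfrom sklearn.metrics import confusion_matrix\n\n# Rows are gold classes, columns are predicted classes.\nprint(confusion_matrix(y_test, baseline_svm.predict(X_test)))\nprint(confusion_matrix(y_test, enriched_svm.predict(X_test_enriched)))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Qualitative results",

"sec_num": "5.2"

},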
|
{ |
|
"text": "Target prediction task For this task, it can be observed that the additional features improved the classification accuracy for all classes. The only exception was the \"journalist/medium\" class, which is the most accurately predicted class using the SVM baseline and is predicted equally accurately when using additional features. On a deeper level, we observed that 52.8% of all instances in the target prediction task that were classified correctly only after adding metaphor features to the SVM baseline contained at least one hateful metaphor. These metaphors were often implicit cases of hate speech, such as \"nep Belgen\" (fake Belgians), \"soortgenoten\" (conspecifics), and \"die leven nog in de middeleeuwen\" (they still live in the Middle Ages). Still, we also found less subtle hateful metaphors, e.g., \"strontvretende kakkerlakken\" (shit eating cockroaches).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Type prediction task As evidenced by Figures 3 and 4, adding hateful metaphor features to the SVM model drastically decreases the number of cases where violent comments are confused with offensive comments, while retaining high classification accuracy for the \"offensive\" class. More specifically, 36.4% of all instances that were classified correctly only after adding hateful metaphor features contained at least one hateful metaphor. Similar to the improvements in the target prediction task, these metaphors were often implicit forms of hate speech, such as \"op [ANONIEM]'s gezicht kan je pannenkoeken bakken\" (\"you could cook pancakes on [ANONYMOUS]'s face\") and afschaffen da klubke (abolish that little club, referring to the Catholic Church).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In this paper, we investigated the usefulness of hateful metaphors as predictive features for two less studied hate speech detection subtasks (namely type and target prediction) and analyzed the annotated hateful metaphors in our corpus in line with Conceptual Metaphor Theory and Critical Metaphor Analysis. LGBT\", 2=\"related to migrants/LGBT\", 3=\"journalist/medium\", 4=\"commenter\", 5=\"other\"). Figure 2 : Confusion matrix for the target classification SVM enriched with all generic features (1=\"migrants/LGBT\", 2=\"related to migrants/LGBT\", 3=\"journalist/medium\", 4=\"commenter\", 5=\"other\"). Performances of SVM, BERTje and RobBERT were provided for both type and target prediction tasks and these models were then enriched with the hateful metaphor features in various ways to show their usefulness. The results show that the target SVM baseline improved by 2.4%. Conversely, BERTje and RobBERT could not be improved with additional features for this task. Regarding the type prediction task, an improvement up to 4.6% was observed for the SVM baseline, whereas the already high-performing BERTje and RobBERT baselines improved by 1.9% F1-score each. From the qualitative analysis that was conducted, it was observed that these improvements contained a large number of implicit forms of hate speech, which is considered to be one of the main challenges of hate speech detection at the moment.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 396, |
|
"end": 404, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "This paper is a starting point for further research into the new area of (hateful) metaphors as predictive features for the hate speech classification tasks. Further research may include investigating whether the same results achieved with an upperbound baseline in this paper (provided by our manually annotated features) can also be obtained when using labels predicted by models that have been trained to detect hateful metaphors. Other future research directions could include investigating more feature encoding methods and conducting ablation studies when combining multiple ways to encode hateful metaphors. In addition, it was observed that the SVM model cen be improved more strongly than BERTje and RobBERT, which suggest that the latter models already contain metaphorical information due to pretraining. Whether this is indeed the case is yet another subject worth investigating in future studies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://www.facebook.com/hln.be; https://www.facebook.com/nieuwsblad.be; https://www.facebook.com/vrtnws", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Since the classes are not distributed equally in the subset used for type classification, the \"class weight\" parameter was also optimized in the type prediction task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In order to ensure that the placeholders were not confused with actual text, all text was lowercased and all placeholders were uppercased before training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
},

{

"text": "The optimal SVM parameter settings for the target prediction task were {\"C\": 1, \"loss\": \"squared_hinge\", \"penalty\": \"l2\"} and {\"C\": 0.5, \"loss\": \"hinge\", \"penalty\": \"l2\", \"class_weight\": \"balanced\"} for the type prediction task.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "",

"sec_num": null

}
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research was supported by an IOF SEP project of the University of Antwerp. It also received funding from the Flemish Government (AI Research Program) and the Flemish Research Foundation through the bilateral research project FWO G070619N \"The linguistic landscape of hate speech on social media\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "SemEval-2019 task 5: Multilingual detection of hate speech against immigrants and women in Twitter", |
|
"authors": [ |
|
{ |
|
"first": "Valerio", |
|
"middle": [], |
|
"last": "Basile", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cristina", |
|
"middle": [], |
|
"last": "Bosco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elisabetta", |
|
"middle": [], |
|
"last": "Fersini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Debora", |
|
"middle": [], |
|
"last": "Nozza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Viviana", |
|
"middle": [], |
|
"last": "Patti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco Manuel Rangel", |
|
"middle": [], |
|
"last": "Pardo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manuela", |
|
"middle": [], |
|
"last": "Sanguinetti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "54--63", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/S19-2007" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Valerio Basile, Cristina Bosco, Elisabetta Fersini, Debora Nozza, Viviana Patti, Francisco Manuel Rangel Pardo, Paolo Rosso, and Manuela San- guinetti. 2019. SemEval-2019 task 5: Multilin- gual detection of hate speech against immigrants and women in Twitter. In Proceedings of the 13th Inter- national Workshop on Semantic Evaluation, pages 54-63, Minneapolis (MN), USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "2020. I feel offended, don't be abusive! Implicit/explicit messages in offensive and abusive language", |
|
"authors": [ |
|
{ |
|
"first": "Tommaso", |
|
"middle": [], |
|
"last": "Caselli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Valerio", |
|
"middle": [], |
|
"last": "Basile", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jelena", |
|
"middle": [], |
|
"last": "Mitrovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Inga", |
|
"middle": [], |
|
"last": "Kartoziya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Granitzer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6193--6202", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tommaso Caselli, Valerio Basile, Jelena Mitrovi\u0107, Inga Kartoziya, and Michael Granitzer. 2020. I feel of- fended, don't be abusive! Implicit/explicit messages in offensive and abusive language. In Proceedings of the 12th Language Resources and Evaluation Con- ference, pages 6193-6202, Marseille, France. Euro- pean Language Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Corpus approaches to critical metaphor analysis", |
|
"authors": [ |
|
{

"first": "Jonathan",

"middle": [],

"last": "Charteris-Black",

"suffix": ""

}
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Charteris-Black. 2004. Corpus approaches to critical metaphor analysis. Palgrave Macmillan.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Guiding neural machine translation decoding with external knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Rajen", |
|
"middle": [], |
|
"last": "Chatterjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matteo", |
|
"middle": [], |
|
"last": "Negri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Turchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fr\u00e9d\u00e9ric", |
|
"middle": [], |
|
"last": "Blain", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Second Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "157--168", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-4716" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rajen Chatterjee, Matteo Negri, Marco Turchi, Mar- cello Federico, Lucia Specia, and Fr\u00e9d\u00e9ric Blain. 2017. Guiding neural machine translation decoding with external knowledge. In Proceedings of the Sec- ond Conference on Machine Translation, pages 157- 168, Copenhagen, Denmark. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Automated hate speech detection and the problem of offensive language", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Davidson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dana", |
|
"middle": [], |
|
"last": "Warmsley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Macy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ingmar", |
|
"middle": [], |
|
"last": "Weber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Davidson, Dana Warmsley, Michael W. Macy, and Ingmar Weber. 2017. Automated hate speech detection and the problem of offensive language. CoRR, abs/1703.04009.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Hate speech dataset from a white supremacy forum", |
|
"authors": [ |
|
{ |
|
"first": "Ona", |
|
"middle": [], |
|
"last": "De Gibert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naiara", |
|
"middle": [], |
|
"last": "Perez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2nd Workshop on Abusive Language Online (ALW2)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--20", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-5102" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ona de Gibert, Naiara Perez, Aitor Garc\u00eda-Pablos, and Montse Cuadros. 2018. Hate speech dataset from a white supremacy forum. In Proceedings of the 2nd Workshop on Abusive Language Online (ALW2), pages 11-20, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "BERTje: A Dutch BERT Model", |
|
"authors": [ |
|
{

"first": "Wietse",

"middle": [],

"last": "de Vries",

"suffix": ""

},

{

"first": "Andreas",

"middle": [],

"last": "van Cranenburgh",

"suffix": ""

},

{

"first": "Arianna",

"middle": [],

"last": "Bisazza",

"suffix": ""

},

{

"first": "Tommaso",

"middle": [],

"last": "Caselli",

"suffix": ""

},

{

"first": "Gertjan",

"middle": [],

"last": "van Noord",

"suffix": ""

},

{

"first": "Malvina",

"middle": [],

"last": "Nissim",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1912.09582" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wietse de Vries, Andreas van Cranenburgh, Arianna Bisazza, Tommaso Caselli, Gertjan van Noord, and Malvina Nissim. 2019. BERTje: A Dutch BERT Model. arXiv:1912.09582.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "RobBERT: a dutch roBERTa-based language model", |
|
"authors": [ |
|
{ |
|
"first": "Pieter", |
|
"middle": [], |
|
"last": "Delobelle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Winters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bettina", |
|
"middle": [], |
|
"last": "Berendt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pieter Delobelle, Thomas Winters, and Bettina Berendt. 2020. RobBERT: a dutch roBERTa-based language model.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Metaphor, impoliteness, and offence in online communication", |
|
"authors": [ |
|
{ |
|
"first": "Zsofia", |
|
"middle": [], |
|
"last": "Demjen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Hardaker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "The Routledge Handbook of Metaphor and Language", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "353--367", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zsofia Demjen and Claire Hardaker. 2017. Metaphor, impoliteness, and offence in online communication. In The Routledge Handbook of Metaphor and Lan- guage, pages 353-367. Routledge.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Conceptual emigration and immigration metaphors in the language of the press: a contrastive analysis", |
|
"authors": [ |
|
{ |
|
"first": "Inga", |
|
"middle": [], |
|
"last": "Dervinyt\u00e9", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Studies about languages", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "49--55", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Inga Dervinyt\u00e9. 2009. Conceptual emigration and im- migration metaphors in the language of the press: a contrastive analysis. Studies about languages, 14:49-55.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis (MN), USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Benchmarking aggression identification in social media", |
|
"authors": [ |
|
{ |
|
"first": "Ritesh", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atul", |
|
"middle": [ |
|
"Kr." |
|
], |
|
"last": "Ojha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the First Workshop on Trolling, Aggression and Cyberbullying (TRAC-2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--11", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ritesh Kumar, Atul Kr. Ojha, Shervin Malmasi, and Marcos Zampieri. 2018. Benchmarking aggression identification in social media. In Proceedings of the First Workshop on Trolling, Aggression and Cyber- bullying (TRAC-2018), pages 1-11, Santa Fe (NM), USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Metaphors We Live By", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Lakoff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1980, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George Lakoff and Mark Johnson. 1980. Metaphors We Live By. University of Chicago Press, Chicago (IL) USA.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A report on the 2020 VUA and TOEFL metaphor detection shared task", |
|
"authors": [ |
|
{ |
|
"first": "Chee", |
|
"middle": [ |
|
"Wee", |
|
"(Ben)" |
|
], |
|
"last": "Leong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beata", |
|
"middle": [ |
|
"Beigman" |
|
], |
|
"last": "Klebanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Hamill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Egon", |
|
"middle": [], |
|
"last": "Stemle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rutuja", |
|
"middle": [], |
|
"last": "Ubale", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xianyang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Second Workshop on Figurative Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "18--29", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chee Wee (Ben) Leong, Beata Beigman Klebanov, Chris Hamill, Egon Stemle, Rutuja Ubale, and Xi- anyang Chen. 2020. A report on the 2020 VUA and TOEFL metaphor detection shared task. In Pro- ceedings of the Second Workshop on Figurative Lan- guage Processing, pages 18-29, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A report on the 2018 VUA metaphor detection shared task", |
|
"authors": [ |
|
{ |
|
"first": "Chee", |
|
"middle": [ |
|
"Wee", |
|
"(Ben)" |
|
], |
|
"last": "Leong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beata", |
|
"middle": [ |
|
"Beigman" |
|
], |
|
"last": "Klebanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ekaterina", |
|
"middle": [], |
|
"last": "Shutova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Workshop on Figurative Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "56--66", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-0907" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chee Wee (Ben) Leong, Beata Beigman Klebanov, and Ekaterina Shutova. 2018. A report on the 2018 VUA metaphor detection shared task. In Proceedings of the Workshop on Figurative Language Processing, pages 56-66, New Orleans (LA), USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Named-entity tagging and domain adaptation for better customized translation", |
|
"authors": [ |
|
{ |
|
"first": "Zhongwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuancong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ai", |
|
"middle": [ |
|
"Ti" |
|
], |
|
"last": "Aw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eng", |
|
"middle": [], |
|
"last": "Siong Chng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haizhou", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Seventh Named Entities Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--46", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-2407" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhongwei Li, Xuancong Wang, Ai Ti Aw, Eng Siong Chng, and Haizhou Li. 2018. Named-entity tagging and domain adaptation for better customized transla- tion. In Proceedings of the Seventh Named Entities Workshop, pages 41-46, Melbourne, Australia. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "The FRENK datasets of socially unacceptable discourse in slovene and english", |
|
"authors": [ |
|
{ |
|
"first": "Nikola", |
|
"middle": [], |
|
"last": "Ljube\u0161i\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Darja", |
|
"middle": [], |
|
"last": "Fi\u0161er", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toma\u017e", |
|
"middle": [], |
|
"last": "Erjavec", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Text, Speech, and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "103--114", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikola Ljube\u0161i\u0107, Darja Fi\u0161er, and Toma\u017e Erjavec. 2019. The FRENK datasets of socially unacceptable dis- course in slovene and english. In Text, Speech, and Dialogue, pages 103-114, Cham. Springer Interna- tional Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "The LiLaH emotion lexicon of Croatian, Dutch and Slovene", |
|
"authors": [ |
|
{ |
|
"first": "Nikola", |
|
"middle": [], |
|
"last": "Ljube\u0161i\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilia", |
|
"middle": [], |
|
"last": "Markov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Darja", |
|
"middle": [], |
|
"last": "Fi\u0161er", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Daelemans", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "153--157", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikola Ljube\u0161i\u0107, Ilia Markov, Darja Fi\u0161er, and Walter Daelemans. 2020. The LiLaH emotion lexicon of Croatian, Dutch and Slovene. In Proceedings of the Third Workshop on Computational Modeling of Peo- ple's Opinions, Personality, and Emotion's in Social Media, pages 153-157, Barcelona, Spain (Online). Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Hate speech detection: Challenges and solutions", |
|
"authors": [ |
|
{ |
|
"first": "Sean", |
|
"middle": [], |
|
"last": "Macavaney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hao-Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katina", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazli", |
|
"middle": [], |
|
"last": "Russell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ophir", |
|
"middle": [], |
|
"last": "Goharian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Frieder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "PloS one", |
|
"volume": "14", |
|
"issue": "8", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1371/journal.pone.0221152" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sean MacAvaney, Hao-Ren Yao, Eugene Yang, Katina Russell, Nazli Goharian, and Ophir Frieder. 2019. Hate speech detection: Challenges and solutions. PloS one, 14(8):e0221152.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Exploring stylometric and emotion-based features for multilingual crossdomain hate speech detection", |
|
"authors": [ |
|
{ |
|
"first": "Ilia", |
|
"middle": [], |
|
"last": "Markov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikola", |
|
"middle": [], |
|
"last": "Ljube\u0161i\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Darja", |
|
"middle": [], |
|
"last": "Fi\u0161er", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Daelemans", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the Eleventh Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "149--159", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilia Markov, Nikola Ljube\u0161i\u0107, Darja Fi\u0161er, and Walter Daelemans. 2021. Exploring stylometric and emotion-based features for multilingual cross- domain hate speech detection. In Proceedings of the Eleventh Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis, pages 149-159, Kyiv, Ukraine (Online). Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Hate speech", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"T." |
|
], |
|
"last": "Nockleby", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Encyclopedia of the American Constitution", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1277--1279", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John T. Nockleby. 2000. Hate speech. In Encyclope- dia of the American Constitution, pages 1277-1279. Macmillan, New York, USA.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Scikit-learn: Machine learning in Python", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pedregosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Varoquaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gramfort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Thirion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Grisel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Blondel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Prettenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Dubourg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Vanderplas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Passos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Cournapeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Brucher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Perrot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Duchesnay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2825--2830", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duch- esnay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Deep-Met: A reading comprehension paradigm for tokenlevel metaphor detection", |
|
"authors": [ |
|
{ |
|
"first": "Chuandong", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fumiyo", |
|
"middle": [], |
|
"last": "Fukumoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoxi", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiyi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rongbo", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiqun", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Second Workshop on Figurative Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "30--39", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.figlang-1.4" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chuandong Su, Fumiyo Fukumoto, Xiaoxi Huang, Jiyi Li, Rongbo Wang, and Zhiqun Chen. 2020. Deep- Met: A reading comprehension paradigm for token- level metaphor detection. In Proceedings of the Sec- ond Workshop on Figurative Language Processing, pages 30-39, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Challenges for toxic comment classification: An in-depth error analysis", |
|
"authors": [ |
|
{ |
|
"first": "Betty", |
|
"middle": [], |
|
"last": "van Aken", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Risch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralf", |
|
"middle": [], |
|
"last": "Krestel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "L\u00f6ser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Betty van Aken, Julian Risch, Ralf Krestel, and Alexan- der L\u00f6ser. 2018. Challenges for toxic comment clas- sification: An in-depth error analysis.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R\u00e9mi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Davison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Shleifer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "von Platen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clara", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yacine", |
|
"middle": [], |
|
"last": "Jernite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Plu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Canwen", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teven", |
|
"middle": [ |
|
"Le" |
|
], |
|
"last": "Scao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Gugger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mariama", |
|
"middle": [], |
|
"last": "Drame", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quentin", |
|
"middle": [], |
|
"last": "Lhoest", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M." |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "38--45", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Funtow- icz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. 2020. Transformers: State-of-the-art natural language pro- cessing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Predicting the type and target of offensive posts in social media", |
|
"authors": [ |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Rosenthal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noura", |
|
"middle": [], |
|
"last": "Farra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ritesh", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcos Zampieri, Shervin Malmasi, Preslav Nakov, Sara Rosenthal, Noura Farra, and Ritesh Kumar. 2019a. Predicting the type and target of offensive posts in social media.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Semeval-2019 task 6: Identifying and categorizing offensive language in social media (offenseval)", |
|
"authors": [ |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Rosenthal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noura", |
|
"middle": [], |
|
"last": "Farra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ritesh", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcos Zampieri, Shervin Malmasi, Preslav Nakov, Sara Rosenthal, Noura Farra, and Ritesh Kumar. 2019b. Semeval-2019 task 6: Identifying and cat- egorizing offensive language in social media (offen- seval).", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Zeses Pitenis, and \u00c7agr\u0131 \u00c7\u00f6ltekin. 2020. Semeval-2020 task 12: Multilingual offensive language identification in social media", |
|
"authors": [ |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Rosenthal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pepa", |
|
"middle": [], |
|
"last": "Atanasova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgi", |
|
"middle": [], |
|
"last": "Karadzhov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamdy", |
|
"middle": [], |
|
"last": "Mubarak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leon", |
|
"middle": [], |
|
"last": "Derczynski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeses", |
|
"middle": [], |
|
"last": "Pitenis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c7a\u011fr\u0131", |
|
"middle": [], |
|
"last": "\u00c7\u00f6ltekin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcos Zampieri, Preslav Nakov, Sara Rosenthal, Pepa Atanasova, Georgi Karadzhov, Hamdy Mubarak, Leon Derczynski, Zeses Pitenis, and \u00c7agr\u0131 \u00c7\u00f6ltekin. 2020. Semeval-2020 task 12: Multilingual offensive language identification in social media (offenseval 2020).", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "Confusion matrix for the target classification SVM baseline (1=\"migrants/", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "Confusion matrix for the type classification SVM baseline (1=\"violence\", 0=\"offensive\").", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"text": "Confusion matrix for the type classification SVM enriched with generic n tokens feature (1=\"violence\", 0=\"offensive\").", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"content": "<table><tr><td/><td/><td/><td>Training set</td><td/><td/><td>Test set</td><td/><td/></tr><tr><td>Task</td><td>Class</td><td colspan=\"7\">Literal Non-literal Both Literal Non-literal Both Total</td></tr><tr><td/><td>Violence</td><td>311</td><td>63</td><td>374</td><td>83</td><td>17</td><td>100</td><td>474</td></tr><tr><td>Type</td><td>Offensive</td><td>1,000</td><td>1,000</td><td>2,000</td><td>250</td><td>250</td><td>500</td><td>2,500</td></tr><tr><td/><td>All</td><td>1,311</td><td>1,063</td><td>2,374</td><td>333</td><td>267</td><td>600</td><td>2,974</td></tr><tr><td/><td>Migrants/LGBT</td><td>200</td><td>200</td><td>400</td><td>50</td><td>50</td><td>100</td><td>500</td></tr><tr><td/><td>Related</td><td>333</td><td>67</td><td>400</td><td>83</td><td>17</td><td>100</td><td>500</td></tr><tr><td>Target</td><td>Journalist/medium Commenter</td><td>328 200</td><td>72 200</td><td>400 400</td><td>82 50</td><td>18 50</td><td>100 100</td><td>500 500</td></tr><tr><td/><td>Other</td><td>200</td><td>200</td><td>400</td><td>50</td><td>50</td><td>100</td><td>500</td></tr><tr><td/><td>All</td><td>1,261</td><td>739</td><td>2,000</td><td>315</td><td>185</td><td>500</td><td>2,500</td></tr></table>", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"text": "Statistics of all hateful comments in our corpus, including the number of hateful comments per type/target class, and the number of literal and non-literal comments (in total and per class)." |
|
}, |
|
"TABREF2": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF4": { |
|
"content": "<table><tr><td colspan=\"6\">: 10-fold cross-validation and test set perfor-</td></tr><tr><td colspan=\"6\">mances (%) on the target prediction task with generic</td></tr><tr><td colspan=\"5\">metaphor features (best results in bold).</td></tr><tr><td/><td>CV</td><td/><td/><td>Test set</td></tr><tr><td>Approach</td><td>F</td><td colspan=\"2\">Std Pre</td><td>Rec</td><td>F</td></tr><tr><td>SVM</td><td colspan=\"5\">71.5 3.5 68.8 79.9 72.3</td></tr><tr><td>+n tokens</td><td colspan=\"5\">73.8 3.2 74.0 81.6 76.9</td></tr><tr><td colspan=\"6\">+n expressions 74.1 3.2 74.2 80.4 76.7</td></tr><tr><td>+suffix</td><td colspan=\"5\">71.3 2.9 68.5 80.9 72.2</td></tr><tr><td>+tokens</td><td colspan=\"5\">73.4 3.4 71.0 82.4 74.8</td></tr><tr><td>+tags</td><td colspan=\"5\">73.1 3.1 71.2 81.0 74.6</td></tr><tr><td>+all</td><td colspan=\"5\">73.6 3.2 73.8 80.6 76.5</td></tr><tr><td>BERTje</td><td>-</td><td>-</td><td colspan=\"3\">80.2 78.5 79.3</td></tr><tr><td>+tags</td><td>-</td><td>-</td><td colspan=\"3\">82.7 80.0 81.2</td></tr><tr><td>RobBERT</td><td>-</td><td>-</td><td colspan=\"3\">81.1 74.8 77.4</td></tr><tr><td>+tags</td><td>-</td><td>-</td><td colspan=\"3\">82.0 77.2 79.3</td></tr></table>", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF6": { |
|
"content": "<table><tr><td colspan=\"6\">: 10-fold cross-validation and test set perfor-</td></tr><tr><td colspan=\"6\">mances (%) on the target prediction task with source</td></tr><tr><td colspan=\"6\">domain metaphor features (best results in bold).</td></tr><tr><td/><td>CV</td><td/><td/><td>Test set</td></tr><tr><td>Approach</td><td>F</td><td colspan=\"2\">Std Pre</td><td>Rec</td><td>F</td></tr><tr><td>SVM</td><td colspan=\"5\">71.5 3.5 68.8 79.9 72.3</td></tr><tr><td>+n tokens</td><td colspan=\"5\">74.3 4.2 73.7 80.1 76.3</td></tr><tr><td colspan=\"6\">+n expressions 74.0 3.1 73.7 80.1 76.3</td></tr><tr><td>+suffix</td><td colspan=\"5\">71.0 3.3 68.4 80.2 72.0</td></tr><tr><td>+tokens</td><td colspan=\"5\">72.9 3.6 69.7 82.9 73.7</td></tr><tr><td>+tags</td><td colspan=\"5\">73.0 3.9 70.9 81.8 74.5</td></tr><tr><td>+all</td><td colspan=\"5\">73.3 4.1 74.3 77.2 75.6</td></tr><tr><td>BERTje</td><td>-</td><td>-</td><td colspan=\"3\">80.2 78.5 79.3</td></tr><tr><td>+tags</td><td>-</td><td>-</td><td colspan=\"3\">81.6 77.1 79.0</td></tr><tr><td>RobBERT</td><td>-</td><td>-</td><td colspan=\"3\">81.1 74.8 77.4</td></tr><tr><td>+tags</td><td>-</td><td>-</td><td colspan=\"3\">79.8 75.8 77.5</td></tr></table>", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"text": "" |
|
} |
|
} |
|
} |
|
} |