{ "paper_id": "2020", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T15:19:02.566606Z" }, "title": "MLEngineer at SemEval-2020 Task 7: BERT-Flair based Humor Detection Model (BFHumor)", "authors": [ { "first": "Farah", "middle": [], "last": "Shatnawi", "suffix": "", "affiliation": { "laboratory": "", "institution": "Jordan University of Science and Technology Irbid", "location": { "country": "Jordan" } }, "email": "ffshatnawi16@cit.just.edu.jo" }, { "first": "Mahmoud", "middle": [], "last": "Hammad", "suffix": "", "affiliation": { "laboratory": "", "institution": "Jordan University of Science and Technology Irbid", "location": { "country": "Jordan" } }, "email": "m-hammad@just.edu.jo" } ], "year": "", "venue": null, "identifiers": {}, "abstract": "Task 7, Assessing the Funniness of Edited News Headlines, in the International Workshop SemEval2020 introduces two sub-tasks to predict the funniness values of edited news headlines from the Reddit website. This paper proposes the BFHumor model of the MLEngineer team that participates in both sub-tasks in this competition. The BFHumor's model is defined as a BERT-Flair based humor detection model that is a combination of different pre-trained models with various Natural Language Processing (NLP) techniques. The Bidirectional Encoder Representations from Transformers (BERT) regressor is considered the primary pre-trained model in our approach, whereas Flair is the main NLP library. It is worth mentioning that the BFHumor model has been ranked 4 th in sub-task1 with a root mean square error (RMSE) value of 0.51966, and it is 0.02 away from the first ranked model. Also, the team is ranked 12 th in the sub-task2 with an accuracy of 0.62291, which is 0.05 away from the top-ranked model. Our results indicate that the BFHumor model is one of the top models for detecting humor in the text.", "pdf_parse": { "paper_id": "2020", "_pdf_hash": "", "abstract": [ { "text": "Task 7, Assessing the Funniness of Edited News Headlines, in the International Workshop SemEval2020 introduces two sub-tasks to predict the funniness values of edited news headlines from the Reddit website. This paper proposes the BFHumor model of the MLEngineer team that participates in both sub-tasks in this competition. The BFHumor's model is defined as a BERT-Flair based humor detection model that is a combination of different pre-trained models with various Natural Language Processing (NLP) techniques. The Bidirectional Encoder Representations from Transformers (BERT) regressor is considered the primary pre-trained model in our approach, whereas Flair is the main NLP library. It is worth mentioning that the BFHumor model has been ranked 4 th in sub-task1 with a root mean square error (RMSE) value of 0.51966, and it is 0.02 away from the first ranked model. Also, the team is ranked 12 th in the sub-task2 with an accuracy of 0.62291, which is 0.05 away from the top-ranked model. Our results indicate that the BFHumor model is one of the top models for detecting humor in the text.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Abstract", "sec_num": null } ], "body_text": [ { "text": "Humor is a distinctive quality of humans, which is considered a figurative language; where the person can express his feelings through social media once entertainment media have existed or if he faces a funny event. 
Researchers have found that humor enhances human health and mood (Hern\u00e1ndez-Far\u00edas et al., 2015; Mao and Liu, 2019; Reyes et al., 2012; Yan and Pedersen, 2017).", "cite_spans": [ { "start": 290, "end": 321, "text": "(Hern\u00e1ndez-Far\u00edas et al., 2015;", "ref_id": "BIBREF13" }, { "start": 322, "end": 340, "text": "Mao and Liu, 2019;", "ref_id": "BIBREF16" }, { "start": 341, "end": 360, "text": "Reyes et al., 2012;", "ref_id": "BIBREF22" }, { "start": 361, "end": 384, "text": "Yan and Pedersen, 2017)", "ref_id": "BIBREF26" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "In recent decades, the world has witnessed a rapid move in the Artificial Intelligence (AI) field toward simulating human intelligence in machines and computer systems (Wen et al., 2019; Raja and S, 2019). One of the main branches of AI is Natural Language Processing (NLP), in which computers learn to understand human languages despite their diversity and complexity, which remain open challenges for NLP systems and communities (Abdullah and Shaikh, 2018; Zhou et al., 2020). Since humor has many facets and can be produced through words (text), gestures (vision), and prosodic cues (acoustics) (Hasan et al., 2019), humor detection is considered a multimodal language problem. Detecting humor is difficult because of (1) the nature of context, emotion, and rhythm, which involves a large degree of complexity for a machine trying to judge the level of humor, and (2) cultural differences: although humor is universal, different cultures perceive it in different ways (Martin et al., 1993).", "cite_spans": [ { "start": 164, "end": 182, "text": "(Wen et al., 2019;", "ref_id": "BIBREF24" }, { "start": 183, "end": 200, "text": "Raja and S, 2019)", "ref_id": "BIBREF21" }, { "start": 459, "end": 486, "text": "(Abdullah and Shaikh, 2018;", "ref_id": "BIBREF0" }, { "start": 487, "end": 505, "text": "Zhou et al., 2020)", "ref_id": "BIBREF28" }, { "start": 634, "end": 654, "text": "(Hasan et al., 2019;", "ref_id": "BIBREF12" }, { "start": 1040, "end": 1061, "text": "(Martin et al., 1993)", "ref_id": "BIBREF17" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Detecting humor from social media sites or TV dialogues using a set of algorithms has been studied thoroughly in the NLP field (Glazkova et al., 2019; Bertero and Fung, 2016). In this paper, we detect humor in news headlines from the Reddit website 1 that are provided by the SemEval-2020 organizers (Hossain et al., 2020). The International Workshop on Semantic Evaluation SemEval-2020 has introduced several NLP shared tasks. Task 7, Assessing the Funniness of Edited News Headlines, aims to motivate participants to build systems that predict the level of funniness in edited news headlines. The task asked participants to replace a word in the headline with a micro-edit and check whether this modification makes the headline funnier. The task consists of two sub-tasks (sub-task1 and sub-task2). In sub-task1, participants predict the funniness of the edited headline, evaluated by the RMSE value. In sub-task2, they predict which of two edited headlines is funnier, evaluated by the accuracy value. Further details about Task 7 and the dataset are given in Section 3. Our team, MLEngineer, proposed the BFHumor model to predict the funniness of the edited news headlines in both sub-tasks.
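To make the two evaluation metrics concrete, the following minimal sketch computes both (assuming NumPy, with illustrative values rather than the official scorer):

    import numpy as np

    # Sub-task1 metric: RMSE between predicted and gold mean funniness grades (0-3).
    gold_grades = np.array([0.2, 1.4, 2.8])   # illustrative gold mean grades
    pred_grades = np.array([0.5, 1.0, 2.6])   # illustrative model predictions
    rmse = np.sqrt(np.mean((pred_grades - gold_grades) ** 2))

    # Sub-task2 metric: accuracy of picking the funnier of two edited headlines (1 or 2).
    gold_choice = np.array([1, 2, 1])
    pred_choice = np.array([1, 2, 2])
    accuracy = np.mean(gold_choice == pred_choice)
    print(rmse, accuracy)
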
Our model is an ensemble of different state-of-the-art pre-trained models combined with various NLP techniques. The MLEngineer team ranked 4th in sub-task1 and 12th in sub-task2 out of 84 teams.", "cite_spans": [ { "start": 127, "end": 150, "text": "(Glazkova et al., 2019;", "ref_id": "BIBREF11" }, { "start": 151, "end": 174, "text": "Bertero and Fung, 2016)", "ref_id": "BIBREF3" }, { "start": 306, "end": 328, "text": "(Hossain et al., 2020)", "ref_id": "BIBREF15" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "The remaining sections of this paper are organized as follows: Section 2 reviews existing work on humor detection. Section 3 provides insights into the proposed system methodology and architecture. Section 4 presents the key experimental setup and the BFHumor results. Finally, Section 5 concludes with the findings of our research.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "A plethora of research has studied figurative language in various datasets to predict funniness values. The publication race in this area is growing at a fast pace between industry and academic researchers. This section presents an analysis of existing work related to humor, irony, ridicule, and satire detection, and to assessing funniness.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Related work", "sec_num": "2" }, { "text": "The authors in (Mao and Liu, 2019; Ortega-Bueno et al., 2018; Farzin et al., 2019; Castro et al., 2018; Weller and Seppi, 2019; Garain, 2019) studied humor detection in various languages, classifying whether each tweet is a joke and how funny it is. In (Mao and Liu, 2019), the authors classified Spanish-language tweets obtained from Twitter as jokes or not, while Ortega-Bueno et al. (2018) predicted the level of funniness of tweets (a score based on the average of 5-star ratings).", "cite_spans": [ { "start": 15, "end": 34, "text": "(Mao and Liu, 2019;", "ref_id": "BIBREF16" }, { "start": 35, "end": 61, "text": "Ortega-Bueno et al., 2018;", "ref_id": "BIBREF19" }, { "start": 62, "end": 82, "text": "Farzin et al., 2019;", "ref_id": "BIBREF9" }, { "start": 83, "end": 103, "text": "Castro et al., 2018;", "ref_id": "BIBREF6" }, { "start": 104, "end": 127, "text": "Weller and Seppi, 2019;", "ref_id": "BIBREF23" }, { "start": 128, "end": 141, "text": "Garain, 2019)", "ref_id": "BIBREF10" }, { "start": 256, "end": 275, "text": "(Mao and Liu, 2019)", "ref_id": "BIBREF16" }, { "start": 422, "end": 449, "text": "(Ortega-Bueno et al., 2018)", "ref_id": "BIBREF19" } ], "ref_spans": [], "eq_spans": [], "section": "Related work", "sec_num": "2" }, { "text": "Several machine learning techniques have been used to predict funniness values. In (Garain, 2019), the researchers proposed a model for the HAHA-2019 task using deep learning algorithms such as LSTM and Bidirectional LSTM (BiLSTM). The researchers in (Blinov et al., 2019) applied several classification algorithms for humor recognition from texts, such as the linear Support Vector Machine (SVM). Another group of researchers (Glazkova et al., 2019) built a neural network classifier and applied it to the HAHA at IberLEF 2019 dataset. 
Also, the authors in (Farzin et al., 2019) used Universal Language Model Fine-tuning for Text Classification (ULMFiT) and Naive Bayes with SVM (NBSVM).", "cite_spans": [ { "start": 86, "end": 100, "text": "(Garain, 2019)", "ref_id": "BIBREF10" }, { "start": 255, "end": 276, "text": "(Blinov et al., 2019)", "ref_id": "BIBREF4" }, { "start": 431, "end": 453, "text": "(Glazkova et al., 2019", "ref_id": "BIBREF11" } ], "ref_spans": [], "eq_spans": [], "section": "Related work", "sec_num": "2" }, { "text": "In this paper, we have built a robust model, named BFHumor, that deals with continuous labels and aims to predict the funniness value of a news headline between zero and three. The model combines various pre-trained models and techniques, mainly the BERT regressor and the Flair NLP library. To the best of our knowledge, we are the first to use this combination of state-of-the-art techniques for humor detection.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Related work", "sec_num": "2" }, { "text": "In this section, we present the workflow of our proposed system to predict the funniness of edited news headlines, as shown in Figure 1 . More details about the workflow are given in the following subsections. ", "cite_spans": [], "ref_spans": [ { "start": 126, "end": 134, "text": "Figure 1", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "System Methodology", "sec_num": "3" }, { "text": "The dataset used in this paper is obtained from Task 7 of SemEval-2020. The organizers of this task (Hossain et al., 2019; Hossain et al., 2020) collected the original news headlines from a popular website, Reddit. This dataset, called Humicroedit, was reduced to 15,095 headlines for sub-task1 and 14,696 headlines for sub-task2. The dataset is divided into training, development, and testing sets for the two sub-tasks, as shown in Table 1 . Some examples of original and edited news headlines appear in Table 2 .", "cite_spans": [ { "start": 99, "end": 121, "text": "(Hossain et al., 2019;", "ref_id": "BIBREF14" }, { "start": 122, "end": 143, "text": "Hossain et al., 2020)", "ref_id": "BIBREF15" } ], "ref_spans": [ { "start": 444, "end": 451, "text": "Table 1", "ref_id": "TABREF0" }, { "start": 516, "end": 524, "text": "Table 2", "ref_id": "TABREF1" } ], "eq_spans": [], "section": "Input Data", "sec_num": "3.1" }, { "text": "The preprocessing and cleaning procedure consists of two critical steps: (1) Word-Replacement and (2) Data Pre-processing. In the first step, we replaced the word marked between angle brackets in the original headline with the provided substitute word, such as replacing the word \"Vice\" with the word \"school\" in (\"Trump was told weeks ago that Flynn misled <Vice/> President\"), which becomes (\"Trump was told weeks ago that Flynn misled school President\").", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Sub-task", "sec_num": null }, { "text": "In the second step, we experimented with several preprocessing packages; however, the best model results came from using the original data with only a small amount of preprocessing. We applied existing preprocessing packages for cleaning the data, such as ekphrasis 2 , spaCy 3 , and clean-text. 4 We tokenized each headline by splitting it into chunks of words. Then, we applied spell correction on those words, unpacked contractions (\"can't\" to \"can not\"), and unpacked hashtags by applying word segmentation (#MeToo to \"Me Too\"). We also converted the text to lowercase and deleted stop words using the spaCy package. Finally, we removed numbers, currency symbols (e.g., $, \u00a3), and punctuation marks (e.g., ?!:;()[]#@), and applied lowercasing with the clean-text package.
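A minimal sketch of this preprocessing pipeline is shown below (assuming the ekphrasis and clean-text packages; the exact option values we used may differ slightly):

    from ekphrasis.classes.preprocessor import TextPreProcessor
    from ekphrasis.classes.tokenizer import SocialTokenizer
    from cleantext import clean

    # Tokenization, spell correction, and unpacking of contractions and hashtags.
    processor = TextPreProcessor(
        segmenter="twitter",          # word statistics for hashtag segmentation (#MeToo -> me too)
        corrector="twitter",          # word statistics for spell correction
        unpack_hashtags=True,
        unpack_contractions=True,     # can't -> can not
        tokenizer=SocialTokenizer(lowercase=True).tokenize,
    )

    def preprocess(headline):
        tokens = processor.pre_process_doc(headline)
        # Lowercasing plus removal of numbers, currency symbols, and punctuation.
        return clean(" ".join(tokens), lower=True, no_numbers=True,
                     no_currency_symbols=True, no_punct=True,
                     replace_with_number="", replace_with_currency_symbol="")
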
", "cite_spans": [ { "start": 306, "end": 307, "text": "4", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Sub-task", "sec_num": null }, { "text": "In this subsection, we describe our proposed model, the BERT-Flair-based Humor Detection Model (BFHumor). The BFHumor model is built from two main components: the BERT regressor and the Flair library, as shown in Figure 2 .", "cite_spans": [], "ref_spans": [ { "start": 210, "end": 218, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "Building BFHumor Model", "sec_num": "3.3" }, { "text": "Sub-model1: we applied BERT regressor models from the bert-sklearn library with four types (bert-base-cased, bert-base-uncased, bert-large-cased, and bert-base-cased-finetuned-mrpc) 5 (Wolf et al., 2019). Sub-model1 is trained on the training dataset and predicts the corresponding mean grade (label) for the testing dataset. We trained our regressor with a training batch size of 16 and an evaluation batch size of 8 for 2 epochs. The learning rate is 3e-5, the validation fraction is 0.0, the number of gradient accumulation steps is 1, and the maximum sequence length is 64.", "cite_spans": [ { "start": 198, "end": 217, "text": "(Wolf et al., 2019)", "ref_id": "BIBREF25" } ], "ref_spans": [], "eq_spans": [], "section": "Building BFHumor Model", "sec_num": "3.3" }, { "text": "Sub-model2: we used the Flair NLP library, which (1) applies several NLP models to text (e.g., named entity recognition), (2) supports multiple languages (e.g., German and French), and (3) provides an interface that extracts word embeddings (Akbik et al., 2019). We then used a RoBERTa type 6 as the underlying Flair embedding to extract word embeddings from the training and testing datasets, specifically roberta-large-mnli, and fed them to the machine learning algorithm (Figure 2 shows the architecture of the BFHumor model). Sub-model2 details: 24 layers, a hidden size of 1024, 16 attention heads, and 355 million parameters.", "cite_spans": [ { "start": 246, "end": 266, "text": "(Akbik et al., 2019)", "ref_id": "BIBREF2" } ], "ref_spans": [ { "start": 311, "end": 319, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "Building BFHumor Model", "sec_num": "3.3" }, { "text": "Finally, to predict the funniness values of the testing data in the two sub-tasks, the Na\u00efve Bayes regressor (Mayo and Frank, 2020) reads these embeddings as input and returns the predictions. Then, we ensembled the predictions of BERT and Na\u00efve Bayes with weights chosen through several experiments to obtain the best results in both sub-tasks.
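A condensed sketch of the two sub-models and their ensemble is shown below (assuming the bert-sklearn and flair packages with the argument names of their public APIs, pre-loaded train_texts/train_grades/test_texts, and a placeholder ensemble weight; sklearn's BayesianRidge stands in for the Naive Bayes regressor of (Mayo and Frank, 2020), and Flair document-level embeddings stand in for the word-embedding extraction described above):

    import numpy as np
    from bert_sklearn import BertRegressor                 # sub-model1
    from flair.data import Sentence
    from flair.embeddings import TransformerDocumentEmbeddings
    from sklearn.linear_model import BayesianRidge          # stand-in regressor

    # Sub-model1: BERT regressor with the hyper-parameters reported above.
    bert = BertRegressor(bert_model="bert-base-cased", epochs=2, learning_rate=3e-5,
                         train_batch_size=16, eval_batch_size=8, max_seq_length=64,
                         validation_fraction=0.0, gradient_accumulation_steps=1)
    bert.fit(train_texts, train_grades)
    bert_pred = bert.predict(test_texts)

    # Sub-model2: headline embeddings from roberta-large-mnli via Flair, fed to a regressor.
    embedder = TransformerDocumentEmbeddings("roberta-large-mnli")

    def embed(texts):
        vectors = []
        for text in texts:
            sentence = Sentence(text)
            embedder.embed(sentence)
            vectors.append(sentence.embedding.detach().cpu().numpy())
        return np.vstack(vectors)

    regressor = BayesianRidge().fit(embed(train_texts), train_grades)
    flair_pred = regressor.predict(embed(test_texts))

    # Ensemble: weighted average; the weights were tuned experimentally per sub-task.
    w = 0.7                                                 # placeholder weight
    final_pred = w * bert_pred + (1 - w) * flair_pred
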
", "cite_spans": [ { "start": 109, "end": 131, "text": "(Mayo and Frank, 2020)", "ref_id": "BIBREF18" } ], "ref_spans": [], "eq_spans": [], "section": "Building BFHumor Model", "sec_num": "3.3" }, { "text": "Several experiments were conducted for sub-task1 using: the BERT regressor with two types (bert-base-cased and bert-base-uncased), the XLNet pre-trained model (Yang et al., 2019), a recurrent neural network (RNN), and NB (Mayo and Frank, 2020; De Mulder et al., 2015). More details about these models are shown in Table 3 .", "cite_spans": [ { "start": 151, "end": 170, "text": "(Yang et al., 2019)", "ref_id": "BIBREF27" }, { "start": 211, "end": 233, "text": "(Mayo and Frank, 2020;", "ref_id": "BIBREF18" }, { "start": 234, "end": 257, "text": "De Mulder et al., 2015)", "ref_id": "BIBREF7" } ], "ref_spans": [ { "start": 311, "end": 318, "text": "Table 3", "ref_id": "TABREF3" } ], "eq_spans": [], "section": "Sub-task1", "sec_num": "4.1.1" }, { "text": "Firstly, we applied bert-base-cased and bert-base-uncased as the underlying BERT types (Devlin et al., 2018), and the XLNet pre-trained model (Yang et al., 2019; Wolf et al., 2019), with different hyper-parameters, using Anaconda software 7 and Google Colab. 8 The model sizes for the BERT and XLNet models are as follows: 12 layers, a hidden size of 768, 12 self-attention heads, and 110 million total parameters. The RMSE results are 0.55974, 0.62023, and 0.57896, respectively.", "cite_spans": [ { "start": 83, "end": 104, "text": "(Devlin et al., 2018)", "ref_id": "BIBREF8" }, { "start": 135, "end": 154, "text": "(Yang et al., 2019;", "ref_id": "BIBREF27" }, { "start": 155, "end": 173, "text": "Wolf et al., 2019)", "ref_id": "BIBREF25" }, { "start": 251, "end": 252, "text": "8", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Sub-task1", "sec_num": "4.1.1" }, { "text": "Secondly, the architecture of the RNN we used contains input, hidden, and output layers (De Mulder et al., 2015). The input layer uses word2vec word embeddings of size 300, followed by two Long Short-Term Memory (LSTM) layers (256 units each, recurrent dropout of 0.2, dropout of 0.1, and return sequences set to true in the first layer and false in the second), two dense layers (256 units with ReLU activation), and two dropout layers (rate 0.3). The output layer is a dense layer with one unit and a sigmoid activation function. The loss function is mean squared error, the optimizer is Adam, the metric is mean squared error, the batch size is 52, and the number of epochs is 200. With these parameters, the RMSE value obtained is 0.57855.
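For completeness, a minimal sketch of this RNN regressor follows (assuming Keras; the word2vec lookup that produces the 300-dimensional input vectors and the data tensors x_train/y_train are omitted, and the sigmoid output implies grades rescaled to [0, 1]):

    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import LSTM, Dense, Dropout

    # Inputs are sequences of 300-dimensional word2vec vectors.
    model = Sequential([
        LSTM(256, dropout=0.1, recurrent_dropout=0.2,
             return_sequences=True, input_shape=(None, 300)),
        LSTM(256, dropout=0.1, recurrent_dropout=0.2, return_sequences=False),
        Dense(256, activation="relu"),
        Dropout(0.3),
        Dense(256, activation="relu"),
        Dropout(0.3),
        Dense(1, activation="sigmoid"),
    ])
    model.compile(loss="mean_squared_error", optimizer="adam",
                  metrics=["mean_squared_error"])
    # model.fit(x_train, y_train, batch_size=52, epochs=200)
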
", "cite_spans": [ { "start": 85, "end": 109, "text": "(De Mulder et al., 2015)", "ref_id": "BIBREF7" } ], "ref_spans": [], "eq_spans": [], "section": "Sub-task1", "sec_num": "4.1.1" }, { "text": "We also extracted 153 features using the AffectiveTweets package (Bravo-Marquez et al., 2019) in the Weka tool 9 plus Python code. The Weka features are extracted using the Embedding, Input Lexicon, and SentiStrength functions, while the Python features are Jaccard similarity, cosine similarity, and complexity. We then fed these features into NB, which gave an RMSE value of 0.57223.", "cite_spans": [ { "start": 56, "end": 84, "text": "(Bravo-Marquez et al., 2019)", "ref_id": "BIBREF5" } ], "ref_spans": [], "eq_spans": [], "section": "Sub-task1", "sec_num": "4.1.1" }, { "text": "We also conducted several experiments for sub-task2, such as the BERT regressor with bert-large-cased, ELMo (Peters et al., 2018), and roberta-large-mnli as a RoBERTa type from the Flair library. Firstly, we applied bert-large-cased as the underlying BERT type (Devlin et al., 2018) in Anaconda software. The model sizes for this type are as follows: 24 layers, a hidden size of 1024, 16 self-attention heads, and 340 million total parameters. The accuracy result is 0.55741.", "cite_spans": [ { "start": 437, "end": 463, "text": "ELMO (Peters et al., 2018)", "ref_id": "BIBREF20" }, { "start": 592, "end": 613, "text": "(Devlin et al., 2018)", "ref_id": "BIBREF8" } ], "ref_spans": [], "eq_spans": [], "section": "Sub-task2", "sec_num": "4.1.2" }, { "text": "Secondly, we used the roberta-large-mnli and ELMo models and fed their word embeddings into an NB algorithm, obtaining accuracy values of 0.58980 and 0.54931, respectively. The hyper-parameters, numbers of dimensions, algorithms, and accuracy values for these three models are shown in Table 4 .", "cite_spans": [], "ref_spans": [ { "start": 309, "end": 316, "text": "Table 4", "ref_id": null } ], "eq_spans": [], "section": "Sub-task2", "sec_num": "4.1.2" }, { "text": "In both sub-tasks, we use the same general structure with only one difference, the output phase (see Figure 2 ). Each sub-task passes the edited headlines through the two models, which produce two predictions that are then merged.", "cite_spans": [], "ref_spans": [ { "start": 110, "end": 118, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "BFHumor Results", "sec_num": "4.2" }, { "text": "We noticed the following findings when conducting our experiments:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "BFHumor Results", "sec_num": "4.2" }, { "text": "1. Using two epochs in any of the BERT regressor model types gives better prediction results than using more than two epochs; only rarely did one epoch give the best prediction results.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "BFHumor Results", "sec_num": "4.2" }, { "text": "2. Splitting the data into 0.8 for training and 0.2 for testing using the hold-out method in the NB regressor gives the best results.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "BFHumor Results", "sec_num": "4.2" }, { "text": "3. Using the roberta-large-mnli word embeddings with NB gives the best results. (Table 4 : Model information and results of sub-task2.)", "cite_spans": [], "ref_spans": [ { "start": 79, "end": 86, "text": "Table 4", "ref_id": null } ], "eq_spans": [], "section": "BFHumor Results", "sec_num": "4.2" }, { "text": "4. Feeding the entire training dataset, cleaned with the clean-text package, to the bert-base-uncased type as input gives the best results.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "BFHumor Results", "sec_num": "4.2" }, { "text": "The MLEngineer team participated in Task 7 of SemEval-2020 with the BFHumor model, which achieved the 4th rank out of 49 participants in sub-task1 based on the RMSE metric, and the 12th rank out of 32 participants in sub-task2 based on the accuracy metric, as shown in Table 5 .", "cite_spans": [], "ref_spans": [ { "start": 268, "end": 275, "text": "Table 5", "ref_id": null } ], "eq_spans": [], "section": "BFHumor Results", "sec_num": "4.2" }, { "text": "Sub-task | Metric | Result: Sub-task-1, RMSE, 0.51966; Sub-task-2, Accuracy, 0.62291. (Table 5 : Official results of both sub-tasks.)", "cite_spans": [], "ref_spans": [ { "start": 66, "end": 73, "text": "Table 5", "ref_id": null } ], "eq_spans": [], "section": "Sub-tasks", "sec_num": null }, { "text": "In this paper, we have participated in Task 7 of SemEval-2020 as the MLEngineer team. 
We presented our novel model, BFHumor, a BERT-Flair based humor detection model that predicts the funniness values of edited headlines in both sub-tasks. BFHumor is a unique model because it combines the BERT regressor and the Flair library and uses the same underlying architecture in both sub-tasks.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion", "sec_num": "5" }, { "text": "In the BFHumor model, we selected bert-base-cased, bert-base-uncased, bert-large-cased, and bert-base-cased-finetuned-mrpc as the underlying BERT regressor models. We also chose roberta-large-mnli as the underlying Flair type. Then, we merged the prediction results of the two sub-models.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion", "sec_num": "5" }, { "text": "The BFHumor model outperformed the baseline system in the competition task based on both the RMSE value and the accuracy, indicating that it is a promising model for detecting humor in text: we were among the top 4 teams in sub-task1 with an RMSE value of 0.51966, which is 0.02 away from the first-ranked model, and we ranked 12th in sub-task2 with an accuracy of 0.62291, which is 0.05 away from the first-ranked model.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion", "sec_num": "5" }, { "text": "2 ekphrasis: https://github.com/cbaziotis/ekphrasis 3 spaCy: https://stackoverflow.com/questions/45605946/how-to-do-text-pre-processing-using-spacy 4 clean-text: https://github.com/jfilter/clean-text 5 Pre-trained models: https://huggingface.co/transformers/pretrained_models.html 6 RoBERTa: https://github.com/pytorch/fairseq/tree/master/examples/roberta", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "7 Anaconda site: https://www.anaconda.com/ 8 Google Colab site: https://colab.research.google.com/notebooks/intro.ipynb 9 Weka tool: https://www.tutorialspoint.com/weka/index.htm", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null } ], "back_matter": [], "bib_entries": { "BIBREF0": { "ref_id": "b0", "title": "Teamuncc at semeval-2018 task 1: Emotion detection in english and arabic tweets using deep learning", "authors": [ { "first": "Malak", "middle": [], "last": "Abdullah", "suffix": "" }, { "first": "Samira", "middle": [], "last": "Shaikh", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the 12th international workshop on semantic evaluation", "volume": "", "issue": "", "pages": "350--357", "other_ids": {}, "num": null, "urls": [], "raw_text": "Malak Abdullah and Samira Shaikh. 2018. Teamuncc at semeval-2018 task 1: Emotion detection in english and arabic tweets using deep learning. In Proceedings of the 12th international workshop on semantic evaluation, pages 350-357.", "links": null }, "BIBREF1": { "ref_id": "b1", "title": "Sedat: sentiment and emotion detection in arabic text using cnn-lstm deep learning", "authors": [ { "first": "Malak", "middle": [], "last": "Abdullah", "suffix": "" }, { "first": "Mirsad", "middle": [], "last": "Hadzikadic", "suffix": "" }, { "first": "Samira", "middle": [], "last": "Shaikh", "suffix": "" } ], "year": 2018, "venue": "17th IEEE International Conference on Machine Learning and Applications (ICMLA)", "volume": "", "issue": "", "pages": "835--840", "other_ids": {}, "num": null, "urls": [], "raw_text": "Malak Abdullah, Mirsad Hadzikadic, and Samira Shaikh. 2018. 
Sedat: sentiment and emotion detection in arabic text using cnn-lstm deep learning. In 2018 17th IEEE International Conference on Machine Learning and Applications (ICMLA), pages 835-840. IEEE.", "links": null }, "BIBREF2": { "ref_id": "b2", "title": "Flair: An easy-to-use framework for state-of-the-art nlp", "authors": [ { "first": "Alan", "middle": [], "last": "Akbik", "suffix": "" }, { "first": "Tanja", "middle": [], "last": "Bergmann", "suffix": "" }, { "first": "Duncan", "middle": [], "last": "Blythe", "suffix": "" }, { "first": "Kashif", "middle": [], "last": "Rasul", "suffix": "" }, { "first": "Stefan", "middle": [], "last": "Schweter", "suffix": "" }, { "first": "Roland", "middle": [], "last": "Vollgraf", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)", "volume": "", "issue": "", "pages": "54--59", "other_ids": {}, "num": null, "urls": [], "raw_text": "Alan Akbik, Tanja Bergmann, Duncan Blythe, Kashif Rasul, Stefan Schweter, and Roland Vollgraf. 2019. Flair: An easy-to-use framework for state-of-the-art nlp. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations), pages 54-59.", "links": null }, "BIBREF3": { "ref_id": "b3", "title": "A long short-term memory framework for predicting humor in dialogues", "authors": [ { "first": "Dario", "middle": [], "last": "Bertero", "suffix": "" }, { "first": "Pascale", "middle": [], "last": "Fung", "suffix": "" } ], "year": 2016, "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", "volume": "", "issue": "", "pages": "130--135", "other_ids": {}, "num": null, "urls": [], "raw_text": "Dario Bertero and Pascale Fung. 2016. A long short-term memory framework for predicting humor in dialogues. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 130-135.", "links": null }, "BIBREF4": { "ref_id": "b4", "title": "Large dataset and language model funtuning for humor recognition", "authors": [ { "first": "Vladislav", "middle": [], "last": "Blinov", "suffix": "" }, { "first": "Valeria", "middle": [], "last": "Bolotova-Baranova", "suffix": "" }, { "first": "Pavel", "middle": [], "last": "Braslavski", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "4027--4032", "other_ids": {}, "num": null, "urls": [], "raw_text": "Vladislav Blinov, Valeria Bolotova-Baranova, and Pavel Braslavski. 2019. Large dataset and language model fun- tuning for humor recognition. 
In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4027-4032.", "links": null }, "BIBREF5": { "ref_id": "b5", "title": "Affectivetweets: a weka package for analyzing affect in tweets", "authors": [ { "first": "Felipe", "middle": [], "last": "Bravo-Marquez", "suffix": "" }, { "first": "Eibe", "middle": [], "last": "Frank", "suffix": "" }, { "first": "Bernhard", "middle": [], "last": "Pfahringer", "suffix": "" }, { "first": "Saif M", "middle": [], "last": "Mohammad", "suffix": "" } ], "year": 2019, "venue": "Journal of Machine Learning Research", "volume": "", "issue": "", "pages": "1--6", "other_ids": {}, "num": null, "urls": [], "raw_text": "Felipe Bravo-Marquez, Eibe Frank, Bernhard Pfahringer, and Saif M Mohammad. 2019. Affectivetweets: a weka package for analyzing affect in tweets. Journal of Machine Learning Research, pages 1-6.", "links": null }, "BIBREF6": { "ref_id": "b6", "title": "Overview of the haha task: Humor analysis based on human annotation at ibereval 2018", "authors": [ { "first": "Santiago", "middle": [], "last": "Castro", "suffix": "" }, { "first": "Luis", "middle": [], "last": "Chiruzzo", "suffix": "" }, { "first": "Aiala", "middle": [], "last": "Ros\u00e1", "suffix": "" } ], "year": 2018, "venue": "IberEval@ SEPLN", "volume": "", "issue": "", "pages": "187--194", "other_ids": {}, "num": null, "urls": [], "raw_text": "Santiago Castro, Luis Chiruzzo, and Aiala Ros\u00e1. 2018. Overview of the haha task: Humor analysis based on human annotation at ibereval 2018. In IberEval@ SEPLN, pages 187-194.", "links": null }, "BIBREF7": { "ref_id": "b7", "title": "A survey on the application of recurrent neural networks to statistical language modeling", "authors": [ { "first": "Steven", "middle": [], "last": "Wim De Mulder", "suffix": "" }, { "first": "Marie-Francine", "middle": [], "last": "Bethard", "suffix": "" }, { "first": "", "middle": [], "last": "Moens", "suffix": "" } ], "year": 2015, "venue": "Computer Speech & Language", "volume": "30", "issue": "1", "pages": "61--98", "other_ids": {}, "num": null, "urls": [], "raw_text": "Wim De Mulder, Steven Bethard, and Marie-Francine Moens. 2015. A survey on the application of recurrent neural networks to statistical language modeling. Computer Speech & Language, 30(1):61-98.", "links": null }, "BIBREF8": { "ref_id": "b8", "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", "authors": [ { "first": "Jacob", "middle": [], "last": "Devlin", "suffix": "" }, { "first": "Ming-Wei", "middle": [], "last": "Chang", "suffix": "" }, { "first": "Kenton", "middle": [], "last": "Lee", "suffix": "" }, { "first": "Kristina", "middle": [], "last": "Toutanova", "suffix": "" } ], "year": 2018, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1810.04805" ] }, "num": null, "urls": [], "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirec- tional transformers for language understanding. 
arXiv preprint arXiv:1810.04805.", "links": null }, "BIBREF9": { "ref_id": "b9", "title": "Applying a pre-trained language model to spanish twitter humor prediction", "authors": [ { "first": "Piotr", "middle": [], "last": "Bobak Farzin", "suffix": "" }, { "first": "Jeremy", "middle": [], "last": "Czapla", "suffix": "" }, { "first": "", "middle": [], "last": "Howard", "suffix": "" } ], "year": 2019, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1907.03187" ] }, "num": null, "urls": [], "raw_text": "Bobak Farzin, Piotr Czapla, and Jeremy Howard. 2019. Applying a pre-trained language model to spanish twitter humor prediction. arXiv preprint arXiv:1907.03187.", "links": null }, "BIBREF10": { "ref_id": "b10", "title": "Humor analysis based on human annotation (haha)-2019: Humor analysis at tweet level using deep learning", "authors": [ { "first": "Avishek", "middle": [], "last": "Garain", "suffix": "" } ], "year": 2019, "venue": "IberLEF@ SEPLN", "volume": "", "issue": "", "pages": "191--196", "other_ids": {}, "num": null, "urls": [], "raw_text": "Avishek Garain. 2019. Humor analysis based on human annotation (haha)-2019: Humor analysis at tweet level using deep learning. In IberLEF@ SEPLN, pages 191-196.", "links": null }, "BIBREF11": { "ref_id": "b11", "title": "Utmn at haha@ iberlef2019: Recognizing humor in spanish tweets using hard parameter sharing for neural networks", "authors": [ { "first": "Anna", "middle": [], "last": "Glazkova", "suffix": "" }, { "first": "Nadezhda", "middle": [], "last": "Ganzherli", "suffix": "" }, { "first": "Elena", "middle": [], "last": "Mikhalkova", "suffix": "" } ], "year": 2019, "venue": "In IberLEF@ SEPLN", "volume": "", "issue": "", "pages": "222--228", "other_ids": {}, "num": null, "urls": [], "raw_text": "Anna Glazkova, Nadezhda Ganzherli, and Elena Mikhalkova. 2019. Utmn at haha@ iberlef2019: Recognizing humor in spanish tweets using hard parameter sharing for neural networks. In IberLEF@ SEPLN, pages 222- 228.", "links": null }, "BIBREF12": { "ref_id": "b12", "title": "Ur-funny: A multimodal language dataset for understanding humor", "authors": [ { "first": "Wasifur", "middle": [], "last": "Md Kamrul Hasan", "suffix": "" }, { "first": "Amir", "middle": [], "last": "Rahman", "suffix": "" }, { "first": "Jianyuan", "middle": [], "last": "Zadeh", "suffix": "" }, { "first": "", "middle": [], "last": "Zhong", "suffix": "" } ], "year": 2019, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1904.06618" ] }, "num": null, "urls": [], "raw_text": "Md Kamrul Hasan, Wasifur Rahman, Amir Zadeh, Jianyuan Zhong, Md Iftekhar Tanveer, Louis-Philippe Morency, and Mohammed Hoque. 2019. Ur-funny: A multimodal language dataset for understanding humor. arXiv preprint arXiv:1904.06618.", "links": null }, "BIBREF13": { "ref_id": "b13", "title": "Applying basic features from sentiment analysis for automatic irony detection", "authors": [ { "first": "Iraz\u00fa", "middle": [], "last": "Hern\u00e1ndez-Far\u00edas", "suffix": "" }, { "first": "Jos\u00e9-Miguel", "middle": [], "last": "Bened\u00ed", "suffix": "" }, { "first": "Paolo", "middle": [], "last": "Rosso", "suffix": "" } ], "year": 2015, "venue": "Iberian Conference on Pattern Recognition and Image Analysis", "volume": "", "issue": "", "pages": "337--344", "other_ids": {}, "num": null, "urls": [], "raw_text": "Iraz\u00fa Hern\u00e1ndez-Far\u00edas, Jos\u00e9-Miguel Bened\u00ed, and Paolo Rosso. 2015. 
Applying basic features from sentiment analysis for automatic irony detection. In Iberian Conference on Pattern Recognition and Image Analysis, pages 337-344. Springer.", "links": null }, "BIBREF14": { "ref_id": "b14", "title": "president vows to cuthair", "authors": [ { "first": "Nabil", "middle": [], "last": "Hossain", "suffix": "" }, { "first": "John", "middle": [], "last": "Krumm", "suffix": "" }, { "first": "Michael", "middle": [], "last": "Gamon", "suffix": "" } ], "year": 2019, "venue": "Dataset and analysis of creative text editing for humorous headlines", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1906.00274" ] }, "num": null, "urls": [], "raw_text": "Nabil Hossain, John Krumm, and Michael Gamon. 2019. \" president vows to cuthair\": Dataset and analysis of creative text editing for humorous headlines. arXiv preprint arXiv:1906.00274.", "links": null }, "BIBREF15": { "ref_id": "b15", "title": "Semeval-2020 Task 7: Assessing humor in edited news headlines", "authors": [ { "first": "Nabil", "middle": [], "last": "Hossain", "suffix": "" }, { "first": "John", "middle": [], "last": "Krumm", "suffix": "" }, { "first": "Michael", "middle": [], "last": "Gamon", "suffix": "" }, { "first": "Henry", "middle": [], "last": "Kautz", "suffix": "" } ], "year": 2020, "venue": "Proceedings of International Workshop on Semantic Evaluation (SemEval-2020)", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Nabil Hossain, John Krumm, Michael Gamon, and Henry Kautz. 2020. Semeval-2020 Task 7: Assessing humor in edited news headlines. In Proceedings of International Workshop on Semantic Evaluation (SemEval-2020), Barcelona, Spain.", "links": null }, "BIBREF16": { "ref_id": "b16", "title": "A bert-based approach for automatic humor detection and scoring", "authors": [ { "first": "Jihang", "middle": [], "last": "Mao", "suffix": "" }, { "first": "Wanli", "middle": [], "last": "Liu", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the Iberian Languages Evaluation Forum", "volume": "", "issue": "", "pages": "197--202", "other_ids": {}, "num": null, "urls": [], "raw_text": "Jihang Mao and Wanli Liu. 2019. A bert-based approach for automatic humor detection and scoring. In Proceed- ings of the Iberian Languages Evaluation Forum (IberLEF 2019). CEUR Workshop Proceedings, CEUR-WS, Bilbao, Spain (9 2019), pages 197-202.", "links": null }, "BIBREF17": { "ref_id": "b17", "title": "Humor, coping with stress, self-concept, and psychological well-being. Humor", "authors": [ { "first": "A", "middle": [], "last": "Rod", "suffix": "" }, { "first": "Nicholas", "middle": [ "A" ], "last": "Martin", "suffix": "" }, { "first": "Joan", "middle": [], "last": "Kuiper", "suffix": "" }, { "first": "Kathryn", "middle": [ "A" ], "last": "Olinger", "suffix": "" }, { "first": "", "middle": [], "last": "Dance", "suffix": "" } ], "year": 1993, "venue": "", "volume": "6", "issue": "", "pages": "89--89", "other_ids": {}, "num": null, "urls": [], "raw_text": "Rod A Martin, Nicholas A Kuiper, L Joan Olinger, and Kathryn A Dance. 1993. Humor, coping with stress, self-concept, and psychological well-being. 
Humor, 6:89-89.", "links": null }, "BIBREF18": { "ref_id": "b18", "title": "Improving naive bayes for regression with optimized artificial surrogate data", "authors": [ { "first": "Michael", "middle": [], "last": "Mayo", "suffix": "" }, { "first": "Eibe", "middle": [], "last": "Frank", "suffix": "" } ], "year": 2020, "venue": "Applied Artificial Intelligence", "volume": "34", "issue": "6", "pages": "484--514", "other_ids": {}, "num": null, "urls": [], "raw_text": "Michael Mayo and Eibe Frank. 2020. Improving naive bayes for regression with optimized artificial surrogate data. Applied Artificial Intelligence, 34(6):484-514.", "links": null }, "BIBREF19": { "ref_id": "b19", "title": "Uo upv: Deep linguistic humor detection in spanish social media", "authors": [ { "first": "Reynier", "middle": [], "last": "Ortega-Bueno", "suffix": "" }, { "first": "Carlos", "middle": [ "E" ], "last": "Muniz-Cuza", "suffix": "" }, { "first": "Jos\u00e9 E Medina", "middle": [], "last": "Pagola", "suffix": "" }, { "first": "Paolo", "middle": [], "last": "Rosso", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the Third Workshop on Evaluation of Human Language Technologies for Iberian Languages (IberEval 2018) co-located with 34th Conference of the Spanish Society for Natural Language Processing", "volume": "", "issue": "", "pages": "204--213", "other_ids": {}, "num": null, "urls": [], "raw_text": "Reynier Ortega-Bueno, Carlos E Muniz-Cuza, Jos\u00e9 E Medina Pagola, and Paolo Rosso. 2018. Uo upv: Deep linguistic humor detection in spanish social media. In Proceedings of the Third Workshop on Evaluation of Human Language Technologies for Iberian Languages (IberEval 2018) co-located with 34th Conference of the Spanish Society for Natural Language Processing (SEPLN 2018), pages 204-213.", "links": null }, "BIBREF20": { "ref_id": "b20", "title": "Deep contextualized word representations", "authors": [ { "first": "E", "middle": [], "last": "Matthew", "suffix": "" }, { "first": "Mark", "middle": [], "last": "Peters", "suffix": "" }, { "first": "Mohit", "middle": [], "last": "Neumann", "suffix": "" }, { "first": "Matt", "middle": [], "last": "Iyyer", "suffix": "" }, { "first": "Christopher", "middle": [], "last": "Gardner", "suffix": "" }, { "first": "Kenton", "middle": [], "last": "Clark", "suffix": "" }, { "first": "Luke", "middle": [], "last": "Lee", "suffix": "" }, { "first": "", "middle": [], "last": "Zettlemoyer", "suffix": "" } ], "year": 2018, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1802.05365" ] }, "num": null, "urls": [], "raw_text": "Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettle- moyer. 2018. Deep contextualized word representations. arXiv preprint arXiv:1802.05365.", "links": null }, "BIBREF21": { "ref_id": "b21", "title": "A conceptual overview and systematic review on artificial intelligence and its approaches", "authors": [ { "first": "Beschi", "middle": [], "last": "Raja", "suffix": "" }, { "first": "S", "middle": [], "last": "Sneha", "suffix": "" } ], "year": 2019, "venue": "International Journal of Emerging Technology and Innovative Engineering", "volume": "5", "issue": "12", "pages": "821--828", "other_ids": {}, "num": null, "urls": [], "raw_text": "Beschi Raja and Sneha S. 2019. A conceptual overview and systematic review on artificial intelligence and its approaches. 
International Journal of Emerging Technology and Innovative Engineering, 5(12):821-828.", "links": null }, "BIBREF22": { "ref_id": "b22", "title": "From humor recognition to irony detection: The figurative language of social media", "authors": [ { "first": "Antonio", "middle": [], "last": "Reyes", "suffix": "" }, { "first": "Paolo", "middle": [], "last": "Rosso", "suffix": "" }, { "first": "Davide", "middle": [], "last": "Buscaldi", "suffix": "" } ], "year": 2012, "venue": "Data & Knowledge Engineering", "volume": "74", "issue": "", "pages": "1--12", "other_ids": {}, "num": null, "urls": [], "raw_text": "Antonio Reyes, Paolo Rosso, and Davide Buscaldi. 2012. From humor recognition to irony detection: The figurative language of social media. Data & Knowledge Engineering, 74:1-12.", "links": null }, "BIBREF23": { "ref_id": "b23", "title": "Humor detection: A transformer gets the last laugh", "authors": [ { "first": "Orion", "middle": [], "last": "Weller", "suffix": "" }, { "first": "Kevin", "middle": [], "last": "Seppi", "suffix": "" } ], "year": 2019, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1909.00252" ] }, "num": null, "urls": [], "raw_text": "Orion Weller and Kevin Seppi. 2019. Humor detection: A transformer gets the last laugh. arXiv preprint arXiv:1909.00252.", "links": null }, "BIBREF24": { "ref_id": "b24", "title": "Desiderata for delivering nlp to accelerate healthcare ai advancement and a mayo clinic nlp-as-a-service implementation", "authors": [ { "first": "Andrew", "middle": [], "last": "Wen", "suffix": "" }, { "first": "Sunyang", "middle": [], "last": "Fu", "suffix": "" }, { "first": "Sungrim", "middle": [], "last": "Moon", "suffix": "" }, { "first": "Mohamed", "middle": [ "El" ], "last": "Wazir", "suffix": "" }, { "first": "Andrew", "middle": [], "last": "Rosenbaum", "suffix": "" }, { "first": "C", "middle": [], "last": "Vinod", "suffix": "" }, { "first": "Sijia", "middle": [], "last": "Kaggal", "suffix": "" }, { "first": "Sunghwan", "middle": [], "last": "Liu", "suffix": "" }, { "first": "Hongfang", "middle": [], "last": "Sohn", "suffix": "" }, { "first": "Jungwei", "middle": [], "last": "Liu", "suffix": "" }, { "first": "", "middle": [], "last": "Fan", "suffix": "" } ], "year": 2019, "venue": "npj Digital Medicine", "volume": "2", "issue": "1", "pages": "1--7", "other_ids": {}, "num": null, "urls": [], "raw_text": "Andrew Wen, Sunyang Fu, Sungrim Moon, Mohamed El Wazir, Andrew Rosenbaum, Vinod C Kaggal, Sijia Liu, Sunghwan Sohn, Hongfang Liu, and Jungwei Fan. 2019. Desiderata for delivering nlp to accelerate healthcare ai advancement and a mayo clinic nlp-as-a-service implementation. 
npj Digital Medicine, 2(1):1-7.", "links": null }, "BIBREF25": { "ref_id": "b25", "title": "Huggingface's transformers: State-of-the-art natural language processing", "authors": [ { "first": "Thomas", "middle": [], "last": "Wolf", "suffix": "" }, { "first": "Lysandre", "middle": [], "last": "Debut", "suffix": "" }, { "first": "Victor", "middle": [], "last": "Sanh", "suffix": "" }, { "first": "Julien", "middle": [], "last": "Chaumond", "suffix": "" }, { "first": "Clement", "middle": [], "last": "Delangue", "suffix": "" }, { "first": "Anthony", "middle": [], "last": "Moi", "suffix": "" }, { "first": "Pierric", "middle": [], "last": "Cistac", "suffix": "" }, { "first": "Tim", "middle": [], "last": "Rault", "suffix": "" }, { "first": "R\u00e9mi", "middle": [], "last": "Louf", "suffix": "" }, { "first": "Morgan", "middle": [], "last": "Funtowicz", "suffix": "" } ], "year": 2019, "venue": "ArXiv", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Funtowicz, et al. 2019. Huggingface's transformers: State-of-the-art natural language processing. ArXiv, pages arXiv-1910.", "links": null }, "BIBREF26": { "ref_id": "b26", "title": "Who's to say what's funny? a computer using language models and deep learning", "authors": [ { "first": "Xinru", "middle": [], "last": "Yan", "suffix": "" }, { "first": "Ted", "middle": [], "last": "Pedersen", "suffix": "" } ], "year": 2017, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1705.10272" ] }, "num": null, "urls": [], "raw_text": "Xinru Yan and Ted Pedersen. 2017. Who's to say what's funny? a computer using language models and deep learning, that's who! arXiv preprint arXiv:1705.10272.", "links": null }, "BIBREF27": { "ref_id": "b27", "title": "Xlnet: Generalized autoregressive pretraining for language understanding", "authors": [ { "first": "Zhilin", "middle": [], "last": "Yang", "suffix": "" }, { "first": "Zihang", "middle": [], "last": "Dai", "suffix": "" }, { "first": "Yiming", "middle": [], "last": "Yang", "suffix": "" }, { "first": "Jaime", "middle": [], "last": "Carbonell", "suffix": "" }, { "first": "R", "middle": [], "last": "Russ", "suffix": "" }, { "first": "Quoc V", "middle": [], "last": "Salakhutdinov", "suffix": "" }, { "first": "", "middle": [], "last": "Le", "suffix": "" } ], "year": 2019, "venue": "Advances in neural information processing systems", "volume": "", "issue": "", "pages": "5753--5763", "other_ids": {}, "num": null, "urls": [], "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural information process- ing systems, pages 5753-5763.", "links": null }, "BIBREF28": { "ref_id": "b28", "title": "Progress in neural nlp: modeling, learning, and reasoning. Engineering", "authors": [ { "first": "Ming", "middle": [], "last": "Zhou", "suffix": "" }, { "first": "Nan", "middle": [], "last": "Duan", "suffix": "" }, { "first": "Shujie", "middle": [], "last": "Liu", "suffix": "" }, { "first": "Heung-Yeung", "middle": [], "last": "Shum", "suffix": "" } ], "year": 2020, "venue": "", "volume": "6", "issue": "", "pages": "275--290", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ming Zhou, Nan Duan, Shujie Liu, and Heung-Yeung Shum. 2020. 
Progress in neural nlp: modeling, learning, and reasoning. Engineering, 6(3):275-290.", "links": null } }, "ref_entries": { "FIGREF0": { "type_str": "figure", "uris": null, "num": null, "text": "Work-flow of the BFHumor model." }, "TABREF0": { "html": null, "content": "
Sub-task | Train | Dev | Test | Total
Sub-task-1 | 9,652 | 2,419 | 3,024 | 15,095
Sub-task-2 | 9,381 | 2,355 | 2,960 | 14,696
", "num": null, "text": "Dataset size for the two sub-tasks of Task 7.", "type_str": "table" }, "TABREF1": { "html": null, "content": "", "num": null, "text": "", "type_str": "table" }, "TABREF3": { "html": null, "content": "
Table 3: Model information and results of sub-task1.
", "num": null, "text": "", "type_str": "table" } } } }