|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:32:29.465278Z" |
|
}, |
|
"title": "Classification of Censored Tweets in Chinese Language using XLNet", |
|
"authors": [ |
|
{ |
|
"first": "Shaikh", |
|
"middle": [], |
|
"last": "Sahil", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Institute of Technology Karnataka", |
|
"location": { |
|
"settlement": "Surathkal", |
|
"region": "Mangalore", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Anand", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Institute of Technology Karnataka", |
|
"location": { |
|
"settlement": "Surathkal", |
|
"region": "Mangalore", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "m_anandkumar@nitk.edu.in" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In the growth of today's world and advanced technology, social media networks play a significant role in impacting human lives. Censorship is the overthrowing of speech, public transmission, or other details that play a vast role in social media. The content may be considered harmful, sensitive, or inconvenient. Authorities like institutes, governments, and other organizations conduct Censorship. This paper has implemented a model that helps classify censored and uncensored tweets as a binary classification. The paper describes submission to the Censorship shared task of the NLP4IF 2021 workshop. We used various transformerbased pre-trained models, and XLNet outputs a better accuracy among all. We fine-tuned the model for better performance and achieved a reasonable accuracy, and calculated other performance metrics.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In the growth of today's world and advanced technology, social media networks play a significant role in impacting human lives. Censorship is the overthrowing of speech, public transmission, or other details that play a vast role in social media. The content may be considered harmful, sensitive, or inconvenient. Authorities like institutes, governments, and other organizations conduct Censorship. This paper has implemented a model that helps classify censored and uncensored tweets as a binary classification. The paper describes submission to the Censorship shared task of the NLP4IF 2021 workshop. We used various transformerbased pre-trained models, and XLNet outputs a better accuracy among all. We fine-tuned the model for better performance and achieved a reasonable accuracy, and calculated other performance metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The suppression of words, images, and ideas is known as Censorship. The government or the private organization can carry Censorship based on objectionable, harmful, sensitive, or inconvenient material. There are different types of Censorship; for example, when a person uses Censorship for their work or speech, this type of Censorship is known as self-censorship. Censorship is used for many things like books, music, videos, movies, etc., for various reasons like hate speech, national security, etc. (Khurana et al., 2017) . Many countries in their law provide protections against Censorship, but there is much uncertainty in determining what could be censored and what could not be censored.", |
|
"cite_spans": [ |
|
{ |
|
"start": 503, |
|
"end": 525, |
|
"text": "(Khurana et al., 2017)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, nowadays, we know that most of the data and the information are available on the internet, so many governments strictly monitor the disturbing or objectionable content on the internet. We could not use any method other than the software like fraud censorship detection and disturbing and objectionable content monitor, which works continuously and maintains the same accuracy for monitoring this vast data size.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper examines the methodologies and various machine learning domains that classify the censored and uncensored tweets associated with the workshop (Shaar et al., 2021) . We used multiple models such as BERT (Bidirectional Encoder Representations from Transformers) (Devlin et al., 2018) , DeBERTa (Decoding-enhanced BERT with disentangled attention) (He et al., 2020) , ELECTRA (Clark et al., 2020) , and XLNet (a generic autoregressive pre-training procedure) for binary classification of the tweets. \"0\" says that the tweet is uncensored, and \"1\" says that the tweet is censored. Also, we have experimented with various phases, such as data preprocessing, tokenization, and fine-tuning for model prediction. Further, we will go through various performance metrics such as accuracy, precision, and recall. We achieved a reasonable accuracy using XLNet as compared to other models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 173, |
|
"text": "(Shaar et al., 2021)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 292, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 356, |
|
"end": 373, |
|
"text": "(He et al., 2020)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 384, |
|
"end": 404, |
|
"text": "(Clark et al., 2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2 Relevant Work (Aceto and Pescap\u00e8, 2015) proposed a source for censoring procedures and a characterization of censoring systems and studied the tools and various censorship detection platforms. They also presented a characterization plan to analyze and examine multiple censored and uncensored data. They used their results to understand current hurdles and suggested new directions in the area of censorship detection.", |
|
"cite_spans": [ |
|
{ |
|
"start": 16, |
|
"end": 41, |
|
"text": "(Aceto and Pescap\u00e8, 2015)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(Ben Jones and Gill, 2014) presented an automated system that permits continuous measurements of block pages and filters them from generated. They claimed that their system detects 95% of the block pages, recognized five filtering tools, and evaluated performance metrics and various fingerprinting methods. (Athanasopoulos et al., 2011) presented the idea and implementation of a web-based censorship monitor named \"CensMon\". CensMon works automatically and does not depends on Internet users to inform censored websites. Possible censorship is distinguished from access network breakdowns, and various input streams are utilized to define the type of censored data. They showed that their model detects the censored data favourably and points filtering methodologies efficiently used by the censor.", |
|
"cite_spans": [ |
|
{ |
|
"start": 308, |
|
"end": 337, |
|
"text": "(Athanasopoulos et al., 2011)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(Niaki et al., 2019) presented ICLab used for censorship research that is known to be an internet measurement platform. It can recognize DNS manipulation where the browser initially purposes its IP address with a DNS query and TCP-packed injection. ICLabs attempts to reduce false positives and manual validation through performing operations and going through all the processing levels. They plotted various graphs, planned, and calculated metrics and concluded that ICLab detects different censorship mechanisms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The dataset of the shared task has been built using a web scraper (Kei Yin Ng and Peng, 2020) that contains censored and uncensored tweets gathered for a duration of 4 months (August 29, 2018 , to December 29, 2018 . The dataset attributes contain tweets (represented by the text in the dataset) and label, where the \"text\" field contains the information collected in the Chinese language, and \"label\" contains 0's and 1's where '0' signifies the tweet as uncensored and '1' signifies as a censored tweet. The first few lines and format of the dataset is shown in Fig. 1 . ", |
|
"cite_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 191, |
|
"text": "(August 29, 2018", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 192, |
|
"end": 214, |
|
"text": ", to December 29, 2018", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 564, |
|
"end": 571, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The XLNet (Yang et al., 2019 ) is a transformer-based machine learning method for Natural Language Processing tasks. It is famous for a generalized autoregressive pretraining method which is one of the most significant emerging models of NLP. The XLNet consists of the recent innovations in NLP, stating the solutions and other approaches regarding language modelling. XLNet is also known for the auto-regressive language model that promotes joint predictions over a sequence of tokens on transformer design. It aims to find the possibility of a word token's overall alterations of word tokens in a sentence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 28, |
|
"text": "(Yang et al., 2019", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The language model comprises two stages, the pretrain phase and fine-tune phase. XLNet mainly concentrates on the pre-train phase. Permutation Language Modeling is one of the new objectives which is implemented in the pre-train phase. We used \"hfl/chinesexlnet-base\" as a pre-trained model (Cui et al., 2020) for Chinese data that targets enhancing Chinese NLP resources and contributes a broad category of Chinese pre-trained model selection.", |
|
"cite_spans": [ |
|
{ |
|
"start": 290, |
|
"end": 308, |
|
"text": "(Cui et al., 2020)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Initially, the dataset is preprocessed, and the generated tokens are given input to XLNet pre-trained model. The model trains the data over 20 epochs and further goes through a mean pool, passing through a fully connected layer for fine-tuning and classification, and predicts the data over a given test set. Fig. 2 shows the architecture of the XLNet model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 309, |
|
"end": 315, |
|
"text": "Fig. 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
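{
"text": "The following is a minimal, illustrative sketch of the pipeline described above (pre-trained XLNet encoder, mean pooling over the token representations, and a fully connected layer for binary classification). It is a hypothetical reconstruction using PyTorch and the Hugging Face \"transformers\" library, not the authors' exact implementation; the class name and the use of XLNetModel are assumptions.\n\nimport torch\nimport torch.nn as nn\nfrom transformers import XLNetModel\n\nclass XLNetMeanPoolClassifier(nn.Module):\n    # Hypothetical reconstruction: XLNet encoder -> mean pool -> fully connected layer.\n    def __init__(self, model_name='hfl/chinese-xlnet-base', num_labels=2):\n        super().__init__()\n        self.encoder = XLNetModel.from_pretrained(model_name)\n        self.classifier = nn.Linear(self.encoder.config.d_model, num_labels)\n\n    def forward(self, input_ids, attention_mask):\n        hidden = self.encoder(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state\n        # Mean-pool over the non-padding token positions.\n        mask = attention_mask.unsqueeze(-1).float()\n        pooled = (hidden * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1e-9)\n        return self.classifier(pooled)  # logits for uncensored (0) vs. censored (1)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Methodology",
"sec_num": "4"
},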
|
{ |
|
"text": "The dataset contains fields like \"text\" and \"label\" only, extra attribute \"id\" is added to the dataset for better preprocessing. Also, the noisy information from the dataset has been filtered out by using the \"tweet-preprocessor\" library. After preprocessing the dataset with the first few lines is shown in Fig. 3 . ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 308, |
|
"end": 314, |
|
"text": "Fig. 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Preprocessing", |
|
"sec_num": "4.1" |
|
}, |
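{
"text": "As an illustration of this step, the sketch below uses the \"tweet-preprocessor\" library (imported as \"preprocessor\") together with pandas; the file names and the choice of cleaning options are assumptions, not the authors' exact script.\n\nimport pandas as pd\nimport preprocessor as p  # the tweet-preprocessor library\n\n# Strip URLs, user mentions, and emojis from the raw tweets.\np.set_options(p.OPT.URL, p.OPT.MENTION, p.OPT.EMOJI)\n\ndf = pd.read_csv('train.csv')        # hypothetical file name with 'text' and 'label' columns\ndf.insert(0, 'id', range(len(df)))   # add the extra 'id' attribute\ndf['text'] = df['text'].astype(str).apply(p.clean)\ndf.to_csv('train_clean.csv', index=False)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data Preprocessing",
"sec_num": "4.1"
},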
|
{ |
|
"text": "Tokenization breaks down a text document into a phrase, sentence, paragraph, or smaller units, such as single words. Those smaller units are said to be tokens. All this breakdown happens with the help of a tokenizer before feeding it to the model. We used \"XLNetTokenizer\" on the pre-trained model, as the models need tokens to be in an orderly fashion. The tokenizer imports from the \"transformers\" library. So, word segmentation can be said to break down a sentence into component words that are to be feed into the model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tokenization", |
|
"sec_num": "4.2" |
|
}, |
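{
"text": "A minimal sketch of the tokenization step is shown below, assuming the Hugging Face \"transformers\" library and the \"hfl/chinese-xlnet-base\" checkpoint; the example sentences and the maximum length are placeholders.\n\nfrom transformers import XLNetTokenizer\n\ntokenizer = XLNetTokenizer.from_pretrained('hfl/chinese-xlnet-base')\n\ntexts = ['这是一个例子', '另一个例子']  # placeholder tweets\nencodings = tokenizer(\n    texts,\n    padding=True,        # pad to the longest sequence in the batch\n    truncation=True,     # cut overly long tweets\n    max_length=128,      # assumed maximum length\n    return_tensors='pt', # PyTorch tensors for the model\n)\nprint(encodings['input_ids'].shape)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Tokenization",
"sec_num": "4.2"
},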
|
{ |
|
"text": "A pre-trained model is used to classify the text, where an encoder subnetwork is combined with a fully connected layer for prediction. Further, the tokenized training data is used to fine-tune the model weights. We have used \"XLNetForSequenceClassification\" for sequence classification. It consists of a linear layer on the pooled output peak. The model targets to do binary classification on the test data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fine-Tuning", |
|
"sec_num": "4.3" |
|
}, |
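{
"text": "The sketch below illustrates how \"XLNetForSequenceClassification\" can be loaded and applied for this binary task; it is a minimal example under the stated assumptions (two placeholder tweets and labels), not the authors' exact code.\n\nimport torch\nfrom transformers import XLNetTokenizer, XLNetForSequenceClassification\n\ntokenizer = XLNetTokenizer.from_pretrained('hfl/chinese-xlnet-base')\nmodel = XLNetForSequenceClassification.from_pretrained(\n    'hfl/chinese-xlnet-base', num_labels=2  # binary: uncensored (0) vs. censored (1)\n)\n\nbatch = tokenizer(['例子一', '例子二'], padding=True, return_tensors='pt')\nlabels = torch.tensor([0, 1])\noutputs = model(**batch, labels=labels)  # loss for fine-tuning, logits for prediction\nprint(outputs.loss.item(), outputs.logits.shape)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Fine-Tuning",
"sec_num": "4.3"
},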
|
{ |
|
"text": "We have used Adam optimizer to fine-tune the pretrained model and performed label encoding for output labels. The softmax over the logits used for prediction and the learning rate is initialized with 2e-5, and twenty epochs were used for training. After training the data with XLNet, we achieved a training accuracy of 0.99.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "5" |
|
}, |
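{
"text": "A minimal, self-contained sketch of the training setup described above (Adam optimizer, learning rate 2e-5, twenty epochs, softmax over the logits for prediction) is given below; the toy in-memory data and batch size are placeholders, not the authors' actual training script.\n\nimport torch\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom transformers import XLNetTokenizer, XLNetForSequenceClassification\n\ntokenizer = XLNetTokenizer.from_pretrained('hfl/chinese-xlnet-base')\nmodel = XLNetForSequenceClassification.from_pretrained('hfl/chinese-xlnet-base', num_labels=2)\n\n# Toy data standing in for the preprocessed tweets.\ntexts, labels = ['例子一', '例子二'], [0, 1]\nenc = tokenizer(texts, padding=True, truncation=True, max_length=64, return_tensors='pt')\nloader = DataLoader(TensorDataset(enc['input_ids'], enc['attention_mask'], torch.tensor(labels)), batch_size=2)\n\noptimizer = Adam(model.parameters(), lr=2e-5)\nmodel.train()\nfor epoch in range(20):  # twenty epochs, as in the paper\n    for input_ids, attention_mask, y in loader:\n        optimizer.zero_grad()\n        out = model(input_ids=input_ids, attention_mask=attention_mask, labels=y)\n        out.loss.backward()\n        optimizer.step()\n\n# Softmax over the logits gives class probabilities; argmax gives the predicted label.\nmodel.eval()\nwith torch.no_grad():\n    probs = torch.softmax(model(**enc).logits, dim=-1)\n    print(probs.argmax(dim=-1))  # 0 = uncensored, 1 = censored",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments and Results",
"sec_num": "5"
},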
|
{ |
|
"text": "Validation Table 1 : Performance of the system on validation data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 18, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We calculated precision, recall and F1-measure for the validation set with all the four models used in our investigation, as shown in Table 1 . We got a precision of 0.634 and a recall of 0.634, which is far better than other models. Fig. 4 shows the plot for different epochs vs. validation accuracy during the training phase. Moving ahead with test data, we achieved a precision of 0.65 and recall of 0.64 using XLNet. Table 2. shows the precision, recall, and F1-Measure for test set using XLNet. Also, we found majority class baseline as 49.98 and human baseline as 23.83 as shown in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 141, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 234, |
|
"end": 240, |
|
"text": "Fig. 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 421, |
|
"end": 429, |
|
"text": "Table 2.", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 588, |
|
"end": 595, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": null |
|
}, |
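{
"text": "For reference, the reported precision, recall, and F1-measure can be computed as in the sketch below using scikit-learn; the label arrays and the macro averaging scheme are assumptions for illustration.\n\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support\n\ny_true = [0, 1, 1, 0, 1]  # placeholder gold labels\ny_pred = [0, 1, 0, 0, 1]  # placeholder model predictions\n\nprecision, recall, f1, _ = precision_recall_fscore_support(\n    y_true, y_pred, average='macro'  # averaging scheme is an assumption\n)\nprint(accuracy_score(y_true, y_pred), precision, recall, f1)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments and Results",
"sec_num": "5"
},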
|
{ |
|
"text": "Finally, we made one CSV file where the file contains test data tweet with label attribute. Fig. 5 shows the test data prediction, where the tweets are classified as censored and uncensored tweets. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 98, |
|
"text": "Fig. 5", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the paper, we investigated various pre-trained models and achieved a reasonable accuracy for XLNET. We cleaned the dataset during preprocessing, which is further given input to the model. XLNet seems to be influential in the classification problem moving deep into censorship detection. XLNet performs better than BERT, DeBERTa, and ELECTRA having its improved training methodology, where it uses permutation language modelling predicting the tokens randomly. The future work is to examine other NLP models and finetune them censorship detection in other languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "6" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Internet censorship detection: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Giuseppe", |
|
"middle": [], |
|
"last": "Aceto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Pescap\u00e8", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.comnet.2015.03.008" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Giuseppe Aceto and Antonio Pescap\u00e8. 2015. Internet censorship detection: A survey. Computer Networks, 83.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Censmon: A web censorship monitor", |
|
"authors": [ |
|
{ |
|
"first": "Elias", |
|
"middle": [], |
|
"last": "Athanasopoulos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "USENIX Workshop on Free and Open Communications on the Internet (FOCI 11)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elias Athanasopoulos, Sotiris Ioannidis, and Andreas Sfakianakis. 2011. Censmon: A web censorship mon- itor. In USENIX Workshop on Free and Open Com- munications on the Internet (FOCI 11), San Fran- cisco, CA. USENIX Association.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Automated detection and fingerprinting of censorship block pages", |
|
"authors": [], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2663716.2663722" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nick Feamster Ben Jones, Tzu-Wen Lee and Phillipa Gill. 2014. Automated detection and fingerprinting of censorship block pages. Stony Brook University.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Electra: Pre-training text encoders as discriminators rather than generators", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark, Minh-Thang Luong, Quoc Le, and Christo- pher Manning. 2020. Electra: Pre-training text en- coders as discriminators rather than generators.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Revisiting pre-trained models for Chinese natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wanxiang", |
|
"middle": [], |
|
"last": "Che", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shijin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guoping", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "657--668", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Shijin Wang, and Guoping Hu. 2020. Revisiting pre-trained models for Chinese natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings, pages 657-668, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Deberta: Decoding-enhanced bert with disentangled attention", |
|
"authors": [ |
|
{ |
|
"first": "Pengcheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weizhu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pengcheng He, Xiaodong Liu, Jianfeng Gao, and Weizhu Chen. 2020. Deberta: Decoding-enhanced bert with disentangled attention.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Linguistic fingerprints of internet censorship: The case of sina weibo", |
|
"authors": [ |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Feldman Kei Yin Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "34", |
|
"issue": "", |
|
"pages": "446--453", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1609/aaai.v34i01.5381" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anna Feldman Kei Yin Ng and Jing Peng. 2020. Lin- guistic fingerprints of internet censorship: The case of sina weibo. volume 34, pages 446-453. Proceedings of the AAAI Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Natural language processing: State of the art, current trends and challenges", |
|
"authors": [ |
|
{ |
|
"first": "Diksha", |
|
"middle": [], |
|
"last": "Khurana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Koli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiran", |
|
"middle": [], |
|
"last": "Khatter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sukhdev", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diksha Khurana, Aditya Koli, Kiran Khatter, and Sukhdev Singh. 2017. Natural language processing: State of the art, current trends and challenges.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Zachary Weinberg, Nguyen Phong Hoang, Abbas Razaghpanah, Nicolas Christin, and Phillipa Gill", |
|
"authors": [ |
|
{ |
|
"first": "Shinyoung", |
|
"middle": [], |
|
"last": "Arian Akhavan Niaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arian Akhavan Niaki, Shinyoung Cho, Zachary Wein- berg, Nguyen Phong Hoang, Abbas Razaghpanah, Nicolas Christin, and Phillipa Gill. 2019. Iclab: A global, longitudinal internet censorship measurement platform.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Findings of the NLP4IF-2021 shared task on fighting the COVID-19 infodemic and censorship detection", |
|
"authors": [ |
|
{ |
|
"first": "Shaden", |
|
"middle": [], |
|
"last": "Shaar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Firoj", |
|
"middle": [], |
|
"last": "Alam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giovanni", |
|
"middle": [], |
|
"last": "Da San", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Martino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wajdi", |
|
"middle": [], |
|
"last": "Nikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Zaghouani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Feldman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the Fourth Workshop on Natural Language Processing for Internet Freedom: Censorship, Disinformation, and Propaganda, NLP4IF@NAACL' 21, Online. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shaden Shaar, Firoj Alam, Giovanni Da San Martino, Alex Nikolov, Wajdi Zaghouani, Preslav Nakov, and Anna Feldman. 2021. Findings of the NLP4IF-2021 shared task on fighting the COVID-19 infodemic and censorship detection. In Proceedings of the Fourth Workshop on Natural Language Processing for In- ternet Freedom: Censorship, Disinformation, and Propaganda, NLP4IF@NAACL' 21, Online. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Ruslan Salakhutdinov, and Quoc Le. 2019. Xlnet: Generalized autoregressive pretraining for lan- guage understanding.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "First few lines of dataset. The dataset comprises three sets, i.e. train, validation and test set. The train set comprises 1512 tweets, and the validation set comprises 189 tweets. The test set only comprises 189 tweets with no labels." |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Architecture of XLNet." |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "First few lines of dataset after preprocessing." |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Figure 4: Validation Accuracy plot." |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "First few lines of test data after prediction." |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table><tr><td>Class</td><td>Accuracy</td></tr><tr><td colspan=\"2\">Majority baseline 49.98</td></tr><tr><td>Human baseline</td><td>23.83</td></tr><tr><td>XLNet</td><td>0.64</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Performance of the system on test data using XLNet." |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "" |
|
} |
|
} |
|
} |
|
} |