|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:43:50.371953Z" |
|
}, |
|
"title": "ARBML: Democratizing Arabic Natural Language Processing Tools", |
|
"authors": [ |
|
{ |
|
"first": "Zaid", |
|
"middle": [], |
|
"last": "Alyafeai", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Maged", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Al-Shaibani", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Automating natural language understanding is a lifelong quest addressed for decades. With the help of advances in machine learning and particularly, deep learning, we are able to produce state of the art models that can imitate human interactions with languages. Unfortunately, these advances are controlled by the availability of language resources. Arabic advances in this field , although it has a great potential, are still limited. This is apparent in both research and development. In this paper, we showcase some NLP models we trained for Arabic. We also present our methodology and pipeline to build such models from data collection, data preprocessing, tokenization and model deployment. These tools help in the advancement of the field and provide a systematic approach for extending NLP tools to many languages.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Automating natural language understanding is a lifelong quest addressed for decades. With the help of advances in machine learning and particularly, deep learning, we are able to produce state of the art models that can imitate human interactions with languages. Unfortunately, these advances are controlled by the availability of language resources. Arabic advances in this field , although it has a great potential, are still limited. This is apparent in both research and development. In this paper, we showcase some NLP models we trained for Arabic. We also present our methodology and pipeline to build such models from data collection, data preprocessing, tokenization and model deployment. These tools help in the advancement of the field and provide a systematic approach for extending NLP tools to many languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Arabic language is a widely used language. It is the sixth most spoken language in the world (Farghaly and Shaalan, 2009) . It also has a noticeable influence on many other languages around the globe. Compared to English, Arabic is morphologically a very rich language (Habash, 2010) with relatively complex grammar and cursive script including the use of diacritics. Diacritics are special characters added to Arabic writing to replace the absence of short vowels. Moreover, Arabic has a variety of dialects that may greatly differ in style and grammar.", |
|
"cite_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 121, |
|
"text": "(Farghaly and Shaalan, 2009)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Arabic content on the web is vastly emerging with great diversity in style and subjects, written in many dialects. This opportunity opens doors for research to hone machine capabilities to automate language understanding and comprehension. However, Arabic inherent characteristics makes it difficult to resolve and require linguistic expertise.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Natural Language Processing is gaining a lot of attractions within the research community. The aim is to create machines that can replicate or exceed human language understanding. On another perspective, a lot of effort is invested to develop software applications to port research advances to industry. Another effort is also directed to facilitate researchers job by automating routine workflows, cleaning and preprocessing, for example. Some examples of this are huggingface, allennlp and flare. Most of these tools are designed to work for English or generalize the pipeline to work for other languages. Arabic, although it is not as popular as other languages tools, also has some contributions, but in the linguistics part only. Some promising examples are MADAMIRA (Pasha et al., 2014) , FARASA (Abdelali et al., 2016) , Adawat (Zerrouki, 2020) and CAMeL NLP (Obeid et al., 2020) . These tools address a large spectrum of NLP tasks for Arabic like segmentation, part of speech tagging, named entity recognition, diacritizatoin and grammatical analysis. However, most of these tools are not using the recent advances in NLP. Unfortunately, in the Arabic community, open source contribution is not widely accepted. This is due to the copyrights restrictions made by authors as some of these tools are not licensed for commercial use. Although, the source code can be delivered on demand, this mechanism is still not development friendly with unclear methodology and processes to version control and collaboration.", |
|
"cite_spans": [ |
|
{ |
|
"start": 772, |
|
"end": 792, |
|
"text": "(Pasha et al., 2014)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 802, |
|
"end": 825, |
|
"text": "(Abdelali et al., 2016)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 866, |
|
"end": 886, |
|
"text": "(Obeid et al., 2020)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we introduce our contribution to the Arabic language open source community. We present a collection of models that can be utilized and improved to solve a wide variety of many Natural Language Processing tasks. Moreover, we introduce three libraries for scrapping, cleaning and tokenization. We also provide notebooks that can be easily used to replicate our experiments. We provide a flexible code design that can be implemented and extended to other languages. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We created ARBML in the hope of democratizing Arabic natural language processing by creating a set of demos as well as tools for making it easy to use for novice users, developers and researchers. We revise the NLP pipeline and make it suitable for Arabic as indicated in Figure 1 . We provide datasets, preprocessors, tokenizers and models. Furthermore, we host notebooks that can replicate our experiments and help users to understand how to do each task. In the next few sections we explain our main tools.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 272, |
|
"end": 280, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Design", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "This is a repository that hosts a collections of data gathered from multiple websites. This data is collected using scrapy, a well-known python scraping library. This implementation comes as a mature result after a sequence of scraping efforts using low level python libraries like requests and beautifulsoup. The current available data is a collected from three giant Arabic poetry websites: aldiwan, poetry encyclopedia, and poets gate. We plan to scrape as many sources as possible on a given topic, poetry, news, or blogs for instance. We then group, do initial processing, and de-duplicate these data into an individual repositories to be easy to work on.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tnqeeb", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Tnkeeh is a library that is responsible for preprocessing datasets. It has four main procedures", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tnkeeh", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Cleaning: this module is used for cleaning datasets by removing diacritics, extra spaces, remove English characters and remove", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tnkeeh", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Tatweel -a character used for increasing the length of characters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tnkeeh", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Segmentation: we use FARASA (Abdelali et al., 2016) for segmenting texts into morphemes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 53, |
|
"text": "(Abdelali et al., 2016)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tnkeeh", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Normalization: Arabic letters can appear in different forms due to different Unicode's representing the same characters. We created a dictionary to map the same representations to their fixed characters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tnkeeh", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Data Splitting: we use a set of of procedures to split different types of datasets depending on the tasks to train on. For instance, we can split datasets if they are for unsupervised, classifications or parallel tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tnkeeh", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Reading: this module reads the different modes of datasets into python variables.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tnkeeh", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Tkseem is a tokenization library that implements multiple tokenization algorithms optimized for Arabic. We provide six categories of tokenizers with a simple interface.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tkseem", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 Word Tokenizer: splits words based on white spaces.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tkseem", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 Character Tokenizer: splits characters depending on their position on text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tkseem", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 Sentencepiece Tokenizer: A wrapper for the sentencepiece library (Kudo and Richardson, 2018) . \u2022 Morphological Tokenizer: splits words based on morphology. This was trained using Madamira (Pasha et al., 2014 ) on a large Arabic news corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 94, |
|
"text": "(Kudo and Richardson, 2018)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 209, |
|
"text": "(Pasha et al., 2014", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tkseem", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 Random Tokenizer: tokenizes text based on random splitting of words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tkseem", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 Disjoint Letter Tokenizer: splits based on letters that are not connected in Arabic script writing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tkseem", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "All these tokenizers extend a common Base class Tokenizer (Figure 2 ) that implements the main functionalities like encode, decode, tokenize, and detokenize (Table 2) . One useful function of these tokenizers is the ability to serialize them and load them on demand. This approach relaxes the time for training specially on large corpus. We also provide different methods for optimization like caching and memory-mapped files to speed up the tokenization process. Encodes and saves the model as numpy array These tokenizers are evaluated on three NLP tasks: sentiment analysis, Arabic meter poetry classification and neural machine translation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 67, |
|
"text": "(Figure 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 166, |
|
"text": "(Table 2)", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Tkseem", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "This main module is responsible for storing and serving different datasets and models. The main purpose is to give a real time experience for different models that are related to Arabic and NLP. The main strategy is highlighted in Figure 3 . This procedure shows off our main approach for making the models easily accessible via different interfaces. We follow three main stages", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 239, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models and Datasets", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "\u2022 Preprocess Dataset: we collect and preprocess different datasets that are related to different tasks. Table 1 shows the main datasets that we collected. The datasets cover different areas like translation, sentiment analysis, poem classification, etc. using Keras with TensorFlow backend (Abadi et al., 2016) . We used Keras because it is straight forward to convert the models using TensorFlow.js. We use Google Colab for training our models with proper documentations in a tutorial-like procedure. We make all the model available in this repository.", |
|
"cite_spans": [ |
|
{ |
|
"start": 290, |
|
"end": 310, |
|
"text": "(Abadi et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 111, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models and Datasets", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "\u2022 Deployment: We make the models available in the browser using TensorFlow.js (Smilkov et al., 2019) . TensorFlow.js is part of the Ten-sorFlow ecosystem that supports training and inference of machine learning models in the browser. The main advantage is a deviceagnostic approach that makes all the models available on any device that has a browser. Moreover, the models are light and can run offline. The main motive is to make the models easily accessible via a simple interface like the browser. This makes it easier for users to test different models in a few clicks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 100, |
|
"text": "(Smilkov et al., 2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models and Datasets", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "We make all the datasets and models available on our GitHub : https://github.com/ARBML/ARBML. The procedure we follow makes it easier for developers to contribute to our project. Moreover, our strategy is language-agnostic and encourages extending it to other languages. In Table 4 we compare ARBML against other tools in the literature.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 274, |
|
"end": 281, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models and Datasets", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "3 End-user Experience ARBML provides a solid contribution from two main perspectives:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models and Datasets", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "\u2022 Educational Perspective: People who wish to learn NLP will greatly benefit from ARBML. ARBML provides various training models with different techniques for both, command line and web interfaces using Keras and Tern-sorFlow.js. The pipeline from cleaning the dataset to model training and deployment is documented in details as Colab Notebooks. Additionally, users can test different models directly in the browser. For instance, we have a translation model where the users can enter a statement in Arabic and it will be translated in English.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models and Datasets", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "\u2022 Development and Research: From development prospective a lot of tools like tnqeeb, tkseem and tnkheeh can be used in different projects related to NLP. Furthermore, developers can use our deployed models as prototype to test the possibility of implementing them in industry. Moreover researchers can use our pipeline to create new state-of-the-art models by using our models as a starting point. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models and Datasets", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Recently, many NLP tools have been developed but they only focus on English. In this work we showcased ARBML which is a set of tools that make Arabic NLP easily accessible through different interfaces. We target the NLP pipeline starting from scrapping datasets, preprocessing, tokenization to training and deployment. We focused on making the design of our tools language-agnostic and hence can be extended to many other languages, given we change the morphological aspects. We collected many datasets that can be easily used by researchers to develop new models. We also designed three libraries tnqeeb, tnkeeh and tkseem which can be easily utilized by developers to develop tools to support Arabic NLP. The tools utilize the morphological nature of Arabic to provide different functionalities that are unique for Arabic. We plan to add many other models and make them easily accessible through the browser. Mainly, our next step is to tackle more advanced models like transformers (Vaswani et al., 2017) . Furthermore, we want to apply different techniques like quantization and distillation to make the models available in the browser. Moreover, we would like to focus on light models like MobileBERT (Sun et al., 2020) , retrain it for Arabic and make it readily usable in the browser.", |
|
"cite_spans": [ |
|
{ |
|
"start": 985, |
|
"end": 1007, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1206, |
|
"end": 1224, |
|
"text": "(Sun et al., 2020)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and future plans", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "ARBML is and open source project that will keep growing in the future. We would like to thank all developers who shared ideas, models and helped us address different issues.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": "5" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Tensorflow: A system for large-scale machine learning", |
|
"authors": [ |
|
{ |
|
"first": "Mart\u00edn", |
|
"middle": [], |
|
"last": "Abadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Barham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianmin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Davis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthieu", |
|
"middle": [], |
|
"last": "Devin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjay", |
|
"middle": [], |
|
"last": "Ghemawat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Irving", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Isard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "12th {USENIX} symposium on operating systems design and implementation ({OSDI} 16)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "265--283", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mart\u00edn Abadi, Paul Barham, Jianmin Chen, Zhifeng Chen, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Geoffrey Irving, Michael Isard, et al. 2016. Tensorflow: A system for large-scale machine learning. In 12th {USENIX} symposium on operating systems design and implementation ({OSDI} 16), pages 265-283.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Farasa: A fast and furious segmenter for arabic", |
|
"authors": [ |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Abdelali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kareem", |
|
"middle": [], |
|
"last": "Darwish", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nadir", |
|
"middle": [], |
|
"last": "Durrani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamdy", |
|
"middle": [], |
|
"last": "Mubarak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 conference of the North American chapter of the association for computational linguistics: Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ahmed Abdelali, Kareem Darwish, Nadir Durrani, and Hamdy Mubarak. 2016. Farasa: A fast and furious segmenter for arabic. In Proceedings of the 2016 conference of the North American chapter of the as- sociation for computational linguistics: Demonstra- tions, pages 11-16.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Meter classification of arabic poems using deep bidirectional recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Zaid", |
|
"middle": [], |
|
"last": "Maged S Al-Shaibani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Irfan", |
|
"middle": [], |
|
"last": "Alyafeai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ahmad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maged S Al-shaibani, Zaid Alyafeai, and Irfan Ahmad. 2020. Meter classification of arabic poems using deep bidirectional recurrent neural networks. Pat- tern Recognition Letters.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Shakkala, arabic text vocalization", |
|
"authors": [ |
|
{ |
|
"first": "Zerrouki", |
|
"middle": [], |
|
"last": "Barqawi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zerrouki Barqawi. 2017. Shakkala, arabic text vocal- ization.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Cnn for handwritten arabic digits recognition based on lenet-5", |
|
"authors": [ |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "El-Sawy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "El-Bakry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [], |
|
"last": "Hazem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Loey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "International conference on advanced intelligent systems and informatics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "566--575", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ahmed El-Sawy, EL-Bakry Hazem, and Mohamed Loey. 2016. Cnn for handwritten arabic digits recog- nition based on lenet-5. In International confer- ence on advanced intelligent systems and informat- ics, pages 566-575. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Building large arabic multi-domain resources for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Hady", |
|
"middle": [], |
|
"last": "Elsahar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Samhaa R El-Beltagy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Intelligent Text Processing and Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "23--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hady ElSahar and Samhaa R El-Beltagy. 2015. Build- ing large arabic multi-domain resources for senti- ment analysis. In International Conference on Intel- ligent Text Processing and Computational Linguis- tics, pages 23-34. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Arabic natural language processing: Challenges and solutions", |
|
"authors": [ |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Farghaly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khaled", |
|
"middle": [], |
|
"last": "Shaalan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "ACM Transactions on Asian Language Information Processing (TALIP)", |
|
"volume": "8", |
|
"issue": "4", |
|
"pages": "1--22", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ali Farghaly and Khaled Shaalan. 2009. Arabic natu- ral language processing: Challenges and solutions. ACM Transactions on Asian Language Information Processing (TALIP), 8(4):1-22.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Introduction to arabic natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Nizar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Habash", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Synthesis Lectures on Human Language Technologies", |
|
"volume": "3", |
|
"issue": "1", |
|
"pages": "1--187", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nizar Y Habash. 2010. Introduction to arabic natural language processing. Synthesis Lectures on Human Language Technologies, 3(1):1-187.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Sentencepiece: A simple and language independent subword tokenizer and detokenizer for neural text processing", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1808.06226" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo and John Richardson. 2018. Sentencepiece: A simple and language independent subword tok- enizer and detokenizer for neural text processing. arXiv preprint arXiv:1808.06226.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Microsoft coco: Common objects in context", |
|
"authors": [ |
|
{ |
|
"first": "Tsung-Yi", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Maire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Serge", |
|
"middle": [], |
|
"last": "Belongie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Hays", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Perona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deva", |
|
"middle": [], |
|
"last": "Ramanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Doll\u00e1r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C Lawrence", |
|
"middle": [], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "European conference on computer vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "740--755", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Doll\u00e1r, and C Lawrence Zitnick. 2014. Microsoft coco: Common objects in context. In European confer- ence on computer vision, pages 740-755. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "The stanford corenlp natural language processing toolkit", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jenny", |
|
"middle": [ |
|
"Rose" |
|
], |
|
"last": "Bauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Finkel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Bethard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mc-Closky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of 52nd annual meeting of the association for computational linguistics: system demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher D Manning, Mihai Surdeanu, John Bauer, Jenny Rose Finkel, Steven Bethard, and David Mc- Closky. 2014. The stanford corenlp natural language processing toolkit. In Proceedings of 52nd annual meeting of the association for computational linguis- tics: system demonstrations, pages 55-60.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Camel tools: An open source python toolkit for arabic natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Ossama", |
|
"middle": [], |
|
"last": "Obeid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nasser", |
|
"middle": [], |
|
"last": "Zalmout", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salam", |
|
"middle": [], |
|
"last": "Khalifa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dima", |
|
"middle": [], |
|
"last": "Taji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mai", |
|
"middle": [], |
|
"last": "Oudah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bashar", |
|
"middle": [], |
|
"last": "Alhafni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Go", |
|
"middle": [], |
|
"last": "Inoue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fadhl", |
|
"middle": [], |
|
"last": "Eryani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Erdmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nizar", |
|
"middle": [], |
|
"last": "Habash", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7022--7032", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ossama Obeid, Nasser Zalmout, Salam Khalifa, Dima Taji, Mai Oudah, Bashar Alhafni, Go Inoue, Fadhl Eryani, Alexander Erdmann, and Nizar Habash. 2020. Camel tools: An open source python toolkit for arabic natural language processing. In Proceed- ings of The 12th Language Resources and Evalua- tion Conference, pages 7022-7032.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Madamira: A fast, comprehensive tool for morphological analysis and disambiguation of arabic", |
|
"authors": [ |
|
{ |
|
"first": "Arfath", |
|
"middle": [], |
|
"last": "Pasha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [], |
|
"last": "Al-Badrashiny", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [ |
|
"El" |
|
], |
|
"last": "Kholy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramy", |
|
"middle": [], |
|
"last": "Eskander", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nizar", |
|
"middle": [], |
|
"last": "Habash", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manoj", |
|
"middle": [], |
|
"last": "Pooleery", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Owen", |
|
"middle": [], |
|
"last": "Rambow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Lrec", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "1094--1101", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arfath Pasha, Mohamed Al-Badrashiny, Mona T Diab, Ahmed El Kholy, Ramy Eskander, Nizar Habash, Manoj Pooleery, Owen Rambow, and Ryan Roth. 2014. Madamira: A fast, comprehensive tool for morphological analysis and disambiguation of ara- bic. In Lrec, volume 14, pages 1094-1101.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Tensorflow. js: Machine learning for the web and beyond", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Smilkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Thorat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yannick", |
|
"middle": [], |
|
"last": "Assogba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ann", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Kreeger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ping", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kangyi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shanqing", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Nielsen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Soergel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.05350" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Smilkov, Nikhil Thorat, Yannick Assogba, Ann Yuan, Nick Kreeger, Ping Yu, Kangyi Zhang, Shan- qing Cai, Eric Nielsen, David Soergel, et al. 2019. Tensorflow. js: Machine learning for the web and be- yond. arXiv preprint arXiv:1901.05350.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Aravec: A set of arabic word embedding models for use in arabic nlp", |
|
"authors": [ |
|
{ |

"first": "Abu Bakr", |

"middle": [], |

"last": "Soliman", |

"suffix": "" |

}, |

{ |

"first": "Kareem", |

"middle": [], |

"last": "Eissa", |

"suffix": "" |

}, |

{ |

"first": "Samhaa", |

"middle": [ |

"R" |

], |

"last": "El-Beltagy", |

"suffix": "" |

} |
|
], |
|
"year": 2017, |
|
"venue": "Procedia Computer Science", |
|
"volume": "117", |
|
"issue": "", |
|
"pages": "256--265", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abu Bakr Soliman, Kareem Eissa, and Samhaa R El- Beltagy. 2017. Aravec: A set of arabic word embed- ding models for use in arabic nlp. Procedia Com- puter Science, 117:256-265.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Mobilebert: a compact task-agnostic bert for resource-limited devices", |
|
"authors": [ |
|
{ |
|
"first": "Zhiqing", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongkun", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodan", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Renjie", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Denny", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.02984" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou. 2020. Mobilebert: a compact task-agnostic bert for resource-limited de- vices. arXiv preprint arXiv:2004.02984.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Towards an open platform for arabic language processing", |
|
"authors": [ |
|
{ |
|
"first": "Taha", |
|
"middle": [], |
|
"last": "Zerrouki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taha Zerrouki. 2020. Towards an open platform for arabic language processing.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "ARBML pipeline.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"text": "Base Tokenizer.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"text": "Models procedure.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"text": "Collected and preprocessed Datasets.", |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"text": "Tokenizer functions.", |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"text": "Trained and deployed models.", |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF7": { |
|
"num": null, |
|
"text": "Comparing ARBML against other Arabic NLP tools.", |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |