File size: 3,894 Bytes
7bdce57
 
 
 
 
 
 
 
 
 
 
 
c10ec2b
7bdce57
 
 
 
 
 
 
 
 
 
 
 
c10ec2b
 
7bdce57
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c10ec2b
7bdce57
c10ec2b
7bdce57
 
 
 
f64d039
7bdce57
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
# coding=utf-8

"""The Multilingual Amazon Reviews Corpus"""


import json

import datasets
from datasets.exceptions import DefunctDatasetError


_DESCRIPTION = """\
Please refer to https://huggingface.co/datasets/defunct-datasets/amazon_reviews_multi.
"""

_LANGUAGES = {
    "de": "German",
    "en": "English",
    "es": "Spanish",
    "fr": "French",
    "ja": "Japanese",
    "zh": "Chinese",
}
_ALL_LANGUAGES = "all_languages"
_VERSION = "1.0.0"
_HOMEPAGE_URL = "https://huggingface.co/datasets/defunct-datasets/amazon_reviews_multi"
_DOWNLOAD_URL = "https://huggingface.co/datasets/buruzaemon/amazon_reviews_multi/resolve/main/{lang}/{split}.jsonl.gz"


class AmazonReviewsMultiConfig(datasets.BuilderConfig):
    """BuilderConfig for the Multilingual Amazon Reviews Corpus.

    Args:
        languages: iterable of language codes (keys of ``_LANGUAGES``) whose
            review files this configuration should load. ``None`` is accepted
            but the builder below always supplies a value.
        **kwargs: forwarded unchanged to ``datasets.BuilderConfig``
            (``name``, ``description``, ...).
    """

    def __init__(self, languages=None, **kwargs):
        # Fix: the original line ended with a stray trailing comma, which
        # turned the statement into a discarded one-element tuple. Also use
        # the modern zero-argument super() form.
        super().__init__(version=datasets.Version(_VERSION, ""), **kwargs)
        self.languages = languages


class AmazonReviewsMulti(datasets.GeneratorBasedBuilder):
    """The Multilingual Amazon Reviews Corpus"""

    # One config per individual language, plus an "all_languages" config that
    # loads every language's files together.
    BUILDER_CONFIGS = [
        AmazonReviewsMultiConfig(
            name=_ALL_LANGUAGES,
            languages=_LANGUAGES,
            description="A collection of Amazon reviews specifically designed to aid research in multilingual text classification.",
        )
    ] + [
        AmazonReviewsMultiConfig(
            name=lang,
            languages=[lang],
            description=f"{_LANGUAGES[lang]} examples from a collection of Amazon reviews specifically designed to aid research in multilingual text classification",
        )
        for lang in _LANGUAGES
    ]
    BUILDER_CONFIG_CLASS = AmazonReviewsMultiConfig
    DEFAULT_CONFIG_NAME = _ALL_LANGUAGES

    def _info(self):
        """Return the dataset metadata: feature schema, homepage, description.

        NOTE(review): the upstream script raises ``DefunctDatasetError`` here
        because the data providers withdrew the original corpus; that raise
        was deliberately commented out so this mirror remains loadable. The
        dead commented-out code has been removed in favor of this note (the
        ``DefunctDatasetError`` import at the top of the file is kept).
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "review_id": datasets.Value("string"),
                    "product_id": datasets.Value("string"),
                    "reviewer_id": datasets.Value("string"),
                    "stars": datasets.Value("int32"),
                    "review_body": datasets.Value("string"),
                    "review_title": datasets.Value("string"),
                    "language": datasets.Value("string"),
                    "product_category": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            license=None,
            homepage=_HOMEPAGE_URL,
            citation=None,
        )

    def _split_generators(self, dl_manager):
        """Download the per-language ``.jsonl.gz`` files and define the splits.

        One URL is built per configured language for each of the train /
        validation / test splits; ``download_and_extract`` returns local
        paths to the decompressed JSON-lines files.
        """
        train_urls = [_DOWNLOAD_URL.format(split="train", lang=lang) for lang in self.config.languages]
        dev_urls = [_DOWNLOAD_URL.format(split="validation", lang=lang) for lang in self.config.languages]
        test_urls = [_DOWNLOAD_URL.format(split="test", lang=lang) for lang in self.config.languages]

        train_paths = dl_manager.download_and_extract(train_urls)
        dev_paths = dl_manager.download_and_extract(dev_urls)
        test_paths = dl_manager.download_and_extract(test_urls)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file_paths": train_paths}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"file_paths": dev_paths}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"file_paths": test_paths}),
        ]

    def _generate_examples(self, file_paths):
        """Yield ``(key, example)`` pairs from the extracted JSON-lines files.

        The key is a running counter across ALL files of the split, so keys
        stay unique even when several languages are loaded together.
        """
        row_count = 0
        for file_path in file_paths:
            with open(file_path, "r", encoding="utf-8") as f:
                for line in f:
                    yield row_count, json.loads(line)
                    row_count += 1