Ubuntu committed on
Commit 8d16a51 · 1 Parent(s): 56e2ab8

add sc2qa_commoncrawl.py

Files changed (1)
  1. sc2qa_commoncrawl.py +104 -0
sc2qa_commoncrawl.py ADDED
@@ -0,0 +1,104 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """(SC)^2QA: Self-Contained Summary-Centric QA Dataset."""
+
+
+ import csv
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ @article{zhou2021generating,
+     author = {Li Zhou and Kevin Small and Yong Zhang and Sandeep Atluri},
+     title = "{Generating Self-Contained and Summary-Centric Question Answer Pairs via Differentiable Reward Imitation Learning}",
+     conference = {The 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP 2021)},
+     year = 2021,
+ }
+ """
+
+ _DESCRIPTION = """\
+ """
+
+ _URLS = {
+     "train": "https://huggingface.co/datasets/sc2qa/sc2qa_commoncrawl/resolve/main/train.csv",
+     "val": "https://huggingface.co/datasets/sc2qa/sc2qa_commoncrawl/resolve/main/val.csv",
+     "test": "https://huggingface.co/datasets/sc2qa/sc2qa_commoncrawl/resolve/main/test.csv",
+ }
+
+ class SC2QAConfig(datasets.BuilderConfig):
+     """BuilderConfig for (SC)^2QA."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for (SC)^2QA.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(SC2QAConfig, self).__init__(**kwargs)
+
+
+ class SC2QA(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         SC2QAConfig(
+             name="plain_text",
+             version=datasets.Version("1.0.0", ""),
+             description="Plain text",
+         ),
+     ]
+
+     def _info(self):
+         # Should return a datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "question": datasets.Value("string"),
+                     "article": datasets.Value("string"),
+                     "summary": datasets.Value("string"),
+                     "model source": datasets.Value("string"),
+                     "length bucket": datasets.Value("int8"),
+                     "url": datasets.Value("string"),
+                     "qa classifier score": datasets.Value("float"),
+                 }
+             ),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         downloaded_files = dl_manager.download_and_extract(_URLS)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"], "split": "test"}),
+         ]
+
+     def _generate_examples(self, filepath, split=None):
+         """This function returns the examples in the raw (text) form."""
+         logger.info("generating examples from = %s", filepath)
+         key = 0
+         with open(filepath, encoding="ascii", errors="ignore") as f:
+             csv_reader = csv.DictReader(f)
+             for row in csv_reader:
+                 if split == "test":  # This avoids an error when doing data type conversion from empty strings
+                     row["length bucket"] = "0"
+                     row["qa classifier score"] = "0.0"
+                 yield row["url"], row
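
Once this loading script is on the Hub, the dataset can be loaded with the datasets library. The following is a minimal usage sketch, not part of the commit; it assumes the repository id sc2qa/sc2qa_commoncrawl implied by the _URLS constants above and network access to the Hub (recent versions of datasets may additionally require trust_remote_code=True for script-based datasets).

from datasets import load_dataset

# Resolve and run the loading script from the Hub repository
# (repository id taken from the _URLS constants in sc2qa_commoncrawl.py).
ds = load_dataset("sc2qa/sc2qa_commoncrawl")
print(ds)  # DatasetDict with "train", "validation", and "test" splits

# Each example carries the fields declared in _info(): "question", "article",
# "summary", "model source", "length bucket", "url", and "qa classifier score".
for example in ds["train"].select(range(3)):
    print(example["question"], "->", example["url"])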