Datasets:
drt
/

Modalities:
Text
ArXiv:
Libraries:
Datasets
License:
parquet-converter committed on
Commit
107a56e
·
1 Parent(s): 4cd4341

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,53 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ftz filter=lfs diff=lfs merge=lfs -text
6
- *.gz filter=lfs diff=lfs merge=lfs -text
7
- *.h5 filter=lfs diff=lfs merge=lfs -text
8
- *.joblib filter=lfs diff=lfs merge=lfs -text
9
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
- *.lz4 filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tflite filter=lfs diff=lfs merge=lfs -text
29
- *.tgz filter=lfs diff=lfs merge=lfs -text
30
- *.wasm filter=lfs diff=lfs merge=lfs -text
31
- *.xz filter=lfs diff=lfs merge=lfs -text
32
- *.zip filter=lfs diff=lfs merge=lfs -text
33
- *.zst filter=lfs diff=lfs merge=lfs -text
34
- *tfevents* filter=lfs diff=lfs merge=lfs -text
35
- # Audio files - uncompressed
36
- *.pcm filter=lfs diff=lfs merge=lfs -text
37
- *.sam filter=lfs diff=lfs merge=lfs -text
38
- *.raw filter=lfs diff=lfs merge=lfs -text
39
- # Audio files - compressed
40
- *.aac filter=lfs diff=lfs merge=lfs -text
41
- *.flac filter=lfs diff=lfs merge=lfs -text
42
- *.mp3 filter=lfs diff=lfs merge=lfs -text
43
- *.ogg filter=lfs diff=lfs merge=lfs -text
44
- *.wav filter=lfs diff=lfs merge=lfs -text
45
- # Image files - uncompressed
46
- *.bmp filter=lfs diff=lfs merge=lfs -text
47
- *.gif filter=lfs diff=lfs merge=lfs -text
48
- *.png filter=lfs diff=lfs merge=lfs -text
49
- *.tiff filter=lfs diff=lfs merge=lfs -text
50
- # Image files - compressed
51
- *.jpg filter=lfs diff=lfs merge=lfs -text
52
- *.jpeg filter=lfs diff=lfs merge=lfs -text
53
- *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md DELETED
@@ -1,4 +0,0 @@
1
- ---
2
- license: apache-2.0
3
- source: https://github.com/KGQA/KGQA-datasets
4
- ---
 
 
 
 
 
complex_web_questions.py DELETED
@@ -1,144 +0,0 @@
"""ComplexWebQuestions: A Dataset for Answering Complex Questions that Require Reasoning over Multiple Web Snippets."""

import json
import os

import datasets

# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)

# BibTeX citation for the dataset paper (Talmor & Berant, NAACL 2018).
_CITATION = """\
@inproceedings{Talmor2018TheWA,
title={The Web as a Knowledge-Base for Answering Complex Questions},
author={Alon Talmor and Jonathan Berant},
booktitle={NAACL},
year={2018}
}
"""

# Human-readable description surfaced via datasets.DatasetInfo.
_DESCRIPTION = """\
ComplexWebQuestions is a dataset for answering complex questions that require reasoning over multiple web snippets. It contains a large set of complex questions in natural language, and can be used in multiple ways: 1) By interacting with a search engine, which is the focus of our paper (Talmor and Berant, 2018); 2) As a reading comprehension task: we release 12,725,989 web snippets that are relevant for the questions, and were collected during the development of our model; 3) As a semantic parsing task: each question is paired with a SPARQL query that can be executed against Freebase to retrieve the answer.
"""

# Dataset homepage.
_URL = "https://allenai.org/data/complexwebquestions"
# Direct-download Dropbox links for each raw JSON split.
# NOTE(review): these are external links and may rot — verify periodically.
_COMPLEXWEBQUESTIONS_URLS = {
"train": "https://www.dropbox.com/sh/7pkwkrfnwqhsnpo/AAAIHeWX0cPpbpwK6w06BCxva/ComplexWebQuestions_train.json?dl=1",
"dev": "https://www.dropbox.com/sh/7pkwkrfnwqhsnpo/AADH8beLbOUWxwvY_K38E3ADa/ComplexWebQuestions_dev.json?dl=1",
"test": "https://www.dropbox.com/sh/7pkwkrfnwqhsnpo/AABr4ysSy_Tg8Wfxww4i_UWda/ComplexWebQuestions_test.json?dl=1"
}
-
30
- class ComplexWebQuestionsConfig(datasets.BuilderConfig):
31
- """BuilderConfig for ComplexWebQuestions"""
32
- def __init__(self,
33
- data_url,
34
- data_dir,
35
- **kwargs):
36
- """BuilderConfig for ComplexWebQuestions.
37
- Args:
38
- **kwargs: keyword arguments forwarded to super.
39
- """
40
- super(ComplexWebQuestionsConfig, self).__init__(**kwargs)
41
- self.data_url = data_url
42
- self.data_dir = data_dir
43
-
class ComplexWebQuestions(datasets.GeneratorBasedBuilder):
    """ComplexWebQuestions: A Dataset for Answering Complex Questions that Require Reasoning over Multiple Web Snippets."""

    BUILDER_CONFIGS = [
        ComplexWebQuestionsConfig(
            name="complex_web_questions",
            description="ComplexWebQuestions",
            data_url="",
            data_dir="ComplexWebQuestions",
        ),
        ComplexWebQuestionsConfig(
            name="complexwebquestions_test",
            description="ComplexWebQuestions",
            data_url="",
            data_dir="ComplexWebQuestions",
        ),
    ]

    def _info(self):
        """Build the DatasetInfo describing one example's schema.

        The public test split ships without gold answers, so for the
        ``complexwebquestions_test`` config the label-bearing fields
        (``answers`` and ``composition_answer``) are removed from the schema.
        """
        features = datasets.Features(
            {
                "ID": datasets.Value("string"),
                "answers": datasets.features.Sequence(
                    datasets.Features(
                        {
                            "aliases": datasets.features.Sequence(
                                datasets.Value("string")
                            ),
                            "answer": datasets.Value("string"),
                            "answer_id": datasets.Value("string"),
                        }
                    )
                ),
                "composition_answer": datasets.Value("string"),
                "compositionality_type": datasets.Value("string"),
                "created": datasets.Value("string"),
                "machine_question": datasets.Value("string"),
                "question": datasets.Value("string"),
                "sparql": datasets.Value("string"),
                "webqsp_ID": datasets.Value("string"),
                "webqsp_question": datasets.Value("string"),
            }
        )

        if self.config.name == "complexwebquestions_test":
            # datasets.Features is a dict subclass, so pop() with a default
            # removes the fields without raising if they are already absent.
            features.pop("answers", None)
            features.pop("composition_answer", None)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Download the raw JSON files and declare the dataset splits.

        Returns only a TEST split for the ``complexwebquestions_test``
        config, and TRAIN + VALIDATION splits otherwise. The downloaded
        paths returned by ``dl_manager.download`` are passed straight to
        ``_generate_examples`` (the previous ``os.path.join(None or "", p)``
        was a no-op and has been removed).
        """
        if self.config.name == "complexwebquestions_test":
            test_files = dl_manager.download(
                {"test": _COMPLEXWEBQUESTIONS_URLS["test"]}
            )
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "data_file": test_files["test"],
                        "split": "test",
                    },
                )
            ]

        files = dl_manager.download(
            {
                "train": _COMPLEXWEBQUESTIONS_URLS["train"],
                "dev": _COMPLEXWEBQUESTIONS_URLS["dev"],
            }
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": files["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": files["dev"],
                    "split": "validation",
                },
            ),
        ]

    def _generate_examples(self, data_file, **kwargs):
        """Yield ``(index, example)`` pairs from one downloaded JSON file.

        Args:
            data_file: Path to a split's JSON file (a list of question dicts).
            **kwargs: Extra gen_kwargs (e.g. ``split``) — accepted but unused.
        """
        with open(data_file, encoding="utf8") as f:
            questions = json.load(f)
        # Each entry is emitted as-is; its keys match the Features schema.
        for idx, question in enumerate(questions):
            yield idx, question
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
complex_web_questions/complex_web_questions-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7065067766733211e6118f8c7ab424cb40ea97bdda0f8e0e12fc282db74fc473
3
+ size 9014668
complex_web_questions/complex_web_questions-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ed06aeb3d6f1bf50ece813a85ccb62c0e956e06afe599d30633628e2d155e8dd
3
+ size 967609
complexwebquestions_test/complex_web_questions-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:80ce323e0740a1572d849dd65d2c4047d369e2071cbdd69b8a2d42e27e9bc750
3
+ size 749444