arka0821 committed
Commit 4f7eb0e · 1 Parent(s): a9da0a1

Create multi_document_summarization.py

Files changed (1)
  1. multi_document_summarization.py +102 -0
multi_document_summarization.py ADDED

# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Multi-Document Dataset."""

import json

import datasets


_CITATION = """
@article{lu2020multi,
  title={Multi-Document: A Large-scale Dataset for Extreme Multi-document Summarization of Scientific Articles},
  author={Arka Das, India},
  journal={arXiv preprint arXiv:2010.14235},
  year={2022}
}
"""

_DESCRIPTION = """
Multi-Document, a large-scale multi-document summarization dataset created from scientific articles. Multi-Document introduces a challenging multi-document summarization task: writing the related-work section of a paper based on its abstract and the articles it references.
"""

_URL_TRAIN = "https://github.com/arka0821/multi_document_summarization/data/train.json.gz"
_URL_TEST = "https://github.com/arka0821/multi_document_summarization/data/test.json.gz"
_URL_VAL = "https://github.com/arka0821/multi_document_summarization/data/val.json.gz"


class MultiDocumentSum(datasets.GeneratorBasedBuilder):
    """Multi-Document Dataset."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "docs": datasets.Sequence(
                        {
                            "id": datasets.Value("string"),
                            "text": datasets.Value("string"),
                        }
                    ),
                    "summary": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/arka0821/multi_document_summarization",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        train_path = dl_manager.download_and_extract(_URL_TRAIN)
        test_path = dl_manager.download_and_extract(_URL_TEST)
        val_path = dl_manager.download_and_extract(_URL_VAL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"path": train_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"path": test_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"path": val_path},
            ),
        ]
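
    # Expected raw record layout (an assumption inferred from the field
    # accesses in _generate_examples below; the actual JSON may differ):
    #
    #   {
    #     "id": "<paper id>",
    #     "summary": "<target related-work text>",
    #     "ref_abstract": {
    #       "@cite_1": {"mid": "<document id>", "abstract": "<referenced abstract>"},
    #       ...
    #     }
    #   }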

    def _generate_examples(self, path=None):
        """Yields examples."""
        # Each split is stored as a single JSON array; the context manager
        # closes the file, so no explicit f.close() is needed.
        with open(path, encoding="utf-8") as f:
            data = json.load(f)

        for idx, el in enumerate(data):
            # Flatten the referenced abstracts under "ref_abstract" into the
            # "docs" sequence declared in _info (one {"id", "text"} entry per
            # reference), so the yielded example matches the schema exactly.
            # Assumes each record also carries "id" and "summary" fields.
            docs = [
                {"id": ref["mid"], "text": ref["abstract"]}
                for ref in el["ref_abstract"].values()
            ]
            yield idx, {
                "id": el["id"],
                "docs": docs,
                "summary": el["summary"],
            }
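
Once the script is committed, the dataset can be consumed through the standard datasets API. Below is a minimal usage sketch, not part of the commit: the local script path is illustrative, and it assumes the JSON files are reachable at the _URL_* locations defined above.

# Hypothetical usage example (not part of this commit).
from datasets import load_dataset

# Point load_dataset at the loading script; it runs MultiDocumentSum and
# returns a DatasetDict with "train", "test", and "validation" splits.
dataset = load_dataset("multi_document_summarization.py")

sample = dataset["train"][0]
print(sample["id"])               # record identifier
# A datasets.Sequence of dicts is stored column-wise, so "docs" is a dict
# of parallel lists: {"id": [...], "text": [...]}.
print(len(sample["docs"]["id"]))  # number of reference documents
print(sample["summary"][:200])    # start of the target summary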