Datasets:

Modalities:
Text
Size:
< 1K
Libraries:
Datasets
dibyaaaaax committed on
Commit
d302180
·
1 Parent(s): bdafc6b

Upload duc2001.py

Browse files
Files changed (1) hide show
  1. duc2001.py +142 -0
duc2001.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import datasets
3
+
4
+ # _SPLIT = ['test']
5
+ _CITATION = """\
6
+ @inproceedings{10.5555/1620163.1620205,
7
+ author = {Wan, Xiaojun and Xiao, Jianguo},
8
+ title = {Single Document Keyphrase Extraction Using Neighborhood Knowledge},
9
+ year = {2008},
10
+ isbn = {9781577353683},
11
+ publisher = {AAAI Press},
12
+ booktitle = {Proceedings of the 23rd National Conference on Artificial Intelligence - Volume 2},
13
+ pages = {855–860},
14
+ numpages = {6},
15
+ location = {Chicago, Illinois},
16
+ series = {AAAI'08}
17
+ }
18
+ """
19
+
20
+ _DESCRIPTION = """\
21
+
22
+ """
23
+
24
+ _HOMEPAGE = ""
25
+
26
+ # TODO: Add the licence for the dataset here if you can find it
27
+ _LICENSE = ""
28
+
29
+ # TODO: Add link to the official dataset URLs here
30
+
31
+ _URLS = {
32
+ "test": "test.jsonl"
33
+ }
34
+
35
+
36
+ # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
37
+ class DUC2001(datasets.GeneratorBasedBuilder):
38
+ """TODO: Short description of my dataset."""
39
+
40
+ VERSION = datasets.Version("0.0.1")
41
+
42
+ BUILDER_CONFIGS = [
43
+ datasets.BuilderConfig(name="extraction", version=VERSION,
44
+ description="This part of my dataset covers extraction"),
45
+ datasets.BuilderConfig(name="generation", version=VERSION,
46
+ description="This part of my dataset covers generation"),
47
+ datasets.BuilderConfig(name="raw", version=VERSION, description="This part of my dataset covers the raw data"),
48
+ ]
49
+
50
+ DEFAULT_CONFIG_NAME = "extraction"
51
+
52
+ def _info(self):
53
+ if self.config.name == "extraction": # This is the name of the configuration selected in BUILDER_CONFIGS above
54
+ features = datasets.Features(
55
+ {
56
+ "id": datasets.Value("int64"),
57
+ "document": datasets.features.Sequence(datasets.Value("string")),
58
+ "doc_bio_tags": datasets.features.Sequence(datasets.Value("string"))
59
+
60
+ }
61
+ )
62
+ elif self.config.name == "generation":
63
+ features = datasets.Features(
64
+ {
65
+ "id": datasets.Value("int64"),
66
+ "document": datasets.features.Sequence(datasets.Value("string")),
67
+ "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
68
+ "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string"))
69
+
70
+ }
71
+ )
72
+ else:
73
+ features = datasets.Features(
74
+ {
75
+ "id": datasets.Value("int64"),
76
+ "document": datasets.features.Sequence(datasets.Value("string")),
77
+ "doc_bio_tags": datasets.features.Sequence(datasets.Value("string")),
78
+ "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
79
+ "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
80
+ "other_metadata": datasets.features.Sequence(
81
+ {
82
+ "text": datasets.features.Sequence(datasets.Value("string")),
83
+ "bio_tags": datasets.features.Sequence(datasets.Value("string"))
84
+ }
85
+ )
86
+
87
+ }
88
+ )
89
+ return datasets.DatasetInfo(
90
+ # This is the description that will appear on the datasets page.
91
+ description=_DESCRIPTION,
92
+ # This defines the different columns of the dataset and their types
93
+ features=features,
94
+ homepage=_HOMEPAGE,
95
+ # License for the dataset if available
96
+ license=_LICENSE,
97
+ # Citation for the dataset
98
+ citation=_CITATION,
99
+ )
100
+
101
+ def _split_generators(self, dl_manager):
102
+
103
+ data_dir = dl_manager.download_and_extract(_URLS)
104
+ return [
105
+ datasets.SplitGenerator(
106
+ name=datasets.Split.TEST,
107
+ # These kwargs will be passed to _generate_examples
108
+ gen_kwargs={
109
+ "filepath": data_dir['test'],
110
+ "split": "test"
111
+ },
112
+ ),
113
+ ]
114
+
115
+ # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
116
+ def _generate_examples(self, filepath, split):
117
+ with open(filepath, encoding="utf-8") as f:
118
+ for key, row in enumerate(f):
119
+ data = json.loads(row)
120
+ if self.config.name == "extraction":
121
+ # Yields examples as (key, example) tuples
122
+ yield key, {
123
+ "id": data['paper_id'],
124
+ "document": data["document"],
125
+ "doc_bio_tags": data.get("doc_bio_tags")
126
+ }
127
+ elif self.config.name == "generation":
128
+ yield key, {
129
+ "id": data['paper_id'],
130
+ "document": data["document"],
131
+ "extractive_keyphrases": data.get("extractive_keyphrases"),
132
+ "abstractive_keyphrases": data.get("abstractive_keyphrases")
133
+ }
134
+ else:
135
+ yield key, {
136
+ "id": data['paper_id'],
137
+ "document": data["document"],
138
+ "doc_bio_tags": data.get("doc_bio_tags"),
139
+ "extractive_keyphrases": data.get("extractive_keyphrases"),
140
+ "abstractive_keyphrases": data.get("abstractive_keyphrases"),
141
+ "other_metadata": data["other_metadata"]
142
+ }