Datasets:

Modalities:
Text
Formats:
parquet
Sub-tasks:
extractive-qa
Languages:
English
ArXiv:
Libraries:
Datasets
pandas
License:
system HF staff committed on
Commit
79fd48c
·
1 Parent(s): bac06e7

Update files from the datasets library (from 1.16.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2) hide show
  1. README.md +1 -0
  2. ropes.py +30 -26
README.md CHANGED
@@ -1,4 +1,5 @@
1
  ---
 
2
  annotations_creators:
3
  - crowdsourced
4
  language_creators:
 
1
  ---
2
+ pretty_name: ROPES
3
  annotations_creators:
4
  - crowdsourced
5
  language_creators:
ropes.py CHANGED
@@ -17,7 +17,6 @@ Code is heavily inspired from https://github.com/huggingface/datasets/blob/maste
17
 
18
 
19
  import json
20
- import os
21
 
22
  import datasets
23
 
@@ -84,51 +83,56 @@ class Ropes(datasets.GeneratorBasedBuilder):
84
 
85
  def _split_generators(self, dl_manager):
86
  """Returns SplitGenerators."""
87
- data_dir = dl_manager.download_and_extract(_URLs)
88
 
89
  return [
90
  datasets.SplitGenerator(
91
  name=datasets.Split.TRAIN,
92
  gen_kwargs={
93
- "filepath": os.path.join(data_dir["train+dev"], "ropes-train-dev-v1.0", "train-v1.0.json"),
94
  "split": "train",
 
95
  },
96
  ),
97
  datasets.SplitGenerator(
98
  name=datasets.Split.TEST,
99
  gen_kwargs={
100
- "filepath": os.path.join(data_dir["test"], "ropes-test-questions-v1.0", "test-1.0.json"),
101
  "split": "test",
 
102
  },
103
  ),
104
  datasets.SplitGenerator(
105
  name=datasets.Split.VALIDATION,
106
  gen_kwargs={
107
- "filepath": os.path.join(data_dir["train+dev"], "ropes-train-dev-v1.0", "dev-v1.0.json"),
108
  "split": "dev",
 
109
  },
110
  ),
111
  ]
112
 
113
- def _generate_examples(self, filepath, split):
114
  """Yields examples."""
115
- with open(filepath, encoding="utf-8") as f:
116
- ropes = json.load(f)
117
- for article in ropes["data"]:
118
- for paragraph in article["paragraphs"]:
119
- background = paragraph["background"].strip()
120
- situation = paragraph["situation"].strip()
121
- for qa in paragraph["qas"]:
122
- question = qa["question"].strip()
123
- id_ = qa["id"]
124
- answers = [] if split == "test" else [answer["text"].strip() for answer in qa["answers"]]
125
-
126
- yield id_, {
127
- "background": background,
128
- "situation": situation,
129
- "question": question,
130
- "id": id_,
131
- "answers": {
132
- "text": answers,
133
- },
134
- }
 
 
 
17
 
18
 
19
  import json
 
20
 
21
  import datasets
22
 
 
83
 
84
def _split_generators(self, dl_manager):
    """Returns SplitGenerators.

    Downloads the train+dev and test archives without extracting them, and
    hands each split a lazy iterator over the relevant archive's members
    together with the in-archive path of the JSON file it should parse.
    """
    archives = dl_manager.download(_URLs)
    # (split enum, split tag, archive key, path of the JSON inside that archive)
    split_specs = [
        (datasets.Split.TRAIN, "train", "train+dev", "ropes-train-dev-v1.0/train-v1.0.json"),
        (datasets.Split.TEST, "test", "test", "ropes-test-questions-v1.0/test-1.0.json"),
        (datasets.Split.VALIDATION, "dev", "train+dev", "ropes-train-dev-v1.0/dev-v1.0.json"),
    ]
    return [
        datasets.SplitGenerator(
            name=split_name,
            gen_kwargs={
                "filepath": inner_path,
                "split": split_tag,
                "files": dl_manager.iter_archive(archives[archive_key]),
            },
        )
        for split_name, split_tag, archive_key, inner_path in split_specs
    ]
114
 
115
+ def _generate_examples(self, filepath, split, files):
116
  """Yields examples."""
117
+ for path, f in files:
118
+ if path == filepath:
119
+ ropes = json.loads(f.read().decode("utf-8"))
120
+ for article in ropes["data"]:
121
+ for paragraph in article["paragraphs"]:
122
+ background = paragraph["background"].strip()
123
+ situation = paragraph["situation"].strip()
124
+ for qa in paragraph["qas"]:
125
+ question = qa["question"].strip()
126
+ id_ = qa["id"]
127
+ answers = [] if split == "test" else [answer["text"].strip() for answer in qa["answers"]]
128
+
129
+ yield id_, {
130
+ "background": background,
131
+ "situation": situation,
132
+ "question": question,
133
+ "id": id_,
134
+ "answers": {
135
+ "text": answers,
136
+ },
137
+ }
138
+ break