working loading script
OK-VQA.py
ADDED
@@ -0,0 +1,162 @@
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OK-VQA loading script."""


import json
from pathlib import Path

import datasets


_CITATION = """\
@article{DBLP:journals/corr/abs-1906-00067,
  author     = {Kenneth Marino and
                Mohammad Rastegari and
                Ali Farhadi and
                Roozbeh Mottaghi},
  title      = {{OK-VQA:} {A} Visual Question Answering Benchmark Requiring External
                Knowledge},
  journal    = {CoRR},
  volume     = {abs/1906.00067},
  year       = {2019},
  url        = {http://arxiv.org/abs/1906.00067},
  eprinttype = {arXiv},
  eprint     = {1906.00067},
  timestamp  = {Thu, 13 Jun 2019 13:36:00 +0200},
  biburl     = {https://dblp.org/rec/journals/corr/abs-1906-00067.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
"""


_DESCRIPTION = """\
OK-VQA is a new dataset for visual question answering that requires methods which can draw upon outside knowledge to answer questions.
- 14,055 open-ended questions
- 5 ground truth answers per question
- Manually filtered to ensure all questions require outside knowledge (e.g. from Wikipedia)
- Reduced questions with most common answers to reduce dataset bias
"""


_HOMEPAGE = "https://okvqa.allenai.org/"

_LICENSE = "CC BY 4.0"  # found in the zip files below - we should maybe ask for confirmation


_URLS = {
    "annotations": {
        "train": "https://okvqa.allenai.org/static/data/mscoco_train2014_annotations.json.zip",
        "val": "https://okvqa.allenai.org/static/data/mscoco_val2014_annotations.json.zip",
    },
    "questions": {
        "train": "https://okvqa.allenai.org/static/data/OpenEnded_mscoco_train2014_questions.json.zip",
        "val": "https://okvqa.allenai.org/static/data/OpenEnded_mscoco_val2014_questions.json.zip",
    },
    "images": {
        "train": "http://images.cocodataset.org/zips/train2014.zip",
        "val": "http://images.cocodataset.org/zips/val2014.zip",
    },
}


class OKVQADataset(datasets.GeneratorBasedBuilder):
    """OK-VQA: outside-knowledge visual question answering over COCO images."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "question_type": datasets.Value("string"),
                "confidence": datasets.Value("int32"),
                "answers": [
                    {
                        "answer": datasets.Value("string"),
                        "raw_answer": datasets.Value("string"),
                        "answer_confidence": datasets.Value("string"),
                        "answer_id": datasets.Value("int64"),
                    }
                ],
                "image_id": datasets.Value("int64"),
                "answer_type": datasets.Value("string"),
                "question_id": datasets.Value("int64"),
                "question": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # urls = _URLS[self.config.name]  # TODO later
        data_dir = dl_manager.download_and_extract(_URLS)
        gen_kwargs = {}
        for split_name in ["train", "val"]:
            gen_kwargs_per_split = {}
            for dir_name in _URLS.keys():
                if split_name in data_dir[dir_name]:
                    # Each archive extracts to a file/directory named after the zip, minus ".zip".
                    file_name = Path(_URLS[dir_name][split_name]).name[: -len(".zip")]
                    path = Path(data_dir[dir_name][split_name]) / file_name
                    gen_kwargs_per_split[f"{dir_name}_path"] = path
                else:
                    gen_kwargs_per_split[f"{dir_name}_path"] = None
            gen_kwargs[split_name] = gen_kwargs_per_split

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs=gen_kwargs["train"],
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs=gen_kwargs["val"],
            ),
        ]

    def _generate_examples(self, questions_path, annotations_path, images_path):
        with open(annotations_path, "r") as f:
            dataset = json.load(f)
        with open(questions_path, "r") as f:
            questions = json.load(f)

        # Index annotations by question id for the join below.
        qa = {ann["question_id"]: ann for ann in dataset["annotations"]}

        for question in questions["questions"]:
            annotation = qa[question["question_id"]]
            # Sanity checks: both records carry exactly the expected keys.
            assert set(question.keys()) == {"image_id", "question", "question_id"}
            assert set(annotation.keys()) == {
                "question_type",
                "confidence",
                "answers",
                "image_id",
                "answer_type",
                "question_id",
            }
            # Build the record by merging the question with its annotation, then
            # resolve the COCO image path, e.g. COCO_train2014_000000000123.jpg.
            record = question
            record.update(annotation)
            record["image"] = str(images_path / f"COCO_{images_path.name}_{record['image_id']:0>12}.jpg")
            yield question["question_id"], record