Datasets:
  Tasks: Question Answering
  Modalities: Text
  Formats: parquet
  Sub-tasks: extractive-qa
  Languages: Vietnamese
  Size: 10K - 100K
  ArXiv:
"""
Script used to process UIT-ViQuAD 2.0.
Source: https://github.com/tuanbc88/ai_question_answering/tree/master/machine_reading_comprehension/02_datasets
"""
import os
import json
import pandas as pd
from itertools import groupby
from datasets import Dataset, DatasetDict, Features, Sequence, Value
def deduplicate_answers(answers):
    """Return *answers* sorted by (text, answer_start) with exact duplicates removed.

    Two answers are considered duplicates when both their 'text' and their
    'answer_start' fields are equal; the first occurrence in sort order is kept.
    """
    def _key(ans):
        return (ans['text'], ans['answer_start'])

    seen = set()
    unique = []
    for ans in sorted(answers, key=_key):
        marker = _key(ans)
        if marker not in seen:
            seen.add(marker)
            unique.append(ans)
    return unique
# Directory holding the raw UIT-ViQuAD 2.0 JSON splits (train/dev/test).
data_dir = "UIT-ViQuAD 2.0"
dataset_dict = {}

# SQuAD-v2-style schema shared by every split, plus the original UIT question id.
features = Features({
    'id': Value('string'),
    'uit_id': Value('string'),
    'title': Value('string'),
    'context': Value('string'),
    'question': Value('string'),
    'answers': Sequence(feature={'text': Value('string'), 'answer_start': Value('int32')}),
    'is_impossible': Value('bool'),
    'plausible_answers': Sequence(feature={'text': Value('string'), 'answer_start': Value('int32')})
})

for split in ["train", "dev", "test"]:
    fname = os.path.join(data_dir, f"{split}.json")
    # Context manager closes the handle promptly (the previous
    # json.load(open(fname)) leaked it) and pins the text encoding.
    with open(fname, encoding="utf-8") as f:
        data = json.load(f)

    # Column accumulators for Dataset.from_dict.
    ids, uit_ids, titles, contexts, questions = [], [], [], [], []
    all_answers, impossibles, all_plausible_answers = [], [], []
    title_i = 0
    print("-"*20, split, len(data["data"]), "-"*20)

    for title_data in data["data"]:
        title = title_data["title"]
        ctx_i = 0
        title_i += 1
        for ctx_and_qs in title_data["paragraphs"]:
            qas = ctx_and_qs["qas"]
            context = ctx_and_qs["context"]
            q_i = 0
            ctx_i += 1
            # Questions already seen within this paragraph; duplicates are skipped.
            question_set = set()
            for q in qas:
                question = q["question"]
                answers = q.get("answers", None)
                plausible_answers = q.get("plausible_answers", None)
                # Remove exact (text, answer_start) duplicates within each answer list.
                if answers:
                    answers = deduplicate_answers(answers)
                if plausible_answers:
                    plausible_answers = deduplicate_answers(plausible_answers)
                uit_id = q["id"]
                is_impossible = q.get("is_impossible", False)
                # Log and skip duplicate questions inside the same paragraph.
                if question in question_set:
                    print("---Found duplicate question: ", question, "---")
                    print("Answer: ", answers)
                    print("Answer plaus: ", plausible_answers)
                    print("Impossible: ", is_impossible)
                    continue
                q_i += 1
                # Stable, human-readable id: title index / context index / question index.
                overall_id = f"{title_i:04d}-{ctx_i:04d}-{q_i:04d}"
                ids.append(overall_id)
                uit_ids.append(uit_id)
                titles.append(title)
                contexts.append(context)
                questions.append(question)
                all_answers.append(answers)
                impossibles.append(is_impossible)
                all_plausible_answers.append(plausible_answers)
                question_set.add(question)

    # Materialize the split as a datasets.Dataset with the fixed schema.
    dataset = Dataset.from_dict({
        'id': ids,
        'uit_id': uit_ids,
        'title': titles,
        'context': contexts,
        'question': questions,
        'answers': all_answers,
        'is_impossible': impossibles,
        'plausible_answers': all_plausible_answers
    }, features=features)
    # Hub convention: the "dev" split is published as "validation".
    dataset_dict[split if split != "dev" else "validation"] = dataset

print(dataset_dict)
hf_dataset = DatasetDict(dataset_dict)
hf_name = "UIT-ViQuAD2.0"
hf_dataset.push_to_hub(f"taidng/{hf_name}")
print("Dataset uploaded successfully!")