"""
Script used to process UIT-ViQuAD 2.0.
Source: https://github.com/tuanbc88/ai_question_answering/tree/master/machine_reading_comprehension/02_datasets
"""
import os
import json
from itertools import groupby
from datasets import Dataset, DatasetDict, Features, Sequence, Value

def deduplicate_answers(answers):
    """Drop exact duplicate answers, keyed on (text, answer_start)."""
    key = lambda x: (x['text'], x['answer_start'])
    answers_sorted = sorted(answers, key=key)
    return [next(group) for _, group in groupby(answers_sorted, key=key)]
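# Example (hypothetical values): exact duplicates collapse to a single entry.
# deduplicate_answers([{'text': 'Hà Nội', 'answer_start': 5},
#                      {'text': 'Hà Nội', 'answer_start': 5}])
# -> [{'text': 'Hà Nội', 'answer_start': 5}]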

data_dir = "UIT-ViQuAD 2.0"
dataset_dict = {}
features = Features({
    'id': Value('string'),
    'uit_id': Value('string'),
    'title': Value('string'),
    'context': Value('string'),
    'question': Value('string'),
    'answers': Sequence(feature={'text': Value('string'), 'answer_start': Value('int32')}),
    'is_impossible': Value('bool'),
    'plausible_answers': Sequence(feature={'text': Value('string'), 'answer_start': Value('int32')})
})
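# Following the SQuAD v2.0 convention, `plausible_answers` is typically only
# populated for unanswerable questions (is_impossible == True). Note that
# `datasets` surfaces Sequence-of-dict columns as dicts of lists, e.g.
# record['answers'] == {'text': [...], 'answer_start': [...]}.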

for split in ["train", "dev", "test"]:
    fname = os.path.join(data_dir, f"{split}.json")
    with open(fname, encoding="utf-8") as f:
        data = json.load(f)

    ids, uit_ids, titles, contexts, questions, all_answers, impossibles, all_plausible_answers = [], [], [], [], [], [], [], []
    title_i = 0
    print("-"*20, split, len(data["data"]), "-"*20)
    for title_data in data["data"]:
        title = title_data["title"]
        ctx_i = 0
        title_i += 1

        for ctx_and_qs in title_data["paragraphs"]:
            qas = ctx_and_qs["qas"]
            context = ctx_and_qs["context"]
            q_i = 0
            ctx_i += 1
            question_set = set()
            for q in qas:
                question = q["question"]
                answers = q.get("answers")
                plausible_answers = q.get("plausible_answers")
                # Dedup answers
                if answers:
                    answers = deduplicate_answers(answers)
                if plausible_answers:
                    plausible_answers = deduplicate_answers(plausible_answers)
                uit_id = q["id"]
                is_impossible = q.get("is_impossible", False)

                # Skip duplicate questions within the same paragraph
                if question in question_set:
                    print("--- Found duplicate question:", question, "---")
                    print("Answers:", answers)
                    print("Plausible answers:", plausible_answers)
                    print("Is impossible:", is_impossible)
                    continue

                q_i += 1
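                # Stable ID from 1-based, zero-padded indices:
                # title / paragraph / question, e.g. "0001-0002-0013".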
                overall_id = f"{title_i:04d}-{ctx_i:04d}-{q_i:04d}"
                # Append data to lists
                ids.append(overall_id)
                uit_ids.append(uit_id)
                titles.append(title)
                contexts.append(context)
                questions.append(question)
                all_answers.append(answers)
                impossibles.append(is_impossible)
                all_plausible_answers.append(plausible_answers)
                question_set.add(question)

    # Convert to Dataset
    dataset = Dataset.from_dict({
        'id': ids,
        'uit_id': uit_ids,
        'title': titles,
        'context': contexts,
        'question': questions,
        'answers': all_answers,
        'is_impossible': impossibles,
        'plausible_answers': all_plausible_answers
    }, features=features)
    # Hugging Face convention: store the "dev" split as "validation"
    dataset_dict["validation" if split == "dev" else split] = dataset

print(dataset_dict)
hf_dataset = DatasetDict(dataset_dict)
hf_name = "UIT-ViQuAD2.0"
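# Pushing requires authentication with write access (e.g. `huggingface-cli login`).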
hf_dataset.push_to_hub(f"taidng/{hf_name}")
print("Dataset uploaded successfully!")