taidng committed
Commit 484592a · 1 Parent(s): 8953168

update README and process script

Files changed (2)
  1. README.md +16 -2
  2. process_viquad.py +44 -24
README.md CHANGED

````diff
@@ -71,7 +71,21 @@ The original UIT-ViQuAD contains over 23,000 QA pairs based on 174 Vietnamese Wikipedia articles
 
 The dataset has been processed to remove a few duplicated questions and answers.
 
-Questions about the private test set or the dataset should be directed to the authors.
+Version 2.0 contains the fields `is_impossible` and `plausible_answers`, which the authors [explained](https://vlsp.org.vn/vlsp2021/eval/mrc) in the shared task announcement:
+
+```
+Context: Khác với nhiều ngôn ngữ Ấn-Âu khác, tiếng Anh đã gần như loại bỏ hệ thống biến tố dựa trên cách để thay bằng cấu trúc phân tích. Đại từ nhân xưng duy trì hệ thống cách hoàn chỉnh hơn những lớp từ khác. Tiếng Anh có bảy lớp từ chính: động từ, danh từ, tính từ, trạng từ, hạn định từ (tức mạo từ), giới từ, và liên từ. Có thể tách đại từ khỏi danh từ, và thêm vào thán từ.
+
+question: Tiếng Anh có bao nhiêu loại từ?
+is_impossible: False. // There exists an answer to the question.
+answer: bảy.
+
+question: Ngôn ngữ Ấn-Âu có bao nhiêu loại từ?
+is_impossible: True. // No correct answer can be extracted from the Context.
+plausible_answer: bảy. // A plausible but incorrect answer extracted from the Context, of the type the question asks for.
+```
+
+Specific questions about the test set or the dataset should be directed to the [authors](https://nlp.uit.edu.vn/datasets).
 
 ### Languages
 
@@ -131,4 +145,4 @@ Shared task where version 2.0 was published:
 
 ### Acknowledgements
 
-We thank the authors of ViQuAD for releasing this dataset to the community.
+We thank the authors of ViQuAD and VLSP for releasing this dataset to the community.
````
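In the quoted example, the answerable question asks how many word classes English has ("bảy" is Vietnamese for "seven"), while the unanswerable variant asks the same about Indo-European languages in general, which the context never states. As a minimal sketch of consuming these fields with the `datasets` library (the repo id below is an assumption, not something this commit confirms):

```python
# Minimal sketch: load the processed dataset and split it on the v2.0
# `is_impossible` flag. The repo id is hypothetical and may differ.
from datasets import load_dataset

ds = load_dataset("taidng/UIT-ViQuAD2.0")  # hypothetical repo id

validation = ds["validation"]
answerable = validation.filter(lambda ex: not ex["is_impossible"])
unanswerable = validation.filter(lambda ex: ex["is_impossible"])

print(len(answerable), "answerable,", len(unanswerable), "unanswerable")
# Unanswerable questions carry no gold answer, only plausible_answers.
print(unanswerable[0]["question"], unanswerable[0]["plausible_answers"])
```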
process_viquad.py CHANGED

```diff
@@ -6,7 +6,7 @@ import os
 import json
 import pandas as pd
 from itertools import groupby
-from datasets import Dataset, DatasetDict
+from datasets import Dataset, DatasetDict, Features, Sequence, Value
 
 def deduplicate_answers(answers):
     answers_sorted = sorted(answers, key=lambda x: (x['text'], x['answer_start']))
@@ -15,35 +15,46 @@ def deduplicate_answers(answers):
 
 data_dir = "UIT-ViQuAD 2.0"
 dataset_dict = {}
+features = Features({
+    'id': Value('string'),
+    'uit_id': Value('string'),
+    'title': Value('string'),
+    'context': Value('string'),
+    'question': Value('string'),
+    'answers': Sequence(feature={'text': Value('string'), 'answer_start': Value('int32')}),
+    'is_impossible': Value('bool'),
+    'plausible_answers': Sequence(feature={'text': Value('string'), 'answer_start': Value('int32')})
+})
 
 for split in ["train", "dev", "test"]:
     fname = os.path.join(data_dir, f"{split}.json")
     data = json.load(open(fname))
-    rows = []
-    title_i = 0
 
+    ids, uit_ids, titles, contexts, questions, all_answers, impossibles, all_plausible_answers = [], [], [], [], [], [], [], []
+    title_i = 0
+    print("-"*20, split, len(data["data"]), "-"*20)
     for title_data in data["data"]:
         title = title_data["title"]
         ctx_i = 0
         title_i += 1
 
         for ctx_and_qs in title_data["paragraphs"]:
-            questions = ctx_and_qs["qas"]
+            qas = ctx_and_qs["qas"]
             context = ctx_and_qs["context"]
            q_i = 0
             ctx_i += 1
             question_set = set()
-            # define default wherever answer is empty
-            answer_default: list = [{'answer_start': -1, 'text': ''}]
-            for q in questions:
+            for q in qas:
                 question = q["question"]
-                answers = q["answers"] if "answers" in q else answer_default
-                plausible_answers = q["plausible_answers"] if "plausible_answers" in q else answer_default
+                answers = q.get("answers", [])
+                plausible_answers = q.get("plausible_answers", [])
                 # Dedup answers
-                answers = deduplicate_answers(answers)
-                plausible_answers = deduplicate_answers(plausible_answers)
+                if answers:
+                    answers = deduplicate_answers(answers)
+                if plausible_answers:
+                    plausible_answers = deduplicate_answers(plausible_answers)
                 uit_id = q["id"]
-                is_impossible = q["is_impossible"] if "is_impossible" in q else False
+                is_impossible = q.get("is_impossible", False)
 
                 # Check duplicate questions
                 if question in question_set:
@@ -55,20 +66,29 @@ for split in ["train", "dev", "test"]:
 
                 q_i += 1
                 overall_id = f"{title_i:04d}-{ctx_i:04d}-{q_i:04d}"
-                rows.append({
-                    "id": overall_id,
-                    "uit_id": uit_id,
-                    "title": title,
-                    "context": context,
-                    "question": question,
-                    "answers": answers,
-                    "is_impossible": is_impossible,
-                    "plausible_answers": plausible_answers
-                })
+                # Append data to lists
+                ids.append(overall_id)
+                uit_ids.append(uit_id)
+                titles.append(title)
+                contexts.append(context)
+                questions.append(question)
+                all_answers.append(answers)
+                impossibles.append(is_impossible)
+                all_plausible_answers.append(plausible_answers)
                 question_set.add(question)
+
     # Convert to Dataset
-    df = pd.DataFrame(rows)
-    dataset_dict[split if split!="dev" else "validation"] = Dataset.from_pandas(df)
+    dataset = Dataset.from_dict({
+        'id': ids,
+        'uit_id': uit_ids,
+        'title': titles,
+        'context': contexts,
+        'question': questions,
+        'answers': all_answers,
+        'is_impossible': impossibles,
+        'plausible_answers': all_plausible_answers
+    }, features=features)
+    dataset_dict[split if split!="dev" else "validation"] = dataset
 
 print(dataset_dict)
 hf_dataset = DatasetDict(dataset_dict)
```
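The diff shows only the first line of `deduplicate_answers`. Given the `itertools.groupby` import kept at the top of the script, a plausible completion would be the following (a sketch inferred from that one line, not the author's confirmed body):

```python
from itertools import groupby

# Inferred completion of deduplicate_answers: the commit only shows the
# sorting line, so the groupby step below is an assumption.
def deduplicate_answers(answers):
    answers_sorted = sorted(answers, key=lambda x: (x['text'], x['answer_start']))
    # groupby merges only adjacent duplicates, hence the sort above;
    # keep one representative per (text, answer_start) pair.
    return [next(group) for _, group in
            groupby(answers_sorted, key=lambda x: (x['text'], x['answer_start']))]
```

The explicit `features` schema passed to `Dataset.from_dict` is likely what motivated the move away from `pd.DataFrame`: unanswerable questions now leave `answers` as an empty list, and Arrow cannot infer the nested `{text, answer_start}` type from empty data alone.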