matteogabburo committed · Commit 8983693 · verified · 1 Parent(s): 0404d07

Update README.md

Files changed (1): README.md (+172 -1)

README.md CHANGED
@@ -15,6 +15,168 @@ pretty_name: mWikiQA
 size_categories:
 - 100K<n<1M
 configs:
+- config_name: default
+  data_files:
+  - split: train_en
+    path: "eng-train.jsonl"
+  - split: train_de
+    path: "deu-train.jsonl"
+  - split: train_fr
+    path: "fra-train.jsonl"
+  - split: train_it
+    path: "ita-train.jsonl"
+  - split: train_po
+    path: "por-train.jsonl"
+  - split: train_sp
+    path: "spa-train.jsonl"
+  - split: validation_en
+    path: "eng-dev.jsonl"
+  - split: validation_de
+    path: "deu-dev.jsonl"
+  - split: validation_fr
+    path: "fra-dev.jsonl"
+  - split: validation_it
+    path: "ita-dev.jsonl"
+  - split: validation_po
+    path: "por-dev.jsonl"
+  - split: validation_sp
+    path: "spa-dev.jsonl"
+  - split: test_en
+    path: "eng-test.jsonl"
+  - split: test_de
+    path: "deu-test.jsonl"
+  - split: test_fr
+    path: "fra-test.jsonl"
+  - split: test_it
+    path: "ita-test.jsonl"
+  - split: test_po
+    path: "por-test.jsonl"
+  - split: test_sp
+    path: "spa-test.jsonl"
+  - split: validation_clean_en
+    path: "eng-dev_clean.jsonl"
+  - split: validation_clean_de
+    path: "deu-dev_clean.jsonl"
+  - split: validation_clean_fr
+    path: "fra-dev_clean.jsonl"
+  - split: validation_clean_it
+    path: "ita-dev_clean.jsonl"
+  - split: validation_clean_po
+    path: "por-dev_clean.jsonl"
+  - split: validation_clean_sp
+    path: "spa-dev_clean.jsonl"
+  - split: test_clean_en
+    path: "eng-test_clean.jsonl"
+  - split: test_clean_de
+    path: "deu-test_clean.jsonl"
+  - split: test_clean_fr
+    path: "fra-test_clean.jsonl"
+  - split: test_clean_it
+    path: "ita-test_clean.jsonl"
+  - split: test_clean_po
+    path: "por-test_clean.jsonl"
+  - split: test_clean_sp
+    path: "spa-test_clean.jsonl"
+  - split: validation_++_en
+    path: "eng-dev_no_allneg.jsonl"
+  - split: validation_++_de
+    path: "deu-dev_no_allneg.jsonl"
+  - split: validation_++_fr
+    path: "fra-dev_no_allneg.jsonl"
+  - split: validation_++_it
+    path: "ita-dev_no_allneg.jsonl"
+  - split: validation_++_po
+    path: "por-dev_no_allneg.jsonl"
+  - split: validation_++_sp
+    path: "spa-dev_no_allneg.jsonl"
+  - split: test_++_en
+    path: "eng-test_no_allneg.jsonl"
+  - split: test_++_de
+    path: "deu-test_no_allneg.jsonl"
+  - split: test_++_fr
+    path: "fra-test_no_allneg.jsonl"
+  - split: test_++_it
+    path: "ita-test_no_allneg.jsonl"
+  - split: test_++_po
+    path: "por-test_no_allneg.jsonl"
+  - split: test_++_sp
+    path: "spa-test_no_allneg.jsonl"
+- config_name: clean
+  data_files:
+  - split: train_en
+    path: "eng-train.jsonl"
+  - split: train_de
+    path: "deu-train.jsonl"
+  - split: train_fr
+    path: "fra-train.jsonl"
+  - split: train_it
+    path: "ita-train.jsonl"
+  - split: train_po
+    path: "por-train.jsonl"
+  - split: train_sp
+    path: "spa-train.jsonl"
+  - split: validation_clean_en
+    path: "eng-dev_clean.jsonl"
+  - split: validation_clean_de
+    path: "deu-dev_clean.jsonl"
+  - split: validation_clean_fr
+    path: "fra-dev_clean.jsonl"
+  - split: validation_clean_it
+    path: "ita-dev_clean.jsonl"
+  - split: validation_clean_po
+    path: "por-dev_clean.jsonl"
+  - split: validation_clean_sp
+    path: "spa-dev_clean.jsonl"
+  - split: test_clean_en
+    path: "eng-test_clean.jsonl"
+  - split: test_clean_de
+    path: "deu-test_clean.jsonl"
+  - split: test_clean_fr
+    path: "fra-test_clean.jsonl"
+  - split: test_clean_it
+    path: "ita-test_clean.jsonl"
+  - split: test_clean_po
+    path: "por-test_clean.jsonl"
+  - split: test_clean_sp
+    path: "spa-test_clean.jsonl"
+- config_name: ++
+  data_files:
+  - split: train_en
+    path: "eng-train.jsonl"
+  - split: train_de
+    path: "deu-train.jsonl"
+  - split: train_fr
+    path: "fra-train.jsonl"
+  - split: train_it
+    path: "ita-train.jsonl"
+  - split: train_po
+    path: "por-train.jsonl"
+  - split: train_sp
+    path: "spa-train.jsonl"
+  - split: validation_++_en
+    path: "eng-dev_no_allneg.jsonl"
+  - split: validation_++_de
+    path: "deu-dev_no_allneg.jsonl"
+  - split: validation_++_fr
+    path: "fra-dev_no_allneg.jsonl"
+  - split: validation_++_it
+    path: "ita-dev_no_allneg.jsonl"
+  - split: validation_++_po
+    path: "por-dev_no_allneg.jsonl"
+  - split: validation_++_sp
+    path: "spa-dev_no_allneg.jsonl"
+  - split: test_++_en
+    path: "eng-test_no_allneg.jsonl"
+  - split: test_++_de
+    path: "deu-test_no_allneg.jsonl"
+  - split: test_++_fr
+    path: "fra-test_no_allneg.jsonl"
+  - split: test_++_it
+    path: "ita-test_no_allneg.jsonl"
+  - split: test_++_po
+    path: "por-test_no_allneg.jsonl"
+  - split: test_++_sp
+    path: "spa-test_no_allneg.jsonl"
 - config_name: en
   data_files:
   - split: train
 
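Note: each entry above maps a named split to a single JSONL file, so one language/split pair can be loaded on its own. A minimal sketch of what these names imply, using only split identifiers taken verbatim from the YAML above:

```python
from datasets import load_dataset

# Load a single split of the "default" config; split names follow the
# "<split>_<lang>" scheme declared in the YAML above.
train_en = load_dataset("matteogabburo/mWikiQA", split="train_en")

# The "clean" and "++" configs expose the filtered evaluation splits,
# e.g. the Italian "no all negatives" test set:
test_pp_it = load_dataset("matteogabburo/mWikiQA", "++", split="test_++_it")
```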
@@ -185,8 +347,17 @@ To use these splits, you can use the following snippet of code replacing ``[LANG
 ```
 from datasets import load_dataset
 
+# if you want all the corpora
+corpora = load_dataset("matteogabburo/mWikiQA")
+
+# if you want the clean validation and test sets
+corpora = load_dataset("matteogabburo/mWikiQA", "clean")
+
+# if you want the "no all negatives" validation and test sets
+corpora = load_dataset("matteogabburo/mWikiQA", "++")
+
 """
-if you want the default splits, replace [LANG] with an identifier in: en, fr, de, it, po, sp
+if you want the default splits of a specific language, replace [LANG] with an identifier in: en, fr, de, it, po, sp
 dataset = load_dataset("matteogabburo/mWikiQA", "[LANG]")
 """
 # example:
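For completeness, a quick sanity check after loading a per-language config. This assumes only what the YAML above declares (an `en` config with a `train` split) and prints the dataset structure rather than guessing at field names, which depend on the underlying JSONL schema:

```python
from datasets import load_dataset

# Load the English config and inspect its splits and first training example.
dataset = load_dataset("matteogabburo/mWikiQA", "en")
print(dataset)              # shows the available splits and their sizes
print(dataset["train"][0])  # field names come from the underlying JSONL
```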