Delete auxiliary data-loading script
Browse files
create_miracl_japanese_small.py
DELETED
@@ -1,30 +0,0 @@
|
|
1 |
-
import pandas as pd
|
2 |
-
import json
|
3 |
-
from tqdm import tqdm
|
4 |
-
from datasets import load_dataset
|
5 |
-
|
6 |
-
# Path to the MIRACL v1.0 Japanese dev-split qrels file (tab-separated).
DEV_QREL_FILEPATH = "./qrels.miracl-v1.0-ja-dev.tsv"
# Destination for the filtered corpus subset, one JSON object per line (JSONL).
OUTPUT_FILEPATH = "./miracl-japanese-small-docs.jsonl"
|
8 |
-
|
9 |
-
def extract_doc_ids(filepath):
    """Return the set of integer document ids referenced by a qrels TSV file.

    The file is tab-separated with no header row; columns are
    (query_id, ph, doc_pas_id, rel).  Each ``doc_pas_id`` has the form
    ``"<docid>#<passage>"`` — the passage suffix is stripped and the
    document part kept as an int.

    Args:
        filepath: Path to the qrels TSV file.

    Returns:
        Set of unique integer document ids appearing in the file.
    """
    dev_qrel = pd.read_csv(filepath, delimiter='\t',
                           names=['query_id', 'ph', 'doc_pas_id', 'rel'])
    # Set comprehension instead of set([...]) — avoids building a throwaway list.
    return {int(dp_id.split('#')[0]) for dp_id in dev_qrel.doc_pas_id}
|
14 |
-
|
15 |
-
|
16 |
-
if __name__ == '__main__':
    # Collect the document ids referenced by the dev qrels.
    dev_doc_ids = extract_doc_ids(DEV_QREL_FILEPATH)
    doc_ids = dev_doc_ids
    print("# of docids in dev", len(dev_doc_ids))

    # Stream the MIRACL Japanese corpus and keep only passages whose parent
    # document appears in the qrels.  Matches are written straight to the
    # output file instead of being buffered in a list first (the original
    # also created an unused `seen_doc_ids` set) — same output, less memory.
    dataset = load_dataset("miracl/miracl-corpus", "ja")
    with open(OUTPUT_FILEPATH, 'w', encoding='utf-8') as f:
        for data in tqdm(dataset['train']):
            # docid has the form "<docid>#<passage>"; match on the document
            # part only, consistent with extract_doc_ids.
            docid = int(data["docid"].split("#")[0])
            if docid in doc_ids:
                f.write(json.dumps(data) + '\n')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|