|
from datasets import load_dataset, Features, Value, Sequence |
|
from dataclasses import dataclass, field |
|
import logging |
|
from transformers import HfArgumentParser |
|
from tqdm import tqdm |
|
from typing import Dict, List |
|
import json |
|
import numpy as np |
|
|
|
logger = logging.getLogger() |
|
logger.setLevel(logging.INFO) |
|
console_handler = logging.StreamHandler() |
|
console_handler.setFormatter( |
|
logging.Formatter("[%(asctime)s %(levelname)s] %(message)s") |
|
) |
|
logger.handlers = [console_handler] |
|
|
|
|
|
@dataclass |
|
class ConversionAgruments: |
|
out: str = field(metadata={"help": "Output path"}) |
|
|
|
|
|
@dataclass |
|
class QRel: |
|
doc: int |
|
score: int |
|
|
|
|
|
def load_msmarco(path: str, split) -> Dict[int, str]: |
|
dataset = load_dataset(path, split, split=split) |
|
cache: Dict[int, str] = {} |
|
for row in tqdm(dataset, desc=f"loading {path} split={split}"): |
|
index = int(row["_id"]) |
|
cache[index] = row["text"] |
|
return cache |
|
|
|
|
|
def load_qrel(path: str, split: str) -> Dict[int, List[QRel]]: |
|
dataset = load_dataset(path, split=split) |
|
print(dataset.features) |
|
cache: Dict[int, List[QRel]] = {} |
|
for row in tqdm(dataset, desc=f"loading {path} split={split}"): |
|
qid = int(row["query-id"]) |
|
qrel = QRel(int(row["corpus-id"]), int(row["score"])) |
|
if qid in cache: |
|
cache[qid].append(qrel) |
|
else: |
|
cache[qid] = [qrel] |
|
return cache |
|
|
|
|
|
def process_raw( |
|
qrels: Dict[int, List[QRel]], queries: Dict[int, str], corpus: Dict[int, str] |
|
) -> List[Dict]: |
|
result = [] |
|
for query, rels in tqdm(qrels.items(), desc="processing split"): |
|
pos = [corpus[rel.doc] for rel in rels if rel.doc in corpus and rel.score > 0] |
|
neg = [corpus[rel.doc] for rel in rels if rel.doc in corpus and rel.score == 0] |
|
group = {"query": queries[query], "positive": pos, "negative": neg} |
|
result.append(group) |
|
return result |
|
|
|
|
|
def main(): |
|
parser = HfArgumentParser((ConversionAgruments)) |
|
(args,) = parser.parse_args_into_dataclasses() |
|
print(f"Args: {args}") |
|
qrels = { |
|
"train": load_qrel("BeIR/msmarco-qrels", split="train"), |
|
"test": load_qrel("BeIR/msmarco-qrels", split="test"), |
|
"dev": load_qrel("BeIR/msmarco-qrels", split="validation"), |
|
} |
|
queries = load_msmarco("BeIR/msmarco", split="queries") |
|
corpus = load_msmarco("BeIR/msmarco", split="corpus") |
|
print("processing done") |
|
for split, data in qrels.items(): |
|
dataset = process_raw(data, queries, corpus) |
|
with open(f"{args.out}/{split}.jsonl", "w") as out: |
|
for item in dataset: |
|
json.dump(item, out) |
|
out.write("\n") |
|
print("done") |
|
|
|
|
|
if __name__ == "__main__": |
|
main() |
|
|