|
from datasets import load_dataset |
|
from dataclasses import dataclass, field |
|
import logging |
|
from transformers import HfArgumentParser |
|
from tqdm import tqdm |
|
from typing import Dict, List |
|
import json |
|
|
|
# Root-logger setup: INFO level, a single stream handler (stderr) with a
# "[timestamp LEVEL] message" format. Assigning to `handlers` replaces any
# previously installed handlers so messages are not duplicated.
_fmt = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
console_handler = logging.StreamHandler()
console_handler.setFormatter(_fmt)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.handlers = [console_handler]
|
|
|
|
|
@dataclass
class ConversionAgruments:
    """Command-line arguments for the MS MARCO/BEIR -> JSONL conversion.

    NOTE(review): the class name contains a typo ("Agruments") but is kept
    unchanged for backward compatibility with existing callers.
    """

    # Directory containing corpus.jsonl, queries.jsonl and qrels/*.tsv.
    path: str = field(metadata={"help": "Path to the MS MARCO dataset"})
    # Directory where the converted {split}.jsonl files are written.
    out: str = field(metadata={"help": "Output path"})
|
|
|
|
|
@dataclass
class QRel:
    """A single relevance judgement for one (query, document) pair."""
    # Integer id of the judged corpus document.
    doc: int
    # Relevance grade; 0 = not relevant, >0 = relevant (see process()).
    score: int
|
|
|
|
|
def load_json(path: str, split: str = "train") -> List[str]:
    """Load a JSONL file of ``{"_id", "text"}`` rows into a dense list.

    The returned list is indexed by the integer ``_id`` of each row; ids
    that never appear are left as ``""`` so callers can detect missing
    entries.

    Args:
        path: Path to the JSONL file (e.g. corpus.jsonl / queries.jsonl).
        split: Dataset split to load; these files carry a single "train"
            split.

    Returns:
        List of texts, positioned by integer ``_id``.
    """
    # NOTE(review): assumes every `_id` parses as an int — BEIR-style
    # string ids like "doc1" would raise ValueError here. TODO confirm.
    dataset = load_dataset("json", data_files=path, split=split)
    cache: List[str] = []
    for row in tqdm(dataset, desc=f"loading {path}"):
        index = int(row["_id"])
        if index >= len(cache):
            # Grow exactly to index + 1. The previous doubling scheme
            # over-allocated up to ~3x and left trailing "" entries that
            # inflated len(cache); repeated exact extends are still O(n)
            # total because each slot is appended once.
            cache.extend([""] * (index + 1 - len(cache)))
        cache[index] = row["text"]
    return cache
|
|
|
|
|
def load_qrel(path: str) -> Dict[int, List[QRel]]:
    """Load a TSV qrels file and group judgements by query id.

    Args:
        path: Path to a tab-separated file with ``query-id``,
            ``corpus-id`` and ``score`` columns.

    Returns:
        Mapping from query id to the list of its QRel judgements, in
        file order.
    """
    dataset = load_dataset("csv", data_files=path, split="train", delimiter="\t")
    # Was a bare print() to stdout — demoted to debug-level logging so the
    # conversion output stays clean.
    logger.debug("qrel features for %s: %s", path, dataset.features)
    cache: Dict[int, List[QRel]] = {}
    for row in tqdm(dataset, desc=f"loading {path}"):
        qid = int(row["query-id"])
        cache.setdefault(qid, []).append(
            QRel(int(row["corpus-id"]), int(row["score"]))
        )
    return cache
|
|
|
|
|
def process(
    qrels: Dict[int, List[QRel]], queries: List[str], corpus: List[str]
) -> List[Dict]:
    """Join qrels with query/corpus texts into training groups.

    Each group is ``{"query": <text>, "pos": [...]}``, plus a ``"neg"``
    key when zero-score judgements exist. Judgements whose document is
    out of range or has empty text are dropped; judgements with a
    negative score are dropped entirely (neither pos nor neg), matching
    the original ``> 0`` / ``== 0`` split.

    Args:
        qrels: Query id -> relevance judgements.
        queries: Query texts indexed by query id (from load_json).
        corpus: Document texts indexed by document id (from load_json).

    Returns:
        One group dict per usable query.
    """
    result = []
    for qid, rels in tqdm(qrels.items(), desc="processing split"):
        # Skip queries with no text, consistent with the corpus
        # bounds/empty checks below (previously an out-of-range query id
        # raised IndexError).
        if qid >= len(queries) or queries[qid] == "":
            continue
        pos: List[Dict] = []
        neg: List[Dict] = []
        # Single pass instead of two comprehensions over the same rels.
        for rel in rels:
            if rel.doc >= len(corpus) or corpus[rel.doc] == "":
                continue
            entry = {"doc": corpus[rel.doc], "score": rel.score}
            if rel.score > 0:
                pos.append(entry)
            elif rel.score == 0:
                neg.append(entry)
        group = {"query": queries[qid], "pos": pos}
        if neg:
            group["neg"] = neg
        result.append(group)
    return result
|
|
|
|
|
def main():
    """Entry point: parse CLI args, convert corpus/queries/qrels into
    per-split JSONL files under ``args.out``.

    Expects ``{args.path}/corpus.jsonl``, ``{args.path}/queries.jsonl``
    and ``{args.path}/qrels/{dev,test,train}.tsv`` to exist, and writes
    ``{args.out}/{dev,test,train}.jsonl``.
    """
    parser = HfArgumentParser((ConversionAgruments,))
    (args,) = parser.parse_args_into_dataclasses()
    # Use the module's configured logger (lazy %-args) instead of print.
    logger.info("Args: %s", args)
    corpus = load_json(f"{args.path}/corpus.jsonl", split="train")
    queries = load_json(f"{args.path}/queries.jsonl")
    qrels = {
        split: process(load_qrel(f"{args.path}/qrels/{split}.tsv"), queries, corpus)
        for split in ("dev", "test", "train")
    }
    logger.info("processing done")
    for split, data in qrels.items():
        # Explicit utf-8: the platform default encoding is not reliable.
        with open(f"{args.out}/{split}.jsonl", "w", encoding="utf-8") as out:
            for item in data:
                json.dump(item, out)
                out.write("\n")
    logger.info("done")
|
|
|
|
|
if __name__ == "__main__": |
|
main() |
|
|