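"""Convert a BEIR-style retrieval dataset layout (corpus.jsonl, queries.jsonl,
qrels/{train,dev,test}.tsv) into per-split JSONL files, one query per line
with its positive and (optional) negative passages."""
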
from datasets import load_dataset
from dataclasses import dataclass, field
import logging
from transformers import HfArgumentParser
from tqdm import tqdm
from typing import Dict, List
import json

logger = logging.getLogger()
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(
    logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
)
logger.handlers = [console_handler]


@dataclass
class ConversionArguments:
    path: str = field(metadata={"help": "Path to the MS MARCO dataset"})
    out: str = field(metadata={"help": "Output path"})


@dataclass
class QRel:
    doc: int  # corpus document id
    score: int  # relevance judgment: > 0 is a positive, 0 a negative

def load_json(path: str, split: str = "train") -> List[str]:
    dataset = load_dataset("json", data_files=path, split=split)
    cache: List[str] = []
    for row in tqdm(dataset, desc=f"loading {path}"):
        index = int(row["_id"])
        if index >= len(cache):
            # Grow the list geometrically so repeated extends stay cheap;
            # ids index directly into the list, with "" marking gaps.
            cache.extend([""] * (1 + 2 * max(index, len(cache))))
        cache[index] = row["text"]
    return cache


def load_qrel(path: str) -> Dict[int, List[QRel]]:
    dataset = load_dataset("csv", data_files=path, split="train", delimiter="\t")
    logger.info(f"loaded {path}: {dataset.features}")
    cache: Dict[int, List[QRel]] = {}
    for row in tqdm(dataset, desc=f"loading {path}"):
        qid = int(row["query-id"])
        qrel = QRel(int(row["corpus-id"]), int(row["score"]))
        cache.setdefault(qid, []).append(qrel)
    return cache


def process(
    qrels: Dict[int, List[QRel]], queries: List[str], corpus: List[str]
) -> List[Dict]:
    result = []
    for qid, rels in tqdm(qrels.items(), desc="processing split"):
        # Split the judged documents into positives (score > 0) and negatives
        # (score == 0), dropping ids that are missing from the corpus.
        pos = [
            {"doc": corpus[rel.doc], "score": rel.score}
            for rel in rels
            if rel.doc < len(corpus) and rel.score > 0 and corpus[rel.doc] != ""
        ]
        neg = [
            {"doc": corpus[rel.doc], "score": rel.score}
            for rel in rels
            if rel.doc < len(corpus) and rel.score == 0 and corpus[rel.doc] != ""
        ]
        group = {"query": queries[qid], "pos": pos}
        if len(neg) > 0:
            group["neg"] = neg
        result.append(group)
    return result


def main():
    parser = HfArgumentParser(ConversionArguments)
    (args,) = parser.parse_args_into_dataclasses()
    logger.info(f"Args: {args}")
    corpus = load_json(f"{args.path}/corpus.jsonl")
    queries = load_json(f"{args.path}/queries.jsonl")
    qrels = {
        "dev": process(load_qrel(f"{args.path}/qrels/dev.tsv"), queries, corpus),
        "test": process(load_qrel(f"{args.path}/qrels/test.tsv"), queries, corpus),
        "train": process(load_qrel(f"{args.path}/qrels/train.tsv"), queries, corpus),
    }
    logger.info("processing done")
    # Write one JSON object per line for each split.
    for split, data in qrels.items():
        with open(f"{args.out}/{split}.jsonl", "w") as out:
            for item in data:
                json.dump(item, out)
                out.write("\n")
    logger.info("done")


if __name__ == "__main__":
    main()
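
# Example invocation (script name and paths are hypothetical):
#   python convert.py --path datasets/msmarco --out out/msmarco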