shuttie committed on
Commit
b35f8ea
·
1 Parent(s): 64b0d16

fix neg:nil issue while loading dataset

Browse files
README.md CHANGED
@@ -30,14 +30,14 @@ dataset_info:
30
  dtype: float
31
  splits:
32
  - name: train
33
- num_bytes: 1
34
- num_examples: 1
35
  - name: test
36
- num_bytes: 1
37
- num_examples: 1
38
  - name: dev
39
- num_bytes: 1
40
- num_examples: 1
41
  train-eval-index:
42
  - config: default
43
  task: sentence-similarity
 
30
  dtype: float
31
  splits:
32
  - name: train
33
+ num_bytes: 89609915
34
+ num_examples: 502939
35
  - name: test
36
+ num_bytes: 969945
37
+ num_examples: 43
38
  - name: dev
39
+ num_bytes: 1206403
40
+ num_examples: 6980
41
  train-eval-index:
42
  - config: default
43
  task: sentence-similarity
convert.py CHANGED
@@ -67,9 +67,7 @@ def process(
67
  for rel in rels
68
  if rel.doc < len(corpus) and rel.score == 0 and corpus[rel.doc] != ""
69
  ]
70
- group = {"query": queries[query], "pos": pos}
71
- if len(neg) > 0:
72
- group["neg"] = neg
73
  result.append(group)
74
  return result
75
 
@@ -78,17 +76,18 @@ def main():
78
  parser = HfArgumentParser((ConversionAgruments))
79
  (args,) = parser.parse_args_into_dataclasses()
80
  print(f"Args: {args}")
81
- corpus = load_json(f"{args.path}/corpus.jsonl", split="train")
82
- queries = load_json(f"{args.path}/queries.jsonl")
83
  qrels = {
84
- "dev": process(load_qrel(f"{args.path}/qrels/dev.tsv"), queries, corpus),
85
- "test": process(load_qrel(f"{args.path}/qrels/test.tsv"), queries, corpus),
86
- "train": process(load_qrel(f"{args.path}/qrels/train.tsv"), queries, corpus),
87
  }
 
 
88
  print("processing done")
89
  for split, data in qrels.items():
 
90
  with open(f"{args.out}/{split}.jsonl", "w") as out:
91
- for item in data:
92
  json.dump(item, out)
93
  out.write("\n")
94
  print("done")
 
67
  for rel in rels
68
  if rel.doc < len(corpus) and rel.score == 0 and corpus[rel.doc] != ""
69
  ]
70
+ group = {"query": queries[query], "pos": pos, "neg": neg}
 
 
71
  result.append(group)
72
  return result
73
 
 
76
  parser = HfArgumentParser((ConversionAgruments))
77
  (args,) = parser.parse_args_into_dataclasses()
78
  print(f"Args: {args}")
 
 
79
  qrels = {
80
+ "dev": load_qrel(f"{args.path}/qrels/dev.tsv"),
81
+ "train": load_qrel(f"{args.path}/qrels/train.tsv"),
82
+ "test": load_qrel(f"{args.path}/qrels/test.tsv"),
83
  }
84
+ corpus = load_json(f"{args.path}/corpus.jsonl", split="train")
85
+ queries = load_json(f"{args.path}/queries.jsonl")
86
  print("processing done")
87
  for split, data in qrels.items():
88
+ dataset = process(data, queries, corpus)
89
  with open(f"{args.out}/{split}.jsonl", "w") as out:
90
+ for item in dataset:
91
  json.dump(item, out)
92
  out.write("\n")
93
  print("done")
data/dev/dev.jsonl.gz CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:92ff05e37188b132fdac1734e823e78b8c92a2e00622f9886711b0d7d9274624
3
- size 1109843
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbd532e51439da3bd74c79105f9f9f27beafd1db73a1b2155713ec37c8fd376d
3
+ size 1206403
data/test/test.jsonl.gz CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3c4362a2f021d6b45f068d2f13aee9336b84123e718df797930956a540f30167
3
- size 860084
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b454c2d3e35adf12716e64699cc6637f703d9e82fc46f036cfa5af59433edc5
3
+ size 969945
data/train/train.jsonl.gz CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cfe4888522ab3a7be8bfa406f3e45a207193cd8b0fd026acd895cb6c1213a86f
3
- size 92239408
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d56a52df9c3bbd14ee6f92cef2aeb61bcf6d33bc9e85012559b53a5f246157a0
3
+ size 89609915
requirements.txt CHANGED
@@ -1 +1,2 @@
1
  datasets
 
 
1
  datasets
2
+ transformers
use.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from datasets import load_dataset
2
+
3
+ data = load_dataset("nixiesearch/MSMARCO", split="train")