Datasets:
mteb
/

ArXiv:
orionweller committed on
Commit
5ae366d
·
1 Parent(s): 047c1d4
README.md ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - fas
4
+ - rus
5
+ - zho
6
+
7
+
8
+ multilinguality:
9
+ - multilingual
10
+
11
+ task_categories:
12
+ - text-retrieval
13
+
14
+ ---
15
+
16
+ From the NeuCLIR TREC Track 2023: https://arxiv.org/abs/2304.12367
17
+
18
+ Generated from https://huggingface.co/datasets/neuclir/neuclir1
19
+
20
+ ```
21
+ @misc{lawrie2024overview,
22
+ title={Overview of the TREC 2023 NeuCLIR Track},
23
+ author={Dawn Lawrie and Sean MacAvaney and James Mayfield and Paul McNamee and Douglas W. Oard and Luca Soldaini and Eugene Yang},
24
+ year={2024},
25
+ eprint={2404.08071},
26
+ archivePrefix={arXiv},
27
+ primaryClass={cs.IR}
28
+ }
29
+ ```
30
+
neuclir-2023-fast.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ import datasets
4
+
5
+ _CITATION = '''
6
+ @article{lawrie2024overview,
7
+ title={Overview of the TREC 2023 NeuCLIR track},
8
+ author={Lawrie, Dawn and MacAvaney, Sean and Mayfield, James and McNamee, Paul and Oard, Douglas W and Soldaini, Luca and Yang, Eugene},
9
+ year={2024}
10
+ }
11
+ '''
12
+
13
+ _LANGUAGES = [
14
+ 'rus',
15
+ 'fas',
16
+ 'zho',
17
+ ]
18
+
19
+ _DESCRIPTION = 'dataset load script for NeuCLIR 2023 Fast'
20
+
21
+ _DATASET_URLS = {
22
+ lang: {
23
+ 'test': f'https://huggingface.co/datasets/MTEB/neuclir-2023-fast/resolve/main/neuclir-{lang}/test-00000-of-00001.parquet',
24
+ } for lang in _LANGUAGES
25
+ }
26
+
27
+ _DATASET_CORPUS_URLS = {
28
+ f'corpus-{lang}': {
29
+ 'corpus': f'https://huggingface.co/datasets/MTEB/neuclir-2023-fast/resolve/main/neuclir-{lang}/corpus-00000-of-00001.parquet'
30
+ } for lang in _LANGUAGES
31
+ }
32
+
33
+ _DATASET_QUERIES_URLS = {
34
+ f'queries-{lang}': {
35
+ 'queries': f'https://huggingface.co/datasets/MTEB/neuclir-2023-fast/resolve/main/neuclir-{lang}/queries-00000-of-00001.parquet'
36
+ } for lang in _LANGUAGES
37
+ }
38
+
39
+
# NOTE(review): the class name "MLDR" appears to be carried over from the MLDR
# dataset script this loader was adapted from — the data here is NeuCLIR 2023.
# The name is kept unchanged so any external references keep working; confirm
# before renaming.
class MLDR(datasets.GeneratorBasedBuilder):
    """Dataset builder for the NeuCLIR 2023 Fast retrieval data.

    Exposes three config families per language (rus / fas / zho):

    * ``<lang>``         -- 'test' split of relevance judgments
                            (query-id, corpus-id, score)
    * ``corpus-<lang>``  -- 'corpus' split with documents (_id, title, text)
    * ``queries-<lang>`` -- 'queries' split with topics (_id, text)
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            version=datasets.Version('1.0.0'),
            name=lang,
            description=f'NeuCLIR dataset in language {lang}.',
        ) for lang in _LANGUAGES
    ] + [
        datasets.BuilderConfig(
            version=datasets.Version('1.0.0'),
            name=f'corpus-{lang}',
            description=f'corpus of NeuCLIR dataset in language {lang}.',
        ) for lang in _LANGUAGES
    ] + [
        datasets.BuilderConfig(
            version=datasets.Version('1.0.0'),
            name=f'queries-{lang}',
            description=f'queries of NeuCLIR dataset in language {lang}.',
        ) for lang in _LANGUAGES
    ]

    def _info(self):
        """Return DatasetInfo whose features match the selected config."""
        name = self.config.name
        if name.startswith('corpus-'):
            # Document collection: id, body text, and title.
            features = datasets.Features({
                '_id': datasets.Value('string'),
                'text': datasets.Value('string'),
                'title': datasets.Value('string'),
            })
        elif name.startswith('queries-'):
            # Topics: id and query text.
            features = datasets.Features({
                '_id': datasets.Value('string'),
                'text': datasets.Value('string'),
            })
        else:
            # Relevance judgments (qrels) linking queries to documents.
            features = datasets.Features({
                'query-id': datasets.Value('string'),
                'corpus-id': datasets.Value('string'),
                'score': datasets.Value('int32'),
            })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage='https://arxiv.org/abs/2304.12367',
            license=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the single parquet file for this config and declare its split.

        Each config family maps to exactly one split name and one URL table,
        so the lookup is done once instead of repeating the SplitGenerator
        boilerplate per branch.
        """
        name = self.config.name
        if name.startswith('corpus-'):
            split_name, urls = 'corpus', _DATASET_CORPUS_URLS[name]
        elif name.startswith('queries-'):
            split_name, urls = 'queries', _DATASET_QUERIES_URLS[name]
        else:
            split_name, urls = 'test', _DATASET_URLS[name]

        downloaded_files = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={'filepath': downloaded_files[split_name]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from a downloaded parquet file.

        Keys are the document/query ``_id`` for corpus/queries configs, and
        ``'<query-id>-----<corpus-id>'`` for qrels rows.
        """
        # Local import keeps pandas optional until examples are actually read.
        import pandas as pd

        name = self.config.name
        df = pd.read_parquet(filepath)

        if name.startswith('corpus-'):
            for _, row in df.iterrows():
                yield row['_id'], {
                    '_id': row['_id'],
                    'text': row['text'],
                    'title': row['title'],
                }
        elif name.startswith('queries-'):
            for _, row in df.iterrows():
                yield row['_id'], {
                    '_id': row['_id'],
                    'text': row['text'],
                }
        else:
            for _, row in df.iterrows():
                yield f"{row['query-id']}-----{row['corpus-id']}", {
                    'query-id': row['query-id'],
                    'corpus-id': row['corpus-id'],
                    'score': row['score'],
                }
neuclir-fas/corpus-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7846f22eb1392e4cbc4e1978a7fd56b8b0d3b7b21a0f65e201c0ca309a089530
3
+ size 37815775
neuclir-fas/queries-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d29a2b708ccc2d2a1daa54aa2e5c8d8001ed21f6aeada342b0d931a347919bc8
3
+ size 7410
neuclir-fas/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e561bc6fd740d19867d6b5b05618d4903cd8d9b2a05085f5dfe38dc0be58e6c
3
+ size 278230
neuclir-rus/corpus-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b79990dbfbe5fddbbc0caf21977279c7820022334d7674721e8198657adf8845
3
+ size 38545671
neuclir-rus/queries-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2fb98b9add49ca727fc30395cd31c638ee306d4432f7dd27c77d5dd450b0cf7
3
+ size 8492
neuclir-rus/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:055a7d190ffb7810247ec5467773b81f9ea5fe80f04e24b31ee3e339d42ba8c6
3
+ size 305373
neuclir-zho/corpus-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:54f2386596ea886ad8ec9f10686b5c0e14a04644257fe51bf71a4362a1c9eb71
3
+ size 37375163
neuclir-zho/queries-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8054215a902481f0242cf93d0dcdc7ffe17d79b9f4eb805dc14a9a4c95a74430
3
+ size 5936
neuclir-zho/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:701404741c9a3c23e4b44f43f8bbef2b322b404a2ecaeba3866c5ed2936ce288
3
+ size 261971