holylovenia committed
Commit 880151b · 1 Parent(s): 60a0194

Upload indolem_nerui.py with huggingface_hub

Files changed (1)
  1. indolem_nerui.py +218 -0
indolem_nerui.py ADDED
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from nusacrowd.utils import schemas
from nusacrowd.utils.common_parser import load_conll_data
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import Tasks

_CITATION = """\
@INPROCEEDINGS{8275098,
  author={Gultom, Yohanes and Wibowo, Wahyu Catur},
  booktitle={2017 International Workshop on Big Data and Information Security (IWBIS)},
  title={Automatic open domain information extraction from Indonesian text},
  year={2017},
  volume={},
  number={},
  pages={23-30},
  doi={10.1109/IWBIS.2017.8275098}}

@article{DBLP:journals/corr/abs-2011-00677,
  author     = {Fajri Koto and
                Afshin Rahimi and
                Jey Han Lau and
                Timothy Baldwin},
  title      = {IndoLEM and IndoBERT: {A} Benchmark Dataset and Pre-trained Language
                Model for Indonesian {NLP}},
  journal    = {CoRR},
  volume     = {abs/2011.00677},
  year       = {2020},
  url        = {https://arxiv.org/abs/2011.00677},
  eprinttype = {arXiv},
  eprint     = {2011.00677},
  timestamp  = {Fri, 06 Nov 2020 15:32:47 +0100},
  biburl     = {https://dblp.org/rec/journals/corr/abs-2011-00677.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
"""

_LOCAL = False
_LANGUAGES = ["ind"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
_DATASETNAME = "indolem_nerui"

_DESCRIPTION = """\
NER UI is a named entity recognition dataset containing 2,125 sentences, obtained via an annotation assignment in an NLP course at the University of Indonesia in 2016.
The corpus has three named entity classes (location, organisation, and person) and a train/dev/test distribution of 1,530/170/425 sentences per fold, based on 5-fold cross-validation.
"""

_HOMEPAGE = "https://indolem.github.io/"

_LICENSE = "Creative Commons Attribution 4.0"
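
# Each entry in _URLS[_DATASETNAME] below holds the train/dev/test files for one
# cross-validation fold: fold 0 downloads the *.01.tsv files, fold 4 the *.05.tsv
# files. _get_fold_index() at the bottom of this script selects the matching entry.
# The files are two-column CoNLL-style TSV (token<TAB>tag, blank line between
# sentences), which is the layout load_conll_data expects; a hypothetical,
# illustrative row would be "Jakarta<TAB>B-LOCATION".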
_URLS = {
    _DATASETNAME: [
        {
            "train": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/train.01.tsv",
            "validation": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/dev.01.tsv",
            "test": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/test.01.tsv",
        },
        {
            "train": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/train.02.tsv",
            "validation": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/dev.02.tsv",
            "test": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/test.02.tsv",
        },
        {
            "train": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/train.03.tsv",
            "validation": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/dev.03.tsv",
            "test": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/test.03.tsv",
        },
        {
            "train": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/train.04.tsv",
            "validation": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/dev.04.tsv",
            "test": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/test.04.tsv",
        },
        {
            "train": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/train.05.tsv",
            "validation": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/dev.05.tsv",
            "test": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerui/test.05.tsv",
        },
    ]
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]

_SOURCE_VERSION = "1.0.0"
_NUSANTARA_VERSION = "1.0.0"


class IndolemNERUIDataset(datasets.GeneratorBasedBuilder):
    """NER UI contains 2,125 sentences obtained via an annotation assignment in an NLP course at the University of Indonesia.
    The corpus has three named entity classes (location, organisation, and person) and is split for 5-fold cross-validation."""

    label_classes = [
        "O",
        "B-LOCATION",
        "B-ORGANIZATION",
        "B-PERSON",
        "I-LOCATION",
        "I-ORGANIZATION",
        "I-PERSON",
    ]
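
    # Config naming scheme for the list below: the two base configs
    # ("indolem_nerui_source" / "indolem_nerui_nusantara_seq_label") fall back
    # to fold 0 via _get_fold_index(), while the "indolem_nerui_fold{i}_*"
    # configs (i in 0..4) select a specific cross-validation fold.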
    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="indolem_nerui_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="Indolem NER UI source schema",
            schema="source",
            subset_id="indolem_nerui",
        ),
        NusantaraConfig(
            name="indolem_nerui_nusantara_seq_label",
            version=datasets.Version(_NUSANTARA_VERSION),
            description="Indolem NER UI Nusantara schema",
            schema="nusantara_seq_label",
            subset_id="indolem_nerui",
        ),
    ] + [
        NusantaraConfig(
            name=f"indolem_nerui_fold{i}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="Indolem NER UI source schema",
            schema="source",
            subset_id=f"indolem_nerui_fold{i}",
        )
        for i in range(5)
    ] + [
        NusantaraConfig(
            name=f"indolem_nerui_fold{i}_nusantara_seq_label",
            version=datasets.Version(_NUSANTARA_VERSION),
            description="Indolem NER UI Nusantara schema",
            schema="nusantara_seq_label",
            subset_id=f"indolem_nerui_fold{i}",
        )
        for i in range(5)
    ]

    DEFAULT_CONFIG_NAME = "indolem_nerui_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "index": datasets.Value("string"),
                    "tokens": [datasets.Value("string")],
                    "tags": [datasets.Value("string")],
                }
            )
        elif self.config.schema == "nusantara_seq_label":
            features = schemas.seq_label_features(self.label_classes)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        idx = self._get_fold_index()
        urls = _URLS[_DATASETNAME][idx]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir["test"],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir["validation"],
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        conll_dataset = load_conll_data(filepath)

        if self.config.schema == "source":
            for i, row in enumerate(conll_dataset):
                ex = {
                    "index": str(i),
                    "tokens": row["sentence"],
                    "tags": row["label"],
                }
                yield i, ex

        elif self.config.schema == "nusantara_seq_label":
            for i, row in enumerate(conll_dataset):
                ex = {
                    "id": str(i),
                    "tokens": row["sentence"],
                    "labels": row["label"],
                }
                yield i, ex

    def _get_fold_index(self) -> int:
        try:
            subset_id = self.config.subset_id
            # Subset ids look like "indolem_nerui_fold3"; the digits after
            # "_fold" index into the per-fold entries of _URLS.
            idx_fold = subset_id.index("_fold")
            file_id = subset_id[idx_fold + 5:]
            return int(file_id)
        except ValueError:
            # No "_fold" suffix in the subset id: default to fold 0.
            return 0
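
For reference, a minimal usage sketch (not part of this commit; the local file path below is illustrative, and script-based loading assumes a datasets version that still supports dataset scripts):

    import datasets

    # Build fold 0 of NER UI in the source schema from the local script.
    ds = datasets.load_dataset("indolem_nerui.py", name="indolem_nerui_fold0_source")

    # Each example pairs a token sequence with a tag sequence of equal length.
    example = ds["train"][0]
    print(example["tokens"])
    print(example["tags"])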