Kosuke-Yamada committed
Commit 1a68a58 · 1 Parent(s): 977bce8
modify how to import

ner-wikinews-dataset.py  CHANGED  (+15 -23)
@@ -1,9 +1,7 @@
 import json
 from typing import Generator
 
-from datasets import (BuilderConfig, DatasetInfo, DownloadManager, Features,
-                      GeneratorBasedBuilder, Sequence, Split, SplitGenerator,
-                      Value, Version)
+import datasets
 
 _CITATION = ""
 _DESCRIPTION = "This is a dataset of Wikinews articles manually labeled with the named entity label."
@@ -12,27 +10,21 @@ _LICENSE = "This work is licensed under CC BY 2.5"
 _URL = "https://huggingface.co/datasets/llm-book/ner-wikinews-dataset/raw/main/annotated_wikinews.json"
 
 
-class NerWikinewsDataset(GeneratorBasedBuilder):
-    BUILDER_CONFIGS = [
-        BuilderConfig(
-            name="new-wikinews-dataset",
-            version=Version("1.0.0"),
-            description=_DESCRIPTION,
-        ),
-    ]
-
+class NerWikinewsDataset(datasets.GeneratorBasedBuilder):
     def _info(self):
-        return DatasetInfo(
+        return datasets.DatasetInfo(
             description=_DESCRIPTION,
-            features=Features(
+            features=datasets.Features(
                 {
-                    "curid": Value("string"),
-                    "text": Value("string"),
+                    "curid": datasets.Value("string"),
+                    "text": datasets.Value("string"),
                     "entities": [
                         {
-                            "name": Value("string"),
-                            "span": Sequence(Value("int64"), length=2),
-                            "type": Value("string"),
+                            "name": datasets.Value("string"),
+                            "span": datasets.Sequence(
+                                datasets.Value("int64"), length=2
+                            ),
+                            "type": datasets.Value("string"),
                         }
                     ],
                 }
@@ -74,15 +66,15 @@ class NerWikinewsDataset(GeneratorBasedBuilder):
         return outputs
 
     def _split_generators(
-        self, dl_manager: DownloadManager
-    ) -> list[SplitGenerator]:
+        self, dl_manager: datasets.DownloadManager
+    ) -> list[datasets.SplitGenerator]:
         data_file = dl_manager.download_and_extract(_URL)
         with open(data_file, "r", encoding="utf-8") as f:
             data = json.load(f)
         data = self._convert_data_format(data)
         return [
-            SplitGenerator(
-                name=Split.TEST,
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
                 gen_kwargs={"data": data},
             ),
         ]
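For context (not part of the commit): with the module-level import in place, the builder is consumed through the datasets library like any other script-based dataset. The sketch below is an assumption based on the repo id that appears in _URL and on the single TEST split the script defines; the trust_remote_code flag only matters on datasets versions that require opting in to dataset scripts.

import datasets

# Hypothetical usage sketch: load the script-backed dataset from the Hub.
# The repo id matches _URL above; "test" is the only split the builder defines.
dataset = datasets.load_dataset(
    "llm-book/ner-wikinews-dataset",
    split="test",
    trust_remote_code=True,  # needed on recent datasets releases to run loading scripts
)

# Each record follows the features declared in _info(): a curid, the article
# text, and a list of entities, each with a name, a two-element span, and a type.
example = dataset[0]
print(example["curid"])
for entity in example["entities"]:
    print(entity["name"], entity["span"], entity["type"])

Importing the library as a namespace (datasets.Value, datasets.Features, and so on) keeps the top of the script to a single import line and makes it obvious which names come from the library, which is presumably the motivation behind this commit.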