Datasets: llm-book/ner-wikinews-dataset
Languages: Japanese
Size: n<1K
Tags: news
License: cc-by-2.5
Kosuke-Yamada committed
Commit d37cf2e · 1 parent: 52802e7

modify file

Files changed (1)
ner-wikinews-dataset.py +24 -112
ner-wikinews-dataset.py CHANGED
@@ -1,100 +1,26 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# TODO: Address all TODOs and remove all explanatory comments
-"""TODO: Add a description here."""
-
-import csv
 import json
-import os
-import random
-from typing import Any, Dict, List
 
 import datasets
 
-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-@InProceedings{huggingface:dataset,
-title = {A great new dataset},
-author={huggingface, Inc.
-},
-year={2020}
-}
-"""
-
-# TODO: Add description of the dataset here
-# You can copy an official description
-_DESCRIPTION = """\
-This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
-"""
-
-# TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = ""
-
-# TODO: Add the licence for the dataset here if you can find it
-_LICENSE = ""
-
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+_CITATION = ""
+_DESCRIPTION = "This is a dataset of Wikinews articles manually labeled with the named entity label."
+_HOMEPAGE = "https://ja.wikinews.org/wiki/%E3%83%A1%E3%82%A4%E3%83%B3%E3%83%9A%E3%83%BC%E3%82%B8"
+_LICENSE = "This work is licensed under CC BY 2.5"
 _URL = "https://huggingface.co/datasets/llm-book/ner-wikinews-dataset/raw/main/annotated_wikinews.json"
 
 
-class NerWikinewsDatasetConfig(datasets.BuilderConfig):
-    """BuilderConfig for NerWikinewsDataset."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for NerWikinewsDataset
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(NerWikinewsDatasetConfig, self).__init__(**kwargs)
-
-
-# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
 class NerWikinewsDataset(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
-    # BUILDER_CONFIGS = [
-    #     datasets.BuilderConfig(
-    #         name="all",
-    #         version=VERSION,
-    #         description="This part of my dataset covers a first domain",
-    #     ),
-    # ]
-
-    # DEFAULT_CONFIG_NAME = "all"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="new-wikinews-dataset",
+            version=datasets.Version("1.1.0"),
+            description=_DESCRIPTION,
+        ),
+    ]
 
     def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
         return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
             features=datasets.Features(
                 {
                     "curid": datasets.Value("string"),
@@ -108,21 +34,16 @@ class NerWikinewsDataset(datasets.GeneratorBasedBuilder):
                         "type": datasets.Value(dtype="string"),
                     }
                 ],
-                    # These are the features of your dataset like images, labels ...
                 }
-            ),  # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
+            ),
             homepage=_HOMEPAGE,
-            # License for the dataset if available
             license=_LICENSE,
-            # Citation for the dataset
             citation=_CITATION,
         )
 
-    def _change_data_format(self, annotated_data: List[Dict[str, Any]]):
+    def _convert_data_format(
+        self, annotated_data: list[dict[str, any]]
+    ) -> list[dict[str, any]]:
         outputs = []
         for data in annotated_data:
             if data["annotations"] == []:
@@ -133,7 +54,10 @@ class NerWikinewsDataset(datasets.GeneratorBasedBuilder):
                 entities.append(
                     {
                         "name": result["value"]["text"],
-                        "span": [result["value"]["start"], result["value"]["end"]],
+                        "span": [
+                            result["value"]["start"],
+                            result["value"]["end"],
+                        ],
                         "type": result["value"]["labels"][0],
                     }
                 )
@@ -147,32 +71,20 @@ class NerWikinewsDataset(datasets.GeneratorBasedBuilder):
         return outputs
 
     def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        data_dir = dl_manager.download_and_extract(_URL)
-
-        # Read the downloaded file and get all of the data
-
-        with open(data_dir, "r") as f:
-            test_data = json.load(f)
+        data_file = dl_manager.download_and_extract(_URL)
+        with open(data_file, "r") as f:
+            data = json.load(f)
 
-        test_data = self._change_data_format(test_data)
+        data = self._convert_data_format(data)
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={"data": test_data},
+                gen_kwargs={"data": data},
             ),
         ]
 
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, data):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         for key, data in enumerate(data):
             yield key, {
                 "curid": data["curid"],