Modalities: Text
Languages: English
Libraries: Datasets
Commit e7c2267 by dipteshkanojia · 1 Parent(s): c75d61d

first commit

Files changed (1):
  1. PLOD-unfiltered.py +115 -0
PLOD-unfiltered.py ADDED
@@ -0,0 +1,115 @@
import os

import datasets
from typing import List
import json

logger = datasets.logging.get_logger(__name__)


_CITATION = """
"""

_DESCRIPTION = """
This is the dataset repository for the PLOD Dataset, accepted for publication at LREC 2022.
The dataset can help build sequence labelling models for the task of Abbreviation Detection.
"""


class PLODfilteredConfig(datasets.BuilderConfig):
    """BuilderConfig for the PLOD dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for the PLOD dataset.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(PLODfilteredConfig, self).__init__(**kwargs)


class PLODfiltered(datasets.GeneratorBasedBuilder):
    """PLOD Filtered dataset."""

    BUILDER_CONFIGS = [
        PLODfilteredConfig(name="PLODfiltered", version=datasets.Version("0.0.2"), description="PLOD filtered dataset"),
    ]

    def _info(self):
        # Declare the schema: tokens plus Universal POS tags and BIO-style NER tags.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "ADJ",
                                "ADP",
                                "ADV",
                                "AUX",
                                "CONJ",
                                "CCONJ",
                                "DET",
                                "INTJ",
                                "NOUN",
                                "NUM",
                                "PART",
                                "PRON",
                                "PROPN",
                                "PUNCT",
                                "SCONJ",
                                "SYM",
                                "VERB",
                                "X",
                                "SPACE"
                            ]
                        )
                    ),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "B-O",
                                "B-AC",
                                "I-AC",
                                "B-LF",
                                "I-LF"
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/surrey-nlp/PLOD-AbbreviationDetection",
            citation=_CITATION,
        )

    # Data files are hosted in the PLOD-filtered dataset repository on the Hugging Face Hub.
    _URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/"
    _URLS = {
        "train": _URL + "PLOS-train70-filtered-pos_bio.json",
        "dev": _URL + "PLOS-val15-filtered-pos_bio.json",
        "test": _URL + "PLOS-test15-filtered-pos_bio.json"
    }

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        urls_to_download = self._URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath) as f:
            plod = json.load(f)
            for obj in plod:
                id_ = int(obj['id'])
                yield id_, {
                    "id": str(id_),
                    "tokens": obj['tokens'],
                    "pos_tags": obj['pos_tags'],
                    "ner_tags": obj['ner_tags'],
                }
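
For reference, a minimal usage sketch of how a loading script like this is consumed downstream: the features declared in _info() let you map the integer pos_tags/ner_tags back to their label names. The hub repository id "surrey-nlp/PLOD-unfiltered" below is an assumption inferred from the file name and homepage, not something stated in this commit.

# Minimal usage sketch (hub id "surrey-nlp/PLOD-unfiltered" is assumed, not confirmed here).
from datasets import load_dataset

plod = load_dataset("surrey-nlp/PLOD-unfiltered")

# Splits defined by _split_generators(): train, validation, test.
print(plod)

# ner_tags is a Sequence(ClassLabel); recover the BIO label names from the feature.
label_names = plod["train"].features["ner_tags"].feature.names  # ['B-O', 'B-AC', 'I-AC', 'B-LF', 'I-LF']

example = plod["train"][0]
print(example["tokens"][:10])
print([label_names[i] for i in example["ner_tags"][:10]])

In the PLOD annotation scheme, B-AC marks abbreviation tokens and B-LF/I-LF mark the corresponding long-form spans, so the mapping above recovers the labels needed for a token-classification (sequence labelling) setup.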