Tasks:
Token Classification
Modalities:
Text
Sub-tasks:
named-entity-recognition
Languages:
Spanish
Size:
10K - 100K
License:
cc-by-4.0
File size: 5,968 Bytes
"""
A dataset loading script for the PharmaCoNER corpus.
The PharmaCoNER dataset is a manually annotated collection of clinical case
studies derived from the Spanish Clinical Case Corpus (SPACCC). It was designed
for the Pharmacological Substances, Compounds and Proteins NER track, the first
shared task on detecting drug and chemical entities in Spanish medical documents.
"""
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{gonzalez-agirre-etal-2019-pharmaconer,
title = "PharmaCoNER: Pharmacological Substances, Compounds and proteins Named Entity Recognition track",
author = "Gonzalez-Agirre, Aitor and
Marimon, Montserrat and
Intxaurrondo, Ander and
Rabal, Obdulia and
Villegas, Marta and
Krallinger, Martin",
booktitle = "Proceedings of The 5th Workshop on BioNLP Open Shared Tasks",
month = nov,
year = "2019",
address = "Hong Kong, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/D19-5701",
doi = "10.18653/v1/D19-5701",
pages = "1--10",
abstract = "",
}
"""
_DESCRIPTION = """\
PharmaCoNER: Pharmacological Substances, Compounds and Proteins Named Entity Recognition track
This dataset is designed for the PharmaCoNER task, sponsored by Plan de Impulso de las Tecnologías del Lenguaje (Plan TL).
It is a manually classified collection of clinical case studies derived from the Spanish Clinical Case Corpus (SPACCC), an
open access electronic library that gathers Spanish medical publications from SciELO (Scientific Electronic Library Online).
The annotation of the entire set of entity mentions was carried out by medicinal chemistry experts,
and it covers the following four entity types: NORMALIZABLES, NO_NORMALIZABLES, PROTEINAS and UNCLEAR.
The PharmaCoNER corpus contains a total of 396,988 words and 1,000 clinical cases that have been randomly sampled into 3 subsets.
The training set contains 500 clinical cases, while the development and test sets contain 250 clinical cases each.
This translates to 8,074, 3,764 and 3,931 annotated sentences in the training, development and test sets, respectively.
The original dataset was distributed in Brat format (https://brat.nlplab.org/standoff.html).
For further information, please visit https://temu.bsc.es/pharmaconer/ or send an email to [email protected]
"""
_HOMEPAGE = "https://temu.bsc.es/pharmaconer/index.php/datasets/"
_LICENSE = "Creative Commons Attribution 4.0 International"
_VERSION = "1.1.0"
_URL = "https://huggingface.co/datasets/PlanTL-GOB-ES/pharmaconer/resolve/main/"
_TRAINING_FILE = "train-set_1.1.conll"
_DEV_FILE = "dev-set_1.1.conll"
_TEST_FILE = "test-set_1.1.conll"
class PharmaCoNERConfig(datasets.BuilderConfig):
"""BuilderConfig for PharmaCoNER dataset."""
def __init__(self, **kwargs):
        super().__init__(**kwargs)
class PharmaCoNER(datasets.GeneratorBasedBuilder):
"""PharmaCoNER dataset."""
BUILDER_CONFIGS = [
PharmaCoNERConfig(
name="PharmaCoNER",
version=datasets.Version(_VERSION),
description="PharmaCoNER dataset"),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-NO_NORMALIZABLES",
"B-NORMALIZABLES",
"B-PROTEINAS",
"B-UNCLEAR",
"I-NO_NORMALIZABLES",
"I-NORMALIZABLES",
"I-PROTEINAS",
"I-UNCLEAR",
]
)
),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
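    # Note: the ClassLabel feature above encodes the string tags yielded by
    # _generate_examples as integer ids in list order, i.e. "O" -> 0,
    # "B-NO_NORMALIZABLES" -> 1, ..., "I-UNCLEAR" -> 8.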
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"train": f"{_URL}{_TRAINING_FILE}",
"dev": f"{_URL}{_DEV_FILE}",
"test": f"{_URL}{_TEST_FILE}",
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
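        # download_and_extract returns the local cached path for each URL; the
        # paths are forwarded to _generate_examples through gen_kwargs.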
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
]
def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
guid = 0
tokens = []
ner_tags = []
for line in f:
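                # a blank line marks a sentence boundary: emit the accumulated example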
if line == "\n":
if tokens:
yield guid, {
"id": str(guid),
"tokens": tokens,
"ner_tags": ner_tags,
}
guid += 1
tokens = []
ner_tags = []
else:
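                    # columns are tab-separated; the token is first, the NER tag last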
splits = line.split("\t")
tokens.append(splits[0])
ner_tags.append(splits[-1].rstrip())
            # last example (in case the file does not end with a blank line)
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }
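# To recover the string labels from the integer `ner_tags` after loading (a
# sketch, reusing the hypothetical `pharmaconer` object from the usage example
# near the top of this file):
#
#     labels = pharmaconer["train"].features["ner_tags"].feature.names
#     example = pharmaconer["train"][0]
#     print([labels[tag] for tag in example["ner_tags"]])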