Datasets: NoisyNER
Tasks: Token Classification
Modalities: Text
Sub-tasks: named-entity-recognition
Languages: Estonian
Size: 100K - 1M
ArXiv: 2101.09763
License: cc-by-nc-4.0
parquet-converter committed
Commit f534ffc · 1 Parent(s): e98aead

Update parquet files

This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +0 -54
- NoisyNER_labelset1/noisyner-test.parquet +3 -0
- NoisyNER_labelset1/noisyner-train.parquet +3 -0
- NoisyNER_labelset1/noisyner-validation.parquet +3 -0
- NoisyNER_labelset2/noisyner-test.parquet +3 -0
- NoisyNER_labelset2/noisyner-train.parquet +3 -0
- NoisyNER_labelset2/noisyner-validation.parquet +3 -0
- NoisyNER_labelset3/noisyner-test.parquet +3 -0
- NoisyNER_labelset3/noisyner-train.parquet +3 -0
- NoisyNER_labelset3/noisyner-validation.parquet +3 -0
- NoisyNER_labelset4/noisyner-test.parquet +3 -0
- NoisyNER_labelset4/noisyner-train.parquet +3 -0
- NoisyNER_labelset4/noisyner-validation.parquet +3 -0
- NoisyNER_labelset5/noisyner-test.parquet +3 -0
- NoisyNER_labelset5/noisyner-train.parquet +3 -0
- NoisyNER_labelset5/noisyner-validation.parquet +3 -0
- NoisyNER_labelset6/noisyner-test.parquet +3 -0
- NoisyNER_labelset6/noisyner-train.parquet +3 -0
- NoisyNER_labelset6/noisyner-validation.parquet +3 -0
- NoisyNER_labelset7/noisyner-test.parquet +3 -0
- NoisyNER_labelset7/noisyner-train.parquet +3 -0
- NoisyNER_labelset7/noisyner-validation.parquet +3 -0
- README.md +0 -220
- data/NoisyNER_labelset1_all.tsv +0 -0
- data/NoisyNER_labelset1_dev.tsv +0 -0
- data/NoisyNER_labelset1_test.tsv +0 -0
- data/NoisyNER_labelset1_train.tsv +0 -0
- data/NoisyNER_labelset2_all.tsv +0 -0
- data/NoisyNER_labelset2_dev.tsv +0 -0
- data/NoisyNER_labelset2_test.tsv +0 -0
- data/NoisyNER_labelset2_train.tsv +0 -0
- data/NoisyNER_labelset3_all.tsv +0 -0
- data/NoisyNER_labelset3_dev.tsv +0 -0
- data/NoisyNER_labelset3_test.tsv +0 -0
- data/NoisyNER_labelset3_train.tsv +0 -0
- data/NoisyNER_labelset4_all.tsv +0 -0
- data/NoisyNER_labelset4_dev.tsv +0 -0
- data/NoisyNER_labelset4_test.tsv +0 -0
- data/NoisyNER_labelset4_train.tsv +0 -0
- data/NoisyNER_labelset5_all.tsv +0 -0
- data/NoisyNER_labelset5_dev.tsv +0 -0
- data/NoisyNER_labelset5_test.tsv +0 -0
- data/NoisyNER_labelset5_train.tsv +0 -0
- data/NoisyNER_labelset6_all.tsv +0 -0
- data/NoisyNER_labelset6_dev.tsv +0 -0
- data/NoisyNER_labelset6_test.tsv +0 -0
- data/NoisyNER_labelset6_train.tsv +0 -0
- data/NoisyNER_labelset7_all.tsv +0 -0
- data/NoisyNER_labelset7_dev.tsv +0 -0
- data/NoisyNER_labelset7_test.tsv +0 -0
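The converted splits listed above are plain Parquet files, so they can be inspected without the dataset loading script. A minimal sketch, assuming the files are available locally under the paths shown in the change list; the expected columns are the ones documented in the dataset card further down.

```python
# Minimal sketch: peek at one converted split with pandas (requires pyarrow or fastparquet).
# The path is the local file path from the change list above.
import pandas as pd

df = pd.read_parquet("NoisyNER_labelset1/noisyner-train.parquet")
print(df.shape)             # the card's split table lists 11365 training examples
print(df.columns.tolist())  # expected: ['id', 'tokens', 'lemmas', 'grammar', 'ner_tags']
print(df.iloc[0]["tokens"], df.iloc[0]["ner_tags"])
```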
.gitattributes
DELETED
@@ -1,54 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
-*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
NoisyNER_labelset1/noisyner-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a1f54291096ac01372cbeb56ad41bc9dc92a5a5351e4d733d40204056d5a541
+size 276470

NoisyNER_labelset1/noisyner-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a301473fafcc9fc28703e02b0d69538561e2348992413d730ecd520c5e19029c
+size 2145663

NoisyNER_labelset1/noisyner-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f80eed5d0fcb4062ac0393af9b8aeff72ce8e5c51a139964cf64f2e858e08439
+size 275023

NoisyNER_labelset2/noisyner-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a93bc464450a619e112e5f574069a1e458b1e2d9e69f23b0b5762baf8e045fa
+size 276869

NoisyNER_labelset2/noisyner-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc6b7c60cfb868207bebf96dcaf934f5f75a38951a8306de8a501679b7990434
+size 2148306

NoisyNER_labelset2/noisyner-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96e29733142eee4c9510df6d54c1700f1087802b6ce37c844c4d758814bd2d9b
+size 275359

NoisyNER_labelset3/noisyner-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d062052d5acd9cfb8f9e130d0701cf660d289bffc93969c278f337984394c642
+size 278152

NoisyNER_labelset3/noisyner-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c98173020c923011c2898a345bf1f9caf23734fad50fc0c29701ac993fff57f
+size 2158921

NoisyNER_labelset3/noisyner-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:541f650ffa2f507dc81728d5f2b82ce0b90fd24f3822bb9aed1f46909610ad52
+size 276722

NoisyNER_labelset4/noisyner-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2af076cfd2e64fbd084372a40d64b3f253a08530e30b9dacec94850f41d2d775
+size 276852

NoisyNER_labelset4/noisyner-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e9a35ab86e61f6647b7c422a3e7e7c79116f483a883eeff98811595c28c24ea
+size 2148182

NoisyNER_labelset4/noisyner-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fac8e62363c7163c6679d0fa42df74f58e3813e6358e39c2246d97880246ddb
+size 275350

NoisyNER_labelset5/noisyner-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1ca0a31078c795802cd53faeea41b6f95e9f8b7d582f3c8138340de071ff023
+size 278222

NoisyNER_labelset5/noisyner-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fd8ac129452b523620a70685b2d44f847a14d8c1fde87d3c94d67963d2cba56
+size 2159448

NoisyNER_labelset5/noisyner-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e4ca6f905733c07ed172c4761c2053b3798a244a876f4a0187d824dc4ca9f41
+size 276805

NoisyNER_labelset6/noisyner-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e1c5f90b90d3a61ec42cb64882b793aeaa97cb13b58bef0964ecbb118483711
+size 278019

NoisyNER_labelset6/noisyner-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50f13cb85202bf57790a3e8c70c139300ceb8161173665a968eb45a8567ed8ef
+size 2157544

NoisyNER_labelset6/noisyner-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae55495cb71e666b888814e10513e0ab4e5abb5ad0ae24282478a66c4c909f61
+size 276685

NoisyNER_labelset7/noisyner-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76e10477fa39e78b3addee9c319a9f89ef6fcf4e5b07be04085bf3994c2daee7
+size 278139

NoisyNER_labelset7/noisyner-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d7d6017fcaffe300d2f18e49740b2e321a185242c4ec5bb3f353ba025b3b7eb
+size 2158706

NoisyNER_labelset7/noisyner-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:048b48402284ad66f99d359399b19d9b448ec7aa92edf15990295c77c0f2ae71
+size 276857
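Each added parquet entry above is a Git LFS pointer (version, oid, size); the actual data lives in LFS storage, as configured by the deleted `.gitattributes` rules. Once the files are fetched, one label-set configuration can be loaded with the `datasets` parquet builder. A hedged sketch, assuming local, LFS-resolved copies of the three split files:

```python
# Sketch: load one NoisyNER label-set configuration from the converted parquet files.
# Assumes the three split files have been downloaded (LFS-resolved) to these local paths.
from datasets import load_dataset

data_files = {
    "train": "NoisyNER_labelset1/noisyner-train.parquet",
    "validation": "NoisyNER_labelset1/noisyner-validation.parquet",
    "test": "NoisyNER_labelset1/noisyner-test.parquet",
}
ds = load_dataset("parquet", data_files=data_files)
print(ds)  # DatasetDict with train/validation/test splits
```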
README.md
DELETED
@@ -1,220 +0,0 @@
----
-annotations_creators:
-- expert-generated
-language:
-- et
-language_creators:
-- found
-license:
-- cc-by-nc-4.0
-multilinguality:
-- monolingual
-paperswithcode_id: noisyner
-pretty_name: NoisyNER
-size_categories:
-- 10K<n<100K
-source_datasets:
-- original
-tags:
-- newspapers
-- 1997-2009
-task_categories:
-- token-classification
-task_ids:
-- named-entity-recognition
----
-
-# Dataset Card for NoisyNER
-
-## Table of Contents
-- [Table of Contents](#table-of-contents)
-- [Dataset Description](#dataset-description)
-  - [Dataset Summary](#dataset-summary)
-  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
-  - [Languages](#languages)
-- [Dataset Structure](#dataset-structure)
-  - [Data Instances](#data-instances)
-  - [Data Fields](#data-fields)
-  - [Data Splits](#data-splits)
-- [Dataset Creation](#dataset-creation)
-  - [Curation Rationale](#curation-rationale)
-  - [Source Data](#source-data)
-  - [Annotations](#annotations)
-  - [Personal and Sensitive Information](#personal-and-sensitive-information)
-- [Considerations for Using the Data](#considerations-for-using-the-data)
-  - [Social Impact of Dataset](#social-impact-of-dataset)
-  - [Discussion of Biases](#discussion-of-biases)
-  - [Other Known Limitations](#other-known-limitations)
-- [Additional Information](#additional-information)
-  - [Dataset Curators](#dataset-curators)
-  - [Licensing Information](#licensing-information)
-  - [Citation Information](#citation-information)
-  - [Contributions](#contributions)
-
-## Dataset Description
-
-- **Repository:** [Estonian NER corpus](https://doi.org/10.15155/1-00-0000-0000-0000-00073L), [NoisyNER dataset](https://github.com/uds-lsv/NoisyNER)
-- **Paper:** [Named Entity Recognition in Estonian](https://aclanthology.org/W13-2412/), [Analysing the Noise Model Error for Realistic Noisy Label Data](https://arxiv.org/abs/2101.09763)
-- **Dataset:** NoisyNER
-- **Domain:** News
-
-### Dataset Summary
-
-NoisyNER is a dataset for the evaluation of methods to handle noisy labels when training machine learning models.
-
-- Entity Types: `PER`, `ORG`, `LOC`
-
-It is from the NLP/Information Extraction domain and was created through a realistic distant supervision technique. Some highlights and interesting aspects of the data are:
-
-- Seven sets of labels with differing noise patterns to evaluate different noise levels on the same instances
-- Full parallel clean labels available to compute upper performance bounds or study scenarios where a small amount of gold-standard data can be leveraged
-- Skewed label distribution (typical for Named Entity Recognition tasks)
-- For some label sets: noise level higher than the true label probability
-- Sequential dependencies between the labels
-
-For more details on the dataset and its creation process, please refer to the original author's publication https://ojs.aaai.org/index.php/AAAI/article/view/16938 (published at AAAI'21).
-
-This dataset is based on the Estonian NER corpus. For more details see https://aclanthology.org/W13-2412/
-
-### Supported Tasks and Leaderboards
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Languages
-
-The language data in NoisyNER is in Estonian (BCP-47 et)
-
-## Dataset Structure
-
-### Data Instances
-
-An example of 'train' looks as follows.
-```
-{
-  'id': '0',
-  'tokens': ['Tallinna', 'õhusaaste', 'suureneb', '.'],
-  'lemmas': ['Tallinn+0', 'õhu_saaste+0', 'suurene+b', '.'],
-  'grammar': ['_H_ sg g', '_S_ sg n', '_V_ b', '_Z_'],
-  'ner_tags': [5, 0, 0, 0]
-}
-```
-
-### Data Fields
-
-The data fields are the same among all splits.
-
-- `id`: a `string` feature.
-- `tokens`: a `list` of `string` features.
-- `lemmas`: a `list` of `string` features.
-- `grammar`: a `list` of `string` features.
-- `ner_tags`: a `list` of classification labels (`int`). Full tagset with indices:
-
-```python
-{'O': 0, 'B-PER': 1, 'I-PER': 2, 'B-ORG': 3, 'I-ORG': 4, 'B-LOC': 5, 'I-LOC': 6}
-```
-
-### Data Splits
-
-The splits are the same across all configurations.
-
-|train|validation|test|
-|----:|---------:|---:|
-|11365|      1480|1433|
-
-## Dataset Creation
-
-### Curation Rationale
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Source Data
-
-#### Initial Data Collection and Normalization
-
-Tkachenko et al (2013) collected 572 news stories published in the local online newspapers [Delfi](http://delfi.ee/) and [Postimees](http://postimees.ee/) between 1997 and 2009. Selected articles cover both local and international news on a range of topics including politics, economics and sports. The raw text was preprocessed using the morphological disambiguator t3mesta ([Kaalep and
-Vaino, 1998](https://www.cl.ut.ee/yllitised/kk_yhest_1998.pdf)) provided by [Filosoft](http://www.filosoft.ee/). The processing steps involve tokenization, lemmatization, part-of-speech tagging, grammatical and morphological analysis.
-
-#### Who are the source language producers?
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Annotations
-
-#### Annotation process
-
-According to Tkachenko et al (2013) one of the authors manually tagged the corpus and the other author examined the tags, after which conflicting cases were resolved.
-The total size of the corpus is 184,638 tokens. Tkachenko et al (2013) provide the following number of named entities in the corpus:
-
-|        | PER  | LOC  | ORG  | Total |
-|--------|------|------|------|-------|
-| All    | 5762 | 5711 | 3938 | 15411 |
-| Unique | 3588 | 1589 | 1987 | 7164  |
-
-Hedderich et al (2021) obtained the noisy labels through a distant supervision/automatic annotation approach. They extracted lists of named entities from Wikidata and matched them against words in the text via the ANEA tool ([Hedderich, Lange, and Klakow 2021](https://arxiv.org/abs/2102.13129)). They also used heuristic functions to correct errors caused by non-complete lists of entities,
-grammatical complexities of Estonian that do not allow simple string matching or entity lists in conflict with each other. For instance, they normalized the grammatical form of a word or excluded certain high false-positive words. They provide seven sets of labels that differ in the noise process. This results in 8 different configurations, when added to the original split with clean labels.
-
-#### Who are the annotators?
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Personal and Sensitive Information
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-## Considerations for Using the Data
-
-### Social Impact of Dataset
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Discussion of Biases
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Other Known Limitations
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-## Additional Information
-
-### Dataset Curators
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Licensing Information
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-
-### Citation Information
-
-```
-@inproceedings{tkachenko-etal-2013-named,
-    title = "Named Entity Recognition in {E}stonian",
-    author = "Tkachenko, Alexander and
-      Petmanson, Timo and
-      Laur, Sven",
-    booktitle = "Proceedings of the 4th Biennial International Workshop on {B}alto-{S}lavic Natural Language Processing",
-    month = aug,
-    year = "2013",
-    address = "Sofia, Bulgaria",
-    publisher = "Association for Computational Linguistics",
-    url = "https://aclanthology.org/W13-2412",
-    pages = "78--83",
-}
-@article{Hedderich_Zhu_Klakow_2021,
-    title={Analysing the Noise Model Error for Realistic Noisy Label Data},
-    author={Hedderich, Michael A. and Zhu, Dawei and Klakow, Dietrich},
-    volume={35},
-    url={https://ojs.aaai.org/index.php/AAAI/article/view/16938},
-    number={9},
-    journal={Proceedings of the AAAI Conference on Artificial Intelligence},
-    year={2021},
-    month={May},
-    pages={7675-7684},
-}
-```
-
-### Contributions
-
-Thanks to [@phucdev](https://github.com/phucdev) for adding this dataset.
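The `ner_tags` values stored in the parquet files are the integer indices from the tagset quoted in the deleted card above. A small sketch of mapping them back to tag strings; the mapping itself is copied from the card, everything else is illustrative.

```python
# Decode integer ner_tags back into tag strings, using the tagset from the dataset card.
TAGS = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]

def decode(ner_tags):
    """Map a list of label indices to their string tags."""
    return [TAGS[i] for i in ner_tags]

# The example instance from the card: ['Tallinna', 'õhusaaste', 'suureneb', '.']
print(decode([5, 0, 0, 0]))  # ['B-LOC', 'O', 'O', 'O']
```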
data/NoisyNER_labelset1_all.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset1_dev.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset1_test.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset1_train.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset2_all.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset2_dev.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset2_test.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset2_train.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset3_all.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset3_dev.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset3_test.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset3_train.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset4_all.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset4_dev.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset4_test.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset4_train.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset5_all.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset5_dev.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset5_test.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset5_train.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset6_all.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset6_dev.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset6_test.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset6_train.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset7_all.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset7_dev.tsv
DELETED
The diff for this file is too large to render. See raw diff.

data/NoisyNER_labelset7_test.tsv
DELETED
The diff for this file is too large to render. See raw diff.