albertvillanova (HF staff) committed · verified
Commit 8603fef · 1 Parent(s): 9fa3472

Delete loading script

Files changed (1)
  1. ilist.py +0 -117
ilist.py DELETED
@@ -1,117 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Indo-Aryan Language Identification Shared Task Dataset"""
-
-
- import datasets
- from datasets.tasks import TextClassification
-
-
- _CITATION = r"""\
- @inproceedings{zampieri-etal-2018-language,
-     title = "Language Identification and Morphosyntactic Tagging: The Second {V}ar{D}ial Evaluation Campaign",
-     author = {Zampieri, Marcos and
-       Malmasi, Shervin and
-       Nakov, Preslav and
-       Ali, Ahmed and
-       Shon, Suwon and
-       Glass, James and
-       Scherrer, Yves and
-       Samard{\v{z}}i{\'c}, Tanja and
-       Ljube{\v{s}}i{\'c}, Nikola and
-       Tiedemann, J{\"o}rg and
-       van der Lee, Chris and
-       Grondelaers, Stefan and
-       Oostdijk, Nelleke and
-       Speelman, Dirk and
-       van den Bosch, Antal and
-       Kumar, Ritesh and
-       Lahiri, Bornini and
-       Jain, Mayank},
-     booktitle = "Proceedings of the Fifth Workshop on {NLP} for Similar Languages, Varieties and Dialects ({V}ar{D}ial 2018)",
-     month = aug,
-     year = "2018",
-     address = "Santa Fe, New Mexico, USA",
-     publisher = "Association for Computational Linguistics",
-     url = "https://aclanthology.org/W18-3901",
-     pages = "1--17",
- }
- """
-
- _DESCRIPTION = """\
- This dataset is introduced in a task which aimed at identifying 5 closely-related languages of Indo-Aryan language family –
- Hindi (also known as Khari Boli), Braj Bhasha, Awadhi, Bhojpuri, and Magahi.
- """
-
- _URL = "https://raw.githubusercontent.com/kmi-linguistics/vardial2018/master/dataset/{}.txt"
-
-
- class Ilist(datasets.GeneratorBasedBuilder):
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "language_id": datasets.ClassLabel(names=["AWA", "BRA", "MAG", "BHO", "HIN"]),
-                     "text": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://github.com/kmi-linguistics/vardial2018",
-             citation=_CITATION,
-             task_templates=[TextClassification(text_column="text", label_column="language_id")],
-         )
-
-     def _split_generators(self, dl_manager):
-         filepaths = dl_manager.download_and_extract(
-             {
-                 "train": _URL.format("train"),
-                 "test": _URL.format("gold"),
-                 "dev": _URL.format("dev"),
-             }
-         )
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": filepaths["train"],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": filepaths["test"],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": filepaths["dev"],
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         """Yields examples."""
-         with open(filepath, "r", encoding="utf-8") as file:
-             for idx, row in enumerate(file):
-                 row = row.strip("\n").split("\t")
-                 if len(row) == 1:
-                     continue
-                 yield idx, {"language_id": row[1], "text": row[0]}
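
With the loading script removed, the dataset is expected to be read directly from the data files hosted on the Hub rather than by executing `ilist.py`. A minimal sketch of loading it afterwards, assuming the repository keeps the id "ilist" and the converted data files preserve the `text`/`language_id` columns and the train/validation/test splits defined in the deleted script:

    from datasets import load_dataset

    # Load straight from the Hub data files; no custom loading script is run.
    # The repository id "ilist" is an assumption based on the deleted script's name.
    ds = load_dataset("ilist")

    print(ds)              # expected splits: train, validation, test
    print(ds["train"][0])  # e.g. {"text": "...", "language_id": 4}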