holylovenia
committed on
Upload nusaparagraph_topic.py with huggingface_hub
nusaparagraph_topic.py
CHANGED (+19 −19)
@@ -4,16 +4,16 @@ from typing import Dict, List, Tuple
 import datasets
 import pandas as pd
 
-from nusacrowd.utils import schemas
-from nusacrowd.utils.configs import NusantaraConfig
-from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import (DEFAULT_SEACROWD_VIEW_NAME,
                                        DEFAULT_SOURCE_VIEW_NAME, Tasks)
 
 _LOCAL = False
 
 _DATASETNAME = "nusaparagraph_topic"
 _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
-_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME
+_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
 
 _LANGUAGES = [
     "btk", "bew", "bug", "jav", "mad", "mak", "min", "mui", "rej", "sun"
@@ -43,7 +43,7 @@ _SUPPORTED_TASKS = [Tasks.TOPIC_MODELING]
 
 _SOURCE_VERSION = "1.0.0"
 
-_NUSANTARA_VERSION = "1.0.0"
+_SEACROWD_VERSION = "2024.06.20"
 
 _URLS = {
     "train":
@@ -55,13 +55,13 @@ _URLS = {
 }
 
 
-def nusantara_config_constructor(lang, schema, version):
-    """Construct NusantaraConfig with nusaparagraph_topic_{lang}_{schema} as the name format"""
-    if schema != "source" and schema != "nusantara_text":
+def seacrowd_config_constructor(lang, schema, version):
+    """Construct SEACrowdConfig with nusaparagraph_topic_{lang}_{schema} as the name format"""
+    if schema != "source" and schema != "seacrowd_text":
         raise ValueError(f"Invalid schema: {schema}")
 
     if lang == "":
-        return NusantaraConfig(
+        return SEACrowdConfig(
             name="nusaparagraph_topic_{schema}".format(schema=schema),
             version=datasets.Version(version),
             description=
@@ -71,7 +71,7 @@ def nusantara_config_constructor(lang, schema, version):
             subset_id="nusaparagraph_topic",
         )
     else:
-        return NusantaraConfig(
+        return SEACrowdConfig(
             name="nusaparagraph_topic_{lang}_{schema}".format(lang=lang,
                                                               schema=schema),
             version=datasets.Version(version),
@@ -101,15 +101,15 @@ class NusaParagraphTopic(datasets.GeneratorBasedBuilder):
     """NusaParagraph-Topic is a 8-labels (food & beverages, sports, leisure, religion, culture & heritage, a slice of life, technology, and business) topic modeling dataset for 10 Indonesian local languages."""
 
     BUILDER_CONFIGS = ([
-        nusantara_config_constructor(lang, "source", _SOURCE_VERSION)
+        seacrowd_config_constructor(lang, "source", _SOURCE_VERSION)
         for lang in LANGUAGES_MAP
     ] + [
-        nusantara_config_constructor(lang, "nusantara_text",
-                                     _NUSANTARA_VERSION)
+        seacrowd_config_constructor(lang, "seacrowd_text",
+                                    _SEACROWD_VERSION)
        for lang in LANGUAGES_MAP
     ] + [
-        nusantara_config_constructor("", "source", _SOURCE_VERSION),
-        nusantara_config_constructor("", "nusantara_text", _NUSANTARA_VERSION)
+        seacrowd_config_constructor("", "source", _SOURCE_VERSION),
+        seacrowd_config_constructor("", "seacrowd_text", _SEACROWD_VERSION)
     ])
 
     DEFAULT_CONFIG_NAME = "nusaparagraph_topic_ind_source"
@@ -121,7 +121,7 @@ class NusaParagraphTopic(datasets.GeneratorBasedBuilder):
                 "text": datasets.Value("string"),
                 "label": datasets.Value("string"),
             })
-        elif self.config.schema == "nusantara_text":
+        elif self.config.schema == "seacrowd_text":
             features = schemas.text_features([
                 "food & beverages", "sports", "leisures", "religion", "culture & heritage", "slice of life", "technology", "business"
             ])
@@ -138,7 +138,7 @@ class NusaParagraphTopic(datasets.GeneratorBasedBuilder):
         self, dl_manager: datasets.DownloadManager
     ) -> List[datasets.SplitGenerator]:
         """Returns SplitGenerators."""
-        if self.config.name == "nusaparagraph_topic_source" or self.config.name == "nusaparagraph_topic_nusantara_text":
+        if self.config.name == "nusaparagraph_topic_source" or self.config.name == "nusaparagraph_topic_seacrowd_text":
             # Load all 12 languages
             train_csv_path = dl_manager.download_and_extract([
                 _URLS["train"].format(lang=lang)
@@ -180,10 +180,10 @@ class NusaParagraphTopic(datasets.GeneratorBasedBuilder):
         ]
 
     def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
-        if self.config.schema != "source" and self.config.schema != "nusantara_text":
+        if self.config.schema != "source" and self.config.schema != "seacrowd_text":
             raise ValueError(f"Invalid config: {self.config.name}")
 
-        if self.config.name == "nusaparagraph_topic_source" or self.config.name == "nusaparagraph_topic_nusantara_text":
+        if self.config.name == "nusaparagraph_topic_source" or self.config.name == "nusaparagraph_topic_seacrowd_text":
             ldf = []
             for fp in filepath:
                 ldf.append(pd.read_csv(fp))
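For reference, the renamed constructor encodes the config naming scheme nusaparagraph_topic_{lang}_{schema}, with nusaparagraph_topic_{schema} for the all-language ("") variant. A minimal standalone sketch of that naming logic in plain Python, assuming LANGUAGES_MAP covers the same codes as _LANGUAGES above (LANGUAGES_MAP itself is not shown in this diff):

LANGS = ["btk", "bew", "bug", "jav", "mad", "mak", "min", "mui", "rej", "sun"]

def config_name(lang: str, schema: str) -> str:
    # Mirrors the name format used by seacrowd_config_constructor:
    # per-language configs plus an all-language ("") variant.
    if lang == "":
        return "nusaparagraph_topic_{schema}".format(schema=schema)
    return "nusaparagraph_topic_{lang}_{schema}".format(lang=lang, schema=schema)

names = (
    [config_name(lang, "source") for lang in LANGS]
    + [config_name(lang, "seacrowd_text") for lang in LANGS]
    + [config_name("", "source"), config_name("", "seacrowd_text")]
)
print(len(names))  # 22 config names under this assumption
print(names[0])    # nusaparagraph_topic_btk_source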
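Once the script is on the Hub, these config names are what a consumer passes to datasets.load_dataset. A usage sketch under assumptions not stated in this commit (the SEACrowd/nusaparagraph_topic repo id is hypothetical, and trust_remote_code is only needed because this is a custom loading script):

import datasets

REPO_ID = "SEACrowd/nusaparagraph_topic"  # hypothetical repo id for the uploaded script

# Source schema for a single language (Javanese, "jav" in _LANGUAGES).
jav_source = datasets.load_dataset(
    REPO_ID,
    name="nusaparagraph_topic_jav_source",
    trust_remote_code=True,
)

# Unified SEACrowd text schema over all languages (the empty-lang config).
all_text = datasets.load_dataset(
    REPO_ID,
    name="nusaparagraph_topic_seacrowd_text",
    trust_remote_code=True,
)

print(jav_source["train"][0])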