henryharm committed · Commit fd9f242 · 1 Parent(s): a22a48b

Added additional dataset configurations.

Files changed (1): ERRnews.py (+43 -12)
ERRnews.py CHANGED
@@ -6,31 +6,48 @@ import regex as re
 
 
 class ERRNewsConfig(datasets.BuilderConfig):
-    def __init__(self, data_url, recordings_url, **kwargs):
+    def __init__(self, data_url, features, recordings_url, **kwargs):
         super().__init__(version=datasets.Version("1.0.0"), **kwargs)
         self.data_url = data_url
         self.recordings_url = recordings_url
+        self.features = features
 
 
 class ERRNews(datasets.GeneratorBasedBuilder):
     VERSION = datasets.Version("1.0.0")
-    #data_url = "http://bark.phon.ioc.ee/lw/korpused/err_uudised/data.zip"
+    # data_url = "http://bark.phon.ioc.ee/lw/korpused/err_uudised/data.zip"
     data_url = "https://huggingface.co/datasets/TalTechNLP/ERRnews/resolve/main/data.zip"
+    recordings_url = "http://bark.phon.ioc.ee/lw/korpused/err_uudised/recordings.tar"
+    features = ["name", "summary", "transcript", "url", "meta"]
 
     BUILDER_CONFIGS = [
         ERRNewsConfig(
-            name="sum",
+            name="et",
             data_url=data_url,
-            recordings_url=None
+            recordings_url=None,
+            features=features
+        ),
+        ERRNewsConfig(
+            name="audio",
+            data_url=data_url,
+            recordings_url=recordings_url,
+            features=features + ["audio", "recording_id"]
+        ),
+        ERRNewsConfig(
+            name="et_en",
+            data_url=data_url,
+            recordings_url=None,
+            features=features + ["en_summary", "en_transcript"]
         ),
         ERRNewsConfig(
             name="full",
             data_url=data_url,
-            recordings_url="http://bark.phon.ioc.ee/lw/korpused/err_uudised/recordings.tar"
+            recordings_url=recordings_url,
+            features=features + ["audio", "recording_id", "en_summary", "en_transcript"]
         )
     ]
 
-    DEFAULT_CONFIG_NAME = "sum"
+    DEFAULT_CONFIG_NAME = "et"
 
     def _info(self):
         description = (
@@ -58,15 +75,23 @@ class ERRNews(datasets.GeneratorBasedBuilder):
             "name": datasets.Value("string"),
             "summary": datasets.Value("string"),
             "transcript": datasets.Value("string"),
-            "en_summary": datasets.Value("string"),
-            "en_transcript": datasets.Value("string"),
             "url": datasets.Value("string"),
-            "recording_id": datasets.Value("int32"),
             "meta": datasets.Value("string"),
         })
 
+        if self.config.name == "audio":
+            features["audio"] = datasets.features.Audio(sampling_rate=48_000)
+            features["recording_id"] = datasets.Value("int32")
+
+        if self.config.name == "et_en":
+            features["en_summary"] = datasets.Value("string")
+            features["en_transcript"] = datasets.Value("string")
+
         if self.config.name == "full":
+            features["en_summary"] = datasets.Value("string")
+            features["en_transcript"] = datasets.Value("string")
             features["audio"] = datasets.features.Audio(sampling_rate=48_000)
+            features["recording_id"] = datasets.Value("int32")
 
         return datasets.DatasetInfo(
             description=description,
@@ -112,20 +137,26 @@ class ERRNews(datasets.GeneratorBasedBuilder):
             ),
         ]
 
+    def create_dict(self, data):
+        res = dict()
+        for key in self.config.features:
+            res[key] = data[key]
+        return res
+
     def _generate_examples(self, file_path, audio_files, recordings_archive, data_archive):
         data = pd.read_csv(os.path.join(data_archive, file_path))
 
         if audio_files:
             for path, f in audio_files:
                 id = re.sub("^recordings\/", "", re.sub(".ogv$", "", path))
-                row = data.loc[data['recording_id']==int(id)]
+                row = data.loc[data['recording_id'] == int(id)]
                 if len(row) > 0:
                     result = row.to_dict('records')[0]
                     # set the audio feature and the path to the extracted file
                     path = os.path.join(recordings_archive, path) if recordings_archive else path
                     result["audio"] = {"path": path, "bytes": f.read()}
-                    yield row.index[0].item(), result
+                    yield row.index[0].item(), self.create_dict(result)
         else:
             for row in data.iterrows():
                 result = row[1].to_dict()
-                yield row[0], result
+                yield row[0], self.create_dict(result)
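
After this change the loading script exposes four configurations: et (Estonian text only, the new default), audio (adds the 48 kHz recordings and recording_id), et_en (adds the English summary and transcript translations), and full (everything). A minimal usage sketch, assuming the script is loaded from the TalTechNLP/ERRnews Hub repository that data_url already points at; split names are whatever _split_generators defines:

# Usage sketch: repo id taken from data_url above, config names from BUILDER_CONFIGS.
from datasets import load_dataset

# Default text-only configuration: name, summary, transcript, url, meta.
ds = load_dataset("TalTechNLP/ERRnews", "et")
print(ds)  # prints the available splits and their columns

# "audio" additionally yields a 48 kHz Audio feature plus recording_id;
# "et_en" adds en_summary/en_transcript; "full" combines both.
ds_audio = load_dataset("TalTechNLP/ERRnews", "audio")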