cdminix committed on
Commit 4a6964a · 1 Parent(s): 7e2cea8

initial commit

Files changed (2)
  1. bu_radio.py +194 -0
  2. test.py +23 -0
bu_radio.py ADDED
@@ -0,0 +1,194 @@
+ """(BURN) Boston University Radio News Corpus."""
+
+ import os
+ from pathlib import Path
+ import hashlib
+ import pickle
+
+ import datasets
+ import pandas as pd
+ import numpy as np
+ from tqdm.contrib.concurrent import process_map
+ from tqdm.auto import tqdm
+ from multiprocessing import cpu_count
+ # from phones.convert import Converter
+ import torchaudio
+ import torchaudio.transforms as AT
+ from functools import lru_cache
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _PHONESET = "arpabet"
+
+ # environment variables are strings, so cast the worker count to int
+ _MAX_WORKERS = int(os.environ.get("BURN_MAX_WORKERS", cpu_count()))
+ _PATH = os.environ.get("BURN_PATH", None)
+
+ _VERSION = "0.0.1"
+
+ _CITATION = """\
+ @article{ostendorf1995boston,
+   title={The Boston University radio news corpus},
+   author={Ostendorf, Mari and Price, Patti J and Shattuck-Hufnagel, Stefanie},
+   journal={Linguistic Data Consortium},
+   pages={1--19},
+   year={1995}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The Boston University Radio Speech Corpus was collected primarily to support research in text-to-speech synthesis, particularly generation of prosodic patterns. The corpus consists of professionally read radio news data, including speech and accompanying annotations, suitable for speech and language research.
+ """
+
+ _URL = "https://catalog.ldc.upenn.edu/LDC96S36"
+
+
+ class BURNConfig(datasets.BuilderConfig):
+     """BuilderConfig for BURN."""
+
+     def __init__(self, sampling_rate=16000, hop_length=256, win_length=1024, **kwargs):
+         """BuilderConfig for BURN.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(BURNConfig, self).__init__(**kwargs)
+
+         self.sampling_rate = sampling_rate
+         self.hop_length = hop_length
+         self.win_length = win_length
+         # duration of one spectrogram frame in seconds, used to convert word durations to frames
+         self.seconds_per_frame = hop_length / sampling_rate
+
+         if _PATH is None:
+             raise ValueError("Please set the environment variable BURN_PATH to point to the BURN dataset directory.")
+
+
+ class BURN(datasets.GeneratorBasedBuilder):
+     """BURN dataset."""
+
+     BUILDER_CONFIGS = [
+         BURNConfig(
+             name="burn",
+             version=datasets.Version(_VERSION, ""),
+         ),
+     ]
+
+     def _info(self):
+         features = {
+             "speaker": datasets.Value("string"),
+             "words": datasets.Sequence(datasets.Value("string")),
+             "word_durations": datasets.Sequence(datasets.Value("int32")),
+             "prominence": datasets.Sequence(datasets.Value("bool")),
+             "break": datasets.Sequence(datasets.Value("bool")),
+             "audio": datasets.Value("string"),
+         }
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(features),
+             supervised_keys=["prominence", "break"],
+             homepage=_URL,
+             citation=_CITATION,
+             task_templates=None,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         return [
+             datasets.SplitGenerator(
+                 name="train",
+                 gen_kwargs={
+                     "speakers": ["f1a", "f3a", "m1b", "m2b", "m3b", "m4b"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name="dev",
+                 gen_kwargs={
+                     "speakers": [],
+                 },
+             ),
+         ]
+
+     def _generate_example(self, file):
+         words = []
+         word_ts = []
+         word_durations = []
+         # skip files that are missing any of the three annotation tiers
+         if not file.with_suffix(".ton").exists():
+             return None
+         if not file.with_suffix(".brk").exists():
+             return None
+         if not file.with_suffix(".wrd").exists():
+             return None
+         with open(file.with_suffix(".wrd"), "r") as f:
+             lines = f.readlines()
+             lines = [line for line in lines if line != "\n"]
+             # the annotation header ends at the "#" line
+             idx = lines.index("#\n")
+             lines = lines[idx+1:]
+             lines = [tuple(line.strip().split()) for line in lines]
+             # remove lines with no word
+             lines = [line for line in lines if len(line) == 3]
+             word_ts = np.array([float(start) for start, _, _ in lines])
+             words = [word for _, _, word in lines]
+         prominence = np.zeros(len(words))
+         boundary = np.zeros(len(words))
+         if len(words) <= 1:
+             return None
+         with open(file.with_suffix(".ton"), "r") as f:
+             lines = f.readlines()
+             lines = [line for line in lines if line != "\n"]
+             wrd_idx = 0
+             idx = lines.index("#\n")
+             lines = lines[idx+1:]
+             lines = [tuple(line.strip().split()[:3]) for line in lines]
+             # remove lines with no word
+             lines = [line for line in lines if len(line) == 3]
+             for start, _, accent in lines:
+                 # advance to the first word whose timestamp is at or after the accent
+                 while float(start) > word_ts[wrd_idx]:
+                     wrd_idx += 1
+                     if wrd_idx >= len(word_ts):
+                         logger.warning(f"Word index {wrd_idx} out of bounds for file {file}")
+                         return None
+                 # a pitch accent on the word marks it as prominent
+                 if accent in ['H*', 'L*', 'L*+H', 'L+H*', 'H+', '!H*']:
+                     prominence[wrd_idx] = 1
+         with open(file.with_suffix(".brk"), "r") as f:
+             lines = f.readlines()
+             lines = [line for line in lines if line != "\n"]
+             idx = lines.index("#\n")
+             lines = lines[idx+1:]
+             lines = [tuple(line.strip().split()) for line in lines]
+             if np.abs(len(lines) - len(words)) > 2:
+                 logger.warning(f"Word count mismatch for file {file}")
+                 return None
+             for l in lines:
+                 if len(l) < 3:
+                     continue
+                 score = l[2]
+                 start = float(l[0])
+                 # find the word whose timestamp is closest to the break's timestamp
+                 wrd_idx = np.argmin(np.abs(word_ts - start))
+                 # break indices 3 and 4 mark phrase boundaries
+                 if "3" in score or "4" in score:
+                     boundary[wrd_idx] = 1
+         # compute word durations in frames using self.config.seconds_per_frame
+         word_diff = np.concatenate([[word_ts[0]], np.diff(word_ts)])
+         word_durations = np.round(word_diff / self.config.seconds_per_frame).astype(np.int32)
+         return {
+             "words": words,
+             "word_durations": word_durations,
+             "prominence": prominence,
+             "break": boundary,
+             "audio": str(file),
+         }
+
+     def _generate_examples(self, speakers):
+         files = list(Path(_PATH).glob("**/*.sph"))
+         # derive each file's speaker from its path, e.g. <BURN_PATH>/f1a/...
+         file_speakers = [str(file).replace(_PATH, "").split("/")[1] for file in files]
+         j = 0
+         for i, file in enumerate(files):
+             # only keep files belonging to this split's speakers
+             if file_speakers[i] not in speakers:
+                 continue
+             example = self._generate_example(file)
+             if example is not None:
+                 example["speaker"] = file_speakers[i]
+                 yield j, example
+                 j += 1
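Since the loader locates the corpus through the BURN_PATH environment variable, a minimal usage sketch could look like the following; the /data/burn path is illustrative and must point at a local copy of LDC96S36:

    import os
    os.environ["BURN_PATH"] = "/data/burn"  # illustrative path to the extracted corpus

    from datasets import load_dataset

    ds = load_dataset("bu_radio.py")
    example = ds["train"][0]
    print(example["speaker"], example["words"][:5])

Note that BURN_PATH has to be set before load_dataset runs, because the script reads the variable when it is imported.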
test.py ADDED
@@ -0,0 +1,23 @@
+ from datasets import load_dataset
+ import numpy as np
+
+ ds = load_dataset("bu_radio.py")
+
+ prominence = ds["train"]["prominence"]
+ boundary = ds["train"]["break"]
+ audio = ds["train"]["audio"]
+
+ # check that audio file names are unique
+ audio_names = [a.split("/")[-1] for a in audio]
+ print(len(audio_names), len(set(audio_names)))
+
+ # flatten the per-utterance label sequences into one array each
+ prominence = np.concatenate(prominence)
+ boundary = np.concatenate(boundary)
+
+ prom_true = np.sum(prominence == True)
+ prom_false = np.sum(prominence == False)
+ bound_true = np.sum(boundary == True)
+ bound_false = np.sum(boundary == False)
+
+ # positive count, negative count, percentage of negatives, total word count
+ print("prominence:", prom_true, prom_false, np.round(prom_false / (prom_true + prom_false) * 100, 1), prom_true + prom_false)
+ print("break:     ", bound_true, bound_false, np.round(bound_false / (bound_true + bound_false) * 100, 1), bound_true + bound_false)