# coding=utf-8
import os
from itertools import groupby
from pathlib import Path

import torchaudio
import yaml

import datasets

_VERSION = "3.0.0"

_CITATION = """
@article{CATTONI2021101155,
title = {MuST-C: A multilingual corpus for end-to-end speech translation},
author = {Roldano Cattoni and Mattia Antonino {Di Gangi} and Luisa Bentivogli and Matteo Negri and Marco Turchi},
journal = {Computer Speech & Language},
volume = {66},
pages = {101155},
year = {2021},
issn = {0885-2308},
doi = {https://doi.org/10.1016/j.csl.2020.101155},
url = {https://www.sciencedirect.com/science/article/pii/S0885230820300887},
}
"""
_DESCRIPTION = """
MuST-C is a multilingual speech translation corpus whose size and quality facilitate
the training of end-to-end systems for speech translation from English into several languages.
For each target language, MuST-C comprises several hundred hours of audio recordings
from English [TED Talks](https://www.ted.com/talks), which are automatically aligned
at the sentence level with their manual transcriptions and translations.
"""
_HOMEPAGE = "https://ict.fbk.eu/must-c/"
_LANGUAGES = ["de", "ja", "zh"]
_SAMPLE_RATE = 16_000

class MUSTC(datasets.GeneratorBasedBuilder):
    """MuST-C speech translation dataset."""

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=f"en-{lang}", version=datasets.Version(_VERSION)) for lang in _LANGUAGES
    ]

    @property
    def manual_download_instructions(self):
        return f"""Please download MuST-C v3 from https://ict.fbk.eu/must-c/
        and unpack it with `tar xvzf MUSTC_v3.0_{self.config.name}.tar.gz`.
        Make sure to pass the path of the directory in which you unpacked the
        downloaded archive as `data_dir`: `datasets.load_dataset('mustc', data_dir="path/to/dir")`
        """
    # MUSTC_ROOT  # <- pass this directory as `data_dir`
    # └── en-de
    #     └── data
    #         ├── dev
    #         │   ├── txt
    #         │   │   ├── dev.de
    #         │   │   ├── dev.en
    #         │   │   └── dev.yaml
    #         │   └── wav
    #         │       ├── ted_767.wav
    #         │       ├── [...]
    #         │       └── ted_837.wav
    #         ├── train
    #         │   ├── txt/
    #         │   └── wav/
    #         ├── tst-COMMON
    #         │   ├── txt/
    #         │   └── wav/
    #         └── tst-HE
    #             ├── txt/
    #             └── wav/
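
    # Each `<split>.yaml` file is a list of segment dicts; the keys consumed by
    # `_generate_examples` below are "wav", "offset", "duration" and "speaker_id".
    # A sketch of one entry (values illustrative, the real files may carry
    # additional keys):
    #
    #   - {duration: 3.500000, offset: 17.240000, speaker_id: spk.767,
    #      wav: ted_767.wav}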

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                client_id=datasets.Value("string"),
                file=datasets.Value("string"),
                audio=datasets.Audio(sampling_rate=_SAMPLE_RATE),
                sentence=datasets.Value("string"),
                translation=datasets.Value("string"),
                id=datasets.Value("string"),
            ),
            supervised_keys=("file", "translation"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
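
    # A single generated example then looks like (values illustrative):
    #
    #   {"client_id": "spk.767", "file": ".../ted_767.wav",
    #    "audio": {"array": ..., "path": ".../ted_767.wav", "sampling_rate": 16000},
    #    "sentence": "<English transcript>", "translation": "<target-language text>",
    #    "id": "ted_767_0"}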

    def _split_generators(self, dl_manager):
        source_lang, target_lang = self.config.name.split("-")
        assert source_lang == "en"
        assert target_lang in _LANGUAGES

        data_root = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        root_path = Path(data_root) / self.config.name
        if not os.path.exists(root_path):
            raise FileNotFoundError(
                "Dataset not found. Manual download required. "
                f"{self.manual_download_instructions}"
            )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"root_path": root_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"root_path": root_path, "split": "dev"},
            ),
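            # `datasets.Split` names may only contain word characters and dots,
            # so the on-disk splits "tst-COMMON" and "tst-HE" are exposed as
            # "tst.COMMON" and "tst.HE" below.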
            datasets.SplitGenerator(
                name=datasets.Split("tst.COMMON"),
                gen_kwargs={"root_path": root_path, "split": "tst-COMMON"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split("tst.HE"),
                gen_kwargs={"root_path": root_path, "split": "tst-HE"},
            ),
        ]

    def _generate_examples(self, root_path, split):
        source_lang, target_lang = self.config.name.split("-")

        # Load the audio segment definitions for this split.
        txt_root = Path(root_path) / "data" / split / "txt"
        with (txt_root / f"{split}.yaml").open("r") as f:
            segments = yaml.load(f, Loader=yaml.BaseLoader)

        # Load source and target utterances; line i belongs to segment i.
        with open(txt_root / f"{split}.{source_lang}", "r") as s_f:
            with open(txt_root / f"{split}.{target_lang}", "r") as t_f:
                s_lines = s_f.readlines()
                t_lines = t_f.readlines()
                assert len(s_lines) == len(t_lines) == len(segments)
                for i, (src, trg) in enumerate(zip(s_lines, t_lines)):
                    segments[i][source_lang] = src.rstrip()
                    segments[i][target_lang] = trg.rstrip()

        # Load waveforms, one wav file (i.e. one talk) at a time.
        _id = 0
        wav_root = Path(root_path) / "data" / split / "wav"
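        # `itertools.groupby` only merges *consecutive* items, so the loader
        # relies on the YAML file listing all segments of a given wav file
        # contiguously (which the sorting by offset below then refines).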
        for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]):
            wav_path = wav_root / wav_filename
            # Sort the segments of this talk by their start time.
            seg_group = sorted(_seg_group, key=lambda x: float(x["offset"]))
            for i, segment in enumerate(seg_group):
                # Convert offset/duration from seconds to sample frames.
                offset = int(float(segment["offset"]) * _SAMPLE_RATE)
                duration = int(float(segment["duration"]) * _SAMPLE_RATE)
                waveform, sr = torchaudio.load(
                    wav_path, frame_offset=offset, num_frames=duration
                )
                assert duration == waveform.size(1), (duration, waveform.size(1))
                assert sr == _SAMPLE_RATE, (sr, _SAMPLE_RATE)
                yield _id, {
                    "file": wav_path.as_posix(),
                    "audio": {
                        "array": waveform.squeeze().numpy(),
                        "path": wav_path.as_posix(),
                        "sampling_rate": sr,
                    },
                    "sentence": segment[source_lang],
                    "translation": segment[target_lang],
                    "client_id": segment["speaker_id"],
                    "id": f"{wav_path.stem}_{i}",
                }
                _id += 1
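
if __name__ == "__main__":
    # Minimal local smoke test, not part of the loader itself. It assumes this
    # script is saved as `mustc.py` and that MUSTC_v3.0_en-de.tar.gz was
    # unpacked under ~/MUSTC_ROOT; both names are assumptions, adjust to your setup.
    ds = datasets.load_dataset("mustc.py", "en-de", data_dir="~/MUSTC_ROOT")
    sample = ds["validation"][0]
    print(sample["sentence"], "->", sample["translation"])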