DavidErikMollberg committed on
Commit c0b72bf · 1 Parent(s): f04cdc3

Upload 2 files


Adding metadata and builder script

Files changed (2)
  1. metadata.tsv +0 -0
  2. ruv_tv_unknown_speakers.py +82 -0
metadata.tsv ADDED
The diff for this file is too large to render. See raw diff
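Note: the TSV itself is not rendered here, but judging from the fieldnames that the builder script passes to csv.DictReader, each row presumably carries four tab-separated columns and no header line: audio_id, episode_id, show_name and duration. A purely hypothetical row, for illustration only (the values below are made up):

SOME_AUDIO_ID	SOME_EPISODE_ID	SOME_SHOW_NAME	3.42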
 
ruv_tv_unknown_speakers.py ADDED
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3
+from collections import defaultdict
+import os
+import csv
+import datasets
+
+_NAME="RUV TV unknown speakers"
+_VERSION="1.0.0"
+_DESCRIPTION = """
+RUV TV unknown speakers
+"""
+
+class RuvConfig(datasets.BuilderConfig):
+    """BuilderConfig for The RUV TV unknown speakers Corpus"""
+
+    def __init__(self, name, **kwargs):
+        name=_NAME
+        super().__init__(name=name, **kwargs)
+
+class RuvAsr(datasets.GeneratorBasedBuilder):
+    """RUV TV unknown speakers 1.0"""
+
+    VERSION = datasets.Version(_VERSION)
+    BUILDER_CONFIGS = [
+        RuvConfig(
+            name=_NAME,
+            version=datasets.Version(_VERSION),
+        )
+    ]
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "audio_id": datasets.Value("string"),
+                "audio": datasets.Audio(sampling_rate=16000),
+                "show_name": datasets.Value("string"),
+                "episode_id": datasets.Value("string"),
+                "text": datasets.Value("string"),
+            }
+        )
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager):
+        metadata_path = dl_manager.download_and_extract("metadata.tsv")
+        audio_dir = "audio"
+        audio_paths = {}
+        audio_archives = {}
+        texts_path = dl_manager.download_and_extract("text")
+
+        with open(metadata_path) as f:
+            metadata = csv.DictReader(f, fieldnames=["audio_id", "episode_id", "show_name", "duration"], delimiter="\t")
+            for line in metadata:
+                audio_path = os.path.join(audio_dir, line["show_name"], line["episode_id"], line["audio_id"] + ".flac")
+                audio_paths[line["audio_id"]] = audio_path
+                try:
+                    audio_archives[line["audio_id"]] = dl_manager.download_and_extract(audio_path)
+                except FileNotFoundError as e:
+                    print(e)
+                    print("Failed to download", audio_path, "continuing...")
+                    continue
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"metadata_path": metadata_path, "audio_archives": audio_archives, "texts_path": texts_path}),
+        ]
+
+    def _generate_examples(self, metadata_path, audio_archives, texts_path):
+        with open(metadata_path) as f, open(texts_path) as g:
+            metadata = csv.DictReader(f, fieldnames=["audio_id", "episode_id", "show_name", "duration"], delimiter="\t")
+            for line, text_line in zip(metadata, g.readlines()):
+                audio_id = line["audio_id"]
+                audio_file = audio_archives[audio_id]
+
+                yield audio_id, {
+                    "audio_id": audio_id,
+                    "audio": {"path": audio_file, "bytes": open(audio_file, "rb").read()},
+                    "show_name": line["show_name"],
+                    "episode_id": line["episode_id"],
+                    "text": text_line.split(" ", maxsplit=1)[1].rstrip(),
+                }
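Usage note: the builder expects the repository to also hold an audio/ tree laid out as audio/<show_name>/<episode_id>/<audio_id>.flac and a "text" file whose lines are split on the first space in _generate_examples, i.e. presumably Kaldi-style "audio_id transcript" lines. A minimal loading sketch, assuming the script and data are available locally; the repository path below is a placeholder, and newer versions of the datasets library may additionally require trust_remote_code=True:

# Hypothetical usage sketch: "path/to/dataset_repo" is a placeholder for the
# directory containing ruv_tv_unknown_speakers.py, metadata.tsv, text and audio/.
from datasets import load_dataset

ds = load_dataset("path/to/dataset_repo/ruv_tv_unknown_speakers.py", split="train")
print(ds[0]["audio_id"], ds[0]["text"])  # first segment id and its transcript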