Commit · f79068d
Parent: a9e305a
add script
transcription_data/ami-ihm.py → ami-ihm.py
RENAMED
@@ -265,6 +265,10 @@ _AUDIO_ARCHIVE_URL = _BASE_DATA_URL + "audio/{subset}/{split}/{_id}.tar.gz"
 
 _ANNOTATIONS_ARCHIVE_URL = _BASE_DATA_URL + "annotations/{split}/text"
 
+_TRANSCRIPT_URL = "https://huggingface.co/datasets/distil-whisper/ami/resolve/main/transcription_data/greedy_search/"
+
+_TRANSCRIPT_URLS = _TRANSCRIPT_URL + "{config}/{split}.txt"
+
 logger = datasets.utils.logging.get_logger(__name__)
 
 
@@ -297,6 +301,7 @@ class AMI(datasets.GeneratorBasedBuilder):
                 "end_time": datasets.Value("float32"),
                 "microphone_id": datasets.Value("string"),
                 "speaker_id": datasets.Value("string"),
+                "whisper_transcript": datasets.Value("string"),
             }
         )
         return datasets.DatasetInfo(
@@ -329,6 +334,9 @@ class AMI(datasets.GeneratorBasedBuilder):
         annotations_urls = {split: _ANNOTATIONS_ARCHIVE_URL.format(split=split) for split in splits}
         annotations = dl_manager.download(annotations_urls)
 
+        transcription_urls = {split: _TRANSCRIPT_URLS.format(split=split) for split in splits}
+        transcript_archive_path = dl_manager.download(transcription_urls[self.config.name])
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -336,6 +344,7 @@ class AMI(datasets.GeneratorBasedBuilder):
                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["train"]],
                     "local_extracted_archives_paths": local_extracted_archives_paths["train"],
                     "annotation": annotations["train"],
+                    "transcript_files": transcript_archive_path["train"],
                     "split": "train"
                 },
             ),
@@ -345,6 +354,7 @@ class AMI(datasets.GeneratorBasedBuilder):
                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["dev"]],
                     "local_extracted_archives_paths": local_extracted_archives_paths["dev"],
                     "annotation": annotations["dev"],
+                    "transcript_files": transcript_archive_path["dev"],
                     "split": "dev"
                 },
             ),
@@ -354,12 +364,13 @@ class AMI(datasets.GeneratorBasedBuilder):
                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["eval"]],
                     "local_extracted_archives_paths": local_extracted_archives_paths["eval"],
                     "annotation": annotations["eval"],
+                    "transcript_files": transcript_archive_path["eval"],
                     "split": "eval"
                 },
             ),
         ]
 
-    def _generate_examples(self, audio_archives, local_extracted_archives_paths, annotation, split):
+    def _generate_examples(self, audio_archives, local_extracted_archives_paths, annotation, transcript_files, split):
         # open annotation file
         assert len(audio_archives) == len(local_extracted_archives_paths)
 
@@ -391,6 +402,14 @@ class AMI(datasets.GeneratorBasedBuilder):
                     "speaker_id": speaker_id,
                 }
 
+        whisper_transcripts = []
+
+        with open(transcript_files, encoding="utf-8") as f:
+            for row in f:
+                whisper_transcripts.append(row.rstrip("\n"))
+
+        idx = 0
+
         features = ["meeting_id", "audio_id", "text", "begin_time", "end_time", "microphone_id", "speaker_id"]
         for archive, local_archive_path in zip(audio_archives, local_extracted_archives_paths):
             for audio_path, audio_file in archive:
@@ -407,5 +426,7 @@ class AMI(datasets.GeneratorBasedBuilder):
                         "path": os.path.join(local_archive_path, audio_path) if local_archive_path else audio_path,
                         "bytes": audio_file.read(),
                     },
-                    **{feature: audio_meta[feature] for feature in features}
+                    **{feature: audio_meta[feature] for feature in features},
+                    "whisper_transcript": whisper_transcripts[idx],
                 }
+                idx += 1