diarray committed
Commit b7efb7d
1 Parent(s): 521a965

Git Add -> Python scripts used to create this new version of the Dataset

scripts/clean_tsv.py ADDED
@@ -0,0 +1,98 @@
"""Script to remove some of the most common issues in the .tsv transcription files"""

import csv
import glob
import os
import re
import sys

def clean_tsv(input_file: str, output_file: str, revision_file: str) -> None:
    """Clean a single .tsv transcription file.

    Args:
        input_file (str): The path to the .tsv file to clean
        output_file (str): The path to the file to save the cleaned rows in (typically the same file)
        revision_file (str): The path to the file to save the still-inconsistent rows in
    """
    cleaned_rows = []
    revision_rows = []

    # Helper function to clean a line
    def clean_line(line):
        # Remove unwanted characters
        line = re.sub(r'[<>"]', '', line)
        # Replace consecutive tabs with a single tab
        line = re.sub(r'\t+', '\t', line)
        return line

    # Open the input file and process each row
    with open(input_file, 'r', encoding='utf-8') as infile:
        reader = csv.reader(infile, delimiter='\t')

        for row in reader:
            # Clean the row
            row = [clean_line(item) for item in row]

            # If the row has exactly 4 items, ensure the first two are numbers
            if len(row) == 4:
                try:
                    # Ensure the first two items are integer timestamps
                    # (raises ValueError otherwise)
                    int(row[0])
                    int(row[1])
                    # Append the cleaned row
                    cleaned_rows.append(row)
                except ValueError:
                    # If the timestamps are not valid integers, move the row to revision
                    revision_rows.append(row)
                    print("One problematic row has been added to revision")
            # If the row has more than 4 items, look for a tab that should have been
            # a space after a comma
            elif len(row) > 4:
                row_str = "\t".join(row)
                if ',' in row_str:
                    row_str = re.sub(r',\t', ', ', row_str)
                # After fixing the spaces, split again and check the length
                row_fixed = row_str.split('\t')
                if len(row_fixed) == 4:
                    cleaned_rows.append(row_fixed)
                else:
                    revision_rows.append(row_fixed)
                    print("One problematic row has been added to revision")
            else:
                # Rows with fewer than 4 elements also go to revision
                revision_rows.append(row)
                print("One problematic row has been added to revision")

    # Write the cleaned rows to the output file
    with open(output_file, 'w', encoding='utf-8', newline='') as outfile:
        writer = csv.writer(outfile, delimiter='\t')
        writer.writerows(cleaned_rows)
        print(f"**** New cleaned tsv file saved at {output_file} ****")

    if revision_rows:
        # Write the revision rows to the revision file if there are rows to review
        with open(revision_file, 'w', encoding='utf-8', newline='') as revfile:
            writer = csv.writer(revfile, delimiter='\t')
            writer.writerows(revision_rows)
            print(f"**** New revision file saved at {revision_file} ****")

if __name__ == "__main__":
    transcription_dir = sys.argv[1]

    # Ensure the revision directory exists
    rev_dir = f'{transcription_dir}/revisions'
    os.makedirs(rev_dir, exist_ok=True)

    # Get the paths to all the tsv files
    tsv_files = glob.glob(transcription_dir + "/*.tsv")

    for tsv_file in tsv_files:
        in_file = tsv_file
        out_file = in_file
        rev_file = rev_dir + "/" + tsv_file.split("/")[-1][:-4] + "-rev.tsv"
        clean_tsv(input_file=in_file, output_file=out_file, revision_file=rev_file)
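
The cleaning pass is purely regex-based: stray <, > and " characters are removed and runs of tabs are collapsed before each row is validated against the expected start/end/bambara/french layout. A minimal sketch of its effect on a malformed row (the row content below is made up for illustration):

import re

raw = '1200\t\t4800\t"i ni ce"\tmerci'  # hypothetical row: stray quotes and a doubled tab
line = re.sub(r'[<>"]', '', raw)        # drop the unwanted characters
line = re.sub(r'\t+', '\t', line)       # collapse consecutive tabs
print(line.split('\t'))                 # ['1200', '4800', 'i ni ce', 'merci'] -> a valid 4-column row

The script takes the transcription directory as its only argument, e.g. python scripts/clean_tsv.py path/to/transcriptions; rows that still fail validation are written to path/to/transcriptions/revisions/<name>-rev.tsv for manual review.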
scripts/create_data_manifest.py ADDED
@@ -0,0 +1,229 @@
"""Script to create NeMo-compatible data manifests for jeli-asr"""

## Imports
import csv
import glob
import json
import os
import random
import shutil
import sys
from pydub import AudioSegment

# Key callable to sort the wav file paths
def key_sort_paths(path: str) -> int:
    """Serve as the key function to sort the wav file paths

    Args:
        path (str): An individual path

    Returns:
        int: The number of the split (between 1 and 6)
    """
    # The split number is the character right before the ".wav" extension
    return int(path[-5])

# Function to read and combine the audios
def read_audios(glob_paths: list[str]) -> AudioSegment:
    """Read the six 10-minute audios as AudioSegments and return the combined 1-hour audio

    Args:
        glob_paths (list[str]): The paths of the six .wav files

    Returns:
        AudioSegment: The combined audio
    """
    audios = []
    for wav_file in sorted(glob_paths, key=key_sort_paths):
        audios.append(AudioSegment.from_file(file=wav_file, format="wav"))
    final_audio = sum(audios[1:], start=audios[0])
    return final_audio

# A function that reads and returns the utterances from a .tsv file
def read_tsv(tsv_file_path: str) -> list[list[int | str]]:
    """Read a .tsv file and return the utterances in it

    Args:
        tsv_file_path (str): The path to the tsv file

    Returns:
        list[list[int | str]]: The utterances, with the timestamps converted to int
    """
    with open(tsv_file_path, "r", encoding='utf-8') as recording_transcript:
        tsv_file_rows = csv.reader(recording_transcript, delimiter="\t")
        utterances = [[int(start), int(end), bam, french] for start, end, bam, french in tsv_file_rows]
    return utterances

# Function to subdivide the audio (transcript) into multiple variable-length slices
def create_var_length_samples(utterances: list[list[int | str]], min_duration: int = 1000,
                              max_duration: int = 120000) -> list[list[list[int | str]]]:
    """Create variable-length combinations of utterances to make samples whose durations vary between 1 s and 2 min

    Args:
        utterances (list[list[int | str]]): The read tsv file containing the transcriptions of the audio
        min_duration (int, optional): Minimum duration of a sample in milliseconds. Defaults to 1000.
        max_duration (int, optional): Maximum duration of a sample in milliseconds. Defaults to 120000.

    Returns:
        list[list[list[int | str]]]: The list of created samples
    """
    samples = []
    current_slice = []
    current_duration = 0

    i = 0
    while i < len(utterances):
        utterance_start, utterance_end = utterances[i][:2]
        utterance_duration = utterance_end - utterance_start

        # If the current slice stays under the max duration, add the utterance to this sample
        if current_duration + utterance_duration <= max_duration:
            current_slice.append(utterances[i])
            current_duration += utterance_duration
            i += 1
        else:
            # Save the current sample and reset for a new one
            # (the current utterance is retried on the next iteration)
            if current_slice:
                samples.append(current_slice)
                current_slice = []
                current_duration = 0
            else:
                # A single utterance longer than max_duration becomes its own sample,
                # otherwise the loop would never advance
                samples.append([utterances[i]])
                i += 1

        # Randomly decide whether to end the current sample: either a ~1/3 chance,
        # or the slice reaching a random utterance-count threshold
        if current_duration >= min_duration:
            if random.choice([True, False, False]) or len(current_slice) >= random.randint(1, 20):
                samples.append(current_slice)
                current_slice = []
                current_duration = 0

    # Add the final slice if it is not empty
    if current_slice:
        samples.append(current_slice)

    return samples

# Function to create and save the audio samples for a specific list of samples
def slice_and_save_audios(samples: list[list[list[int | str]]], griot_id: str,
                          data_dir: str, audio_dir_path: str) -> list[list[float | str]]:
    """Slice and save the audio samples created for a specific 1-hour recording

    Args:
        samples (list[list[list[int | str]]]): The samples created with the function "create_var_length_samples"
        griot_id (str): The ID of the griot in the recording (e.g. griots_r17)
        data_dir (str): The directory containing all the data.
        audio_dir_path (str): The directory to save the sliced audios in.

    Returns:
        list[list[float | str]]: A list version of the manifests (e.g. [[audiofile_path, duration, bambara, translation], ...])
    """
    wav_files_paths = glob.glob(f'{data_dir}/{griot_id}/*.wav')
    griot_recording = read_audios(glob_paths=wav_files_paths)
    # A list to store only the data needed to create the manifests
    list_manifests = []

    for sample in samples:
        start = sample[0][0]
        end = sample[-1][1]
        duration = (end - start) / 1000  # in seconds
        # Flag audios longer than 100 seconds
        more_than_100s = " ###" if duration >= 100 else ""

        # Get the transcriptions and translations of the utterances composing the sample
        transcriptions, translations = [utt[2] for utt in sample], [utt[3] for utt in sample]
        transcription = " ".join(transcriptions)
        translation = " ".join(translations)

        # Create the sample wav file and save it
        audio_file_path = f"{audio_dir_path}/{griot_id}-{start}-{end}.wav"
        griot_recording[start:end].export(out_f=audio_file_path, format="wav")
        print(f"Sample {griot_id}-{start}-{end} saved in {audio_file_path}{more_than_100s}")

        # Add the sample to the manifest list
        list_manifests.append([audio_file_path, duration, transcription, translation])
    return list_manifests

# A function to shuffle and split the samples
def shuffle_and_split(dataset: list[list[float | str]],
                      test: int | float = 0.15) -> tuple[list[list[float | str]], list[list[float | str]]]:
    """Shuffle and split the whole dataset

    Args:
        dataset (list[list[float | str]]): The combined list of all the list manifests returned by "slice_and_save_audios"
        test (int | float, optional): The number of samples to put in the test set, or the fraction of the whole dataset to use as the test set. Defaults to 0.15.

    Returns:
        tuple[list[list[float | str]], list[list[float | str]]]: The train and test set samples, returned separately
    """
    random.shuffle(dataset)
    if isinstance(test, float):
        test = int(test * len(dataset))
    test_set_samples = dataset[0:test]
    train_set_samples = dataset[test:]
    return train_set_samples, test_set_samples

# A function to create the audio sample files and manifests
def create_manifest(dataset_split: list[list[float | str]], split_name: str,
                    dir_path: str) -> None:
    """Create manifest files

    Args:
        dataset_split (list[list[float | str]]): Split of the dataset to create a manifest for
        split_name (str): Name of the split
        dir_path (str): The directory to save the new data manifest in
    """
    # Ensure the directories for the manifests and audios exist
    os.makedirs(f'{dir_path}/manifests', exist_ok=True)
    os.makedirs(f'{dir_path}/french-manifests', exist_ok=True)
    os.makedirs(f'{dir_path}/audios/{split_name}', exist_ok=True)

    # Define the manifest file paths
    manifest_path = f'{dir_path}/manifests/{split_name}_manifest.json'
    french_manifest_path = f'{dir_path}/french-manifests/{split_name}_french_manifest.json'
    audio_dir_path = f'{dir_path}/audios/{split_name}'

    with open(manifest_path, 'w', encoding="utf-8") as manifest_file, open(french_manifest_path, 'w', encoding="utf-8") as french_file:
        for sample in dataset_split:
            # Move the audio sample file into the corresponding split directory
            new_audio_path = f'{audio_dir_path}/{sample[0].split("/")[-1]}'
            shutil.move(src=sample[0], dst=new_audio_path)

            # Prepare the manifest lines
            manifest_line = {
                "audio_filepath": os.path.relpath(new_audio_path),
                "duration": sample[1],
                "text": sample[2]  # The Bambara transcription goes in the text field
            }

            french_manifest_line = {
                "audio_filepath": os.path.relpath(new_audio_path),
                "duration": sample[1],
                "text": sample[3]  # The French translation goes in the text field
            }

            # Write the manifest files
            manifest_file.write(json.dumps(manifest_line) + '\n')
            french_file.write(json.dumps(french_manifest_line) + '\n')
    print(f"{split_name} manifest files have been created successfully!\nThe corresponding audio files have been moved to {audio_dir_path}")

if __name__ == "__main__":
    data_path = sys.argv[1]
    manifest_dir = sys.argv[2]
    tsv_dir = f'{data_path}/aligned-transcriptions'

    # Get all the revised transcription files in .tsv format
    tsv_paths = glob.glob(f'{tsv_dir}/*.tsv')
    # Ensure the directory for the sliced audios exists before exporting into it
    os.makedirs(f'{manifest_dir}/audios', exist_ok=True)
    # List to store the list manifests per griot
    final_list_manifest = []
    for tsv_file in tsv_paths:
        id_griot = tsv_file.split("/")[-1][:-4]
        griot_utterances = read_tsv(tsv_file_path=tsv_file)
        # Get the samples (each can be made of one or more utterances)
        griot_samples = create_var_length_samples(utterances=griot_utterances)
        list_manifest = slice_and_save_audios(samples=griot_samples, griot_id=id_griot,
                                              data_dir=data_path, audio_dir_path=f'{manifest_dir}/audios')
        final_list_manifest.append(list_manifest)
    # Get a single list manifest for all the samples
    final_list_manifest = sum(final_list_manifest, start=[])
    # Shuffle and split the final list of all the sample manifests
    train_set, test_set = shuffle_and_split(dataset=final_list_manifest, test=0.15)  # Use 15% of the dataset for test
    print(f'len(train_set) == {len(train_set)} and len(test_set) == {len(test_set)}')

    create_manifest(dataset_split=train_set, split_name="train", dir_path=manifest_dir)
    create_manifest(dataset_split=test_set, split_name="test", dir_path=manifest_dir)
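
Each line of the generated manifest files is a standalone JSON object in the audio_filepath / duration / text layout that NeMo expects, with the Bambara transcription in manifests/ and the French translation in french-manifests/. A sketch of an invocation and of a single resulting manifest line (the paths and values below are illustrative, not taken from the dataset):

python scripts/create_data_manifest.py path/to/data path/to/manifest-dir

# One line of path/to/manifest-dir/manifests/train_manifest.json:
{"audio_filepath": "path/to/manifest-dir/audios/train/griots_r17-1200-95400.wav", "duration": 94.2, "text": "..."}

Note that the sliced audios are first exported under <manifest_dir>/audios and then moved into audios/train or audios/test as each manifest is written.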