PerSets committed on
Commit 5dc4acb · 1 Parent(s): 9f0227f

fix: new approach

__pycache__/asr_dataset.cpython-312.pyc ADDED
Binary file (4.81 kB)
 
asr_dataset.py CHANGED
@@ -22,6 +22,35 @@ _CITATION = """
22
  Use this repo info/link for citation.
23
  """
24
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
  class ASRDataset(datasets.GeneratorBasedBuilder):
27
  """ASR dataset with audio files stored in tar archives."""
@@ -35,7 +64,7 @@ class ASRDataset(datasets.GeneratorBasedBuilder):
35
  "file_name": datasets.Value("string"),
36
  "audio": datasets.Audio(sampling_rate=16000),
37
  "sentence": datasets.Value("string"),
38
- "tar_file": datasets.Value("string"),
39
  }),
40
  supervised_keys=None,
41
  citation=_CITATION,
@@ -43,13 +72,21 @@ class ASRDataset(datasets.GeneratorBasedBuilder):
43
 
44
  def _split_generators(self, dl_manager):
45
  """Returns SplitGenerators with added error handling."""
 
 
 
 
 
46
  try:
47
  return [
48
  datasets.SplitGenerator(
49
  name=datasets.Split.TRAIN,
50
  gen_kwargs={
51
- "split": "train",
52
- "data_dir": self.config.data_dir,
 
 
 
53
  },
54
  ),
55
  ]
@@ -58,102 +95,27 @@ class ASRDataset(datasets.GeneratorBasedBuilder):
58
  logger.error(traceback.format_exc())
59
  raise
60
 
61
- def _prepare_metadata(self, data_dir: str) -> pd.DataFrame:
62
- """Prepare metadata with comprehensive error handling."""
63
- try:
64
- logger.info("Preparing metadata with tar file information...")
65
-
66
- # Read metadata
67
- metadata_path = os.path.join(data_dir, "metadata.csv")
68
-
69
- # Add error handling for metadata reading
70
- try:
71
- df = pd.read_csv(metadata_path, sep='\t', names=['file_name', 'sentence'], encoding="utf-8")
72
- except Exception as read_error:
73
- logger.error(f"Failed to read metadata file: {read_error}")
74
- raise
75
-
76
- # Validate dataframe
77
- if df.empty:
78
- raise ValueError("Metadata file is empty")
79
-
80
- # Add tar_file column
81
- clips_dir = os.path.join(data_dir, "clips")
82
- tar_files = [f for f in os.listdir(clips_dir) if f.endswith('.tar')]
83
-
84
- # Initialize tar_file column
85
- df['tar_file'] = None
86
-
87
- # Find which tar file contains each audio file
88
- for tar_file in tar_files:
89
- tar_path = os.path.join(clips_dir, tar_file)
90
- try:
91
- with tarfile.open(tar_path, 'r') as tar:
92
- file_list = tar.getnames()
93
- mask = df['file_name'].isin(file_list)
94
- df.loc[mask, 'tar_file'] = tar_file
95
- except Exception as tar_error:
96
- logger.warning(f"Error processing tar file {tar_file}: {tar_error}")
97
-
98
- # Remove entries where tar_file is None (file not found in any tar)
99
- df = df.dropna(subset=['tar_file'])
100
-
101
- # Additional logging
102
- logger.info(f"Total entries after tar file mapping: {len(df)}")
103
-
104
- return df
105
-
106
- except Exception as e:
107
- logger.error(f"Unexpected error in _prepare_metadata: {e}")
108
- logger.error(traceback.format_exc())
109
- raise
110
-
111
- def _generate_examples(self, split, data_dir):
112
- """Yields examples with comprehensive error handling."""
113
- try:
114
- # Prepare metadata with tar file information
115
- df = self._prepare_metadata(data_dir)
116
- clips_dir = os.path.join(data_dir, "clips")
117
-
118
- idx = 0
119
- # Group by tar_file for efficient processing
120
- for tar_file, group in df.groupby('tar_file'):
121
- tar_path = os.path.join(clips_dir, tar_file)
122
- logger.info(f"Processing tar file: {tar_file}")
123
-
124
- try:
125
- with tarfile.open(tar_path, 'r') as tar:
126
- for _, row in group.iterrows():
127
- try:
128
- # More robust file extraction
129
- member = tar.getmember(row['file_name'])
130
- f = tar.extractfile(member)
131
- if f is None:
132
- logger.warning(f"Could not extract file: {row['file_name']}")
133
- continue
134
-
135
- audio_bytes = f.read()
136
- f.close()
137
-
138
- yield idx, {
139
- "file_name": row['file_name'],
140
- "audio": {"path": f"{tar_path}::{row['file_name']}", "bytes": audio_bytes},
141
- "sentence": row['sentence'],
142
- "tar_file": tar_file,
143
- }
144
- idx += 1
145
-
146
- except Exception as file_error:
147
- logger.warning(f"Error processing file {row['file_name']}: {file_error}")
148
- continue
149
-
150
- except Exception as tar_error:
151
- logger.error(f"Error processing tar file {tar_file}: {tar_error}")
152
- continue
153
-
154
- logger.info(f"Total examples generated: {idx}")
155
-
156
- except Exception as e:
157
- logger.error(f"Unexpected error in _generate_examples: {e}")
158
- logger.error(traceback.format_exc())
159
- raise
 
22
  Use this repo info/link for citation.
23
  """
24
 
25
+ _LICENSE = "MIT"
26
+
27
+ _DATA_URL = [
28
+ "clips/clips_001.tar",
29
+ "clips/clips_002.tar",
30
+ "clips/clips_003.tar",
31
+ "clips/clips_004.tar",
32
+ "clips/clips_005.tar",
33
+ "clips/clips_006.tar",
34
+ "clips/clips_007.tar",
35
+ "clips/clips_008.tar",
36
+ "clips/clips_009.tar",
37
+ "clips/clips_010.tar",
38
+ "clips/clips_011.tar",
39
+ "clips/clips_012.tar",
40
+ "clips/clips_013.tar",
41
+ "clips/clips_014.tar",
42
+ "clips/clips_015.tar",
43
+ "clips/clips_016.tar",
44
+ "clips/clips_017.tar",
45
+ "clips/clips_018.tar",
46
+ "clips/clips_019.tar",
47
+ "clips/clips_020.tar",
48
+ "clips/clips_021.tar",
49
+ ]
50
+
51
+ _PROMPTS_URLS = {
52
+ "train": "clips/metadata.csv"
53
+ }
54
 
55
  class ASRDataset(datasets.GeneratorBasedBuilder):
56
  """ASR dataset with audio files stored in tar archives."""
 
64
  "file_name": datasets.Value("string"),
65
  "audio": datasets.Audio(sampling_rate=16000),
66
  "sentence": datasets.Value("string"),
67
+ #"tar_file": datasets.Value("string"),
68
  }),
69
  supervised_keys=None,
70
  citation=_CITATION,
 
72
 
73
  def _split_generators(self, dl_manager):
74
  """Returns SplitGenerators with added error handling."""
75
+
76
+ prompts_paths = dl_manager.download_and_extract(_PROMPTS_URLS)
77
+ archive = dl_manager.download(_DATA_URL)
78
+ train_dir = "clips"
79
+
80
  try:
81
  return [
82
  datasets.SplitGenerator(
83
  name=datasets.Split.TRAIN,
84
  gen_kwargs={
85
+ #"split": "train",
86
+ #"data_dir": self.config.data_dir,
87
+ "prompts_path": prompts_paths["train"],
88
+ "path_to_clips": train_dir,
89
+ "audio_files": dl_manager.iter_archive(archive)
90
  },
91
  ),
92
  ]
 
95
  logger.error(traceback.format_exc())
96
  raise
97
 
98
+ def _generate_examples(self, prompts_path, path_to_clips, audio_files):
99
+ """Yields examples as (key, example) tuples."""
100
+ examples = {}
101
+ metadata
102
+ with open(prompts_path, encoding="utf-8") as f:
103
+ for row in f:
104
+ data = row.strip().split("\t", 1)
105
+ file_name = data[0].strip()
106
+ audio_path = "/".join([path_to_clips, file_name])
107
+ examples[audio_path] = {
108
+ "path": audio_path,
109
+ "sentence": data[1],
110
+ }
111
+ inside_clips_dir = False
112
+ id_ = 0
113
+ for path, f in audio_files:
114
+ if path.startswith(path_to_clips):
115
+ inside_clips_dir = True
116
+ if path in examples:
117
+ audio = {"path": path, "bytes": f.read()}
118
+ yield id_, {**examples[path], "audio": audio}
119
+ id_ += 1
120
+ elif inside_clips_dir:
121
+ break
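For context, a minimal standalone sketch of the consumption pattern the rewritten _generate_examples relies on: dl_manager.iter_archive(...) yields (path inside the archive, file object) pairs, and clips/metadata.csv is read as tab-separated "file_name<TAB>sentence" rows. Everything below is illustrative only; the in-memory tar, the sample file names, and the sentences are made up, and fake_iter_archive merely mimics the documented DownloadManager.iter_archive behaviour rather than calling it.

# Standalone sketch of the (path, file_object) pattern used by the new approach.
# The archive contents and metadata rows are made-up placeholders.
import io
import tarfile

# Build a tiny in-memory tar that stands in for one of the clips/clips_*.tar archives.
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tar:
    for name in ("clips/sample_0001.wav", "clips/sample_0002.wav"):
        payload = b"\x00" * 16  # placeholder bytes instead of real audio
        info = tarfile.TarInfo(name=name)
        info.size = len(payload)
        tar.addfile(info, io.BytesIO(payload))
buf.seek(0)

# Tab-separated metadata, matching the row.strip().split("\t", 1) parsing in the diff.
metadata_rows = "sample_0001.wav\tfirst sentence\nsample_0002.wav\tsecond sentence"

examples = {}
for row in metadata_rows.splitlines():
    file_name, sentence = row.split("\t", 1)
    audio_path = "/".join(["clips", file_name])
    examples[audio_path] = {"path": audio_path, "sentence": sentence}

def fake_iter_archive(fileobj):
    """Mimic DownloadManager.iter_archive: yield (path within archive, file object)."""
    with tarfile.open(fileobj=fileobj) as tar:
        for member in tar:
            f = tar.extractfile(member)
            if f is not None:
                yield member.name, f

id_ = 0
for path, f in fake_iter_archive(buf):
    if path in examples:
        print(id_, examples[path]["sentence"], len(f.read()), "bytes")
        id_ += 1

The same pairing of metadata keys against archive paths is what lets the loader consume dl_manager.iter_archive sequentially, so no tar member has to be extracted to disk.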