jherng committed
Commit f3e0cd7 · 1 Parent(s): 32ad42d

Update rsna-2023-abdominal-trauma-detection.py

rsna-2023-abdominal-trauma-detection.py CHANGED
@@ -156,27 +156,21 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
         # segmentation: 206 segmentations and the relevant imgs, train_series_meta.csv, train_dicom_tags.parquet
         # classification: 4711 all imgs, train.csv, train_series_meta.csv, train_dicom_tags.parquet
         # classification-with-mask: 206 segmentations and the relevant imgs, train.csv, train_series_meta.csv, train_dicom_tags.parquet
-        series_meta_file = dl_manager.download_and_extract(
-            urllib.parse.urljoin(_URL, "train_series_meta.csv")
-        )
-        dicom_tags_file = dl_manager.download_and_extract(
-            urllib.parse.urljoin(_URL, "train_dicom_tags.parquet")
-        )
-        labels_file = (
-            dl_manager.download_and_extract(urllib.parse.urljoin(_URL, "train.csv"))
-            if self.config.name != "segmentation"
-            else None
+        self.series_meta_df = pd.read_csv(
+            dl_manager.download_and_extract(
+                urllib.parse.urljoin(_URL, "train_series_meta.csv")
+            )
         )
-
-        series_meta_df = pd.read_csv(series_meta_file)
         if (
             self.config.name == "classification-with-mask"
             or self.config.name == "segmentation"
         ):
-            series_meta_df = series_meta_df.loc[series_meta_df["has_segmentation"] == 1]
+            self.series_meta_df = self.series_meta_df.loc[
+                self.series_meta_df["has_segmentation"] == 1
+            ]
 
             train_series_meta_df, test_series_meta_df = train_test_split(
-                series_meta_df, test_size=0.1, random_state=42, shuffle=True
+                self.series_meta_df, test_size=0.1, random_state=42, shuffle=True
             )
 
             train_img_files = dl_manager.download(
@@ -206,7 +200,7 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
                 ).tolist()
             )
             test_seg_files = dl_manager.download(
-                train_series_meta_df.apply(
+                test_series_meta_df.apply(
                     lambda x: urllib.parse.urljoin(
                         _URL, f"segmentations/{int(x['series_id'])}.nii.gz"
                     ),
@@ -215,7 +209,7 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
             )
         else:
             train_series_meta_df, test_series_meta_df = train_test_split(
-                series_meta_df, test_size=0.1, random_state=42, shuffle=True
+                self.series_meta_df, test_size=0.1, random_state=42, shuffle=True
             )
 
             train_img_files = dl_manager.download(
@@ -239,44 +233,11 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
             train_seg_files = None
             test_seg_files = None
 
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "series_ids": train_series_meta_df["series_id"].tolist(),
-                    "dicom_tags_file": dicom_tags_file,
-                    "series_meta_file": series_meta_file,
-                    "labels_file": labels_file,
-                    "img_files": train_img_files,
-                    "seg_files": train_seg_files,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "series_ids": test_series_meta_df["series_id"].tolist(),
-                    "dicom_tags_file": dicom_tags_file,
-                    "series_meta_file": series_meta_file,
-                    "labels_file": labels_file,
-                    "img_files": test_img_files,
-                    "seg_files": test_seg_files,
-                },
-            ),
-        ]
+        dicom_tags_file = dl_manager.download_and_extract(
+            urllib.parse.urljoin(_URL, "train_dicom_tags.parquet")
+        )
 
-    def _generate_examples(
-        self,
-        series_ids,
-        dicom_tags_file,
-        series_meta_file,
-        labels_file,
-        img_files,
-        seg_files,
-    ):
-        series_meta_df = pd.read_csv(series_meta_file)
-        dicom_tags_df = datasets.load_dataset("parquet", data_files=dicom_tags_file)[
-            "train"
-        ].to_pandas()[
+        dicom_tags_df = pd.read_parquet(dicom_tags_file)[
             [
                 "SeriesInstanceUID",
                 "PixelRepresentation",
@@ -284,6 +245,17 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
                 "BitsStored",
             ]
         ]
+
+        # dicom_tags_df = datasets.load_dataset("parquet", data_files=dicom_tags_file)[
+        #     "train"
+        # ].to_pandas()[
+        #     [
+        #         "SeriesInstanceUID",
+        #         "PixelRepresentation",
+        #         "BitsAllocated",
+        #         "BitsStored",
+        #     ]
+        # ]
         dicom_tags_df["SeriesID"] = dicom_tags_df["SeriesInstanceUID"].apply(
             lambda x: int(x.split(".")[-1])
         )
@@ -297,19 +269,52 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
                 "BitsStored": "bits_stored",
             }
         )
-        series_meta_df = pd.merge(
-            left=series_meta_df, right=dicom_tags_df, how="inner", on="series_id"
+        self.series_meta_df = pd.merge(
+            left=self.series_meta_df, right=dicom_tags_df, how="inner", on="series_id"
+        )
+
+        labels_file = (
+            dl_manager.download_and_extract(urllib.parse.urljoin(_URL, "train.csv"))
+            if self.config.name != "segmentation"
+            else None
         )
-        labels_df = (
+        self.labels_df = (
             pd.read_csv(labels_file) if self.config.name != "segmentation" else None
         )
 
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "series_ids": train_series_meta_df["series_id"].tolist(),
+                    "img_files": train_img_files,
+                    "seg_files": train_seg_files,
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "series_ids": test_series_meta_df["series_id"].tolist(),
+                    "img_files": test_img_files,
+                    "seg_files": test_seg_files,
+                },
+            ),
+        ]
+
+    def _generate_examples(
+        self,
+        series_ids,
+        img_files,
+        seg_files,
+    ):
         if self.config.name == "segmentation":
             for key, (series_id, img_path, seg_path) in enumerate(
                 zip(series_ids, img_files, seg_files)
             ):
                 series_meta = (
-                    series_meta_df.loc[series_meta_df["series_id"] == series_id]
+                    self.series_meta_df.loc[
+                        self.series_meta_df["series_id"] == series_id
+                    ]
                     .iloc[0]
                     .to_dict()
                 )
@@ -332,13 +337,15 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
                 zip(series_ids, img_files, seg_files)
             ):
                 series_meta = (
-                    series_meta_df.loc[series_meta_df["series_id"] == series_id]
+                    self.series_meta_df.loc[
+                        self.series_meta_df["series_id"] == series_id
+                    ]
                     .iloc[0]
                     .to_dict()
                 )
                 patient_id = series_meta["patient_id"]
                 label_data = (
-                    labels_df.loc[labels_df["patient_id"] == patient_id]
+                    self.labels_df.loc[self.labels_df["patient_id"] == patient_id]
                     .iloc[0]
                     .to_dict()
                 )
@@ -390,13 +397,15 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
         else:
             for key, (series_id, img_path) in enumerate(zip(series_ids, img_files)):
                 series_meta = (
-                    series_meta_df.loc[series_meta_df["series_id"] == series_id]
+                    self.series_meta_df.loc[
+                        self.series_meta_df["series_id"] == series_id
+                    ]
                     .iloc[0]
                     .to_dict()
                 )
                 patient_id = series_meta["patient_id"]
                 label_data = (
-                    labels_df.loc[labels_df["patient_id"] == patient_id]
+                    self.labels_df.loc[self.labels_df["patient_id"] == patient_id]
                    .iloc[0]
                    .to_dict()
                )
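
For readers skimming the diff: besides fixing test_seg_files to be built from test_series_meta_df rather than train_series_meta_df, the change makes _split_generators load and join all metadata once (reading the parquet with pd.read_parquet instead of datasets.load_dataset("parquet", ...)) and cache it on the builder as self.series_meta_df and self.labels_df, so _generate_examples now only receives series ids and file paths. Below is a minimal standalone sketch of that metadata path, not part of the commit; the local file paths and the full column-rename map (only the "BitsStored": "bits_stored" entry is visible in the diff context) are assumptions for illustration.

import pandas as pd

# Assumed local copies of the two metadata files referenced in the diff.
dicom_tags_df = pd.read_parquet("train_dicom_tags.parquet")[
    ["SeriesInstanceUID", "PixelRepresentation", "BitsAllocated", "BitsStored"]
]

# The numeric series id is the last dot-separated component of SeriesInstanceUID.
dicom_tags_df["SeriesID"] = dicom_tags_df["SeriesInstanceUID"].apply(
    lambda x: int(x.split(".")[-1])
)

# Rename map inferred from the surrounding context; only "BitsStored" appears in the diff.
dicom_tags_df = dicom_tags_df.rename(
    columns={
        "SeriesID": "series_id",
        "PixelRepresentation": "pixel_representation",
        "BitsAllocated": "bits_allocated",
        "BitsStored": "bits_stored",
    }
)

# Inner-join the per-series DICOM tags onto the series metadata, as the updated
# _split_generators does once up front instead of re-reading files in _generate_examples.
series_meta_df = pd.read_csv("train_series_meta.csv")
series_meta_df = pd.merge(
    left=series_meta_df, right=dicom_tags_df, how="inner", on="series_id"
)
print(series_meta_df.head())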