|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""TODO: Add a description here.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
from typing import Dict, List, Mapping, Optional, Set, Sequence, Tuple, Union |
|
|
|
import numpy as np |
|
import pandas as pd |
|
|
|
import datasets |
|
import skimage |
|
import SimpleITK as sitk |
|
|
|
|
|
def import_csv_data(filepath: str) -> List[Dict[str, str]]:
    """Read a CSV file and return every row as a dict keyed by the header."""
    with open(filepath, encoding='utf-8') as csv_file:
        return list(csv.DictReader(csv_file))
|
|
|
def standardize_3D_image(
    image: np.ndarray,
    resize_shape: Tuple[int, int, int]
) -> np.ndarray:
    """Reorder a 3D volume so its axes are (height, width, channels), then
    resize it to `resize_shape`.

    A volume whose first axis is smaller than its last is treated as
    channels-first (e.g. slices-first output of SimpleITK) and has its
    leading axis rotated to the end before resizing.
    """
    channels_first = image.shape[0] < image.shape[2]
    if channels_first:
        # Move the leading (slice/channel) axis to the end: (c, h, w) -> (h, w, c).
        image = np.transpose(image, axes=[1, 2, 0])
    return skimage.transform.resize(image, resize_shape)
|
|
|
|
|
|
|
N_PATIENTS = 218 |
|
MIN_IVD = 0 |
|
MAX_IVD = 9 |
|
DEFAULT_SCAN_TYPES = ['t1', 't2', 't2_SPACE'] |
|
DEFAULT_RESIZE = (512, 512, 30) |
|
|
|
|
|
|
|
_CITATION = """\ |
|
@InProceedings{huggingface:dataset, |
|
title = {A great new dataset}, |
|
author={huggingface, Inc. |
|
}, |
|
year={2020} |
|
} |
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
This new dataset is designed to solve this great NLP task and is crafted with a lot of care. |
|
""" |
|
|
|
_HOMEPAGE = "https://zenodo.org/records/10159290" |
|
|
|
_LICENSE = """Creative Commons Attribution 4.0 International License \ |
|
(https://creativecommons.org/licenses/by/4.0/legalcode)""" |
|
|
|
|
|
|
|
|
|
_URLS = { |
|
"images":"https://zenodo.org/records/10159290/files/images.zip", |
|
"masks":"https://zenodo.org/records/10159290/files/masks.zip", |
|
"overview":"https://zenodo.org/records/10159290/files/overview.csv", |
|
"gradings":"https://zenodo.org/records/10159290/files/radiological_gradings.csv", |
|
} |
|
|
|
class CustomBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig that carries SPIDER-specific options.

    Extends `datasets.BuilderConfig` with:
        scan_types: sagittal scan types to include in examples
            (defaults to DEFAULT_SCAN_TYPES)
        resize_shape: (height, width, channels) to which volumes are resized
    """

    def __init__(
        self,
        name: str = 'default',
        version: str = '0.0.0',
        data_dir: Optional[str] = None,
        data_files: Optional[Union[str, Sequence, Mapping]] = None,
        description: Optional[str] = None,
        scan_types: Optional[List[str]] = None,
        resize_shape: Tuple[int, int, int] = DEFAULT_RESIZE,
    ):
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        # Copy the default instead of aliasing the shared module-level list
        # (a mutable default argument would be shared across all configs).
        self.scan_types = (
            list(scan_types) if scan_types is not None else list(DEFAULT_SCAN_TYPES)
        )
        self.resize_shape = resize_shape
|
|
|
|
|
class SPIDER(datasets.GeneratorBasedBuilder):
    """SPIDER lumbar spine MRI dataset builder.

    Yields one example per (patient, scan type): the resized image volume,
    its segmentation mask, DICOM-derived scan metadata, and the patient's
    per-disc radiological gradings.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIG_CLASS = CustomBuilderConfig

    def __init__(
        self,
        *args,
        scan_types: Optional[List[str]] = None,
        resize_shape: Tuple[int, int, int] = DEFAULT_RESIZE,
        **kwargs,
    ):
        """
        Args
            scan_types: sagittal scan types to include in examples;
                defaults to DEFAULT_SCAN_TYPES
            resize_shape: (height, width, channels) target shape for volumes
        """
        super().__init__(*args, **kwargs)
        # Copy the default instead of aliasing the shared module-level list
        # (a mutable default argument would be shared across all instances).
        self.scan_types = (
            list(scan_types) if scan_types is not None else list(DEFAULT_SCAN_TYPES)
        )
        self.resize_shape = resize_shape

    def _info(self):
        """Return the `datasets.DatasetInfo` describing features and typings."""
        # NOTE(review): this reads resize_shape from self.config while
        # _split_generators reads self.resize_shape — confirm both are kept
        # in sync by callers.
        image_size = self.config.resize_shape
        features = datasets.Features({
            "patient_id": datasets.Value("string"),
            "scan_type": datasets.Value("string"),
            "image_array": datasets.Array3D(shape=image_size, dtype='float64'),
            "mask_array": datasets.Array3D(shape=image_size, dtype='float64'),
            # DICOM-derived scan metadata, all kept as raw strings.
            "metadata": {
                "num_vertebrae": datasets.Value(dtype="string"),
                "num_discs": datasets.Value(dtype="string"),
                "sex": datasets.Value(dtype="string"),
                "birth_date": datasets.Value(dtype="string"),
                "AngioFlag": datasets.Value(dtype="string"),
                "BodyPartExamined": datasets.Value(dtype="string"),
                "DeviceSerialNumber": datasets.Value(dtype="string"),
                "EchoNumbers": datasets.Value(dtype="string"),
                "EchoTime": datasets.Value(dtype="string"),
                "EchoTrainLength": datasets.Value(dtype="string"),
                "FlipAngle": datasets.Value(dtype="string"),
                "ImagedNucleus": datasets.Value(dtype="string"),
                "ImagingFrequency": datasets.Value(dtype="string"),
                "InPlanePhaseEncodingDirection": datasets.Value(dtype="string"),
                "MRAcquisitionType": datasets.Value(dtype="string"),
                "MagneticFieldStrength": datasets.Value(dtype="string"),
                "Manufacturer": datasets.Value(dtype="string"),
                "ManufacturerModelName": datasets.Value(dtype="string"),
                "NumberOfPhaseEncodingSteps": datasets.Value(dtype="string"),
                "PercentPhaseFieldOfView": datasets.Value(dtype="string"),
                "PercentSampling": datasets.Value(dtype="string"),
                "PhotometricInterpretation": datasets.Value(dtype="string"),
                "PixelBandwidth": datasets.Value(dtype="string"),
                "PixelSpacing": datasets.Value(dtype="string"),
                "RepetitionTime": datasets.Value(dtype="string"),
                "SAR": datasets.Value(dtype="string"),
                "SamplesPerPixel": datasets.Value(dtype="string"),
                "ScanningSequence": datasets.Value(dtype="string"),
                "SequenceName": datasets.Value(dtype="string"),
                "SeriesDescription": datasets.Value(dtype="string"),
                "SliceThickness": datasets.Value(dtype="string"),
                "SoftwareVersions": datasets.Value(dtype="string"),
                "SpacingBetweenSlices": datasets.Value(dtype="string"),
                "SpecificCharacterSet": datasets.Value(dtype="string"),
                "TransmitCoilName": datasets.Value(dtype="string"),
                "WindowCenter": datasets.Value(dtype="string"),
                "WindowWidth": datasets.Value(dtype="string"),
            },
            # One entry per IVD label (padded to MAX_IVD - MIN_IVD + 1 slots).
            "rad_gradings": {
                "IVD label": datasets.Sequence(datasets.Value("string")),
                "Modic": datasets.Sequence(datasets.Value("string")),
                "UP endplate": datasets.Sequence(datasets.Value("string")),
                "LOW endplate": datasets.Sequence(datasets.Value("string")),
                "Spondylolisthesis": datasets.Sequence(datasets.Value("string")),
                "Disc herniation": datasets.Sequence(datasets.Value("string")),
                "Disc narrowing": datasets.Sequence(datasets.Value("string")),
                "Disc bulging": datasets.Sequence(datasets.Value("string")),
                "Pfirrman grade": datasets.Sequence(datasets.Value("string")),
            }
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the data and define the train/validation/test splits."""
        paths_dict = dl_manager.download_and_extract(_URLS)
        # Map each datasets split to the split keyword _generate_examples expects.
        split_specs = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "validate"),
            (datasets.Split.TEST, "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": split_key,
                    "scan_types": self.scan_types,
                    "resize_shape": self.resize_shape,
                },
            )
            for split_name, split_key in split_specs
        ]

    def _generate_examples(
        self,
        paths_dict: Dict[str, str],
        split: str,
        scan_types: List[str],
        resize_shape: Tuple[int, int, int],
        validate_share: float = 0.3,
        test_share: float = 0.2,
        random_seed: int = 9999,
    ) -> Tuple[str, Dict]:
        """
        Yield (key, example) tuples for the requested split. The `key` is the
        unique patient-scan ID; the example dict matches the features declared
        in _info().

        Args
            paths_dict: mapping of data element name to temporary file location
            split: specify training, validation, or testing set;
                options = 'train', 'validate', OR 'test'
            scan_types: list of sagittal scan types to use in examples;
                options = ['t1', 't2', 't2_SPACE']
            resize_shape: (height, width, channels) target shape for volumes
            validate_share: float indicating share of data to use for
                validation; must be in range (0.0, 1.0)
            test_share: float indicating share of data to use for testing;
                must be in range (0.0, 1.0); note that training share is
                calculated as (1 - validate_share - test_share)
            random_seed: seed for the patient partition and example shuffle

        Yields
            Tuple of (unique patient-scan ID, dict of example data)
        """
        train_share = (1.0 - validate_share - test_share)
        # Seed NumPy so the patient partition is identical for every split.
        np.random.seed(int(random_seed))

        # ---- Validate arguments. ----
        for item in scan_types:
            if item not in ['t1', 't2', 't2_SPACE']:
                # BUG FIX: this message previously lacked the f-prefix, so the
                # literal text "{item}" was raised instead of the scan type.
                raise ValueError(
                    f'Scan type "{item}" not recognized as valid scan type. '
                    'Verify scan type argument.'
                )
        if split not in ['train', 'validate', 'test']:
            raise ValueError(
                f'Split argument "{split}" is not recognized. '
                'Please enter one of ["train", "validate", "test"]'
            )
        if train_share <= 0.0:
            raise ValueError(
                'Training share is calculated as (1 - validate_share - test_share) '
                'and must be greater than 0. Current calculated value is '
                f'{round(train_share, 3)}. Adjust validate_share and/or '
                'test_share parameters.'
            )
        if validate_share > 1.0 or validate_share < 0.0:
            raise ValueError(
                'Validation share must be between (0, 1). Current value is '
                f'{validate_share}.'
            )
        if test_share > 1.0 or test_share < 0.0:
            raise ValueError(
                'Testing share must be between (0, 1). Current value is '
                f'{test_share}.'
            )

        # ---- Randomly partition patient IDs (1..N_PATIENTS) into splits. ----
        partition = np.random.choice(
            ['train', 'dev', 'test'],
            p=[train_share, validate_share, test_share],
            size=N_PATIENTS,
        )
        patient_ids = (np.arange(N_PATIENTS) + 1)
        train_ids = set(patient_ids[partition == 'train'])
        validate_ids = set(patient_ids[partition == 'dev'])
        test_ids = set(patient_ids[partition == 'test'])
        assert len(train_ids.union(validate_ids, test_ids)) == N_PATIENTS

        # ---- Load the CSV metadata. ----
        overview_data = import_csv_data(paths_dict['overview'])
        grades_data = import_csv_data(paths_dict['gradings'])

        # Index overview rows by scan file name, dropping bookkeeping columns.
        exclude_vars = ['new_file_name', 'subset']
        overview_dict = {}
        for item in overview_data:
            key = item['new_file_name']
            overview_dict[key] = {
                k: v for k, v in item.items() if k not in exclude_vars
            }

        # ---- Pad radiological gradings so every patient has exactly one row
        # per IVD label in [MIN_IVD, MAX_IVD]; missing gradings become "". ----
        grades_dict = {}
        for patient_id in patient_ids:
            patient_grades = [
                x for x in grades_data if x['Patient'] == str(patient_id)
            ]
            IVD_values = [x['IVD label'] for x in patient_grades]
            for i in range(MIN_IVD, MAX_IVD + 1):
                if str(i) not in IVD_values:
                    patient_grades.append({
                        "Patient": f"{patient_id}",
                        "IVD label": f"{i}",
                        "Modic": "",
                        "UP endplate": "",
                        "LOW endplate": "",
                        "Spondylolisthesis": "",
                        "Disc herniation": "",
                        "Disc narrowing": "",
                        "Disc bulging": "",
                        "Pfirrman grade": "",
                    })
            assert len(patient_grades) == (MAX_IVD - MIN_IVD + 1), \
                "Radiological gradings not padded correctly"

            # Column-wise lists sorted by IVD label, excluding the patient ID.
            df = (
                pd.DataFrame(patient_grades)
                .sort_values("IVD label")
                .reset_index(drop=True)
            )
            grades_dict[str(patient_id)] = {
                col: df[col].tolist() for col in df.columns
                if col not in ['Patient']
            }

        # ---- Enumerate the extracted image and mask files. ----
        image_files = [
            file for file in os.listdir(os.path.join(paths_dict['images'], 'images'))
            if file.endswith('.mha')
        ]
        assert len(image_files) > 0, "No image files found--check directory path."

        mask_files = [
            file for file in os.listdir(os.path.join(paths_dict['masks'], 'masks'))
            if file.endswith('.mha')
        ]
        assert len(mask_files) > 0, "No mask files found--check directory path."

        # Keep only the requested scan types.
        image_files = [
            file for file in image_files
            if any(scan_type in file for scan_type in scan_types)
        ]
        mask_files = [
            file for file in mask_files
            if any(scan_type in file for scan_type in scan_types)
        ]

        # Subset to the patients belonging to the requested split.
        if split == 'train':
            subset_ids = train_ids
        elif split == 'validate':
            subset_ids = validate_ids
        elif split == 'test':
            subset_ids = test_ids

        image_files = [
            file for file in image_files
            if any(str(patient_id) == file.split('_')[0] for patient_id in subset_ids)
        ]
        mask_files = [
            file for file in mask_files
            if any(str(patient_id) == file.split('_')[0] for patient_id in subset_ids)
        ]
        assert len(image_files) == len(mask_files), \
            "The number of image files does not match the number of mask " \
            "files--verify subsetting operation."

        # Shuffle so examples are not ordered by file name.
        np.random.shuffle(image_files)

        # ---- Build and yield one example per image file. ----
        for example in image_files:
            # e.g. "12_t2_SPACE.mha" -> scan_id "12_t2_SPACE",
            # patient_id "12", scan_type "t2_SPACE".
            scan_id = example.replace('.mha', '')
            patient_id = scan_id.split('_')[0]
            scan_type = '_'.join(scan_id.split('_')[1:])

            # Read and standardize the image volume.
            image_path = os.path.join(paths_dict['images'], 'images', example)
            image = sitk.ReadImage(image_path)
            image_array = standardize_3D_image(
                sitk.GetArrayFromImage(image), resize_shape
            )

            # Read and standardize the matching segmentation mask
            # (masks share the image's file name).
            mask_path = os.path.join(paths_dict['masks'], 'masks', example)
            mask = sitk.ReadImage(mask_path)
            mask_array = standardize_3D_image(
                sitk.GetArrayFromImage(mask), resize_shape
            )

            # Look up scan metadata and the patient's radiological gradings.
            image_overview = overview_dict[scan_id]
            patient_grades_dict = grades_dict[patient_id]

            # BUG FIX: dropped the 'raw_image'/'raw_mask' keys that always
            # held None — they are not declared in _info()'s features, so the
            # yielded dict did not match the dataset schema.
            return_dict = {
                'patient_id': patient_id,
                'scan_type': scan_type,
                'image_array': image_array,
                'mask_array': mask_array,
                'metadata': image_overview,
                'rad_gradings': patient_grades_dict,
            }

            yield scan_id, return_dict
|
|