Datasets:
Tasks:
Image Classification
Sub-tasks:
multi-label-image-classification
Languages:
English
Size:
100B<n<1T
License:
File size: 6,527 Bytes
(source listing follows)
import pydicom
from PIL import Image
import numpy as np
import io
import datasets
import gdown
import re
import s3fs
import random
# --- Module-level setup -------------------------------------------------
# NOTE(review): these downloads run as a side effect of importing this
# module (before any builder is constructed) — every import hits Google
# Drive. Consider moving them into _split_generators.

# s5cmd manifest listing the S3 series paths for the small "example" config.
example_manifest_url = "https://drive.google.com/uc?id=1JBkQTXeieyN9_6BGdTF_DDlFFyZrGyU6"
example_manifest_file = gdown.download(example_manifest_url, 'manifest_file.s5cmd', quiet = False)
# s5cmd manifest for the complete "full_data" config.
full_manifest_url = "https://drive.google.com/uc?id=1KP6qxcQoPF4MJdEPNwW7J6BlL_sUJ17j"
full_manifest_file = gdown.download(full_manifest_url, 'full_manifest_file.s5cmd', quiet = False)
# Anonymous (unauthenticated) client for the public IDC S3 bucket.
fs = s3fs.S3FileSystem(anon=True)
_DESCRIPTION = "This is the description"
_HOMEPAGE = "https://imaging.datacommons.cancer.gov/"
_LICENSE = "https://fairsharing.org/FAIRsharing.0b5a1d"
_CITATION = "National Cancer Institute Imaging Data Commons (IDC) Collections was accessed on DATE from https://registry.opendata.aws/nci-imaging-data-commons"
class ColonCancerCTDataset(datasets.GeneratorBasedBuilder):
    """Colon-cancer CT images from the NCI Imaging Data Commons.

    Streams DICOM files from the public IDC S3 bucket (via the module-level
    anonymous ``s3fs`` client), converts each slice to a PNG image, and
    yields it together with a subset of the DICOM header metadata.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="example", version=VERSION, description="This is a subset of the full dataset for demonstration purposes"),
        datasets.BuilderConfig(name="full_data", version=VERSION, description="This is the complete dataset"),
    ]

    DEFAULT_CONFIG_NAME = "example"

    def _info(self):
        """Declare the dataset's features and descriptive metadata."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "ImageType": datasets.Sequence(datasets.Value('string')),
                    "StudyDate": datasets.Value('string'),
                    "SeriesDate": datasets.Value('string'),
                    "Manufacturer": datasets.Value('string'),
                    "StudyDescription": datasets.Value('string'),
                    "SeriesDescription": datasets.Value('string'),
                    "PatientSex": datasets.Value('string'),
                    "PatientAge": datasets.Value('string'),
                    "PregnancyStatus": datasets.Value('string'),
                    "BodyPartExamined": datasets.Value('string'),
                }),
            homepage = _HOMEPAGE,
            license = _LICENSE,
            citation = _CITATION
        )

    def _split_generators(self, dl_manager):
        """Enumerate per-file S3 paths from the manifest and split 70/15/15.

        Parses ``cp s3://... .`` lines from the pre-downloaded s5cmd
        manifest, lists every object in each series directory, shuffles the
        flat file list, and partitions it into train/validation/test.
        """
        s3_series_paths = []
        s3_individual_paths = []
        # The manifests are downloaded at module import time by gdown.
        if self.config.name == 'example':
            manifest_file = example_manifest_file
        else:
            manifest_file = full_manifest_file
        with open(manifest_file, 'r') as file:
            for line in file:
                match = re.search(r'cp (s3://[\S]+) .', line)
                if match:
                    # Each manifest entry ends with '/*'; strip it to get the
                    # series directory path.
                    s3_series_paths.append(match.group(1)[:-2])
        for series in s3_series_paths:
            for content in fs.ls(series):
                s3_individual_paths.append(fs.info(content)['Key'])
        # NOTE(review): shuffle is unseeded, so the splits differ run-to-run.
        random.shuffle(s3_individual_paths)
        # 70% train, 15% validation, remainder test.
        train_size = int(0.7 * len(s3_individual_paths))
        val_size = int(0.15 * len(s3_individual_paths))
        train_paths = s3_individual_paths[:train_size]
        val_paths = s3_individual_paths[train_size:train_size + val_size]
        test_paths = s3_individual_paths[train_size + val_size:]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "paths": train_paths,
                    "split": "train"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "paths": val_paths,
                    "split": "dev"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "paths": test_paths,
                    "split": "test"
                },
            ),
        ]

    def _generate_examples(self, paths, split):
        """Yield ``(key, example)`` pairs, one per DICOM file in *paths*.

        Each DICOM is read from S3, converted to an 8-bit PNG, and paired
        with selected header fields coerced to strings.
        """
        for path in paths:
            key = path
            with fs.open(path, 'rb') as f:
                dicom_data = pydicom.dcmread(f)
            pixel_array = dicom_data.pixel_array
            # MONOCHROME1 stores inverted grayscale (0 = white); flip it so
            # higher values render brighter.
            if dicom_data.PhotometricInterpretation == "MONOCHROME1":
                pixel_array = np.max(pixel_array) - pixel_array
            # Scale 16-bit (or other depth) images down to 8-bit.
            if pixel_array.dtype != np.uint8:
                peak = np.max(pixel_array)
                if peak > 0:
                    pixel_array = (np.divide(pixel_array, peak) * 255).astype(np.uint8)
                else:
                    # BUG FIX: an all-zero slice previously divided by zero.
                    pixel_array = pixel_array.astype(np.uint8)
            # Build a PIL image appropriate to the array's channel layout.
            if len(pixel_array.shape) == 2:
                im = Image.fromarray(pixel_array, mode="L")  # grayscale
            elif len(pixel_array.shape) == 3 and pixel_array.shape[2] == 3:
                im = Image.fromarray(pixel_array, mode="RGB")
            elif len(pixel_array.shape) == 3 and pixel_array.shape[2] == 4:
                # BUG FIX: a 4-channel array cannot be built with mode="RGB"
                # (Pillow raises); build as RGBA then drop alpha.
                im = Image.fromarray(pixel_array, mode="RGBA").convert("RGB")
            else:
                raise ValueError("Unsupported DICOM image format")
            with io.BytesIO() as output:
                im.save(output, format="PNG")
                png_image = output.getvalue()
            # Extract header metadata, coercing pydicom value objects to
            # plain strings so they match the declared string features.
            # ImageType is multi-valued in DICOM; emit it as a list of str.
            ImageType = [str(v) for v in dicom_data.get("ImageType", [])]
            StudyDate = str(dicom_data.get("StudyDate", ""))
            SeriesDate = str(dicom_data.get("SeriesDate", ""))
            Manufacturer = str(dicom_data.get("Manufacturer", ""))
            StudyDescription = str(dicom_data.get("StudyDescription", ""))
            SeriesDescription = str(dicom_data.get("SeriesDescription", ""))
            PatientSex = str(dicom_data.get("PatientSex", ""))
            PatientAge = str(dicom_data.get("PatientAge", ""))
            # BUG FIX: the original defaulted to "" so the `== None` branch
            # was unreachable and every record reported "Yes". Default to
            # None so absence of the tag maps to "None".
            PregnancyStatus = dicom_data.get("PregnancyStatus", None)
            PregnancyStatus = "None" if PregnancyStatus is None else "Yes"
            BodyPartExamined = str(dicom_data.get("BodyPartExamined", ""))
            yield key, {"image": png_image,
                        "ImageType": ImageType,
                        "StudyDate": StudyDate,
                        "SeriesDate": SeriesDate,
                        "Manufacturer": Manufacturer,
                        "StudyDescription": StudyDescription,
                        "SeriesDescription": SeriesDescription,
                        "PatientSex": PatientSex,
                        "PatientAge": PatientAge,
                        "PregnancyStatus": PregnancyStatus,
                        "BodyPartExamined": BodyPartExamined}