# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SPIDER: sagittal lumbar spine MRI scans with segmentation masks, scanner and
patient metadata, and per-disc radiological gradings."""

# Import packages
import csv
import os
from typing import Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Union

import numpy as np
import pandas as pd

import datasets
import skimage.transform
import SimpleITK as sitk

# Define functions
def import_csv_data(filepath: str) -> List[Dict[str, str]]:
    """Import all rows of CSV file."""
    results = []
    with open(filepath, encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for line in reader:
            results.append(line)
    return results
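
# Illustrative behavior (values hypothetical): for a CSV whose header row is
# 'Patient,sex' and whose first data row is '1,M', import_csv_data() returns
# [{'Patient': '1', 'sex': 'M'}, ...].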

def standardize_3D_image(
    image: np.ndarray,
    resize_shape: Tuple[int, int, int]
) -> np.ndarray:
    """Aligns dimensions of image to be (height, width, channels) and resizes
    images to values specified in resize_shape."""
    # Align height, width, channel dims
    if image.shape[0] < image.shape[2]:
        image = np.transpose(image, axes=[1, 2, 0])
    # Resize image
    image = skimage.transform.resize(image, resize_shape)
    return image
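
# Illustrative check (shapes hypothetical): SimpleITK arrays typically arrive
# as (slices, height, width), e.g.
#
#     out = standardize_3D_image(np.zeros((15, 448, 448)), (512, 512, 30))
#     assert out.shape == (512, 512, 30)
#
# Caveat: skimage.transform.resize interpolates by default, so resized label
# masks are no longer integer-valued; pass order=0 to resize if exact labels
# matter.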


# Define constants
N_PATIENTS = 218
MIN_IVD = 0
MAX_IVD = 9
DEFAULT_SCAN_TYPES = ['t1', 't2', 't2_SPACE']
DEFAULT_RESIZE = (512, 512, 30)

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

_DESCRIPTION = """\
SPIDER: sagittal lumbar spine MRI scans (t1, t2, and t2 SPACE) for 218 patients, \
with per-voxel segmentation masks, scanner/patient metadata, and per-disc \
radiological gradings.
"""

_HOMEPAGE = "https://zenodo.org/records/10159290"

_LICENSE = """Creative Commons Attribution 4.0 International License \
(https://creativecommons.org/licenses/by/4.0/legalcode)"""

# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
    "images":"https://zenodo.org/records/10159290/files/images.zip",
    "masks":"https://zenodo.org/records/10159290/files/masks.zip",
    "overview":"https://zenodo.org/records/10159290/files/overview.csv",
    "gradings":"https://zenodo.org/records/10159290/files/radiological_gradings.csv",
}

class CustomBuilderConfig(datasets.BuilderConfig):
    
    def __init__(
        self,
        name: str = 'default',
        version: str = '0.0.0',
        data_dir: Optional[str] = None,
        data_files: Optional[Union[str, Sequence, Mapping]] = None,
        description: Optional[str] = None,
        scan_types: List[str] = DEFAULT_SCAN_TYPES,
        resize_shape: Tuple[int, int, int] = DEFAULT_RESIZE,
    ):
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        self.scan_types = scan_types
        self.resize_shape = resize_shape


class SPIDER(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIG_CLASS = CustomBuilderConfig

    # BUILDER_CONFIGS = [
    #     CustomBuilderConfig(
    #         name="all_scan_types",
    #         version=VERSION,
    #         description="Use images of all scan types (t1, t2, t2 SPACE)",
    #         scan_types=['t1', 't2', 't2_SPACE'],
    #         resize_shape=DEFAULT_RESIZE,
    #     ),
    #     CustomBuilderConfig(
    #         name="t1_scan_types",
    #         version=VERSION,
    #         description="Use images of t1 scan types only",
    #         scan_types=['t1'],
    #         resize_shape=DEFAULT_RESIZE,
    #     ),
    #     CustomBuilderConfig(
    #         name="t2_scan_types",
    #         version=VERSION,
    #         description="Use images of t2 scan types only",
    #         scan_types=['t2'],
    #         resize_shape=DEFAULT_RESIZE,
    #     ),
    #     CustomBuilderConfig(
    #         name="t2_SPACE_scan_types",
    #         version=VERSION,
    #         description="Use images of t2 SPACE scan types only",
    #         scan_types=['t2_SPACE'],
    #         resize_shape=DEFAULT_RESIZE,
    #     ),
    # ]

    # DEFAULT_CONFIG_NAME = "all_scan_types"

    def __init__(
        self,
        *args,
        scan_types: List[str] = DEFAULT_SCAN_TYPES,
        resize_shape: Tuple[int, int, int] = DEFAULT_RESIZE,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.scan_types = scan_types
        self.resize_shape = resize_shape

    def _info(self):
        """
        This method specifies the datasets.DatasetInfo object which contains 
        informations and typings for the dataset.
        """
        image_size = self.config.resize_shape
        features = datasets.Features({
            "patient_id": datasets.Value("string"),
            "scan_type": datasets.Value("string"),
            # "raw_image": datasets.Image(),
            "image_array": datasets.Array3D(shape=image_size, dtype='float64'),
            # "raw_mask": datasets.Image(),
            "mask_array": datasets.Array3D(shape=image_size, dtype='float64'),
            "metadata": {
                "num_vertebrae": datasets.Value(dtype="string"), #TODO: more specific types
                "num_discs": datasets.Value(dtype="string"),
                "sex": datasets.Value(dtype="string"),
                "birth_date": datasets.Value(dtype="string"),
                "AngioFlag": datasets.Value(dtype="string"),
                "BodyPartExamined": datasets.Value(dtype="string"),
                "DeviceSerialNumber": datasets.Value(dtype="string"),
                "EchoNumbers": datasets.Value(dtype="string"),
                "EchoTime": datasets.Value(dtype="string"),
                "EchoTrainLength": datasets.Value(dtype="string"),
                "FlipAngle": datasets.Value(dtype="string"),
                "ImagedNucleus": datasets.Value(dtype="string"),
                "ImagingFrequency": datasets.Value(dtype="string"),
                "InPlanePhaseEncodingDirection": datasets.Value(dtype="string"),
                "MRAcquisitionType": datasets.Value(dtype="string"),
                "MagneticFieldStrength": datasets.Value(dtype="string"),
                "Manufacturer": datasets.Value(dtype="string"),
                "ManufacturerModelName": datasets.Value(dtype="string"),
                "NumberOfPhaseEncodingSteps": datasets.Value(dtype="string"),
                "PercentPhaseFieldOfView": datasets.Value(dtype="string"),
                "PercentSampling": datasets.Value(dtype="string"),
                "PhotometricInterpretation": datasets.Value(dtype="string"),
                "PixelBandwidth": datasets.Value(dtype="string"),
                "PixelSpacing": datasets.Value(dtype="string"),
                "RepetitionTime": datasets.Value(dtype="string"),
                "SAR": datasets.Value(dtype="string"),
                "SamplesPerPixel": datasets.Value(dtype="string"),
                "ScanningSequence": datasets.Value(dtype="string"),
                "SequenceName": datasets.Value(dtype="string"),
                "SeriesDescription": datasets.Value(dtype="string"),
                "SliceThickness": datasets.Value(dtype="string"),
                "SoftwareVersions": datasets.Value(dtype="string"),
                "SpacingBetweenSlices": datasets.Value(dtype="string"),
                "SpecificCharacterSet": datasets.Value(dtype="string"),
                "TransmitCoilName": datasets.Value(dtype="string"),
                "WindowCenter": datasets.Value(dtype="string"),
                "WindowWidth": datasets.Value(dtype="string"),
            },
            "rad_gradings": {
                "IVD label": datasets.Sequence(datasets.Value("string")),
                "Modic": datasets.Sequence(datasets.Value("string")),
                "UP endplate": datasets.Sequence(datasets.Value("string")),
                "LOW endplate": datasets.Sequence(datasets.Value("string")),
                "Spondylolisthesis": datasets.Sequence(datasets.Value("string")),
                "Disc herniation": datasets.Sequence(datasets.Value("string")),
                "Disc narrowing": datasets.Sequence(datasets.Value("string")),
                "Disc bulging": datasets.Sequence(datasets.Value("string")),
                "Pfirrman grade": datasets.Sequence(datasets.Value("string")),
            }
        })

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # Here we define them above because they are different between the two configurations
            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """
        This method is tasked with downloading/extracting the data 
        and defining the splits depending on the configuration.
        If several configurations are possible (listed in BUILDER_CONFIGS), 
        the configuration selected by the user is in self.config.name.
        """

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
        paths_dict = dl_manager.download_and_extract(_URLS)
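        # Illustrative structure of paths_dict (cache paths hypothetical):
        #     {'images': '.../extracted/<hash>',
        #      'masks': '.../extracted/<hash>',
        #      'overview': '.../downloads/<hash>',
        #      'gradings': '.../downloads/<hash>'}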
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "train",
                    "scan_types": self.scan_types,
                    "resize_shape": self.resize_shape,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "validate",
                    "scan_types": self.scan_types,
                    "resize_shape": self.resize_shape,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "test",
                    "scan_types": self.scan_types,
                    "resize_shape": self.resize_shape,
                },
            ),
        ]

    def _generate_examples(
        self,
        paths_dict: Dict[str, str],
        split: str,
        scan_types: List[str],
        resize_shape: Tuple[int, int, int],
        validate_share: float = 0.3,
        test_share: float = 0.2,
        random_seed: int = 9999,
    ) -> Iterator[Tuple[str, Dict]]:
        """
        This method handles input defined in _split_generators to yield 
        (key, example) tuples from the dataset. The `key` is for legacy reasons 
        (tfds) and is not important in itself, but must be unique for each example.
        
        Args
            paths_dict: mapping of data element name to temporary file location
            split: specify training, validation, or testing set;
                options = 'train', 'validate', OR 'test'
            scan_types: list of sagittal scan types to use in examples; 
                options = ['t1', 't2', 't2_SPACE']
            resize_shape: target (height, width, channels) shape for the 
                standardized image and mask arrays
            validate_share: float indicating share of data to use for validation;
                must be in range (0.0, 1.0); note that training share is 
                calculated as (1 - validate_share - test_share)
            test_share: float indicating share of data to use for testing;
                must be in range (0.0, 1.0); note that training share is 
                calculated as (1 - validate_share - test_share)
            random_seed: seed for the train/validate/test partition of 
                patient IDs
        
        Yields
            Tuple of (unique patient-scan ID, dict of example data)
        """
        # Set constants
        train_share = (1.0 - validate_share - test_share)
        np.random.seed(int(random_seed))

        # Validate params
        for item in scan_types:
            if item not in ['t1', 't2', 't2_SPACE']:
                raise ValueError(
                    f'Scan type "{item}" not recognized as valid scan type.\
                    Verify scan type argument.'
                )
        if split not in ['train', 'validate', 'test']:
            raise ValueError( 
                f'Split argument "{split}" is not recognized. \
                Please enter one of ["train", "validate", "test"]'
            )
        if train_share <= 0.0:
            raise ValueError(
                f'Training share is calculated as (1 - validate_share - test_share) \
                and must be greater than 0. Current calculated value is \
                {round(train_share, 3)}. Adjust validate_share and/or \
                test_share parameters.'
            )
        if validate_share > 1.0 or validate_share < 0.0:
            raise ValueError(
                f'Validation share must be between (0, 1). Current value is \
                {validate_share}.'
            )
        if test_share > 1.0 or test_share < 0.0:
            raise ValueError(
                f'Testing share must be between (0, 1). Current value is \
                {test_share}.'
            )

        # Generate train/validate/test partitions of patient IDs
        partition = np.random.choice(
            ['train', 'dev', 'test'],
            p=[train_share, validate_share, test_share],
            size=N_PATIENTS,
        )
        patient_ids = (np.arange(N_PATIENTS) + 1)
        train_ids = set(patient_ids[partition == 'train'])
        validate_ids = set(patient_ids[partition == 'dev'])
        test_ids = set(patient_ids[partition == 'test'])
        assert len(train_ids.union(validate_ids, test_ids)) == N_PATIENTS
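
        # Illustrative outcome (counts hypothetical): with the default shares
        # (train 0.5, validate 0.3, test 0.2) and N_PATIENTS = 218, roughly
        # 109/65/44 patients land in train/validate/test, varying with the seed.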

        # Import patient/scanner data and radiological gradings data
        overview_data = import_csv_data(paths_dict['overview'])
        grades_data = import_csv_data(paths_dict['gradings'])
        
        # Convert overview data list of dicts to dict of dicts
        exclude_vars = ['new_file_name', 'subset'] # Original data only lists train and validate
        overview_dict = {}
        for item in overview_data:
            key = item['new_file_name']
            overview_dict[key] = {
                k:v for k,v in item.items() if k not in exclude_vars
            }
        
        # Note: MAX_IVD was determined by scanning the gradings data for the
        # largest 'IVD label' value across patients.

        # Merge patient records for radiological gradings data
        grades_dict = {}
        for patient_id in patient_ids:
            patient_grades = [
                x for x in grades_data if x['Patient'] == str(patient_id)
            ]
            # Pad so that all patients have same number of IVD observations
            IVD_values = [x['IVD label'] for x in patient_grades]
            for i in range(MIN_IVD, MAX_IVD + 1):
                if str(i) not in IVD_values:
                    patient_grades.append({
                        "Patient": f"{patient_id}",
                        "IVD label": f"{i}",
                        "Modic": "",
                        "UP endplate": "",
                        "LOW endplate": "",
                        "Spondylolisthesis": "",
                        "Disc herniation": "",
                        "Disc narrowing": "",
                        "Disc bulging": "",
                        "Pfirrman grade": "",
                    })
            assert len(patient_grades) == (MAX_IVD - MIN_IVD + 1), "Radiological\
                gradings not padded correctly"
        
            # Convert to sequences
            df = (
                pd.DataFrame(patient_grades)
                .sort_values("IVD label")
                .reset_index(drop=True)
            )
            grades_dict[str(patient_id)] = {
                col:df[col].tolist() for col in df.columns
                if col not in ['Patient']
            }
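        # Illustrative result (values hypothetical): each grades_dict entry maps
        # a grading column to a (MAX_IVD - MIN_IVD + 1)-element list ordered by
        # 'IVD label', e.g. grades_dict['1']['Pfirrman grade'] might be
        # ['2', '3', '', '', '', '', '', '', '', ''].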

        # Get list of image and mask data files
        image_files = [
            file for file in os.listdir(os.path.join(paths_dict['images'], 'images'))
            if file.endswith('.mha')
        ]
        assert len(image_files) > 0, "No image files found--check directory path."
        
        mask_files = [
            file for file in os.listdir(os.path.join(paths_dict['masks'], 'masks'))
            if file.endswith('.mha')
        ]
        assert len(mask_files) > 0, "No mask files found--check directory path."
        
        # Filter image and mask data files based on scan types
        image_files = [
            file for file in image_files 
            if any(scan_type in file for scan_type in scan_types)
        ]

        mask_files = [
            file for file in mask_files 
            if any(scan_type in file for scan_type in scan_types)
        ]
        
        # Subset train/validation/test partition images and mask files
        if split == 'train':
            subset_ids = train_ids
        elif split == 'validate':
            subset_ids = validate_ids
        elif split == 'test':
            subset_ids = test_ids
        
        image_files = [
            file for file in image_files
            if any(str(patient_id) == file.split('_')[0] for patient_id in subset_ids)
        ]
            
        mask_files = [
            file for file in mask_files
            if any(str(patient_id) == file.split('_')[0] for patient_id in subset_ids)
        ]
        assert len(image_files) == len(mask_files), "The number of image files\
            does not match the number of mask files--verify subsetting operation."
        
        # Shuffle order of patient scans
        # (note that only images need to be shuffled since masks and metadata
        # will be linked to the selected image)
        np.random.shuffle(image_files) 

        ## Generate next example
        # ----------------------
        for example in image_files:

            # Extract linking data
            scan_id = example.replace('.mha', '')
            patient_id = scan_id.split('_')[0]
            scan_type = '_'.join(scan_id.split('_')[1:])

            # Load .mha image file
            image_path = os.path.join(paths_dict['images'], 'images', example)
            image = sitk.ReadImage(image_path)
            
            # Convert .mha image to standardized numeric array
            image_array = standardize_3D_image(
                sitk.GetArrayFromImage(image), resize_shape
            )

            # Load .mha mask file
            mask_path = os.path.join(paths_dict['masks'], 'masks', example)
            mask = sitk.ReadImage(mask_path)
            
            # Convert .mha mask to standardized numeric array
            mask_array = standardize_3D_image(
                sitk.GetArrayFromImage(mask), resize_shape
            )
    
            # Extract overview data corresponding to image
            image_overview = overview_dict[scan_id]

            # Extract patient radiological gradings corresponding to patient
            patient_grades_dict = grades_dict[patient_id]

            # Prepare example return dict
            # ('raw_image' and 'raw_mask' are omitted to match the features
            # defined in _info, where those entries are commented out)
            return_dict = {
                'patient_id':patient_id,
                'scan_type':scan_type,
                'image_array':image_array,
                'mask_array':mask_array,
                'metadata':image_overview,
                'rad_gradings':patient_grades_dict,
            }

            # Yield example
            yield scan_id, return_dict