Tasks: Image Segmentation
Modalities: Image
Formats: parquet
Sub-tasks: semantic-segmentation
Languages: English
Size: 10K - 100K
License:

from pathlib import Path
from typing import List

import matplotlib.pyplot as plt
import numpy as np
from cleanvision import Imagelab
from datasets import Dataset, Features, Image as ImageFeature
from PIL import Image
def images_with_deduplication(data_path):
    imagelab = Imagelab(data_path=str(data_path))
    # Automatically check for a predefined list of issues within your dataset
    imagelab.find_issues({"near_duplicates": {}, "exact_duplicates": {}})
    # Collect the paths of all images in the dataset
    image_paths = list(Path(data_path).rglob("*.png"))
    image_paths = [str(path.resolve()) for path in image_paths]
    print(f"Number of images before deduplication: {len(image_paths)}")
    # Each set holds paths that are near-duplicates of one another;
    # keep the first image in every set and drop the rest
    duplicate_sets = imagelab.info["near_duplicates"]["sets"]
    num_duplicates = 0
    for duplicate_set in duplicate_sets:
        for image_name in duplicate_set[1:]:
            if image_name in image_paths:
                image_paths.remove(image_name)
                num_duplicates += 1
    print(f"Number of images after deduplication: {len(image_paths)}")
    print(f"Number of images removed: {num_duplicates}")
    return image_paths
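# A minimal usage sketch (hypothetical path): deduplicate one folder of PNGs.
#
#     kept = images_with_deduplication("data/COLOR")  # "data/COLOR" is an example
#     print(kept[:3])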
def find_closest_pair(ref_timestamp, search_dir, threshold_ms=100):
    """Return the file in search_dir whose timestamp is closest to
    ref_timestamp, or None if no file falls within threshold_ms."""
    # Filenames are expected to start with a nanosecond timestamp,
    # e.g. "1708439520123456789_color.png"
    search_files = [
        f for f in search_dir.glob("*") if f.stem.split("_")[0].isdigit()
    ]
    if not search_files:
        return None
    search_timestamps = [int(f.stem.split("_")[0]) for f in search_files]
    # nanosecond difference -> milliseconds
    diffs = np.abs(np.array(search_timestamps) - ref_timestamp) / 1e6
    min_idx = np.argmin(diffs)
    if diffs[min_idx] <= threshold_ms:
        return str(search_files[min_idx])
    return None
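# A minimal usage sketch, assuming filenames begin with a nanosecond timestamp.
# The timestamp and the "data/DEPTH" directory below are hypothetical examples.
#
#     match = find_closest_pair(1708439520123456789, Path("data/DEPTH"))
#     print(match)  # path of the closest frame, or None if none is within 100 ms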
def find_image_groups(
    base_dir, ref_subdir, search_subdirs: List[str], threshold_ms=100
):
    base_path = Path(base_dir)
    ref_dir = base_path / ref_subdir
    search_dirs = [base_path / subdir for subdir in search_subdirs]
    for search_dir in search_dirs:
        assert search_dir.exists(), f"{search_dir} does not exist"
    # deduplicate images from the reference directory
    ref_dir_files = images_with_deduplication(ref_dir)
    pairs = []
    for ref_file in ref_dir_files:
        # parse the leading timestamp out of the filename
        ref_ts = int(Path(ref_file).stem.split("_")[0])
        image_group = (ref_file,)
        for search_dir in search_dirs:
            # find_closest_pair returns None when nothing is close enough,
            # so every group keeps the same arity
            match = find_closest_pair(ref_ts, search_dir, threshold_ms)
            image_group += (match,)
        pairs.append(image_group)
    return pairs
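# A minimal usage sketch for grouping. The subdirectory names are hypothetical;
# the first one is the deduplicated reference stream, the rest are matched to
# it by timestamp.
#
#     groups = find_image_groups(
#         "data", "COLOR", ["DEPTH", "DEPTH_16BIT", "THERMAL", "THERMAL_RGB"]
#     )
#     print(len(groups), groups[0])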
def visualize_images(image_tuple):
    n = len(image_tuple)
    fig, axes = plt.subplots(1, n, figsize=(6 * n, 4))
    if n == 1:
        axes = [axes]
    for ax, img_path in zip(axes, image_tuple):
        # missing modalities render as blank panels
        if img_path is None:
            ax.axis("off")
            continue
        img = Image.open(img_path)
        # pick a colormap based on the modality encoded in the path
        if "DEPTH" in str(img_path):
            ax.imshow(img, cmap="viridis")
        elif "THERMAL" in str(img_path):
            ax.imshow(img, cmap="hot")
        else:
            ax.imshow(img)
        ax.set_title(Path(img_path).parent.name)
    plt.show()
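# A minimal usage sketch: show the first aligned group side by side.
#
#     visualize_images(groups[0])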
# Prepare the dataset for upload to the Hugging Face Hub
def create_image_dataset(image_tuples):
    """
    Create a Hugging Face dataset from a list of image tuples.
    Args:
        image_tuples: List of tuples, each containing
            (color, depth, depth_16bit, thermal, thermal_rgb) image paths;
            an entry may be None when a modality has no match.
    """
    features = Features(
        {
            "color": ImageFeature(decode=True),
            "depth": ImageFeature(decode=True),
            "depth_16bit": ImageFeature(decode=True),
            "thermal": ImageFeature(decode=True),
            "thermal_rgb": ImageFeature(decode=True),
        }
    )
    # Unzip the tuples into separate per-modality lists
    color_imgs, depth_imgs, depth_16bit_imgs, thermal_imgs, thermal_rgb_imgs = zip(
        *image_tuples
    )
    dataset_dict = {
        "color": list(color_imgs),
        "depth": list(depth_imgs),
        "depth_16bit": list(depth_16bit_imgs),
        "thermal": list(thermal_imgs),
        "thermal_rgb": list(thermal_rgb_imgs),
    }
    dataset = Dataset.from_dict(dataset_dict, features=features)
    return dataset
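# A minimal end-to-end sketch, assuming the hypothetical layout used above
# ("data/COLOR", "data/DEPTH", ...) and a placeholder Hub repo id.
if __name__ == "__main__":
    groups = find_image_groups(
        "data", "COLOR", ["DEPTH", "DEPTH_16BIT", "THERMAL", "THERMAL_RGB"]
    )
    visualize_images(groups[0])
    dataset = create_image_dataset(groups)
    # Dataset.push_to_hub uploads to the Hub (requires `huggingface-cli login`)
    dataset.push_to_hub("your-username/your-dataset-name")  # placeholder repo id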