Tasks: Image Segmentation
Modalities: Image
Formats: parquet
Sub-tasks: semantic-segmentation
Languages: English
Size: 10K - 100K
License:

hassanjbara committed: Upload 2 files
- utils.py +137 -0
- workplace.ipynb +67 -0
utils.py
ADDED
@@ -0,0 +1,137 @@
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from typing import List
from cleanvision import Imagelab
from PIL import Image
from datasets import Dataset, Features, Image as ImageFeature


def images_with_deduplication(data_path):
    """Return the paths of all PNGs under data_path, keeping one image per near-duplicate set."""
    imagelab = Imagelab(data_path=data_path)

    # Automatically check for a predefined list of issues within the dataset.
    imagelab.find_issues({"near_duplicates": {}, "exact_duplicates": {}})

    # Load the paths of all images in the dataset.
    image_paths = list(Path(data_path).rglob("*.png"))
    image_paths = [str(path.resolve()) for path in image_paths]
    print(f"Number of images before deduplication: {len(image_paths)}")

    duplicate_sets = imagelab.info["near_duplicates"]["sets"]
    num_duplicates = sum(len(duplicate_set) - 1 for duplicate_set in duplicate_sets)

    # Keep the first image of every duplicate set and drop the rest.
    for duplicate_set in duplicate_sets:
        for image_name in duplicate_set[1:]:
            del image_paths[image_paths.index(image_name)]

    print(f"Number of images after deduplication: {len(image_paths)}")
    print(f"Number of images removed: {num_duplicates}")

    return image_paths


def find_closest_pair(ref_timestamp, search_dir, threshold_ms=100):
    """Return the file in search_dir closest in time to ref_timestamp, or None if none is within threshold_ms."""
    # File names are expected to start with a numeric timestamp: "<timestamp>_...".
    search_files = list(search_dir.glob("*"))
    search_timestamps = [int(f.stem.split("_")[0]) for f in search_files]

    # Convert the timestamp difference to milliseconds (timestamps appear to be in nanoseconds).
    diffs = np.abs(np.array(search_timestamps) - ref_timestamp) / 1e6
    min_idx = np.argmin(diffs)

    if diffs[min_idx] <= threshold_ms:
        return str(search_files[min_idx])
    return None


def find_image_groups(
    base_dir, ref_subdir, search_subdirs: List[str], threshold_ms=100
):
    """Group each reference image with its closest-in-time counterpart from every search subdirectory."""
    base_path = Path(base_dir)
    ref_dir = base_path / ref_subdir
    search_dirs = [base_path / subdir for subdir in search_subdirs]

    # Deduplicate images from the reference directory.
    ref_dir_files = images_with_deduplication(ref_dir)

    pairs = []
    for ref_file in ref_dir_files:
        ref_ts = int(ref_file.split("/")[-1].split("_")[0])
        image_group = (ref_file,)

        for search_dir in search_dirs:
            assert search_dir.exists(), f"{search_dir} does not exist"

            match = find_closest_pair(ref_ts, search_dir, threshold_ms)
            if match:
                image_group += (match,)
            else:
                image_group += (None,)

        pairs.append(image_group)

    return pairs


def visualize_images(image_tuple):
    """Plot one group of image paths side by side; None entries are left blank."""
    n = len(image_tuple)
    fig, axes = plt.subplots(1, n, figsize=(6 * n, 4))

    if n == 1:
        axes = [axes]

    for ax, img_path in zip(axes, image_tuple):
        if img_path is None:
            ax.axis("off")
            continue

        img = Image.open(img_path)

        # Pick a colormap based on the modality encoded in the directory name.
        if "DEPTH" in str(img_path):
            ax.imshow(img, cmap="viridis")
        elif "THERMAL" in str(img_path):
            ax.imshow(img, cmap="hot")
        else:
            ax.imshow(img)

        ax.set_title(img_path.split("/")[-2])
    plt.show()


# Prepare the dataset for upload to the Hugging Face Hub.
def create_image_dataset(image_tuples):
    """
    Create a Hugging Face dataset from a list of image tuples.

    Args:
        image_tuples: List of tuples, each containing
            (color, depth, depth_16bit, thermal, thermal_rgb) image paths.
    """
    features = Features(
        {
            "color": ImageFeature(decode=True),
            "depth": ImageFeature(decode=True),
            "depth_16bit": ImageFeature(decode=True),
            "thermal": ImageFeature(decode=True),
            "thermal_rgb": ImageFeature(decode=True),
        }
    )

    # Unzip the tuples into separate per-modality lists.
    color_imgs, depth_imgs, depth_16bit_imgs, thermal_imgs, thermal_rgb_imgs = zip(
        *image_tuples
    )

    dataset_dict = {
        "color": list(color_imgs),
        "depth": list(depth_imgs),
        "depth_16bit": list(depth_16bit_imgs),
        "thermal": list(thermal_imgs),
        "thermal_rgb": list(thermal_rgb_imgs),
    }

    dataset = Dataset.from_dict(dataset_dict, features=features)

    return dataset
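For reference, a minimal usage sketch of these helpers, assuming a local recording laid out like the ones used in workplace.ipynb below (the recording path and subdirectory names are taken from that notebook, not prescribed by utils.py):

from utils import find_image_groups, visualize_images

# Group each RS_COLOR frame with its closest-in-time depth and thermal frames,
# then preview the first group side by side.
groups = find_image_groups(
    "./2023-07-21_14-08-29",
    "RS_COLOR",
    ["RS_DEPTH", "RS_DEPTH_16bit", "THERMAL", "THERMAL_RGB"],
    threshold_ms=100,
)
visualize_images(groups[0])  # (color, depth, depth_16bit, thermal, thermal_rgb)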
workplace.ipynb
ADDED
@@ -0,0 +1,67 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "from utils import find_image_groups, create_image_dataset\n",
    "from datasets import load_dataset, concatenate_datasets\n",
    "\n",
    "data_paths = [\n",
    "    \"./2023-07-21_14-08-29\",\n",
    "    \"./2023-07-21_14-44-56\",\n",
    "    # \"./2023-07-21_14-51-07\",\n",
    "    \"./2023-07-22_16-24-27\",\n",
    "]\n",
    "datasets = []\n",
    "\n",
    "for data_path in data_paths:\n",
    "    image_groups = find_image_groups(\n",
    "        data_path, \"RS_COLOR\", [\"RS_DEPTH\", \"RS_DEPTH_16bit\", \"THERMAL\", \"THERMAL_RGB\"], threshold_ms=100\n",
    "    )\n",
    "    new_dataset = create_image_dataset(image_groups)\n",
    "    datasets.append(new_dataset)\n",
    "    print(f\"Dataset {data_path} created with {len(new_dataset)} samples\")\n",
    "\n",
    "existing_dataset = load_dataset(\"hassanjbara/BASEPROD\", split=\"train\")\n",
    "combined_dataset = concatenate_datasets([existing_dataset, *datasets])\n",
    "combined_dataset.push_to_hub(\"hassanjbara/BASEPROD\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
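Once the push completes, the combined dataset can be pulled back for a quick sanity check; a minimal sketch, assuming the "train" split used above and the column names defined in create_image_dataset:

from datasets import load_dataset
import matplotlib.pyplot as plt

# Each row decodes to PIL images for the five aligned modalities.
ds = load_dataset("hassanjbara/BASEPROD", split="train")
sample = ds[0]

fig, axes = plt.subplots(1, 2, figsize=(10, 4))
axes[0].imshow(sample["color"])
axes[0].set_title("color")
axes[1].imshow(sample["thermal"], cmap="hot")
axes[1].set_title("thermal")
for ax in axes:
    ax.axis("off")
plt.show()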