Datasets:
Tasks:
Image Segmentation
Modalities:
Image
Formats:
parquet
Sub-tasks:
semantic-segmentation
Languages:
English
Size:
10K - 100K
License:
File size: 1,788 Bytes
Commit: 28f8c87
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "from utils import find_image_groups, create_image_dataset\n",
    "from datasets import load_dataset, concatenate_datasets\n",
    "\n",
    "# Local recording sessions to convert; directory names are capture\n",
    "# timestamps. Commented-out entries are deliberately excluded.\n",
    "data_paths = [\n",
    "    \"./2023-07-21_14-08-29\",\n",
    "    \"./2023-07-21_14-44-56\",\n",
    "    # \"./2023-07-21_14-51-07\",\n",
    "    \"./2023-07-22_16-24-27\",\n",
    "]\n",
    "datasets = []\n",
    "\n",
    "for data_path in data_paths:\n",
    "    # Match each RS_COLOR frame with the other modalities captured\n",
    "    # within 100 ms, then build a dataset from the aligned groups.\n",
    "    image_groups = find_image_groups(\n",
    "        data_path, \"RS_COLOR\", [\"RS_DEPTH\", \"RS_DEPTH_16bit\", \"THERMAL\", \"THERMAL_RGB\"], threshold_ms=100\n",
    "    )\n",
    "    new_dataset = create_image_dataset(image_groups)\n",
    "    datasets.append(new_dataset)\n",
    "    print(f\"Dataset {data_path} created with {len(new_dataset)} samples\")\n",
    "\n",
    "# Append the new sessions to the already-published train split and push.\n",
    "# NOTE(review): re-running this cell appends the same sessions again,\n",
    "# duplicating samples on the Hub -- deduplicate or rebuild if unintended.\n",
    "existing_dataset = load_dataset(\"hassanjbara/BASEPROD\", split=\"train\")\n",
    "combined_dataset = concatenate_datasets([existing_dataset, *datasets])\n",
    "combined_dataset.push_to_hub(\"hassanjbara/BASEPROD\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
|