"""Pathfinder-X2 Segmentation Benchmark.""" |
|
|
|
import os |
|
|
|
import pandas as pd |
|
|
|
import datasets |
|
|
|
|
|
_CITATION = """\
@article{suard2023pathfinder,
    title={Pathfinder-X2: A Challenging Dataset for Evaluating Large Language Models on Long-Range Dependencies},
    author={Suard, Tyler},
    journal={},
    year={2023}
}
"""

_DESCRIPTION = """\
The rapid progress of large language models has led to impressive results on a wide array of tasks. However, there remains a need for increasingly challenging datasets to evaluate these models' ability to handle long-range dependencies. Pathfinder-X2 is a novel dataset that builds upon the Pathfinder and Pathfinder-X datasets. It comprises 512x512-pixel images designed to test a model's capacity to segment a specific dashed white line "snake" with a circle at its tip among a collection of similar distractor snakes. The increased image resolution and complexity of Pathfinder-X2 present a substantially more challenging task, contributing to the ongoing development and assessment of such models.
"""

_HOMEPAGE = "https://huggingface.co/datasets/Tylersuard/PathfinderX2/"

_LICENSE = "CC BY 4.0"

_URLS = {
    "instance_segmentation": {
        "images": "https://pathfinder-x2.s3.us-west-1.amazonaws.com/Pathfinder-X2+images.zip",
        "annotations": "https://pathfinder-x2.s3.us-west-1.amazonaws.com/Pathfinder-X2+masks.zip",
    },
}


class PathfinderX2(datasets.GeneratorBasedBuilder):
    """Pathfinder-X2 Segmentation Benchmark dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="instance_segmentation", version=VERSION, description="The instance segmentation variant."
        ),
    ]

    DEFAULT_CONFIG_NAME = "instance_segmentation"

    def _info(self):
        # Each example pairs an input image with its segmentation mask; both
        # are decoded lazily to PIL images by the `datasets.Image` feature.
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "annotation": datasets.Image(),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        # `download_and_extract` accepts the nested dict and returns the same
        # structure, with each URL replaced by a local extracted path.
        data_dirs = dl_manager.download_and_extract(urls)
        train_data = (data_dirs["images"], data_dirs["annotations"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data": train_data,
                    "split": "training",
                },
            ),
        ]

    def _generate_examples(self, data, split):
        if split == "training":
            images_dir, annotations_dir = data

            # Index the mask files by basename so each image can be matched
            # to its annotation regardless of directory layout.
            annotation_dict = {}
            for root, _, files in os.walk(annotations_dir):
                for file_annot in files:
                    if file_annot.endswith(".png"):
                        image_id = os.path.splitext(file_annot)[0]
                        annotation_dict[image_id] = os.path.join(root, file_annot)

            idx = 0
            for root, _, files in os.walk(images_dir):
                for file_img in files:
                    if not file_img.endswith(".png"):
                        continue
                    image_id = os.path.splitext(file_img)[0]
                    # Skip images that have no matching mask rather than
                    # raising a KeyError.
                    if image_id not in annotation_dict:
                        continue
                    path_img = os.path.join(root, file_img)
                    path_annot = annotation_dict[image_id]

                    with open(path_img, "rb") as f_img:
                        bytes_img = f_img.read()
                    with open(path_annot, "rb") as f_annot:
                        bytes_annot = f_annot.read()

                    yield idx, {
                        "image": {"path": path_img, "bytes": bytes_img},
                        "annotation": {"path": path_annot, "bytes": bytes_annot},
                    }
                    idx += 1
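

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading script proper: it loads
    # this script locally through `datasets.load_dataset` and decodes one
    # example. This assumes a `datasets` version that still supports
    # script-based loading (it is deprecated in recent releases); the hub
    # repo id from _HOMEPAGE ("Tylersuard/PathfinderX2") can be passed
    # instead of the script path.
    dataset = datasets.load_dataset(__file__, "instance_segmentation", split="train")
    example = dataset[0]
    # Both features decode to PIL images; image and mask should match in size.
    print(example["image"].size, example["annotation"].size)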