File size: 4,611 Bytes
4260285
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83cdd89
 
4260285
83cdd89
ad30fc7
4260285
 
b6ef0cd
83cdd89
b6ef0cd
 
83cdd89
b6ef0cd
 
4260285
 
8e52119
4260285
5ee0946
4260285
8e52119
4260285
 
 
5ee0946
4260285
 
 
 
 
 
 
8e52119
35c8dd8
4260285
 
 
 
 
 
98b0688
4260285
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b6ef0cd
4260285
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b6ef0cd
4260285
 
b6ef0cd
 
4260285
 
 
 
79a1581
4260285
 
 
 
 
79a1581
4260285
 
 
 
 
 
 
 
 
 
8e52119
4260285
 
 
6f688c2
4260285
09a00f3
4260285
 
 
 
161144b
4260285
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
import os
import datasets
from huggingface_hub import HfFileSystem


# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)
# Hub filesystem client; used below to discover which image shards exist.
# NOTE: this performs a network call at import time of the loading script.
fs = HfFileSystem()


_CITATION = """

"""
_DESCRIPTION = """

"""
_HOMEPAGE = "https://github.com/FPT-VVU/ViVidBot"
_REPO_ID = "datasets/Vividbot/instruct500k_vi"
_REPO_URL = f"https://huggingface.co/{_REPO_ID}/resolve/main"
# Remote artifacts: one JSON metadata file plus one zip archive per image shard.
# "image" is a template URL — format with `shard=<shard name>` before use.
_URLS = {
    "meta": f"{_REPO_URL}/instruct500k_vi.json",
    "image": f"{_REPO_URL}/images/" + "{shard}.zip",
}

# Available builder configs: "all" plus one config per image shard,
# where a shard's name is the basename of its zip file without extension.
_CONFIGS = ["all"]
if fs.exists(_REPO_ID + "/images"):
    _CONFIGS.extend([
        os.path.basename(file_name).split(".")[0]
        for file_name in fs.listdir(_REPO_ID + "/images", detail=False)
        if file_name.endswith(".zip")
    ])

class Instruct500k_ViConfig(datasets.BuilderConfig):
    """BuilderConfig for one Instruct500k_Vi subset (an image shard, or "all")."""

    def __init__(self, name, **kwargs):
        """
        :param name:    Name of subset.
        :param kwargs:  Extra arguments forwarded to ``datasets.BuilderConfig``.
        """
        # All subsets share the same version and description; only the
        # subset name distinguishes them.
        version = datasets.Version("1.0.0")
        super().__init__(
            name=name,
            version=version,
            description=_DESCRIPTION,
            **kwargs,
        )


class Instruck500k_Vi(datasets.GeneratorBasedBuilder):
    """Instruct500k Vi dataset builder.

    NOTE(review): the class name carries a typo ("Instruck") — kept as-is
    because it is the public entry point the `datasets` library loads.
    """

    BUILDER_CONFIGS = [Instruct500k_ViConfig(name) for name in _CONFIGS]

    def _info(self) -> datasets.DatasetInfo:
        """Return the dataset schema: sample id, raw image bytes, and a list
        of conversation turns (each with ``from`` and ``value`` strings)."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "image": datasets.Value("binary"),
                "conversations": [
                    {
                        "from": datasets.Value("string"),
                        "value": datasets.Value("string"),
                    }
                ],
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> list[datasets.SplitGenerator]:
        """
        Download metadata and the requested image shards, and carve the
        metadata into train/validation/test splits (90/5/5).
        :param dl_manager:  Download manager.
        :return:            Splits.
        """
        # "all" expands to every shard discovered on the Hub; otherwise only
        # the single shard named by this config is downloaded.
        config_names = _CONFIGS[1:] if self.config.name == "all" else [self.config.name]

        metadata_paths = dl_manager.download(_URLS["meta"])
        dataset = datasets.load_dataset(
            "json",
            data_files=metadata_paths,
            split="train",
        )
        # 90% train; the remaining 10% is halved into validation and test.
        # Both splits are seeded so the partition is reproducible across runs
        # (the second split previously had no seed, making val/test unstable).
        dataset = dataset.train_test_split(test_size=0.1, shuffle=True, seed=42)
        train_set = dataset["train"]
        val_test_set = dataset["test"].train_test_split(test_size=0.5, seed=42)
        val_set = val_test_set["train"]
        test_set = val_test_set["test"]

        split_dict = {
            datasets.Split.TRAIN: train_set,
            datasets.Split.VALIDATION: val_set,
            datasets.Split.TEST: test_set,
        }

        # Download and extract one zip per requested shard; map shard name to
        # its extraction directory for use in _generate_examples.
        image_dirs = dl_manager.download_and_extract(
            [_URLS["image"].format(shard=shard) for shard in config_names]
        )
        image_dict = {
            shard: image_dir
            for shard, image_dir in zip(config_names, image_dirs)
        }

        return [
            datasets.SplitGenerator(
                name=name,
                gen_kwargs={
                    "split": split,
                    "image_dict": image_dict,
                },
            )
            for name, split in split_dict.items()
        ]

    def _generate_examples(
        self,
        split: datasets.Dataset,
        image_dict: dict,
    ):
        """
        Yield ``(index, example)`` pairs for one split.
        :param split:       Metadata rows for this split.
        :param image_dict:  Mapping of shard name to its extracted image
                            directory.
        """
        for i, sample in enumerate(split):
            # Metadata stores the image as "<shard>/<file name>".
            shard, file_name = sample["image"].split("/")[0], sample["image"].split("/")[1]
            if shard not in image_dict:
                # The metadata JSON covers every shard, but only the shards
                # requested by this config were downloaded — skip the rest
                # instead of raising KeyError (previous behavior crashed any
                # single-shard config).
                continue
            image_path = os.path.join(image_dict[shard], shard, file_name)

            yield i, {
                "id": sample["id"],
                "image": self.__get_binary_data(image_path),
                "conversations": sample["conversations"],
            }

    def __get_binary_data(self, path: str) -> bytes:
        """
        Get binary data from path.
        :param path:    Path to file.
        :return:        Binary data.
        """
        with open(path, "rb") as f:
            return f.read()

    def __get_text_data(self, path: str) -> str:
        """
        Get transcript from path.
        NOTE(review): unused within this file — kept for compatibility.
        :param path:     Path to transcript.
        :return:         Transcript.
        """
        with open(path, "r", encoding="utf-8") as f:
            return f.read().strip()