# TimeIT dataset loading script for the HuggingFace `datasets` library.
import json
import random
import datasets


_DESCRIPTION = """\
A video-centric instruction-tuning dataset involving timestamps for Video Large Language Models
"""

# Official homepage of the dataset.
_HOMEPAGE = "https://github.com/RenShuhuai-Andy/TimeChat"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
_SEED = 1234  # for deterministic random sampling


# The HuggingFace Datasets library does not host the data; the paths below point to the per-task
# annotation files shipped alongside this script. This can be an arbitrary nested dict/list of
# URLs or local paths (see `_split_generators` below).
_URLS = {
    "charades": {
        "train": "./data/temporal_video_grounding/charades/instruct_tvg_12.4k_charades.json",
        "instruction": "./data/temporal_video_grounding/temporal_video_grounding_instructions.json",
    },
    "didemo": {
        "train": "./data/temporal_video_grounding/didemo/instruct_tvg_33.0k_didemo.json",
        "instruction": "./data/temporal_video_grounding/temporal_video_grounding_instructions.json",
    },
    "queryd": {
        "train": "./data/temporal_video_grounding/queryd/instruct_tvg_14.6k_queryd.json",
        "instruction": "./data/temporal_video_grounding/temporal_video_grounding_instructions.json",
    },
    "hirest_grounding": {
        "train": "./data/temporal_video_grounding/hirest/instruct_tvg_0.5k_hirest.json",
        "instruction": "./data/temporal_video_grounding/temporal_video_grounding_instructions.json",
    },
    "qvhighlights": {
        "train": "./data/video_highlight_detection/qvhighlights/instruct_vhd_6.9k_qvhighlights.json",
        "instruction": "./data/video_highlight_detection/video_highlight_detection_instructions.json",
    },
    "youcook2": {
        "train": "./data/dense_video_captioning/youcook2/instruct_dvc_1.2k_youcook2.json",
        "instruction": "./data/dense_video_captioning/dense_video_captioning_instructions.json",
    },
    "anet": {
        "train": "./data/dense_video_captioning/anet/instruct_dvc_10.0k_anet.json",
        "instruction": "./data/dense_video_captioning/dense_video_captioning_instructions.json",
    },
    "vitt": {
        "train": "./data/dense_video_captioning/vitt/instruct_dvc_5.1k_vitt.json",
        "instruction": "./data/dense_video_captioning/dense_video_captioning_instructions.json",
    },
    "tvsum": {
        "train": "./data/video_summarization/tvsum/instruct_vhd_50_tvsum.json",
        "instruction": "./data/video_summarization/video_summarization_instructions.json",
    },
    "summe": {
        "train": "./data/video_summarization/summe/instruct_vhd_25_summe.json",
        "instruction": "./data/video_summarization/video_summarization_instructions.json",
    },
    "coin": {
        "train": "./data/step_localization/coin/instruct_action_9.0k_coin.json",
        "instruction": "./data/step_localization/step_localization_instructions.json",
    },
    "hirest_step": {
        "train": "./data/step_localization/hirest_step/instruct_action_0.5k_hirest.json",
        "instruction": "./data/step_localization/step_localization_instructions.json",
    },
    "yttemporal": {
        "train": "./data/transcribed_speech_generation/yttemporal/instruct_tsg_31.6k_yttemporal.json",
        "instruction": "./data/transcribed_speech_generation/transcribed_speech_generation_instructions.json",
    },
}
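
# Each per-task "train" file above is expected to be a JSON list in which every entry carries a
# video path and one question/answer pair, matching the fields read in `_generate_examples`
# (d["video"], d["QA"][0]["q"], d["QA"][0]["a"]). Illustrative sketch of one entry -- the field
# values below are hypothetical, not taken from the released files:
#
# [
#   {
#     "video": "path/to/some_video.mp4",
#     "QA": [
#       {"q": "<instruction / question about the video>", "a": "<answer with timestamps>"}
#     ]
#   }
# ]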

_CITATION = ""


class TimeITDataset(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

    VERSION = datasets.Version("1.0.1")

    # This dataset defines one configuration per source benchmark/task (see BUILDER_CONFIGS below).
    # A specific configuration is selected by name, e.g.:
    # data = datasets.load_dataset("path/to/this/script", "charades")
    # data = datasets.load_dataset("path/to/this/script", "youcook2")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="charades", version=VERSION, description="Charades-STA dataset for Temporal Video Grounding"
        ),
        datasets.BuilderConfig(
            name="didemo", version=VERSION, description="DiDeMo dataset for Temporal Video Grounding"
        ),
        datasets.BuilderConfig(
            name="queryd", version=VERSION, description="QuerYD dataset for Temporal Video Grounding"
        ),
        datasets.BuilderConfig(
            name="hirest_grounding", version=VERSION, description="HiREST_grounding dataset for Temporal Video Grounding"
        ),
        datasets.BuilderConfig(
            name="qvhighlights", version=VERSION, description="QVHighlights dataset for Video Highlight Detection"
        ),
        datasets.BuilderConfig(
            name="youcook2", version=VERSION, description="YouCook2 dataset for Dense Video Captioning"
        ),
        datasets.BuilderConfig(
            name="anet", version=VERSION, description="ActivityNet Captions dataset for Dense Video Captioning"
        ),
        datasets.BuilderConfig(
            name="vitt", version=VERSION, description="ViTT dataset for Dense Video Captioning"
        ),
        datasets.BuilderConfig(
            name="tvsum", version=VERSION, description="TVSum dataset for Video Summarization"
        ),
        datasets.BuilderConfig(
            name="summe", version=VERSION, description="SumMe dataset for Video Summarization"
        ),
        datasets.BuilderConfig(
            name="coin", version=VERSION, description="COIN dataset for Step Localization"
        ),
        datasets.BuilderConfig(
            name="hirest_step", version=VERSION, description="HiREST_step dataset for Step Localization"
        ),
        datasets.BuilderConfig(
            name="yttemporal", version=VERSION, description="YT-Temporal dataset for Transcribed Speech Generation"
        ),
    ]

    DEFAULT_CONFIG_NAME = "youcook2"  # It's not mandatory to have a default configuration; use one only if it makes sense.

    def _info(self):
        # unified schema
        features = datasets.Features(
            {
                "video_path": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answer": datasets.Value("string"),
            }
        )
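
        # Under this schema every yielded example is a flat dict, e.g. (illustrative values only):
        #   {"video_path": "path/to/some_video.mp4",
        #    "question": "<instruction / question about the video>",
        #    "answer": "<answer with timestamps>"}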

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,  # the unified schema defined above is shared by every configuration
            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # This method downloads/extracts the data and defines the splits depending on the configuration.
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by
        # the user is available as self.config.name.

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It accepts any nested list/dict and returns the same structure with each URL replaced by a path
        # to the local file. By default, archives are extracted and a path to the cached folder where they
        # were extracted is returned instead of the archive.
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)  # dict mapping "train"/"instruction" to local file paths
        ret = []
        # fix the random seed so that any sampling of instruction templates is deterministic
        random.seed(_SEED)

        ret.append(
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir["train"],
                    "split": "train",
                    "instruction_path": data_dir["instruction"],
                    "data_dir": data_dir,
                },
            )
        )
        return ret

    def _generate_examples(self, filepath, split, instruction_path, data_dir=None):
        # print("instruction path: ", instruction_path)
        instructions = json.load(open(instruction_path))
        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
        timeitdata = json.load(open(filepath))
        for i, d in enumerate(timeitdata):
            yield i, {
                "question": d["QA"][0]['q'],
                "answer": d["QA"][0]['a'],
                "video_path": d["video"],
            }
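

# Minimal local smoke test -- a sketch, not part of the loader itself. It assumes this script sits
# next to the ./data tree referenced in `_URLS` and that the installed `datasets` version still
# supports script-based loading.
if __name__ == "__main__":
    dataset = datasets.load_dataset(
        __file__,            # path to this loading script
        name="youcook2",     # any name listed in BUILDER_CONFIGS
        split="train",
        trust_remote_code=True,  # required by some `datasets` releases; drop if unsupported
    )
    print(dataset)
    print(dataset[0])        # {"video_path": ..., "question": ..., "answer": ...}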