# dstc10_track2_task2.py
import json
from typing import List
import datasets
from datasets import BuilderConfig
MAX_DIRECTORY_NAME_LENGTH = 255  # common file-system limit; not referenced elsewhere in this script
_CITATION = """\
@article{kim2020domain,
title={Beyond Domain APIs: Task-oriented Conversational Modeling with Unstructured Knowledge Access},
author={Seokhwan Kim and Mihail Eric and Karthik Gopalakrishnan and Behnam Hedayatnia and Yang Liu and Dilek Hakkani-Tur},
journal={arXiv preprint arXiv:2006.03533},
year={2020}
}
"""
_HOMEPAGE = "https://github.com/alexa/alexa-with-dstc10-track2-dataset"
_DESCRIPTION = """\
Knowledge-grounded task-oriented conversational modeling data for DSTC10 Track 2 Task 2,
with training data reused from DSTC9 Track 1.
"""
_BASE_URL_DSTC10 = "https://raw.githubusercontent.com/alexa/alexa-with-dstc10-track2-dataset/main/task2"
_BASE_URL_DSTC9 = "https://raw.githubusercontent.com/alexa/alexa-with-dstc9-track1-dataset/master"
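
# The training split reuses the DSTC9 Track 1 data, while the validation and
# test splits come from the DSTC10 Track 2 Task 2 release.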
_URLs = {
"train": {
"logs": f"{_BASE_URL_DSTC9}/data/train/logs.json",
"labels": f"{_BASE_URL_DSTC9}/data/train/labels.json",
"knowledge": f"{_BASE_URL_DSTC9}/data/knowledge.json",
},
"validation": {
"logs": f"{_BASE_URL_DSTC10}/data/val/logs.json",
"labels": f"{_BASE_URL_DSTC10}/data/val/labels.json",
"knowledge": f"{_BASE_URL_DSTC10}/data/knowledge.json",
},
"test": {
"logs": f"{_BASE_URL_DSTC10}/data/test/logs.json",
"labels": f"{_BASE_URL_DSTC10}/data/test/labels.json",
"knowledge": f"{_BASE_URL_DSTC10}/data/knowledge.json",
}
}
class DSTC10Track2Task2(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
        BuilderConfig(
            name="detection",
            version=VERSION,
            description="Detection of knowledge-seeking turns.",
        ),
        BuilderConfig(
            name="selection",
            version=VERSION,
            description="Selection of relevant knowledge snippets.",
        ),
        BuilderConfig(
            name="generation",
            version=VERSION,
            description="Generation of knowledge-grounded responses.",
        ),
]
DEFAULT_CONFIG_NAME = "generation"
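    # Only the "generation" config defines features in _info below; the
    # "detection" and "selection" configs are declared but not yet implemented.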
def _info(self):
if self.config.name == "generation":
features = datasets.Features(
{
"id": datasets.Value("string"),
"gem_id": datasets.Value("string"),
"turns": [
{
"speaker": datasets.Value("string"),
"text": datasets.Value("string"),
"nbest": [
{
"hyp": datasets.Value("string"),
"score": datasets.Value("float")
}
]
}
],
"knowledge":
{
"domain": datasets.Value("string"),
"entity_name": datasets.Value("string"),
"title": datasets.Value("string"),
"body": datasets.Value("string"),
},
"response": datasets.Value("string"),
"source": datasets.Value("string"),
}
)
        else:
            raise NotImplementedError(f"Unsupported config name: {self.config.name}")
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _generate_examples(self, logs, knowledge, labels, split=None):
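        """Yield (key, example) pairs for one split.

        Only dialogues whose label has `target == True` (knowledge-seeking
        turns) are kept, and only the first snippet in each label's
        "knowledge" list is used. Each labels.json entry is expected to look
        roughly like (values illustrative):
            {"target": true,
             "knowledge": [{"domain": "hotel", "entity_id": 1, "doc_id": 0}],
             "response": "...", "source": "..."}
        """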
with open(logs) as fp:
logs_data = json.load(fp)
with open(labels) as fp:
labels_data = json.load(fp)
with open(knowledge) as fp:
knowledge_data = json.load(fp)
i = 0
for log, label in zip(logs_data, labels_data):
if not label["target"]:
continue
# Ensure that nbest is in all turns
for turn in log:
if "nbest" not in turn:
turn["nbest"] = []
if "source" not in label:
source = "multiwoz"
else:
source = label["source"]
domain, entity_id, doc_id = (label["knowledge"][0].get(key) for key in ["domain", "entity_id", "doc_id"])
entity_name = knowledge_data[domain][str(entity_id)]["name"]
snippet = knowledge_data[domain][str(entity_id)]["docs"][str(doc_id)]
x = {
"id": str(i),
"gem_id": f"GEM-dstc10_track2_task2-{split}-{i}",
"turns": log,
"source": source,
"knowledge": {
"domain": domain,
"entity_name": entity_name,
"title": snippet["title"],
"body": snippet["body"]
},
"response": label["response"]
}
i += 1
yield x["id"], x
def _download_files(self, urls, data_files, dl_manager):
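        """Download the default split URLs, letting any user-supplied
        `data_files` entries override the matching per-split URLs first."""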
if data_files is not None:
for split, update_dict in data_files.items():
if isinstance(update_dict, dict):
for key, value in update_dict.items():
urls[split][key] = value
return dl_manager.download_and_extract(urls)
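
    # The script expects `data_files` of the form
    # {split: {"logs": ..., "labels": ..., "knowledge": ...}}. A hypothetical
    # override of the validation files (paths are illustrative):
    #
    #   load_dataset(
    #       "dstc10_track2_task2.py",
    #       name="generation",
    #       data_files={"validation": {"logs": "path/to/logs.json",
    #                                  "labels": "path/to/labels.json",
    #                                  "knowledge": "path/to/knowledge.json"}},
    #   )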
def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        # Copy the per-split dicts so data_files overrides do not mutate the module-level _URLs.
        urls_to_download = {split: dict(files) for split, files in _URLs.items()}
downloaded_files = self._download_files(urls_to_download, self.config.data_files, dl_manager)
for split in ["train", "validation", "test"]:
downloaded_files[split]["split"] = split
return [
datasets.SplitGenerator(name=ds_split, gen_kwargs=downloaded_files[split])
for ds_split, split in (
(datasets.Split.TRAIN, "train"),
(datasets.Split.VALIDATION, "validation"),
(datasets.Split.TEST, "test"),
)
]
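
# Example usage: a minimal sketch, assuming this script is saved locally as
# "dstc10_track2_task2.py" and the GitHub data URLs are reachable:
#
#   from datasets import load_dataset
#
#   dataset = load_dataset("dstc10_track2_task2.py", name="generation")
#   example = dataset["validation"][0]
#   print(example["knowledge"]["title"], "->", example["response"])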