import json
from typing import List

import datasets
from datasets import BuilderConfig

MAX_DIRECTORY_NAME_LENGTH = 255

_CITATION = """\
@article{kim2020domain,
  title={Beyond Domain APIs: Task-oriented Conversational Modeling with Unstructured Knowledge Access},
  author={Seokhwan Kim and Mihail Eric and Karthik Gopalakrishnan and Behnam Hedayatnia and Yang Liu and Dilek Hakkani-Tur},
  journal={arXiv preprint arXiv:2006.03533},
  year={2020}
}
"""

_HOMEPAGE = "https://github.com/alexa/alexa-with-dstc10-track2-dataset"

_DESCRIPTION = """\
Task-oriented conversational modeling with unstructured knowledge access
(DSTC10 Track 2, Task 2). Each example pairs a dialogue context with the
knowledge snippet (an FAQ entry about an entity such as a hotel or
restaurant) that grounds the next system turn; the target is the
knowledge-grounded system response. The training split reuses the written
DSTC9 Track 1 data, while validation and test come from the DSTC10 spoken
conversations, whose turns may carry ASR n-best hypotheses.
"""

_BASE_URL_DSTC10 = "https://raw.githubusercontent.com/alexa/alexa-with-dstc10-track2-dataset/main/task2"
_BASE_URL_DSTC9 = (
    "https://raw.githubusercontent.com/alexa/alexa-with-dstc9-track1-dataset/master"
)

# Training data is served from the DSTC9 Track 1 repository; validation and
# test data come from the DSTC10 Track 2 repository.
_URLs = {
    "train": {
        "logs": f"{_BASE_URL_DSTC9}/data/train/logs.json",
        "labels": f"{_BASE_URL_DSTC9}/data/train/labels.json",
        "knowledge": f"{_BASE_URL_DSTC9}/data/knowledge.json",
    },
    "validation": {
        "logs": f"{_BASE_URL_DSTC10}/data/val/logs.json",
        "labels": f"{_BASE_URL_DSTC10}/data/val/labels.json",
        "knowledge": f"{_BASE_URL_DSTC10}/data/knowledge.json",
    },
    "test": {
        "logs": f"{_BASE_URL_DSTC10}/data/test/logs.json",
        "labels": f"{_BASE_URL_DSTC10}/data/test/labels.json",
        "knowledge": f"{_BASE_URL_DSTC10}/data/knowledge.json",
    },
}
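
# A sketch of the JSON shapes this loader assumes, inferred from how
# `_generate_examples` indexes the files (the upstream files may carry
# additional fields not listed here):
#
#   logs.json      -> [[{"speaker": str, "text": str, "nbest": [...]}, ...], ...]
#   labels.json    -> [{"target": bool, "response": str,
#                       "knowledge": [{"domain": str, "entity_id": ..., "doc_id": ...}]},
#                      ...]
#   knowledge.json -> {domain: {entity_id: {"name": str,
#                                           "docs": {doc_id: {"title": str,
#                                                             "body": str}}}}}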


class DSTC10Track2Task2(datasets.GeneratorBasedBuilder):
    """Knowledge-grounded response generation from DSTC10 Track 2, Task 2."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        BuilderConfig(
            name="generation",
            version=VERSION,
            description="Knowledge-grounded response generation.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "generation"

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "gem_id": datasets.Value("string"),
                "turns": [
                    {
                        "speaker": datasets.Value("string"),
                        "text": datasets.Value("string"),
                        "nbest": [
                            {
                                "hyp": datasets.Value("string"),
                                "score": datasets.Value("float"),
                            }
                        ],
                    }
                ],
                "knowledge": {
                    "domain": datasets.Value("string"),
                    "entity_name": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "body": datasets.Value("string"),
                },
                "response": datasets.Value("string"),
                "source": datasets.Value("string"),
                "linearized_input": datasets.Value("string"),
                "target": datasets.Value("string"),
                "references": [datasets.Value("string")],
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _generate_examples(self, logs, knowledge, labels, split=None):
        with open(logs, encoding="utf-8") as fp:
            logs_data = json.load(fp)
        with open(labels, encoding="utf-8") as fp:
            labels_data = json.load(fp)
        with open(knowledge, encoding="utf-8") as fp:
            knowledge_data = json.load(fp)

        i = 0
        for log, label in zip(logs_data, labels_data):
            # Skip turns that are not knowledge-seeking: they have no
            # grounded response to generate.
            if not label["target"]:
                continue

            # The written DSTC9 training logs carry no ASR hypotheses, so
            # normalize every turn to match the fixed feature schema.
            for turn in log:
                if "nbest" not in turn:
                    turn["nbest"] = []

            # DSTC9 training labels predate the "source" field; those
            # conversations are all written, MultiWOZ-based data.
            source = label.get("source", "multiwoz")

            domain, entity_id, doc_id = (
                label["knowledge"][0].get(key)
                for key in ["domain", "entity_id", "doc_id"]
            )
            entity_name = knowledge_data[domain][str(entity_id)]["name"]
            snippet = knowledge_data[domain][str(entity_id)]["docs"][str(doc_id)]

            x = {
                "id": str(i),
                "gem_id": f"GEM-dstc10_track2_task2-{split}-{i}",
                "turns": log,
                "source": source,
                "knowledge": {
                    "domain": domain,
                    "entity_name": entity_name,
                    "title": snippet["title"],
                    "body": snippet["body"],
                },
                "response": label["response"],
                "target": label["response"],
                "references": [label["response"]],
            }
            x["linearized_input"] = self._linearize_example(x)
            i += 1
            yield x["id"], x
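
    # Shape of a yielded pair, with hypothetical values for illustration:
    #   ("0", {"id": "0", "gem_id": "GEM-dstc10_track2_task2-train-0",
    #          "turns": [...], "source": "multiwoz",
    #          "knowledge": {"domain": "hotel", "entity_name": "...",
    #                        "title": "...", "body": "..."},
    #          "response": "...", "target": "...", "references": ["..."],
    #          "linearized_input": "<U> ... || knowledge domain: hotel, ..."})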
|

    def _download_files(self, urls, data_files, dl_manager):
        # Let user-supplied `data_files` override the default remote URLs on
        # a per-split, per-file basis before downloading. Note that this
        # mutates the dict passed in (the module-level `_URLs`).
        if data_files is not None:
            for split, update_dict in data_files.items():
                if isinstance(update_dict, dict):
                    for key, value in update_dict.items():
                        urls[split][key] = value

        return dl_manager.download_and_extract(urls)

    def _linearize_example(self, d):
        # Flatten the dialogue and its grounding snippet into a single
        # string: speaker-tagged turns, then the knowledge fields.
        repr_string = ""
        for t in d["turns"]:
            repr_string += f"<{t['speaker']}> {t['text']} "
        repr_string += (
            f"|| knowledge domain: {d['knowledge']['domain']}, "
            f"entity: {d['knowledge']['entity_name']}, "
            f"title: {d['knowledge']['title']}, "
            f"information: {d['knowledge']['body']}"
        )
        return repr_string
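
    # For example (hypothetical values), a two-turn dialogue linearizes to:
    #   "<U> i need a hotel with free parking <S> how about the acorn guest
    #    house? || knowledge domain: hotel, entity: Acorn Guest House,
    #    title: Do you have free parking?, information: Yes, parking is free."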

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        urls_to_download = _URLs
        downloaded_files = self._download_files(
            urls_to_download, self.config.data_files, dl_manager
        )
        # Thread the split name through to `_generate_examples` so it can
        # mint split-specific GEM ids.
        for split in ["train", "validation", "test"]:
            downloaded_files[split]["split"] = split

        return [
            datasets.SplitGenerator(name=ds_split, gen_kwargs=downloaded_files[split])
            for ds_split, split in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]
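

# A minimal local smoke test, not part of the loader itself. It assumes a
# `datasets` version that still supports script-based loading (recent
# releases gate this behind `trust_remote_code=True` or remove it entirely).
if __name__ == "__main__":
    from datasets import load_dataset

    dsets = load_dataset(__file__, name="generation", trust_remote_code=True)
    print(dsets)
    print(dsets["train"][0]["linearized_input"])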