import json
import os

import datasets

_CITATION = """\
@inproceedings{lin-etal-2020-commongen,
    title = "{C}ommon{G}en: A Constrained Text Generation Challenge for Generative Commonsense Reasoning",
    author = "Lin, Bill Yuchen  and
      Zhou, Wangchunshu  and
      Shen, Ming  and
      Zhou, Pei  and
      Bhagavatula, Chandra  and
      Choi, Yejin  and
      Ren, Xiang",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.findings-emnlp.165",
    pages = "1823--1840",
}
"""

_DESCRIPTION = """\
CommonGen is a constrained text generation task, associated with a benchmark
dataset, to explicitly test machines for the ability of generative commonsense
reasoning. Given a set of common concepts, the task is to generate a coherent
sentence describing an everyday scenario using these concepts.
"""

_URLs = {
    "data": "https://storage.googleapis.com/huggingface-nlp/datasets/common_gen/commongen_data.zip",
    "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/common_gen.zip",
}


class CommonGen(datasets.GeneratorBasedBuilder):
    """CommonGen loader as used in the GEM benchmark (adds gem_id fields and challenge sets)."""

    VERSION = datasets.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "common_gen"

    def _info(self):
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "gem_parent_id": datasets.Value("string"),
                "concept_set_id": datasets.Value("int32"),
                "concepts": [datasets.Value("string")],
                "target": datasets.Value("string"),  # single target for train
                "references": [
                    datasets.Value("string")
                ],  # multiple references for validation
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=datasets.info.SupervisedKeysData(
                input="concepts", output="target"
            ),
            homepage="https://inklab.usc.edu/CommonGen/",
            citation=_CITATION,
        )
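
    # A record produced by this schema looks roughly like the following
    # (illustrative values, not read from the data files):
    #   {
    #       "gem_id": "common_gen-validation-0",
    #       "gem_parent_id": "common_gen-validation-0",
    #       "concept_set_id": 0,
    #       "concepts": ["ski", "mountain", "skier"],
    #       "target": "Skier skis down the mountain.",
    #       "references": ["Skier skis down the mountain."],
    #   }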

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the standard splits plus the GEM challenge sets."""
        dl_dir = dl_manager.download_and_extract(_URLs)
        challenge_sets = [
            ("challenge_train_sample", "train_common_gen_RandomSample500.json"),
            (
                "challenge_validation_sample",
                "validation_common_gen_RandomSample500.json",
            ),
            (
                "challenge_test_scramble",
                "test_common_gen_ScrambleInputStructure500.json",
            ),
        ]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(dl_dir["data"], "commongen.train.jsonl"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(dl_dir["data"], "commongen.dev.jsonl"),
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(
                        dl_dir["data"], "commongen.test_noref.jsonl"
                    ),
                    "split": "test",
                },
            ),
        ] + [
            datasets.SplitGenerator(
                name=challenge_split,
                gen_kwargs={
                    "filepath": os.path.join(
                        dl_dir["challenge_set"], "common_gen", filename
                    ),
                    "split": challenge_split,
                },
            )
            for challenge_split, filename in challenge_sets
        ]

    def _generate_examples(self, filepath, split, filepaths=None, lang=None):
        """Yields examples. `filepaths` and `lang` are unused here."""
        if split.startswith("challenge"):
            with open(filepath, encoding="utf-8") as f:
                exples = json.load(f)
            if isinstance(exples, dict):
                assert len(exples) == 1, "multiple entries found"
                exples = list(exples.values())[0]
            for id_, exple in enumerate(exples):
                if len(exple) == 0:
                    continue
                exple["gem_parent_id"] = exple["gem_id"]
                exple["gem_id"] = f"common_gen-{split}-{id_}"
                yield id_, exple
        else:
            with open(filepath, encoding="utf-8") as f:
                id_ = -1
                i = -1
                for row in f:
                    row = row.replace(", }", "}")  # Fix possible JSON format error
                    data = json.loads(row)
                    concepts = data["concept_set"].split("#")
                    if split == "train":
                        i += 1
                        # Each concept set comes with several reference scenes;
                        # emit one training example per scene.
                        for scene in data["scene"]:
                            id_ += 1
                            yield id_, {
                                "gem_id": f"common_gen-{split}-{id_}",
                                "gem_parent_id": f"common_gen-{split}-{id_}",
                                "concept_set_id": i,
                                "concepts": concepts,
                                "target": scene,
                                "references": [],
                            }
                    else:
                        id_ += 1
                        # Validation keeps all scenes as references; the test
                        # split ships without references.
                        yield id_, {
                            "gem_id": f"common_gen-{split}-{id_}",
                            "gem_parent_id": f"common_gen-{split}-{id_}",
                            "concept_set_id": id_,
                            "concepts": concepts,
                            "target": "" if split == "test" else data["scene"][0],
                            "references": [] if split == "test" else data["scene"],
                        }
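

if __name__ == "__main__":
    # Minimal usage sketch: build the dataset from this script and print one
    # example. Assumes the file is saved as `common_gen.py` and run with a
    # `datasets` version that still supports local loading scripts; the
    # download step requires network access.
    from datasets import load_dataset

    dataset = load_dataset(__file__)
    example = dataset["validation"][0]
    print(example["concepts"], "->", example["target"])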