import json
import textwrap

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = "Fake news detection dataset."

_CITATION = """\
@InProceedings{huggingface:dataset,
    title = {A great new dataset},
    author = {huggingface, Inc.},
    year = {2020}
}
"""

_FEATURES = datasets.Features(
    {
        "title": datasets.Value("string"),
        "url": datasets.Value("string"),
        "date_published": datasets.Value("string"),
        "content": datasets.Value("string"),
        "fake_news": datasets.features.ClassLabel(names=["fake", "real"]),
    }
)
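
# Note: `fake_news` is a ClassLabel, so examples store the label as an
# integer following the order of `names`: "fake" -> 0, "real" -> 1.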


class FakeNewsConfig(datasets.BuilderConfig):
    """BuilderConfig for FakeNews."""

    def __init__(self, data_url, citation, url, text_features, **kwargs):
        """
        Args:
            data_url: `string`, URL from which to download the data files.
            citation: `string`, citation for the dataset.
            url: `string`, URL of the dataset's homepage.
            text_features: `datasets.Features`, features describing the text
                fields of each example.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("0.1.0", ""), **kwargs)
        self.text_features = text_features
        self.data_url = data_url
        self.citation = citation
        self.url = url


class FakeNews(datasets.GeneratorBasedBuilder):
    """Fake news detection dataset builder."""

    VERSION = datasets.Version("0.1.0")

    DEFAULT_CONFIG_NAME = "default"

    BUILDER_CONFIGS = [
        FakeNewsConfig(
            name=DEFAULT_CONFIG_NAME,
            description=_DESCRIPTION,
            citation=textwrap.dedent(_CITATION),
            text_features=_FEATURES,
            data_url="https://gitlab.com/datasciencesociety/case_fake_news/-/blob/master/data/main_data_fake_news.csv",
            url="https://gitlab.com/datasciencesociety/case_fake_news/-/blob/master/data/main_data_fake_news.csv",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            supervised_keys=None,
            homepage=self.config.url,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the archive once; each split then scans the archive
        # members for its own file.
        archive_path = dl_manager.download(self.config.data_url)
        split_filenames = {
            datasets.Split.TRAIN: "train.jsonl",
            datasets.Split.VALIDATION: "dev.jsonl",
            datasets.Split.TEST: "test.jsonl",
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive_path),
                    "filename": split_filenames[split],
                },
            )
            for split in split_filenames
        ]

    def _generate_examples(self, files, filename):
        # Walk the archive members until we reach this split's file, then
        # yield one example per JSON line.
        idx = 0
        for path, file in files:
            if path.endswith(filename):
                for line in file:
                    example = json.loads(line.decode("utf-8"))
                    yield idx, example
                    idx += 1
                break
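

# Usage sketch (untested; assumes this script is saved as `fake_news.py`
# and that the file at `data_url` is an archive containing the expected
# train/dev/test .jsonl members):
#
#     import datasets
#
#     ds = datasets.load_dataset("path/to/fake_news.py")
#     print(ds["train"][0])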