import json
import textwrap

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = "Fake news detection dataset."

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author = {huggingface, Inc.},
year = {2020}
}
"""

_FEATURES = datasets.Features(
    {
        "title": datasets.Value("string"),
        "url": datasets.Value("string"),
        "date_published": datasets.Value("string"),
        "content": datasets.Value("string"),
        "fake_news": datasets.features.ClassLabel(names=["fake", "real"]),
    }
)
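
# Each line of a split file is expected to be one JSON object whose keys
# match _FEATURES. The record below is illustrative only (invented values,
# not taken from the actual data):
#   {"title": "Example headline", "url": "https://example.com/article",
#    "date_published": "2020-01-01", "content": "Article body ...",
#    "fake_news": "fake"}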

class FakeNewsConfig(datasets.BuilderConfig):
    """BuilderConfig for FakeNews"""

    def __init__(self, data_url, citation, url, text_features, **kwargs):
        """
        Args:
            text_features: `datasets.Features`, schema of the fields for
                each example.
            data_url: `string`, URL to download the data from.
            citation: `string`, citation for the dataset.
            url: `string`, homepage of the dataset.
            **kwargs: keyword arguments forwarded to super.
        """
        super(FakeNewsConfig, self).__init__(version=datasets.Version("0.1.0", ""), **kwargs)
        self.text_features = text_features
        self.data_url = data_url
        self.citation = citation
        self.url = url


class FakeNews(datasets.GeneratorBasedBuilder):
    """Fake news detection dataset builder."""

    VERSION = datasets.Version("0.1.0")

    DEFAULT_CONFIG_NAME = "default"

    BUILDER_CONFIGS = [
        FakeNewsConfig(
            name=DEFAULT_CONFIG_NAME,
            description=_DESCRIPTION,
            citation=textwrap.dedent(_CITATION),
            text_features=_FEATURES,
            data_url="https://gitlab.com/datasciencesociety/case_fake_news/-/blob/master/data/main_data_fake_news.csv",
            url="https://gitlab.com/datasciencesociety/case_fake_news/-/blob/master/data/main_data_fake_news.csv",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            homepage=self.config.url,
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # download() returns the local path of the downloaded data;
        # iter_archive() below expects that path to point to an archive
        # containing the per-split jsonl files.
        archive_path = dl_manager.download(self.config.data_url)
        split_filenames = {
            datasets.Split.TRAIN: "train.jsonl",
            datasets.Split.VALIDATION: "dev.jsonl",
            datasets.Split.TEST: "test.jsonl",
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive_path),
                    "filename": split_filenames[split],
                },
            )
            for split in split_filenames
        ]

    def _generate_examples(self, files, filename):
        """Yields (key, example) pairs from the split file found in the archive."""
        idx = 0
        for path, file in files:
            if path.endswith(filename):
                for line in file:
                    example = json.loads(line.decode("utf-8"))
                    yield idx, example
                    idx += 1
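
# Usage sketch (assumption: this script is saved locally, e.g. as
# fake_news.py; the path below is a placeholder, and the remote data's
# availability is not guaranteed by this file):
#
#   import datasets
#   ds = datasets.load_dataset("path/to/fake_news.py")
#   print(ds["train"][0])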