winobes committed
Commit 2ac9868 · 1 Parent(s): a4a150e

first version innit

Files changed (3)
  1. data_sources.txt +1 -0
  2. open-riksdag.py +171 -0
  3. target_terms.txt +1 -0
data_sources.txt ADDED
@@ -0,0 +1 @@
+ bet ds eun flista fpm frsrdg ip kammakt kom mot ovr prop prot rskr samtr skfr sou tlista utr utsk yttr
open-riksdag.py ADDED
@@ -0,0 +1,171 @@
+ # Dataset loading script by Bill Noble, adapted from the Kubhist 2 dataset by Simon Hengchen, https://hengchen.net
+
+ import os
+ import datasets
+ import json
+ from datasets.data_files import DataFilesDict
+ from pathlib import Path
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _DESCRIPTION = """
+ This is a dataset of text from the Riksdag, Sweden's legislative body.
+
+ The original data is available without a license under the Re-use of Public Administration Documents Act (2010:566) at https://data.riksdagen.se/data/dokument
+
+ This dataset is a derivative of a version compiled by Språkbanken Text (SBX) at the University of Gothenburg (Sweden). That version consists of XML files split by document source (motions, questions, protocols, etc.) and includes additional linguistic annotations. It is available under a CC BY 4.0 license at https://spraakbanken.gu.se/resurser/rd
+
+ The focus of this Hugging Face dataset is to organise the data for fine-grained diachronic modeling. To that end, this dataset includes two configurations:
+
+ # Configurations
+
+ ## `sentences`
+
+ This configuration provides sentences in raw text format with their original whitespace. Sentence-level tokenisation was performed by Språkbanken.
+
+ `datasets.load_dataset('ChangeIsKey/open-riksdag', 'sentences', years=YEARS, sources=SOURCES)`
+
+ - `YEARS: list(int)` - years in the range [1979, 2019] from which sentences are drawn
+ - `SOURCES: list(str)` - the Open Riksdag data is split into different data sources:
+     - `bet` _Betänkande_ ~ reports
+     - `ds` _Departementsserien_ ~ ministry publication series
+     - `eun` _EUN_ ~ documents from the EU committee
+     - `flista` _Föredragningslistor_ ~ order papers (agendas)
+     - `fpm` _Faktapromemorior_ ~ factual memoranda on EU commission proposals
+     - `frsrdg` _Framställning/redogörelse_ ~ petitions and reports from bodies appointed by the Riksdag
+     ...
+
+ ### Data fields
+
+ - `sentence` - the raw sentence text
+ - `date` - the document date
+ - `doc_type` - the data source the sentence was drawn from
+ - `doc_id` - identifier of the source document
+ ...
+
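+ For example, an illustrative call (it assumes the data files for these years and sources are present):
+
+ `datasets.load_dataset('ChangeIsKey/open-riksdag', 'sentences', years=[2018, 2019], sources=['mot', 'prot'])`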
+
+ ## `target-103`
+
+ In addition to the fields above, each example includes:
+
+ - `lemma` - the target lemma
+ - `start` - start offset of the target within the sentence
+ - `end` - end offset of the target within the sentence
+ - `pos` - part-of-speech tag of the target
+
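+ An illustrative call (assuming the corresponding data files are present; `TARGETS` may be given as target lemmas or as their integer indices):
+
+ `datasets.load_dataset('ChangeIsKey/open-riksdag', 'target-103', years=YEARS, sources=SOURCES, targets=TARGETS)`
+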
+ In a nutshell, this version offers:
+
+ - all sentences containing one or more of 103 target words, which were chosen by TF-IDF
+ - per-month subsets (with all document types combined)
+ - one line per sentence (sentences shorter than 4 words were discarded)
+ - data includes: the date, document type and id, the target lemma, and the sentence text
+
+ License is CC BY 4.0 with attribution.
+ """
+
+ _CONFIGS = ['sentences', 'target-103']
+ _ALL_YEARS = list(range(1979, 2020))
+ with open("data_sources.txt") as f:
+     _ALL_SOURCES = f.read().strip().split(' ')
+ with open("target_terms.txt") as f:
+     _ALL_TARGET_TERMS = f.read().strip().split(' ')
+ _TERM_TO_ID = {t: i for i, t in enumerate(_ALL_TARGET_TERMS)}
+
+ class OpenRiksdagConfig(datasets.BuilderConfig):
+     """BuilderConfig for Open Riksdag."""
+
+     def __init__(self, name='sentences', years=_ALL_YEARS, sources=_ALL_SOURCES, targets=_ALL_TARGET_TERMS, **kwargs):
+         """Constructs an open-riksdag dataset.
+         Args:
+             years: list of integer years between 1979 and 2019
+             sources: list of data source codes (a subset of the codes in data_sources.txt)
+             targets: list of target terms, or their integer indices (used by the `target-103` configuration)
+             **kwargs: keyword arguments forwarded to super.
+         """
+
+         if not all(year in _ALL_YEARS for year in years):
+             raise ValueError("`years` should contain integers between 1979 and 2019")
+         self.years = list(set(years))
+
+         if not all(source in _ALL_SOURCES for source in sources):
+             raise ValueError(f"`sources` should be a subset of {_ALL_SOURCES}")
+         self.sources = list(set(sources))
+
+         try:
+             if targets and isinstance(targets[0], str):
+                 targets = [_TERM_TO_ID[t] for t in targets]
+             assert all(t in _TERM_TO_ID.values() for t in targets)
+             targets = list(set(targets))
+         except (KeyError, AssertionError) as e:
+             print(e)
+             raise ValueError(f"`targets` should be a subset of {_ALL_TARGET_TERMS} or integer indices thereof")
+         self.targets = list(set(targets))
+
+         super().__init__(
+             name=name,
+             version=datasets.Version("1.1.0", ""),
+             data_dir=kwargs.pop('data_dir', "./data"),
+             **kwargs
+         )
+
+ class OpenRiksdag(datasets.GeneratorBasedBuilder):
+
+     BUILDER_CONFIG_CLASS = OpenRiksdagConfig
+
+     BUILDER_CONFIGS = [
+         OpenRiksdagConfig(
+             name='sentences',
+             description="Raw sentences from Riksdagens öppna data",
+         ),
+         OpenRiksdagConfig(
+             name='target-103',
+             description="Sentences from Riksdagens öppna data with a selection of 103 target words"
+         )
+     ]
+
+     def _info(self):
+
+         features = {
+             "sentence": datasets.Value("string"),
+             "doc_type": datasets.Value("string"),
+             "doc_id": datasets.Value("string"),
+             "date": datasets.Value("timestamp[s]")
+         }
+
+         if self.config.name == 'target-103':
+             target_features = {
+                 "lemma": datasets.Value("string"),
+                 "start": datasets.Value("int32"),
+                 "end": datasets.Value("int32"),
+                 "pos": datasets.Value("string")
+             }
+             features = {**features, **target_features}
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(features),
+             supervised_keys=None,
+             homepage="https://github.com/ChangeIsKey",
+         )
+
+     def _split_generators(self, dl_manager):
+
+         data_dir = Path(self.config.data_dir) / self.config.name
+         if self.config.name == 'sentences':
+             possible_files = [data_dir / f"{y}_{s}.jsonl.bz2" for y in self.config.years for s in self.config.sources]
+         elif self.config.name == 'target-103':
+             possible_files = [data_dir / f"{t:03d}/{y}_target{t:03d}_{s}.jsonl.bz2" for y in self.config.years
+                               for t in self.config.targets for s in self.config.sources]
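+         # Expected file layout under data_dir (illustrative, inferred from the patterns above):
+         #   sentences/<year>_<source>.jsonl.bz2, e.g. sentences/2019_mot.jsonl.bz2
+         #   target-103/<nnn>/<year>_target<nnn>_<source>.jsonl.bz2, where <nnn> is the zero-padded target index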
+         # search recursively so the per-target subdirectories used by target-103 are also matched
+         existing_files = list(data_dir.glob("**/*.jsonl.bz2"))
+         data_files = [f for f in possible_files if f in existing_files]
+         data_files = DataFilesDict({"train": data_files})
+         extracted_paths = dl_manager.download_and_extract(data_files)
+         return [datasets.SplitGenerator(
+             name=datasets.Split.TRAIN,
+             gen_kwargs={"filepaths": extracted_paths['train']}
+             )
+         ]
+
+     def _generate_examples(self, filepaths):
+         """Yields examples."""
+         key = 0
+         for filepath in filepaths:
+             with open(filepath, encoding='utf-8') as f:
+                 for line in f:
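+                     # Each JSON line is expected to carry the fields declared in _info():
+                     # sentence, doc_type, doc_id, date (plus lemma, start, end, pos for target-103).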
+                     item = json.loads(line)
+                     yield key, item
+                     key += 1
target_terms.txt ADDED
@@ -0,0 +1 @@
+ % april arbetsförmedling arbetsgivare arbetslöshet arbetsmarknad arbetsmarknadsminister augusti barn betala bil bolag bostad brott december drabba ekonomisk elev februari finansminister flicka flygplats forskning fru företag försvarsmakt försvarsminister försäkringskassa förälder gammal grupp herr hälsa högskola internationell isolering januari jobb juli juni justitieminister kommun kommunal kostnad krona kultur kunskap kvinna lag lagstiftning landsbygd landsting lokal län lärare m maj man mars migrationsminister miljard miljon miljö miljöminister myndighet mänsklig mål nationell ni november näringsminister offentlig oktober organisation ovanstående person polis procent rapport regel region rättighet september sjukvård skatt socialminister stat statlig statsminister statsråd student stöd trafikverk ung ungdom utbildning utbildningsminister utredning utrikesminister verksamhet våld vård återtagen