🪿 clean up

- README.md +24 -7
- emotions_dataset.py +0 -188
- many_emotions.py +6 -1
- requirements.txt +2 -0
README.md
CHANGED
@@ -1,11 +1,28 @@
 ---
-license:
-
-- dair-ai/emotion
+license:
+apache-2.0
 task_categories:
-- text-classification
-
--
+- text-classification
+multilinguality:
+- multilingual
+source_datasets:
+- dar-ai/emotion
 size_categories:
-- 100K<n<1M
+- 100K<n<1M
+dataset_info:
+- config_name: all
+  splits:
+  - name: go_emotions
+  - name: emotion
+  - name: daily_dialog
+- config_name: multilingual
+  splits:
+  - name: en
+  - name: it
+  - name: fr
+  - name: de
+  - name: es
+config_names:
+- all
+- multilingual
 ---
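For reference, the configs and splits declared in the updated card metadata would be loaded roughly as below with 🤗 Datasets. This is only a sketch: it assumes the `ma2za/many_emotions` repo id that appears in the `__main__` block added to many_emotions.py further down, and that the builder actually exposes the configs and splits listed under `dataset_info`.

```python
from datasets import load_dataset

# "all" config: one split per source dataset (go_emotions, emotion, daily_dialog)
emotion_split = load_dataset("ma2za/many_emotions", name="all", split="emotion")

# "multilingual" config: one split per language (en, it, fr, de, es)
italian_split = load_dataset("ma2za/many_emotions", name="multilingual", split="it")

print(emotion_split[0])
print(italian_split[0])
```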
emotions_dataset.py
DELETED
@@ -1,188 +0,0 @@
-import json
-import os
-import zipfile
-from typing import List
-
-import datasets
-import pandas as pd
-from datasets import ClassLabel, Value
-
-_URLS = {
-    "go_emotions": {
-        "urls": [
-            "https://storage.googleapis.com/gresearch/goemotions/data/full_dataset/goemotions_1.csv",
-            "https://storage.googleapis.com/gresearch/goemotions/data/full_dataset/goemotions_2.csv",
-            "https://storage.googleapis.com/gresearch/goemotions/data/full_dataset/goemotions_3.csv",
-        ],
-        "license": "apache license 2.0"
-    },
-    "daily_dialog": {
-        "urls": ["http://yanran.li/files/ijcnlp_dailydialog.zip"],
-        "license": "CC BY-NC-SA 4.0"
-    },
-    "emotion": {
-        "data": ["data/data.jsonl.gz"],
-        "license": "educational/research"
-    }
-}
-
-_CLASS_NAMES = [
-    "no emotion",
-    "happiness",
-    "admiration",
-    "amusement",
-    "anger",
-    "annoyance",
-    "approval",
-    "caring",
-    "confusion",
-    "curiosity",
-    "desire",
-    "disappointment",
-    "disapproval",
-    "disgust",
-    "embarrassment",
-    "excitement",
-    "fear",
-    "gratitude",
-    "grief",
-    "joy",
-    "love",
-    "nervousness",
-    "optimism",
-    "pride",
-    "realization",
-    "relief",
-    "remorse",
-    "sadness",
-    "surprise",
-    "neutral",
-]
-
-
-class EmotionsDatasetConfig(datasets.BuilderConfig):
-
-    def __init__(self, features, label_classes, **kwargs):
-        super().__init__(**kwargs)
-        self.features = features
-        self.label_classes = label_classes
-
-
-class EmotionsDataset(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIGS = [
-        EmotionsDatasetConfig(
-            name="all",
-            label_classes=_CLASS_NAMES,
-            features=["text", "label", "dataset", "license"]
-        ),
-        EmotionsDatasetConfig(
-            name="go_emotions",
-            label_classes=_CLASS_NAMES,
-            features=["text", "label", "dataset", "license"]
-        ),
-        EmotionsDatasetConfig(
-            name="daily_dialog",
-            label_classes=_CLASS_NAMES,
-            features=["text", "label", "dataset", "license"]
-        ),
-        EmotionsDatasetConfig(
-            name="emotion",
-            label_classes=_CLASS_NAMES,
-            features=["text", "label", "dataset", "license"]
-        )
-    ]
-
-    DEFAULT_CONFIG_NAME = "all"
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    'text': Value(dtype='string', id=None),
-                    'label': ClassLabel(names=_CLASS_NAMES, id=None),
-                    'dataset': Value(dtype='string', id=None),
-                    'license': Value(dtype='string', id=None)
-                }
-            )
-        )
-
-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        splits = []
-        if self.config.name == "all":
-            for k, v in _URLS.items():
-                downloaded_files = dl_manager.download_and_extract(v.get("urls", v.get("data")))
-                splits.append(datasets.SplitGenerator(name=k,
-                                                      gen_kwargs={"filepaths": downloaded_files,
-                                                                  "dataset": k,
-                                                                  "license": v.get("license")}))
-        else:
-            k = self.config.name
-            v = _URLS.get(k)
-            downloaded_files = dl_manager.download_and_extract(v.get("urls", v.get("data")))
-            splits.append(datasets.SplitGenerator(name=k,
-                                                  gen_kwargs={"filepaths": downloaded_files,
-                                                              "dataset": k,
-                                                              "license": v.get("license")}))
-        return splits
-
-    def process_daily_dialog(self, filepaths, dataset):
-        # TODO move outside
-        emo_mapping = {0: "no emotion", 1: "anger", 2: "disgust",
-                       3: "fear", 4: "happiness", 5: "sadness", 6: "surprise"}
-        for i, filepath in enumerate(filepaths):
-            if os.path.isdir(filepath):
-                emotions = open(os.path.join(filepath, "ijcnlp_dailydialog/dialogues_emotion.txt"), "r").read()
-                text = open(os.path.join(filepath, "ijcnlp_dailydialog/dialogues_text.txt"), "r").read()
-            else:
-                # TODO check if this can be removed
-                archive = zipfile.ZipFile(filepath, 'r')
-                emotions = archive.open("ijcnlp_dailydialog/dialogues_emotion.txt", "r").read().decode()
-                text = archive.open("ijcnlp_dailydialog/dialogues_text.txt", "r").read().decode()
-            emotions = emotions.split("\n")
-            text = text.split("\n")
-
-            for idx_out, (e, t) in enumerate(zip(emotions, text)):
-                if len(t.strip()) > 0:
-                    cast_emotions = [int(j) for j in e.strip().split(" ")]
-                    cast_dialog = [d.strip() for d in t.split("__eou__") if len(d)]
-                    for idx_in, (ce, ct) in enumerate(zip(cast_emotions, cast_dialog)):
-                        uid = f"daily_dialog_{i}_{idx_out}_{idx_in}"
-                        yield uid, {"text": ct,
-                                    "id": uid,
-                                    "dataset": dataset,
-                                    "license": license,
-                                    "label": emo_mapping[ce]}
-
-    def _generate_examples(self, filepaths, dataset, license):
-        if dataset == "go_emotions":
-            for i, filepath in enumerate(filepaths):
-                df = pd.read_csv(filepath)
-                current_classes = list(set(df.columns).intersection(set(_CLASS_NAMES)))
-                df = df[["text"] + current_classes]
-                df = df[df[current_classes].sum(axis=1) == 1].reset_index(drop=True)
-                for row_idx, row in df.iterrows():
-                    uid = f"go_emotions_{i}_{row_idx}"
-                    yield uid, {"text": row["text"],
-                                "id": uid,
-                                "dataset": dataset,
-                                "license": license,
-                                "label": row[current_classes][row == 1].index.item()}
-        elif dataset == "daily_dialog":
-            for d in self.process_daily_dialog(filepaths, dataset):
-                yield d
-        elif dataset == "emotion":
-            emo_mapping = {0: "sadness", 1: "joy", 2: "love",
-                           3: "anger", 4: "fear", 5: "surprise"}
-            for i, filepath in enumerate(filepaths):
-                with open(filepath, encoding="utf-8") as f:
-                    for idx, line in enumerate(f):
-                        uid = f"{dataset}_{idx}"
-                        example = json.loads(line)
-                        example.update({
-                            "id": uid,
-                            "dataset": dataset,
-                            "license": license,
-                            "label": emo_mapping[example["label"]]
-                        })
-                        yield uid, example
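The go_emotions branch of the deleted builder keeps only rows where exactly one label column is set, then recovers that column's name with `row[current_classes][row == 1].index.item()`. A minimal standalone illustration of that pandas idiom, on made-up data:

```python
import pandas as pd

# Toy frame shaped like a GoEmotions row: a text column plus one-hot label columns.
df = pd.DataFrame({"text": ["great job!"], "joy": [1], "anger": [0], "fear": [0]})
current_classes = ["joy", "anger", "fear"]

# Keep rows where exactly one label column equals 1 ...
df = df[df[current_classes].sum(axis=1) == 1].reset_index(drop=True)

# ... then, for a given row, select the label columns, mask to the one that is 1,
# and take the surviving column name as the string label.
row = df.iloc[0]
label = row[current_classes][row == 1].index.item()
print(label)  # -> "joy"
```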
many_emotions.py
CHANGED
@@ -6,7 +6,7 @@ from typing import List
 
 import datasets
 import pandas as pd
-from datasets import ClassLabel, Value
+from datasets import ClassLabel, Value, load_dataset
 
 _URLS = {
     "go_emotions": {
@@ -207,3 +207,8 @@ class EmotionsDataset(datasets.GeneratorBasedBuilder):
                             "label": _CLASS_NAMES[example["label"]]
                         })
                         yield example["id"], example
+
+
+if __name__ == "__main__":
+    dataset = load_dataset("ma2za/many_emotions", name="all", split="emotion")
+    print()
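The added `__main__` block is a quick smoke test that loads the `emotion` split and then prints an empty line. A slightly more informative variant (hypothetical, not part of this commit, and assuming the `label` feature is a `ClassLabel` as it was in the deleted emotions_dataset.py) would report what was loaded:

```python
from datasets import load_dataset  # already imported at the top of many_emotions.py

if __name__ == "__main__":
    # Load the "emotion" split of the "all" config from the Hub and report on it.
    dataset = load_dataset("ma2za/many_emotions", name="all", split="emotion")
    print(len(dataset))                      # number of rows
    print(dataset[0])                        # first example
    print(dataset.features["label"].names)   # ClassLabel names
```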
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+datasets
+pandas