Upload 2 files
Browse files
- data/data.jsonl.gz +3 -0
- emotions_dataset.py +48 -32
data/data.jsonl.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8944e6b35cb42294769ac30cf17bd006231545b2eeecfa59324246e192564d1f
+size 15388281
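data/data.jsonl.gz is tracked with Git LFS, so the commit only records the pointer above (object hash and size, roughly 15 MB compressed); the payload itself is a gzip-compressed JSON Lines file. As a minimal sketch of how that payload can be inspected outside the loader, assuming one JSON object per line carrying the fields the builder declares ("text", "label", "dataset", "license"):

import gzip
import json

# Peek at the first record of the gzip-compressed JSON Lines file.
# The field names mirror the builder's declared features; they are an
# assumption about the payload, not something shown in this diff.
with gzip.open("data/data.jsonl.gz", "rt", encoding="utf-8") as f:
    first = json.loads(next(f))

print(first.get("text"), first.get("label"))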
emotions_dataset.py
CHANGED
@@ -1,10 +1,11 @@
+import json
 import os
 import zipfile
 from typing import List
 
 import datasets
 import pandas as pd
-from datasets import ClassLabel, Value
+from datasets import ClassLabel, Value
 
 _URLS = {
     "go_emotions": {
@@ -18,6 +19,10 @@ _URLS = {
     "daily_dialog": {
         "urls": ["http://yanran.li/files/ijcnlp_dailydialog.zip"],
         "license": "CC BY-NC-SA 4.0"
+    },
+    "emotion": {
+        "data": ["data/data.jsonl.gz"],
+        "license": "educational/research"
     }
 }
 
@@ -79,6 +84,11 @@ class EmotionsDataset(datasets.GeneratorBasedBuilder):
             name="daily_dialog",
             label_classes=_CLASS_NAMES,
             features=["text", "label", "dataset", "license"]
+        ),
+        EmotionsDatasetConfig(
+            name="emotion",
+            label_classes=_CLASS_NAMES,
+            features=["text", "label", "dataset", "license"]
         )
     ]
 
@@ -101,7 +111,7 @@ class EmotionsDataset(datasets.GeneratorBasedBuilder):
         splits = []
         if self.config.name == "all":
             for k, v in _URLS.items():
-                downloaded_files = dl_manager.download_and_extract(v.get("urls"))
+                downloaded_files = dl_manager.download_and_extract(v.get("urls", v.get("data")))
                 splits.append(datasets.SplitGenerator(name=k,
                                                       gen_kwargs={"filepaths": downloaded_files,
                                                                   "dataset": k,
@@ -109,13 +119,41 @@ class EmotionsDataset(datasets.GeneratorBasedBuilder):
         else:
             k = self.config.name
             v = _URLS.get(k)
-            downloaded_files = dl_manager.download_and_extract(v.get("urls"))
+            downloaded_files = dl_manager.download_and_extract(v.get("urls", v.get("data")))
             splits.append(datasets.SplitGenerator(name=k,
                                                   gen_kwargs={"filepaths": downloaded_files,
                                                               "dataset": k,
                                                               "license": v.get("license")}))
         return splits
 
+    def process_daily_dialog(self, filepaths, dataset):
+        # TODO move outside
+        emo_mapping = {0: "no emotion", 1: "anger", 2: "disgust",
+                       3: "fear", 4: "happiness", 5: "sadness", 6: "surprise"}
+        for i, filepath in enumerate(filepaths):
+            if os.path.isdir(filepath):
+                emotions = open(os.path.join(filepath, "ijcnlp_dailydialog/dialogues_emotion.txt"), "r").read()
+                text = open(os.path.join(filepath, "ijcnlp_dailydialog/dialogues_text.txt"), "r").read()
+            else:
+                # TODO check if this can be removed
+                archive = zipfile.ZipFile(filepath, 'r')
+                emotions = archive.open("ijcnlp_dailydialog/dialogues_emotion.txt", "r").read().decode()
+                text = archive.open("ijcnlp_dailydialog/dialogues_text.txt", "r").read().decode()
+            emotions = emotions.split("\n")
+            text = text.split("\n")
+
+            for idx_out, (e, t) in enumerate(zip(emotions, text)):
+                if len(t.strip()) > 0:
+                    cast_emotions = [int(j) for j in e.strip().split(" ")]
+                    cast_dialog = [d.strip() for d in t.split("__eou__") if len(d)]
+                    for idx_in, (ce, ct) in enumerate(zip(cast_emotions, cast_dialog)):
+                        uid = f"daily_dialog_{i}_{idx_out}_{idx_in}"
+                        yield uid, {"text": ct,
+                                    "id": uid,
+                                    "dataset": dataset,
+                                    "license": license,
+                                    "label": emo_mapping[ce]}
+
     def _generate_examples(self, filepaths, dataset, license):
         if dataset == "go_emotions":
             for i, filepath in enumerate(filepaths):
@@ -131,33 +169,11 @@ class EmotionsDataset(datasets.GeneratorBasedBuilder):
                             "license": license,
                             "label": row[current_classes][row == 1].index.item()}
         elif dataset == "daily_dialog":
-            # TODO move outside
-            emo_mapping = {0: "no emotion", 1: "anger", 2: "disgust",
-                           3: "fear", 4: "happiness", 5: "sadness", 6: "surprise"}
+            yield self.process_daily_dialog(filepaths, dataset)
+        elif dataset == "emotion":
             for i, filepath in enumerate(filepaths):
-                if os.path.isdir(filepath):
-                    emotions = open(os.path.join(filepath, "ijcnlp_dailydialog/dialogues_emotion.txt"), "r").read()
-                    text = open(os.path.join(filepath, "ijcnlp_dailydialog/dialogues_text.txt"), "r").read()
-                else:
-                    # TODO check if this can be removed
-                    archive = zipfile.ZipFile(filepath, 'r')
-                    emotions = archive.open("ijcnlp_dailydialog/dialogues_emotion.txt", "r").read().decode()
-                    text = archive.open("ijcnlp_dailydialog/dialogues_text.txt", "r").read().decode()
-                emotions = emotions.split("\n")
-                text = text.split("\n")
-
-                for idx_out, (e, t) in enumerate(zip(emotions, text)):
-                    if len(t.strip()) > 0:
-                        cast_emotions = [int(j) for j in e.strip().split(" ")]
-                        cast_dialog = [d.strip() for d in t.split("__eou__") if len(d)]
-                        for idx_in, (ce, ct) in enumerate(zip(cast_emotions, cast_dialog)):
-                            uid = f"daily_dialog_{i}_{idx_out}_{idx_in}"
-                            yield uid, {"text": ct,
-                                        "id": uid,
-                                        "dataset": dataset,
-                                        "license": license,
-                                        "label": emo_mapping[ce]}
-
-
-            temp = load_dataset("ma2za/emotions_dataset", name="daily_dialog")
-            print()
+                with open(filepath, encoding="utf-8") as f:
+                    for idx, line in enumerate(f):
+                        uid = f"{dataset}_{idx}"
+                        example = json.loads(line)
+                        yield uid, example