Dataset size: 10K<n<100K · License: Apache License 2.0
Update _URLS: add train.tsv and valid.tsv
mtg_jamendo.py  CHANGED  (+10, -9)
@@ -12,14 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import csv
-import json
-import os
 from pathlib import Path
 
 import pandas as pd
-
-import genres
 import datasets
 
 # Find for instance the citation on arxiv or on the dataset repo/website
@@ -48,10 +43,16 @@ _LICENSE = "Apache License 2.0"
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 _URLS = {
     "train": {
-
+        "df": "https://huggingface.co/datasets/rkstgr/mtg-jamendo/resolve/main/data/train.tsv",
+        **{
+            i: f"https://huggingface.co/datasets/rkstgr/mtg-jamendo/resolve/main/data/train/{i}.tar" for i in range(200)
+        }
     },
     "val": {
-
+        "df": "https://huggingface.co/datasets/rkstgr/mtg-jamendo/resolve/main/data/valid.tsv",
+        **{
+            i: f"https://huggingface.co/datasets/rkstgr/mtg-jamendo/resolve/main/data/val/{i}.tar" for i in range(22)
+        }
     }
 }
 
@@ -97,10 +98,10 @@ class MtgJamendo(datasets.GeneratorBasedBuilder):
             return {x["id"]: x for x in xs}
 
         train_tracks = to_dict(
-            pd.read_csv(
+            pd.read_csv(local_extracted_archive["train"]["df"], sep="\t").to_dict("records")
         )
         valid_tracks = to_dict(
-            pd.read_csv(
+            pd.read_csv(local_extracted_archive["val"]["df"], sep="\t").to_dict("records")
        )
 
        train_splits = [
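For context, here is a minimal sketch of how the updated _URLS would typically be consumed inside _split_generators. Only the two pd.read_csv lines are part of this commit; the dl_manager call, the shape of local_extracted_archive, and the surrounding method body are assumptions based on the standard datasets.GeneratorBasedBuilder pattern and the variable names visible in the diff.

    # Sketch, not part of the commit: assumed flow of _URLS through the download manager.
    def _split_generators(self, dl_manager):
        # download_and_extract accepts a nested structure of URLs and returns the same
        # structure with local paths, so (assuming this call) local_extracted_archive["train"]["df"]
        # points at the downloaded train.tsv and local_extracted_archive["train"][i] at the
        # extracted contents of train/{i}.tar.
        local_extracted_archive = dl_manager.download_and_extract(_URLS)

        def to_dict(xs):
            # index the per-track metadata records by their "id" column
            return {x["id"]: x for x in xs}

        # The metadata files are TSVs, hence sep="\t"; to_dict("records") turns the
        # DataFrame into a list of one dict per track.
        train_tracks = to_dict(
            pd.read_csv(local_extracted_archive["train"]["df"], sep="\t").to_dict("records")
        )
        valid_tracks = to_dict(
            pd.read_csv(local_extracted_archive["val"]["df"], sep="\t").to_dict("records")
        )
        # ... the script then builds its split generators (train_splits = [...], see the
        # last hunk above) from these track dicts and the extracted tar shards.

Since the new URLs all resolve against the same repo, users would presumably keep loading the dataset the usual way, e.g. datasets.load_dataset("rkstgr/mtg-jamendo"); the change only affects where the loading script fetches its metadata TSVs from.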