script

SlicedMidiDataset.py (ADDED, +181 -0)
@@ -0,0 +1,181 @@
import json
from typing import List, Optional

import datasets
import numpy as np
import fortepyan as ff
from tqdm import tqdm
from datasets import (
    Split,
    Dataset,
    DatasetInfo,
    BuilderConfig,
    GeneratorBasedBuilder,
    load_dataset,
    concatenate_datasets,
)

_DESC = """
Dataset of MIDI pieces sliced into records with a fixed number of notes.
"""


class TokenizedMidiDatasetConfig(BuilderConfig):
    def __init__(
        self,
        base_dataset_name: str = "roszcz/maestro-v1-sustain",
        extra_datasets: Optional[list[str]] = None,
        sequence_length: int = 64,
        sequence_step: int = 42,
        **kwargs,
    ):
        # Version history:
        # 0.0.1: Initial version.
        super().__init__(version=datasets.Version("0.0.2"), **kwargs)

        self.base_dataset_name: str = base_dataset_name
        # None instead of a mutable default argument; fall back to an empty list
        self.extra_datasets: list[str] = extra_datasets or []
        self.sequence_length: int = sequence_length
        self.sequence_step: int = sequence_step


class TokenizedMidiDataset(GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = TokenizedMidiDatasetConfig
    BUILDER_CONFIGS = [
        TokenizedMidiDatasetConfig(
            base_dataset_name="roszcz/maestro-sustain-v2",
            extra_datasets=["roszcz/giant-midi-sustain-v2"],
            sequence_length=32,
            sequence_step=16,
            name="giant-short",
        ),
        TokenizedMidiDatasetConfig(
            base_dataset_name="roszcz/maestro-sustain-v2",
            extra_datasets=[],
            sequence_length=32,
            sequence_step=16,
            name="basic-short",
        ),
        TokenizedMidiDatasetConfig(
            base_dataset_name="roszcz/maestro-sustain-v2",
            extra_datasets=["roszcz/giant-midi-sustain-v2"],
            sequence_length=64,
            sequence_step=16,
            name="giant-mid",
        ),
        TokenizedMidiDatasetConfig(
            base_dataset_name="roszcz/maestro-sustain-v2",
            extra_datasets=[],
            sequence_length=64,
            sequence_step=16,
            name="basic-mid",
        ),
        TokenizedMidiDatasetConfig(
            base_dataset_name="roszcz/maestro-sustain-v2",
            extra_datasets=["roszcz/giant-midi-sustain-v2"],
            sequence_length=128,
            sequence_step=16,
            name="giant-long",
        ),
        TokenizedMidiDatasetConfig(
            base_dataset_name="roszcz/maestro-sustain-v2",
            extra_datasets=[],
            sequence_length=128,
            sequence_step=16,
            name="basic-long",
        ),
    ]
    DEFAULT_CONFIG_NAME = "basic-mid"

    def _info(self) -> DatasetInfo:
        return DatasetInfo(description=_DESC)

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
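        # How this method feeds multiprocessing (explanatory note, values
        # illustrative): the train data is sharded into 12 pieces below, and
        # load_dataset(num_proc=8) distributes those shards across workers,
        # each of which runs _generate_examples on its own sublist of shards.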
        base = load_dataset(self.config.base_dataset_name)

        # Train mixes the base dataset with any extra datasets; the test and
        # validation splits come from the base dataset only.
        train_datasets = [load_dataset(path, split="train") for path in self.config.extra_datasets]
        train_datasets.append(base["train"])

        dataset = concatenate_datasets(train_datasets)

        # This will enable multiprocessing in load_dataset()
        n_shards = 12
        train_shards = [dataset.shard(n_shards, it) for it in range(n_shards)]

        return [
            datasets.SplitGenerator(name=Split.TRAIN, gen_kwargs={"dataset_shards": train_shards}),
            datasets.SplitGenerator(name=Split.TEST, gen_kwargs={"dataset_shards": [base["test"]]}),
            datasets.SplitGenerator(name=Split.VALIDATION, gen_kwargs={"dataset_shards": [base["validation"]]}),
        ]

    def piece_to_records(self, piece: ff.MidiPiece) -> list[dict]:
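        # Worked example of the slicing arithmetic below (illustrative numbers,
        # not from the source): with sequence_length=64 and sequence_step=16,
        # a piece of 100 notes gives n_samples = 1 + (100 - 64) // 16 = 3
        # windows, with start points drawn without replacement from range(36).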
        # better practice than setting a global random state
        rs = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(4)))

        n_samples = 1 + (piece.size - self.config.sequence_length) // self.config.sequence_step
        # uniform distribution, piece should be covered almost entirely
        piece_idxs = range(piece.size - self.config.sequence_length)
        # rs.choice(replace=False) requires n_samples <= len(piece_idxs);
        # cap it so very small sequence_step values cannot overshoot
        n_samples = min(n_samples, len(piece_idxs))
        start_points = rs.choice(piece_idxs, size=n_samples, replace=False)

        chopped_sequences = []
        for start in start_points:
            start = int(start)
            finish = start + self.config.sequence_length
            part = piece[start:finish]

            sequence = {
                "notes": {
                    "pitch": part.df.pitch.astype("int16").values,
                    "start": part.df.start.values,
                    "end": part.df.end.values,
                    "duration": part.df.duration.values,
                    "velocity": part.df.velocity.values,
                },
                "source": json.dumps(part.source),
            }
            chopped_sequences.append(sequence)

        return chopped_sequences

    def filter_pauses(self, piece: ff.MidiPiece) -> list[ff.MidiPiece]:
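        # Illustrative example (numbers assumed, not from the source): with a
        # 4 s threshold, a note ending at t=10.0 followed by one starting at
        # t=15.0 marks a break at that row, so the piece is split there and
        # the 5 s pause never ends up inside a training window.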
        next_start = piece.df.start.shift(-1)
        silent_distance = next_start - piece.df.end

        # Seconds
        distance_threshold = 4

        ids = silent_distance > distance_threshold
        break_idxs = np.where(ids)[0]

        pieces = []
        start = 0
        for break_idx in break_idxs:
            finish = break_idx.item() + 1
            piece_part = piece[start:finish]

            # Advance past the pause even when the segment is too short to
            # keep; otherwise the next segment would span across the pause.
            start = finish

            if piece_part.size <= self.config.sequence_length:
                continue

            pieces.append(piece_part)

        # Keep the tail after the last pause; without this, a piece with no
        # pauses at all would yield no segments.
        tail = piece[start: piece.size]
        if tail.size > self.config.sequence_length:
            pieces.append(tail)

        return pieces

    def _generate_examples(self, dataset_shards: list[Dataset]):
        # ~10 min for giant midi
        for shard_id, dataset in enumerate(dataset_shards):
            for it, record in tqdm(enumerate(dataset), total=len(dataset)):
                piece = ff.MidiPiece.from_huggingface(dict(record))

                pieces = self.filter_pauses(piece)
                chopped_sequences = sum([self.piece_to_records(piece) for piece in pieces], [])

                for jt, sequence in enumerate(chopped_sequences):
                    # Keys must be unique within a generation job. Because `it`
                    # restarts at 0 for every shard, an `it`-based key collides
                    # across shards when load_dataset(num_proc=8) assigns
                    # several shards to one worker, so the shard index is
                    # included in the key.
                    key = f"{shard_id}_{it}_{jt}"
                    yield key, sequence
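

if __name__ == "__main__":
    # Usage sketch, not part of the loader itself; the config name and
    # num_proc value are illustrative. Newer versions of `datasets` may also
    # require trust_remote_code=True when loading a script like this one.
    dataset = load_dataset(
        "SlicedMidiDataset.py",
        name="basic-mid",
        num_proc=8,
    )
    print(dataset)
    # Each record holds fixed-length note arrays plus the source metadata
    print(dataset["train"][0]["notes"]["pitch"])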