Richhiey committed on
Commit b3b3061 · verified
1 Parent(s): 1bfe7ad

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. config/__pycache__/config.cpython-310.pyc +0 -0
  2. config/__pycache__/vocabulary.cpython-310.pyc +0 -0
  3. config/config.py +272 -0
  4. config/data_presets.py +811 -0
  5. config/vocabulary.py +384 -0
  6. content/model_output/test.mid +0 -0
  7. extras/.DS_Store +0 -0
  8. extras/Dockerfile +18 -0
  9. extras/demo_cross_augmentation.py +69 -0
  10. extras/download_mirst500.py +50 -0
  11. extras/examples/.DS_Store +0 -0
  12. extras/examples/1733.mid +0 -0
  13. extras/examples/2106.mid +0 -0
  14. extras/examples/803_002_167s95.mid +0 -0
  15. extras/examples/piano_converted.mid +0 -0
  16. extras/inspecting_slakh_bass.py +34 -0
  17. extras/rotary_positional_embedding.py +191 -0
  18. extras/run_spleeter_mirst500_cmedia.sh +13 -0
  19. extras/swap_channel.py +122 -0
  20. extras/t5_dev.py +41 -0
  21. extras/t5perceiver.py +443 -0
  22. extras/unimax_sampler/README.md +45 -0
  23. extras/unimax_sampler/demo.py +15 -0
  24. extras/unimax_sampler/unimax_sampler.py +168 -0
  25. model/__pycache__/conv_block.cpython-310.pyc +0 -0
  26. model/__pycache__/ff_layer.cpython-310.pyc +0 -0
  27. model/__pycache__/init_train.cpython-310.pyc +0 -0
  28. model/__pycache__/lm_head.cpython-310.pyc +0 -0
  29. model/__pycache__/lr_scheduler.cpython-310.pyc +0 -0
  30. model/__pycache__/ops.cpython-310.pyc +0 -0
  31. model/__pycache__/optimizers.cpython-310.pyc +0 -0
  32. model/__pycache__/projection_layer.cpython-310.pyc +0 -0
  33. model/__pycache__/spectrogram.cpython-310.pyc +0 -0
  34. model/__pycache__/ymt3.cpython-310.pyc +0 -0
  35. model/conformer_helper.py +169 -0
  36. model/conformer_mod.py +439 -0
  37. model/ff_layer.py +238 -0
  38. model/init_train.py +281 -0
  39. model/lm_head.py +40 -0
  40. model/ops.py +111 -0
  41. model/perceiver_helper.py +290 -0
  42. model/perceiver_mod.py +912 -0
  43. model/projection_layer.py +331 -0
  44. model/ymt3.py +967 -0
  45. tests/model/spectrogram_test.py +29 -0
  46. utils/README.md +22 -0
  47. utils/__pycache__/event2note.cpython-310.pyc +0 -0
  48. utils/__pycache__/midi.cpython-310.pyc +0 -0
  49. utils/__pycache__/note_event_dataclasses.cpython-310.pyc +0 -0
  50. utils/audio.py +309 -0
config/__pycache__/config.cpython-310.pyc ADDED
Binary file (4.81 kB). View file
 
config/__pycache__/vocabulary.cpython-310.pyc ADDED
Binary file (8.02 kB). View file
 
config/config.py ADDED
@@ -0,0 +1,272 @@
1
+ """config.py"""
2
+ import numpy as np
3
+ # yapf: disable
4
+ """
5
+ audio_cfg:
6
+ - Used by 'ymt3' to create a spectrogram layer.
7
+ - Input shape of model is determined by audio_cfg.
8
+ - 'train.py' arguments can override these defaults.
9
+ """
10
+ audio_cfg = {
11
+ # Overridable by args in train.py
12
+ "codec": "melspec", # {melspec, spec} melspec for MT3, spec for PerceiverTF
13
+ "hop_length": 128, # {128, 300} 128 for MT3, 300 for PerceiverTF
14
+ # Shared audio parameters
15
+ "audio_backend": "torchaudio", # {torchaudio, nnAudio}
16
+ "sample_rate": 16000,
17
+ "input_frames": 32767, # number of input frames (~=2.048 s), determining in-/output shape of front layers.
18
+ "n_fft": 2048,
19
+ "n_mels": 512, # only for melspec
20
+ "f_min": 50.0,
21
+ "f_max": 8000.0,
22
+ } # TODO: currently dataloader is not updated by "input_frames"
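Editorial note: the "feat_length": "auto" field further down in model_cfg is documented as being inferred from these audio settings. A minimal sketch of that inference, assuming the usual centered-STFT frame count (the helper below is hypothetical and not part of the repository):

# Hypothetical helper illustrating how "feat_length" could be inferred from audio_cfg.
def estimate_feat_length(input_frames: int, hop_length: int) -> int:
    # Centered STFT frame count: floor(num_samples / hop_length) + 1
    return input_frames // hop_length + 1

print(estimate_feat_length(32767, 128))  # -> 256, matching the "mt3: 256 time steps" note in model_cfg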
23
+
24
+ """
25
+ model_cfg:
26
+ - Encoder type dictates use of T5_CFG or PERCEIVER_TF_CFG.
27
+ - 'train.py' arguments can override these defaults.
28
+ """
29
+ model_cfg = {
30
+ "encoder_type": "t5", # {"t5", "perceiver-tf", "conformer"}
31
+ "decoder_type": "t5", # {"t5", "multi-t5"}
32
+ "pre_encoder_type": "default", # {None, "default", "conv", "conv1d", "conv2d_avpt"} by default, t5:None, perceiver:conv.
33
+ "pre_encoder_type_default": {"t5": None, "perceiver-tf": "conv", "conformer": None},
34
+ "pre_decoder_type": "default", # {None, 'linear', 'conv1', 'mlp', 'group_linear'} see model/projection_layer.py
35
+ "pre_decoder_type_default": { # [enc_type][dec_type]
36
+ "t5": {"t5": None,},
37
+ "perceiver-tf": {"t5": "linear", "multi-t5": "mc_shared_linear"},
38
+ "conformer": {"t5": None,},
39
+ },
40
+ "conv_out_channels": 128, # number of filters for 'conv' pre_encoder. Otherwise ignored.
41
+ "t5_basename": "google/t5-v1_1-small",
42
+ "pretrained": False, # bool, if True, load pretrained weights from t5_basename. Mismatched layers are ignored.
43
+ "use_task_conditional_encoder": True, # True by default, but default task is None. So not activated by default.
44
+ "use_task_conditional_decoder": True, # True by default, but default task is None. So not activated by default.
45
+ "d_feat": "auto", # Input audio feature dimension for encoder. Automatically inferred by audio_cfg and existence of pre_encoders.
46
+ "tie_word_embeddings": True, # If True, weights of embed_tokens and lm_head are tied for stabilizing gradients.
47
+ "vocab_size": "auto", # int or "auto", automatically inferred by task manager.
48
+ "num_max_positions": "auto", # int or "auto". Length of positional encoding. Automatically inferred by "feat_length", "event_length" and task_manager.max_task_token_length.
49
+ # 'vocab_size', 'tie_word_embeddings' and 'num_max_positions' are auto-copied to encoder and decoder configs in the below.
50
+ "encoder": {
51
+ "t5": {
52
+ "d_model": 512, # Hidden size of T5 encoder.
53
+ "num_heads": 6,
54
+ "num_layers": 8,
55
+ "dropout_rate": 0.05,
56
+ "position_encoding_type": "sinusoidal", # {'sinusoidal', 'trainable'}.
57
+ "ff_widening_factor": 2, # wideening factor for MLP/MoE layers. Default is 2 in T5.
58
+ "ff_layer_type": "t5_gmlp", # {'t5_gmlp', 'moe', 'mlp', 'gmlp'}. 'moe' for mixture of experts, 'mlp' for standard transformer dense layer, 'gmlp' for simple gated MLP.
59
+ },
60
+ "perceiver-tf": {
61
+ "num_latents": 24, # number of latents in Perceiver. 24 in perceiver-tf paper.
62
+ "d_latent": 128, # latent dimension of Perceiver. 128 in perceiver-tf paper.
63
+ "d_model": "q", # int or "q" or "kv". Inner-dim of sca and local/temporal self-att.
64
+ # "q" follows "latent_dim". "kv" follows "d_feat". Best practice is to inc-/decrease 'd_latent', instead of 'd_model'.
65
+ "num_blocks": 3, # number of Perceiver-TF blocks in encoder. L in the paper.
66
+ "num_local_transformers_per_block": 2, # N in the paper.
67
+ "num_temporal_transformers_per_block": 2, # M in the paper.
68
+ "sca_use_query_residual": False,
69
+ "dropout_rate": 0.1,
70
+ "position_encoding_type": "trainable", # {'trainable', 'rotary', 'alibi', 'alibit', None, 'tkd','td', 'tk', 'kdt'}. alibit is alibi with trainable slopes.
71
+ "attention_to_channel": True, # Whether to use channel attention in sca.
72
+ "layer_norm_type": "layer_norm", # {'layer_norm', 'rms_norm'}
73
+ "ff_layer_type": "mlp", # {'moe', 'mlp', gmlp}. 'moe' for mixture of experts, 'mlp' for standard transformer dense layer, 'gmlp' for simple gated MLP.
74
+ "ff_widening_factor": 1, # wideening factor for MLP/MoE layers. Default is 1.
75
+ "moe_num_experts": 4, # number of experts in MoE layer. Default is 4. Disabled if ff_layer_type is not 'moe'.
76
+ "moe_topk": 2, # top-k routing in MoE layer. Default is 2. Disabled if ff_layer_type is not 'moe'.
77
+ "hidden_act": 'gelu', # activation function in MLP/MoE layer. Default is 'gelu'. {'gelu', 'silu', 'relu'}
78
+ "rotary_type_sca": "pixel", # {'l'|'lang', 'p'|'pixel'}. Default is 'pixel'.
79
+ "rotary_type_latent": "pixel", # {'l'|'lang', 'p'|'pixel'}. Default is 'pixel'.
80
+ "rotary_type_temporal": "lang", # {'l'|'lang', 'p'|'pixel'}. Default is 'lang'.
81
+ "rotary_apply_to_keys": False, # Whether to apply rotary to keys. Default is False.
82
+ "rotary_partial_pe": False, # Whether to use partial positional encoding. Default is False.
83
+ },
84
+ "conformer": {
85
+ "d_model": 512, # Hidden size of T5 encoder.
86
+ "intermediate_size": 512, # or 2048. size of the intermediate feed forward layer in each T5Block
87
+ "num_heads": 8,
88
+ "num_layers": 8,
89
+ "dropout_rate": 0.1,
90
+ "layerdrop": 0.1, # see https://arxiv.org/abs/1909.11556
91
+ "position_encoding_type": "rotary", # {'rotary', 'relative'}.
92
+ "conv_dim": (512, 512, 512, 512, 512, 512, 512),
93
+ "conv_stride": (5, 2, 2, 2, 2, 2, 2),
94
+ "conv_kernel": (10, 3, 3, 3, 3, 3, 3),
95
+ "conv_depthwise_kernel_size": 31,
96
+ },
97
+
98
+ },
99
+ "decoder": {
100
+ "t5": {
101
+ "d_model": 512, # Hidden size of T5 encoder. If encoder has lower dim, it is projected to this dim for enc-dec cross att.
102
+ "num_heads": 6,
103
+ "num_layers": 8,
104
+ "dropout_rate": 0.05,
105
+ "position_encoding_type": "sinusoidal", # {'sinusoidal', 'trainable'}.
106
+ "ff_widening_factor": 2, # wideening factor for MLP/MoE layers. Default is 2 in T5.
107
+ "ff_layer_type": "t5_gmlp", # {'t5_gmlp', 'moe', 'mlp', 'gmlp'}. 'moe' for mixture of experts, 'mlp' for standard transformer dense layer, 'gmlp' for simple gated MLP.
108
+ },
109
+ "multi-t5": {
110
+ "d_model": 512, # Hidden size of T5 encoder. Recommended: {256 or 512}
111
+ "num_heads": 6,
112
+ "num_layers": 8,
113
+ "dropout_rate": 0.05,
114
+ "position_encoding_type": "sinusoidal", # {'sinusoidal', 'trainable'}.
115
+ "ff_widening_factor": 2, # wideening factor for MLP/MoE layers. Default is 2 in T5.
116
+ "ff_layer_type": "t5_gmlp", # {'t5_gmlp', 'moe', 'mlp', 'gmlp'}. 'moe' for mixture of experts, 'mlp' for standard transformer dense layer, 'gmlp' for simple gated MLP.
117
+ "num_channels": 13,
118
+ },
119
+ },
120
+ "feat_length": "auto", # Input audio feature length for encoder. Automatically inferred by audio_cfg.
121
+ # mt3: 256 time steps
122
+ "event_length": 1024, # max length of event tokens excluding task tokens <-- 128 for multi-t5
123
+ "init_factor": 1.0, # initialization factor for embedding layers
124
+ }
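Editorial note: the comment above states that "vocab_size", "tie_word_embeddings" and "num_max_positions" are auto-copied into the encoder/decoder sub-configs. A minimal sketch of such a copy step, assuming it runs after the "auto" values have been resolved (the function name and exact behaviour are assumptions, not the repository's code):

def propagate_shared_fields(cfg: dict) -> None:
    # Copy top-level fields shared by all encoder/decoder variants into each sub-config.
    shared_keys = ("vocab_size", "tie_word_embeddings", "num_max_positions")
    for section in ("encoder", "decoder"):
        for sub_cfg in cfg[section].values():
            for key in shared_keys:
                sub_cfg[key] = cfg[key]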
125
+
126
+ # yapf: enable
127
+ shared_cfg = {
128
+ "PATH": {
129
+ "data_home": "../../data", # path to the data directory. If using relative path, it is relative to /src directory.
130
+ },
131
+ "BSZ": { # global batch size is local_bsz * n_GPUs in DDP mode
132
+ "train_sub": 12, #20, # sub-batch size is per CPU worker
133
+ "train_local": 24, #40, # local batch size is per GPU in DDP mode
134
+ "validation": 64, # validation batch size is per GPU in DDP mode
135
+ "test": 64,
136
+ },
137
+ "AUGMENTATION": {
138
+ "train_random_amp_range": [0.8, 1.1], # min and max amplitude scaling factor
139
+ "train_stem_iaug_prob": 0.7, # probability of stem activation in intra-stem augmentation
140
+ "train_stem_xaug_policy": {
141
+ "max_k": 3,
142
+ "tau": 0.3,
143
+ "alpha": 1.0,
144
+ "max_subunit_stems": 12, # the number of subunit stems to be reduced to this number of stems
145
+ "p_include_singing": None, # NOT IMPLEMENTED; probability of including singing for cross augmented examples. if None, use base probaility.
146
+ "no_instr_overlap": True,
147
+ "no_drum_overlap": True,
148
+ "uhat_intra_stem_augment": True,
149
+ },
150
+ "train_pitch_shift_range": [-2, 2], # [min, max] in semitones. None or [0, 0] for no pitch shift.
151
+ },
152
+ "DATAIO": { # do not set `shuffle` here.
153
+ "num_workers": 4, # num_worker is per GPU in DDP mode
154
+ "prefetch_factor": 2, #2,
155
+ "pin_memory": True,
156
+ "persistent_workers": False,
157
+ },
158
+ "CHECKPOINT": {
159
+ "save_top_k": 4, # max top k checkpoints to save
160
+ "monitor": 'validation/macro_onset_f',
161
+ "mode": 'max',
162
+ # "every_n_epochs": 20, # only working when check_val_every_n_epoch is 0
163
+ "save_last": True, # save last model
164
+ "filename": "{epoch}-{step}",
165
+ },
166
+ "TRAINER": { # do not coverwrite args in this section
167
+ "limit_train_batches": 1.0, # How much of training dataset to check (float = fraction, int = num_batches)
168
+ "limit_val_batches": 1.0,
169
+ "limit_test_batches": 1.0,
170
+ "gradient_clip_val": 1.0, # {0 or None} means don't clip.
171
+ "accumulate_grad_batches": 1, #1, # Accumulates grads every k batches. If set to 1, no effect.
172
+ "check_val_every_n_epoch": 1, #5, 1 for very large dataset such as EGMD
173
+ "num_sanity_val_steps": 0,
174
+ },
175
+ "WANDB": {
176
+ "save_dir": "../logs",
177
+ "cache_dir": "../logs/.wandb_cache",
178
+ "resume": "allow",
179
+ "anonymous": "allow", # {never, allow, must}
180
+ "mode": "online", # {online, offline, disabled}
181
+ },
182
+ "LR_SCHEDULE": {
183
+ # "scheduler_type": "cosine", # {legacy, cosine, constant}
184
+ "warmup_steps": 1000, # only for cosine scheduler, legacy scheduler follows T5's legacy schedule
185
+ "total_steps": 100000, # argparser of train.py can overwrite this
186
+ "final_cosine": 1e-5, # only for cosine scheduler
187
+ },
188
+ "TOKENIZER": {
189
+ "max_shift_steps": "auto", # max number of shift steps in the model. (int) or "auto". If "auto", it is set by audio_cfg["input_frames"] and shift_steps_ms. 206 with default setup.
190
+ "shift_step_ms": 10, # shift step in ms
191
+ },
192
+ }
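Editorial note: the TOKENIZER comment above says that "max_shift_steps": "auto" resolves to 206 with the default setup. One formula that reproduces this number from audio_cfg is shown below; the exact rounding used by the tokenizer may differ, so treat this as an illustration only:

import math

def auto_max_shift_steps(input_frames: int, sample_rate: int, shift_step_ms: int) -> int:
    segment_ms = input_frames / sample_rate * 1000    # ~2047.9 ms with the defaults
    return math.ceil(segment_ms / shift_step_ms) + 1  # 205 shift positions + 1

print(auto_max_shift_steps(32767, 16000, 10))  # -> 206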
193
+
194
+ T5_BASE_CFG = {
195
+ "google/t5-v1_1-small": {
196
+ "architectures": ["T5ForConditionalGeneration"],
197
+ "d_ff":
198
+ 1024, # size of the intermediate feed-forward layer in each T5Block. Can be overwritten by ff_widening_factor in model_cfg.
199
+ "d_kv": 64, # d_kv has to be equal to d_model // num_heads.
200
+ # "d_model": 512, # encoder hiddnen size, defined by model_cfg
201
+ "decoder_start_token_id": 0,
202
+ "dense_act_fn": "gelu_new",
203
+ # "dropout_rate": 0.05, # can be overwritten by args in ymt3
204
+ "eos_token_id": 1,
205
+ "feed_forward_proj": "gated-gelu",
206
+ "initializer_factor": 1.0,
207
+ "is_encoder_decoder": True,
208
+ "is_gated_act": True,
209
+ "layer_norm_epsilon": 1e-06,
210
+ "model_type": "t5",
211
+ # "num_decoder_layers": 8, # defined by model_cfg
212
+ # "num_heads": 6, # defined by model_cfg
213
+ # "num_layers": 8, # defined by model_cfg
214
+ "output_past": True,
215
+ "pad_token_id": 0,
216
+ "relative_attention_num_buckets": 32,
217
+ # "tie_word_embeddings": True,
218
+ "use_cache": True,
219
+ # "vocab_size": 1391 # vocab_size is automatically set by the task manager...
220
+ },
221
+ "google/t5-efficient-small": {
222
+ "architectures": ["T5ForConditionalGeneration"],
223
+ "d_ff": 2048,
224
+ "d_kv": 64,
225
+ "d_model": 512,
226
+ "decoder_start_token_id": 0,
227
+ "dropout_rate": 0.1,
228
+ "eos_token_id": 1,
229
+ "feed_forward_proj": "relu",
230
+ "initializer_factor": 1.0,
231
+ "is_encoder_decoder": True,
232
+ "layer_norm_epsilon": 1e-06,
233
+ "model_type": "t5",
234
+ "num_decoder_layers": 6,
235
+ "num_heads": 8,
236
+ "num_layers": 6,
237
+ "pad_token_id": 0,
238
+ "relative_attention_num_buckets": 32,
239
+ "torch_dtype": "float32",
240
+ "transformers_version": "4.17.0.dev0",
241
+ "use_cache": True,
242
+ },
243
+ }
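Editorial note: one plausible way these base configs could be combined with model_cfg to build a Hugging Face T5 configuration. The merge order and the selected keys are assumptions for illustration, not the repository's actual wiring:

from transformers import T5Config

def build_t5_config(model_cfg: dict, base_cfg: dict, vocab_size: int) -> T5Config:
    # Start from the named base config (e.g. "google/t5-v1_1-small"), then apply model_cfg overrides.
    kwargs = dict(base_cfg[model_cfg["t5_basename"]])
    enc = model_cfg["encoder"]["t5"]
    kwargs.update(
        d_model=enc["d_model"],
        num_heads=enc["num_heads"],
        num_layers=enc["num_layers"],
        dropout_rate=enc["dropout_rate"],
        vocab_size=vocab_size,
        tie_word_embeddings=model_cfg["tie_word_embeddings"],
    )
    return T5Config(**kwargs)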
244
+
245
+ # yapf: enable
246
+ DEEPSPEED_CFG = {
247
+ "zero_allow_untested_optimizer": True,
248
+ "optimizer": {
249
+ "type": "adam",
250
+ "params": {
251
+ "lr": 1e-4,
252
+ "betas": [0.998, 0.999],
253
+ "eps": 1e-3,
254
+ "weight_decay": 0.001,
255
+ "adam_w_mode": True,
256
+ }
257
+ },
258
+ "scheduler": {
259
+ "type": "WarmupLR",
260
+ "params": {
261
+ "last_batch_iteration": -1,
262
+ "warmup_min_lr": 0,
263
+ "warmup_max_lr": 3e-5,
264
+ "warmup_num_steps": 100,
265
+ },
266
+ },
267
+ "zero_optimization": {
268
+ "stage": 0, #0,1,2,3
269
+ # "offload_optimizer":
270
+ # False, # Enable Offloading optimizer state/calculation to the host CPU
271
+ },
272
+ }
config/data_presets.py ADDED
@@ -0,0 +1,811 @@
1
+ """ data.py:
2
+ Data presets for training and evaluation.
3
+
4
+ Single Presets:
5
+ musicnet_mt3
6
+ musicnet_em
7
+ musicnet_thickstun
8
+ slakh
9
+ guitarset
10
+ ...
11
+
12
+ Multi Presets:
13
+ all_mmegs
14
+ ...
15
+
16
+ """
17
+ from config.vocabulary import *
18
+ from config.vocabulary import drum_vocab_presets, program_vocab_presets
19
+ from utils.utils import deduplicate_splits, merge_splits, merge_vocab
20
+
21
+ data_preset_single_cfg = {
22
+ "musicnet_mt3": {
23
+ "eval_vocab": [MUSICNET_INSTR_CLASS],
24
+ "dataset_name": "musicnet",
25
+ "train_split": "train_mt3",
26
+ "validation_split": "validation_mt3_acoustic",
27
+ "test_split": "test_mt3_acoustic",
28
+ "has_stem": False,
29
+ },
30
+ "musicnet_mt3_synth_only": { # sanity-check
31
+ "eval_vocab": [MUSICNET_INSTR_CLASS],
32
+ "dataset_name": "musicnet",
33
+ "train_split": "train_mt3_synth",
34
+ "validation_split": "validation_mt3_synth",
35
+ "test_split": "test_mt3_acoustic",
36
+ "has_stem": False,
37
+ },
38
+ "musicnet_mt3_em": {
39
+ "eval_vocab": [MUSICNET_INSTR_CLASS],
40
+ "dataset_name": "musicnet",
41
+ "train_split": "train_mt3_em",
42
+ "validation_split": "validation_mt3_em",
43
+ "test_split": "test_mt3_em",
44
+ "has_stem": False,
45
+ },
46
+ "musicnet_thickstun": { # exp4
47
+ "eval_vocab": [MUSICNET_INSTR_CLASS],
48
+ "dataset_name": "musicnet",
49
+ "train_split": "train_thickstun",
50
+ "validation_split": "test_thickstun",
51
+ "test_split": "test_thickstun",
52
+ "has_stem": False,
53
+ },
54
+ "musicnet_thickstun_em": { # NOTE: this is not the use of external 'synth' in the paper, but the use of 'synth' within the dataset
55
+ "eval_vocab": [MUSICNET_INSTR_CLASS],
56
+ "dataset_name": "musicnet",
57
+ "train_split": "train_thickstun_em",
58
+ "validation_split": "test_thickstun_em",
59
+ "test_split": "test_thickstun_em",
60
+ "has_stem": False,
61
+ },
62
+ "musicnet_thickstun_ext": { # exp4
63
+ "eval_vocab": [MUSICNET_INSTR_CLASS],
64
+ "dataset_name": "musicnet",
65
+ "train_split": "train_thickstun",
66
+ "validation_split": "test_thickstun_ext",
67
+ "test_split": "test_thickstun_ext",
68
+ "has_stem": False,
69
+ },
70
+ "musicnet_thickstun_ext_em": { # NOTE: this is not the use of external 'synth' in the paper, but the use of 'synth' within the dataset
71
+ "eval_vocab": [MUSICNET_INSTR_CLASS],
72
+ "dataset_name": "musicnet",
73
+ "train_split": "train_thickstun_em",
74
+ "validation_split": "test_thickstun_ext_em",
75
+ "test_split": "test_thickstun_ext_em",
76
+ "has_stem": False,
77
+ },
78
+ "maps_default": {
79
+ "eval_vocab": [PIANO_SOLO_CLASS],
80
+ "dataset_name": "maps",
81
+ "train_split": "train",
82
+ "validation_split": "test",
83
+ "test_split": "test",
84
+ "has_stem": False,
85
+ },
86
+ "maps_all": {
87
+ "eval_vocab": [None],
88
+ "dataset_name": "maps",
89
+ "train_split": "all",
90
+ "validation_split": None,
91
+ "test_split": None,
92
+ "has_stem": False,
93
+ },
94
+ "maestro": {
95
+ "eval_vocab": [PIANO_SOLO_CLASS],
96
+ "dataset_name": "maestro",
97
+ "train_split": "train",
98
+ "validation_split": "validation",
99
+ "test_split": "test",
100
+ "has_stem": False,
101
+ },
102
+ "maestro_final": {
103
+ "eval_vocab": [PIANO_SOLO_CLASS],
104
+ "dataset_name": "maestro",
105
+ "train_split": merge_splits(["train", "validation"], dataset_name="maestro"),
106
+ "validation_split": "test",
107
+ "test_split": "test",
108
+ "has_stem": False,
109
+ },
110
+ "guitarset": { # 4 random players for train, 1 for valid, and 1 for test
111
+ "eval_vocab": [GUITAR_SOLO_CLASS],
112
+ "dataset_name": "guitarset",
113
+ "train_split": "train",
114
+ "validation_split": "validation",
115
+ "test_split": "test",
116
+ "has_stem": False,
117
+ },
118
+ "guitarset_pshift": { # guitarset + pitch shift
119
+ "eval_vocab": [GUITAR_SOLO_CLASS],
120
+ "dataset_name": "guitarset",
121
+ "train_split": "train_pshift",
122
+ "validation_split": "validation",
123
+ "test_split": "test",
124
+ "has_stem": False,
125
+ },
126
+ "guitarset_progression": { # progression 1 and 2 as train, progression 3 as test
127
+ "eval_vocab": [GUITAR_SOLO_CLASS],
128
+ "dataset_name": "guitarset",
129
+ "train_split": merge_splits(["progression_1", "progression_2"], dataset_name="guitarset"),
130
+ "validation_split": "progression_3",
131
+ "test_split": "progression_3",
132
+ "has_stem": False,
133
+ },
134
+ "guitarset_progression_pshift": { # guuitarset_progression + pitch shift
135
+ "eval_vocab": [GUITAR_SOLO_CLASS],
136
+ "dataset_name": "guitarset",
137
+ "train_split": merge_splits(["progression_1_pshift", "progression_2_pshift"], dataset_name="guitarset"),
138
+ "validation_split": "progression_3",
139
+ "test_split": "progression_3",
140
+ "has_stem": False,
141
+ },
142
+ "guitarset_minus_bn": { # guuitarset_style + pitch shift
143
+ "eval_vocab": [GUITAR_SOLO_CLASS],
144
+ "dataset_name": "guitarset",
145
+ "train_split": merge_splits(["Funk_pshift", "SS_pshift", "Jazz_pshift", "Rock_pshift"],
146
+ dataset_name="guitarset"),
147
+ "validation_split": "BN",
148
+ "test_split": "BN",
149
+ "has_stem": False,
150
+ },
151
+ "guitarset_minus_funk": { # guuitarset_style + pitch shift
152
+ "eval_vocab": [GUITAR_SOLO_CLASS],
153
+ "dataset_name": "guitarset",
154
+ "train_split": merge_splits(["BN_pshift", "SS_pshift", "Jazz_pshift", "Rock_pshift"],
155
+ dataset_name="guitarset"),
156
+ "validation_split": "Funk",
157
+ "test_split": "Funk",
158
+ "has_stem": False,
159
+ },
160
+ "guitarset_minus_ss": { # guuitarset_style + pitch shift
161
+ "eval_vocab": GUITAR_SOLO_CLASS,
162
+ "dataset_name": "guitarset",
163
+ "train_split": merge_splits(["BN_pshift", "Funk_pshift", "Jazz_pshift", "Rock_pshift"],
164
+ dataset_name="guitarset"),
165
+ "validation_split": "SS",
166
+ "test_split": "SS",
167
+ "has_stem": False,
168
+ },
169
+ "guitarset_minus_jazz": { # guuitarset_style + pitch shift
170
+ "eval_vocab": [GUITAR_SOLO_CLASS],
171
+ "dataset_name": "guitarset",
172
+ "train_split": merge_splits(["BN_pshift", "Funk_pshift", "SS_pshift", "Rock_pshift"],
173
+ dataset_name="guitarset"),
174
+ "validation_split": "Jazz",
175
+ "test_split": "Jazz",
176
+ "has_stem": False,
177
+ },
178
+ "guitarset_minus_rock": { # guuitarset_style + pitch shift
179
+ "eval_vocab": [GUITAR_SOLO_CLASS],
180
+ "dataset_name": "guitarset",
181
+ "train_split": merge_splits(["BN_pshift", "Funk_pshift", "SS_pshift", "Jazz_pshift"],
182
+ dataset_name="guitarset"),
183
+ "validation_split": "Rock",
184
+ "test_split": "Rock",
185
+ "has_stem": False,
186
+ },
187
+ "guitarset_all": {
188
+ "eval_vocab": [None],
189
+ "dataset_name": "guitarset",
190
+ "train_split": "all",
191
+ "validation_split": None,
192
+ "test_split": None,
193
+ "has_stem": False,
194
+ },
195
+ "enstdrums_dtp": {
196
+ "eval_vocab": [None],
197
+ "eval_drum_vocab": drum_vocab_presets["ksh"],
198
+ "dataset_name": "enstdrums",
199
+ "train_split": merge_splits(["drummer_1_dtp", "drummer_2_dtp", "drummer_1_dtp", "drummer_2_dtp"], dataset_name="enstdrums"),
200
+ "validation_split": "drummer_1_dtp", # for sanity check
201
+ "test_split": "drummer_3_dtp",
202
+ "has_stem": False,
203
+ },
204
+ "enstdrums_dtm": {
205
+ "eval_vocab": [None],
206
+ "eval_drum_vocab": drum_vocab_presets["ksh"],
207
+ "dataset_name": "enstdrums",
208
+ "train_split": merge_splits(["drummer_1_dtm", "drummer_2_dtm", "drummer_1_dtp", "drummer_2_dtp"], dataset_name="enstdrums"),
209
+ "validation_split": "drummer_3_dtm_r2", # 0.6 * drum
210
+ "test_split": "drummer_3_dtm_r1", # 0.75 * drum
211
+ "has_stem": True,
212
+ },
213
+ "enstdrums_random_dtm": { # single dataset training as a denoising ADT model
214
+ "eval_vocab": [None],
215
+ "eval_drum_vocab": drum_vocab_presets["ksh"],
216
+ "dataset_name": "enstdrums",
217
+ "train_split": "train_dtm",
218
+ "validation_split": "validation_dtm",
219
+ "test_split": "test_dtm",
220
+ "has_stem": True,
221
+ },
222
+ "enstdrums_random": { # multi dataset training with random split of 70:15:15
223
+ "eval_vocab": [None],
224
+ "eval_drum_vocab": drum_vocab_presets["ksh"],
225
+ "dataset_name": "enstdrums",
226
+ "train_split": "train_dtp",
227
+ "validation_split": "test_dtm",
228
+ "test_split": "test_dtm",
229
+ "has_stem": True,
230
+ },
231
+ "enstdrums_random_plus_dtd": { # multi dataset training plus dtd
232
+ "eval_vocab": [None],
233
+ "eval_drum_vocab": drum_vocab_presets["ksh"],
234
+ "dataset_name": "enstdrums",
235
+ "train_split": merge_splits(["train_dtp", "all_dtd"], dataset_name="enstdrums"),
236
+ "validation_split": "test_dtm",
237
+ "test_split": "test_dtm",
238
+ "has_stem": True,
239
+ },
240
+ "mir_st500": {
241
+ "eval_vocab": [SINGING_SOLO_CLASS],
242
+ "dataset_name": "mir_st500",
243
+ "train_split": "train_stem",
244
+ "validation_split": "test",
245
+ "test_split": "test",
246
+ "has_stem": True,
247
+ },
248
+ "mir_st500_voc": {
249
+ "eval_vocab": [SINGING_SOLO_CLASS],
250
+ "dataset_name": "mir_st500",
251
+ "train_split": "train_vocal",
252
+ "validation_split": "test_vocal",
253
+ "test_split": "test_vocal",
254
+ "has_stem": False,
255
+ },
256
+ "mir_st500_voc_debug": { # using train_vocal for test (for debugging)
257
+ "eval_vocab": [SINGING_SOLO_CLASS],
258
+ "dataset_name": "mir_st500",
259
+ "train_split": "train_vocal",
260
+ "validation_split": "test_vocal",
261
+ "test_split": "train_vocal",
262
+ "has_stem": False,
263
+ },
264
+ "slakh": {
265
+ "eval_vocab": [GM_INSTR_CLASS],
266
+ "eval_drum_vocab": drum_vocab_presets["gm"],
267
+ "dataset_name": "slakh",
268
+ "train_split": "train",
269
+ "validation_split": "validation",
270
+ "test_split": "test",
271
+ "has_stem": True,
272
+ },
273
+ "slakh_final": {
274
+ "eval_vocab": [GM_INSTR_CLASS],
275
+ "eval_drum_vocab": drum_vocab_presets["gm"],
276
+ "dataset_name": "slakh",
277
+ "train_split": merge_splits(["train", "validation"], dataset_name="slakh"),
278
+ "validation_split": "test",
279
+ "test_split": "test",
280
+ "has_stem": True,
281
+ },
282
+ "rwc_pop_bass": {
283
+ "eval_vocab": [BASS_SOLO_CLASS],
284
+ "add_pitch_class_metric": ["Bass"],
285
+ "dataset_name": "rwc_pop",
286
+ "train_split": None,
287
+ "validation_split": "bass",
288
+ "test_split": "bass",
289
+ "has_stem": False,
290
+ },
291
+ "rwc_pop_full": {
292
+ "eval_vocab": [GM_INSTR_CLASS_PLUS],
293
+ "add_pitch_class_metric": list(GM_INSTR_CLASS_PLUS.keys()),
294
+ "dataset_name": "rwc_pop",
295
+ "train_split": None,
296
+ "validation_split": "full",
297
+ "test_split": "full",
298
+ "has_stem": False,
299
+ },
300
+ "egmd": {
301
+ "eval_vocab": [None],
302
+ "eval_drum_vocab": drum_vocab_presets["ksh"],
303
+ "dataset_name": "egmd",
304
+ "train_split": "train",
305
+ "validation_split": "validation",
306
+ "test_split": "test_reduced", # EGMD has 5000+ test files, so we reudce it to 200 files to save time
307
+ # "train_limit_num_files": 4402, #8804, # 17608, # limit the number of files for training to random choice of half.
308
+ "has_stem": False,
309
+ },
310
+ "urmp": {
311
+ "eval_vocab": [GM_INSTR_CLASS],
312
+ "dataset_name": "urmp",
313
+ "train_split": "train",
314
+ "validation_split": "test",
315
+ "test_split": "test",
316
+ "has_stem": True,
317
+ },
318
+ "cmedia": {
319
+ "eval_vocab": [SINGING_SOLO_CLASS],
320
+ "dataset_name": "cmedia",
321
+ "train_split": "train_stem",
322
+ "validation_split": "train",
323
+ "test_split": "train",
324
+ "has_stem": True,
325
+ },
326
+ "cmedia_voc": {
327
+ "eval_vocab": [SINGING_SOLO_CLASS],
328
+ "dataset_name": "cmedia",
329
+ "train_split": "train_vocal",
330
+ "validation_split": "train_vocal",
331
+ "test_split": "train_vocal",
332
+ "has_stem": False,
333
+ },
334
+ "idmt_smt_bass": {
335
+ "eval_vocab": [BASS_SOLO_CLASS],
336
+ "dataset_name": "idmt_smt_bass",
337
+ "train_split": "train",
338
+ "validation_split": "validation",
339
+ "test_split": "validation",
340
+ "has_stem": False,
341
+ },
342
+ "geerdes": { # full mix dataset for evaluation
343
+ "eval_vocab": [GM_INSTR_CLASS_PLUS],
344
+ "dataset_name": "geerdes",
345
+ "train_split": None,
346
+ "validation_split": None,
347
+ "test_split": "all",
348
+ "has_stem": False,
349
+ },
350
+ "geerdes_sep": { # Using vocal/accomp separation for evalutation
351
+ "eval_vocab": [GM_INSTR_CLASS_PLUS],
352
+ "dataset_name": "geerdes",
353
+ "train_split": None,
354
+ "validation_split": None,
355
+ "test_split": "all_sep",
356
+ "has_stem": False,
357
+ },
358
+ "geerdes_half": { # Using half dataset for train/val
359
+ "eval_vocab": [GM_INSTR_CLASS_PLUS],
360
+ "dataset_name": "geerdes",
361
+ "train_split": "train",
362
+ "validation_split": "validation",
363
+ "test_split": "validation",
364
+ "has_stem": False,
365
+ },
366
+ "geerdes_half_sep": { # Using half dataset with vocal/accomp separation for train/val
367
+ "eval_vocab": [GM_INSTR_CLASS_PLUS],
368
+ "dataset_name": "geerdes",
369
+ "train_split": "train_sep",
370
+ "validation_split": "validation_sep",
371
+ "test_split": "validation_sep",
372
+ "has_stem": False,
373
+ },
374
+ }
375
+
376
+ data_preset_multi_cfg = {
377
+ "musicnet_mt3_em_synth_plus_maps": {
378
+ "presets": ["musicnet_mt3_em_synth", "maps_all"],
379
+ "weights": [0.6, 0.4],
380
+ "eval_vocab": [MUSICNET_INSTR_CLASS],
381
+ },
382
+ "musicnet_em_synth_table2_plus_maps": {
383
+ "presets": ["musicnet_em_synth_table2", "maps_all"],
384
+ "weights": [0.6, 0.4],
385
+ "eval_vocab": [MUSICNET_INSTR_CLASS],
386
+ },
387
+ "musicnet_em_synth_table2_plus_maps_multi": {
388
+ "presets": ["musicnet_em_synth_table2", "maps_default"],
389
+ "weights": [0.6, 0.4],
390
+ "eval_vocab": [MUSICNET_INSTR_CLASS],
391
+ },
392
+ "guitarset_progression_plus_maps": {
393
+ "presets": ["guitarset_progression", "maps_all"],
394
+ "weights": [0.5, 0.5],
395
+ "eval_vocab": [GUITAR_SOLO_CLASS],
396
+ },
397
+ "guitarset_pshift_plus_maps": {
398
+ "presets": ["guitarset_pshift", "maps_default"],
399
+ "weights": [0.6, 0.4],
400
+ "eval_vocab": [merge_vocab([GUITAR_SOLO_CLASS, PIANO_SOLO_CLASS])],
401
+ },
402
+ "guitarset_pshift_plus_musicnet_thick": {
403
+ "presets": ["guitarset_pshift", "musicnet_thickstun_em"],
404
+ "weights": [0.5, 0.5],
405
+ "eval_vocab": [merge_vocab([GUITAR_SOLO_CLASS, PIANO_SOLO_CLASS])],
406
+ },
407
+ "multi_sanity_check": {
408
+ "presets": ["musicnet_mt3_synth_only", "musicnet_mt3_synth_only"],
409
+ "weights": [0.6, 0.4],
410
+ "eval_vocab": [MUSICNET_INSTR_CLASS],
411
+ },
412
+ "all_mmegs": {
413
+ "presets": [
414
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp", "guitarset_pshift"
415
+ ],
416
+ "weights": [0.2, 0.2, 0.2, 0.2, 0.2],
417
+ "eval_vocab": [None] * 5, # None means instrument-agnostic F1 for each dataset
418
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
419
+ "val_max_num_files": 20, # max 20 files per dataset
420
+ "test_max_num_files": None,
421
+ },
422
+ "all_gt_cv0": {
423
+ "presets": [
424
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp", "guitarset_minus_bn"
425
+ ],
426
+ "weights": [0.2, 0.2, 0.2, 0.2, 0.2],
427
+ "eval_vocab": [None] * 5, # None means instrument-agnostic F1 for each dataset
428
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
429
+ "val_max_num_files": 20, # max 20 files per dataset
430
+ "test_max_num_files": None,
431
+ },
432
+ "all_gt_cv1": {
433
+ "presets": [
434
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
435
+ "guitarset_minus_funk"
436
+ ],
437
+ "weights": [0.2, 0.2, 0.2, 0.2, 0.2],
438
+ "eval_vocab": [None] * 5, # None means instrument-agnostic F1 for each dataset
439
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
440
+ "val_max_num_files": 20, # max 20 files per dataset
441
+ "test_max_num_files": None,
442
+ },
443
+ "all_gt_cv2": {
444
+ "presets": [
445
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp", "guitarset_minus_ss"
446
+ ],
447
+ "weights": [0.2, 0.2, 0.2, 0.2, 0.2],
448
+ "eval_vocab": [None] * 5, # None means instrument-agnostic F1 for each dataset
449
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
450
+ "val_max_num_files": 20, # max 20 files per dataset
451
+ "test_max_num_files": None,
452
+ },
453
+ "all_gt_cv3": {
454
+ "presets": [
455
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
456
+ "guitarset_minus_rock"
457
+ ],
458
+ "weights": [0.2, 0.2, 0.2, 0.2, 0.2],
459
+ "eval_vocab": [None] * 5, # None means instrument-agnostic F1 for each dataset
460
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
461
+ "val_max_num_files": 20, # max 20 files per dataset
462
+ "test_max_num_files": None,
463
+ },
464
+ "all_gt_cv4": {
465
+ "presets": [
466
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
467
+ "guitarset_minus_jazz"
468
+ ],
469
+ "weights": [0.2, 0.2, 0.2, 0.2, 0.2],
470
+ "eval_vocab": [None] * 5, # None means instrument-agnostic F1 for each dataset
471
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
472
+ "val_max_num_files": 20, # max 20 files per dataset
473
+ "test_max_num_files": None,
474
+ },
475
+ "all_enstdrums_random": {
476
+ "presets": [
477
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_random", "guitarset"
478
+ ],
479
+ "weights": [0.2, 0.2, 0.2, 0.2, 0.2],
480
+ "eval_vocab": [None] * 5, # None means instrument-agnostic F1 for each dataset
481
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
482
+ "val_max_num_files": 20, # max 20 files per dataset
483
+ "test_max_num_files": None,
484
+ },
485
+ "all_plus_egmd": {
486
+ "presets": [
487
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_random_plus_dtd",
488
+ "guitarset", "egmd"
489
+ ],
490
+ "weights": [0.2, 0.2, 0.2, 0.1, 0.1, 0.2],
491
+ "eval_vocab": [None] * 6, # None means instrument-agnostic F1 for each dataset
492
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
493
+ "val_max_num_files": 20, # max 20 files per dataset
494
+ "test_max_num_files": None,
495
+ },
496
+ "all_dtp_egmd": {
497
+ "presets": [
498
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp", "guitarset", "egmd"
499
+ ],
500
+ "weights": [0.2, 0.2, 0.2, 0.1, 0.1, 0.2],
501
+ "eval_vocab": [None] * 6, # None means instrument-agnostic F1 for each dataset
502
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
503
+ "val_max_num_files": 20, # max 20 files per dataset
504
+ "test_max_num_files": None,
505
+ },
506
+ "all_weighted_slakh": {
507
+ "presets": [
508
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp", "guitarset_pshift", "egmd"
509
+ ],
510
+ "weights": [0.5, 0.1, 0.1, 0.05, 0.05, 0.2],
511
+ "eval_vocab": [None] * 6, # None means instrument-agnostic F1 for each dataset
512
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
513
+ "val_max_num_files": 20, # max 20 files per dataset
514
+ "test_max_num_files": None,
515
+ },
516
+ "all_weighted_mt3": { # for comparison with MT3
517
+ "presets": [
518
+ "slakh", "musicnet_mt3", "mir_st500_voc", "enstdrums_dtp",
519
+ "guitarset_progression_pshift", "egmd"
520
+ ],
521
+ "weights": [0.5, 0.1, 0.1, 0.05, 0.05, 0.2],
522
+ "eval_vocab": [None] * 6, # None means instrument-agnostic F1 for each dataset
523
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
524
+ "val_max_num_files": 20, # max 20 files per dataset
525
+ "test_max_num_files": None,
526
+ },
527
+ "all_weighted_mt3_em": { # musicnet_mt3_em
528
+ "presets": [
529
+ "slakh", "musicnet_mt3_em", "mir_st500_voc", "enstdrums_dtp",
530
+ "guitarset_progression_pshift", "egmd"
531
+ ],
532
+ "weights": [0.5, 0.1, 0.1, 0.05, 0.05, 0.2],
533
+ "eval_vocab": [None] * 6, # None means instrument-agnoßstic F1 for each dataset
534
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
535
+ "val_max_num_files": 20, # max 20 files per dataset
536
+ "test_max_num_files": None,
537
+ },
538
+ "all_urmp": {
539
+ "presets": [
540
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
541
+ "guitarset_pshift", "egmd", "urmp"
542
+ ],
543
+ "weights": [0.5, 0.2, 0.1, 0.05, 0.05, 0.05, 0.1],
544
+ "eval_vocab": [None] * 7, # None means instrument-agnostic F1 for each dataset
545
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
546
+ "val_max_num_files": 20, # max 20 files per dataset
547
+ "test_max_num_files": None,
548
+ },
549
+ "all_urmp_mt3": { # for comparison with MT3 including URMP
550
+ "presets": [
551
+ "slakh", "musicnet_mt3", "mir_st500_voc", "enstdrums_dtp",
552
+ "guitarset_progression", "egmd", "urmp"
553
+ ],
554
+ "weights": [0.5, 0.2, 0.1, 0.05, 0.05, 0.0125, 0.1],
555
+ "eval_vocab": [None] * 7, # None means instrument-agnostic F1 for each dataset
556
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
557
+ "val_max_num_files": 20, # max 20 files per dataset
558
+ "test_max_num_files": None,
559
+ },
560
+ "all_urmp_mt3_em": { # musicnet_mt3_em including URMP
561
+ "presets": [
562
+ "slakh", "musicnet_mt3_em", "mir_st500_voc", "enstdrums_dtp",
563
+ "guitarset_progression", "egmd", "urmp"
564
+ ],
565
+ "weights": [0.5, 0.2, 0.1, 0.05, 0.05, 0.0125, 0.1],
566
+ "eval_vocab": [None] * 7, # None means instrument-agnostic F1 for each dataset
567
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
568
+ "val_max_num_files": 20, # max 20 files per dataset
569
+ "test_max_num_files": None,
570
+ },
571
+ "all_maestro": { # including Mestro and URMP
572
+ "presets": [
573
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
574
+ "guitarset_pshift", "egmd", "urmp", "maestro"
575
+ ],
576
+ "weights": [0.5, 0.1, 0.125, 0.075, 0.025, 0.01, 0.1, 0.1],
577
+ "eval_vocab": [None] * 8, # None means instrument-agnostic F1 for each dataset
578
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
579
+ "val_max_num_files": 20, # max 20 files per dataset
580
+ "test_max_num_files": None,
581
+ },
582
+ "all_maestro_mt3": { # for comparison with MT3 including URMP
583
+ "presets": [
584
+ "slakh", "musicnet_mt3", "mir_st500_voc", "enstdrums_dtp",
585
+ "guitarset_progression", "egmd", "urmp", "maestro"
586
+ ],
587
+ "weights": [0.5, 0.1, 0.1, 0.05, 0.05, 0.0125, 0.1, 0.1],
588
+ "eval_vocab": [None] * 8, # None means instrument-agnostic F1 for each dataset
589
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
590
+ "val_max_num_files": 20, # max 20 files per dataset
591
+ "test_max_num_files": None,
592
+ },
593
+ "all_maestro_mt3_em": { # musicnet_mt3_em including URMP
594
+ "presets": [
595
+ "slakh", "musicnet_mt3_em", "mir_st500_voc", "enstdrums_dtp",
596
+ "guitarset_progression", "egmd", "urmp", "maestro"
597
+ ],
598
+ "weights": [0.5, 0.1, 0.1, 0.05, 0.05, 0.0125, 0.1, 0.1],
599
+ "eval_vocab": [None] * 8, # None means instrument-agnostic F1 for each dataset
600
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
601
+ "val_max_num_files": 20, # max 20 files per dataset
602
+ "test_max_num_files": None,
603
+ },
604
+ "singing_v1": { # slakh + mir_st500 without spleeter
605
+ "presets": ["slakh", "mir_st500"],
606
+ "weights": [0.8, 0.2],
607
+ "eval_vocab": [None, SINGING_SOLO_CLASS], # None means instrument-agnostic F1 for each dataset
608
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
609
+ "val_max_num_files": 20, # max 20 files per dataset
610
+ "test_max_num_files": None,
611
+ },
612
+ "all_singing_v1": { # for singing-only task
613
+ "presets": [
614
+ "slakh", "musicnet_thickstun_em", "mir_st500_stem", "enstdrums_dtp",
615
+ "guitarset_pshift", "egmd", "urmp", "maestro"
616
+ ],
617
+ "weights": [0.5, 0.1, 0.1, 0.05, 0.05, 0.0125, 0.1, 0.1],
618
+ "eval_vocab": [None, None, SINGING_SOLO_CLASS, None, None, None, None, None], # None means instrument-agnostic F1 for each dataset
619
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
620
+ "val_max_num_files": 20, # max 20 files per dataset
621
+ "test_max_num_files": None,
622
+ },
623
+ "all_singing_drum_v1": { # for singing-only and drum-only tasks
624
+ "presets": [
625
+ "slakh", "musicnet_thickstun_em", "mir_st500_stem", "enstdrums_dtm",
626
+ "guitarset_pshift", "egmd", "urmp", "maestro"
627
+ ],
628
+ "weights": [0.5, 0.1, 0.1, 0.05, 0.05, 0.0125, 0.1, 0.1],
629
+ "eval_vocab": [None, None, SINGING_SOLO_CLASS, None, None, None, None, None], # None means instrument-agnostic F1 for each dataset
630
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
631
+ "val_max_num_files": 20, # max 20 files per dataset
632
+ "test_max_num_files": None,
633
+ },
634
+ "all_cross": { # including Mestro and URMP
635
+ "presets": [
636
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
637
+ "guitarset_pshift", "egmd", "urmp", "maestro"
638
+ ],
639
+ "weights": [0.5, 0.1, 0.125, 0.075, 0.025, 0.01, 0.1, 0.1],
640
+ "eval_vocab": [None, None, SINGING_SOLO_CLASS, None, None, None, None, None], # None means instrument-agnostic F1 for each dataset
641
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
642
+ "val_max_num_files": 20, # max 20 files per dataset
643
+ "test_max_num_files": None,
644
+ },
645
+ "all_cross_rebal": { # rebalanced for cross-augment, using spleeter
646
+ "presets": [
647
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
648
+ "guitarset_pshift", "egmd", "urmp", "maestro"
649
+ ],
650
+ "weights": [0.4, 0.15, 0.15, 0.075, 0.025, 0.01, 0.1, 0.1],
651
+ "eval_vocab": [None, None, SINGING_SOLO_CLASS, None, None, None, None, None], # None means instrument-agnostic F1 for each dataset
652
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
653
+ "val_max_num_files": 20, # max 20 files per dataset
654
+ "test_max_num_files": None,
655
+ },
656
+ "all_cross_rebal2": { # rebalanced for cross-augment, using spleeter
657
+ "presets": [
658
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
659
+ "guitarset_pshift", "egmd", "urmp", "maestro"
660
+ ],
661
+ "weights": [0.275, 0.19, 0.19, 0.1, 0.025, 0.02, 0.1, 0.1],
662
+ "eval_vocab": [None, None, SINGING_SOLO_CLASS, None, None, None, None, None], # None means instrument-agnostic F1 for each dataset
663
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
664
+ "val_max_num_files": 20, # max 20 files per dataset
665
+ "test_max_num_files": None,
666
+ },
667
+ "all_cross_rebal4": { # rebalanced for cross-augment, using spleeter
668
+ "presets": [
669
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
670
+ "guitarset_pshift", "egmd", "urmp", "maestro"
671
+ ],
672
+ "weights": [0.258, 0.19, 0.2, 0.125, 0.022, 0.005, 0.1, 0.1],
673
+ "eval_vocab": [None, None, SINGING_SOLO_CLASS, None, None, None, None, None], # None means instrument-agnostic F1 for each dataset
674
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
675
+ "val_max_num_files": 20, # max 20 files per dataset
676
+ "test_max_num_files": None,
677
+ },
678
+ "all_cross_rebal5": { # rebalanced for cross-augment, using spleeter
679
+ "presets": [
680
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
681
+ "guitarset_pshift", "egmd", "urmp", "maestro"
682
+ ],
683
+ "weights": [0.295, 0.19, 0.24, 0.05, 0.02, 0.005, 0.1, 0.1],
684
+ "eval_vocab": [None, None, SINGING_SOLO_CLASS, None, None, None, None, None], # None means instrument-agnostic F1 for each dataset
685
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
686
+ "val_max_num_files": 20, # max 20 files per dataset
687
+ "test_max_num_files": None,
688
+ },
689
+ "all_cross_stem": { # accomp stem for sub-task learning + rebalanced for cross-augment
690
+ "presets": [
691
+ "slakh", "musicnet_thickstun_em", "mir_st500_stem", "enstdrums_dtm",
692
+ "guitarset_pshift", "egmd", "urmp", "maestro"
693
+ ],
694
+ "weights": [0.4, 0.15, 0.15, 0.075, 0.025, 0.01, 0.1, 0.1],
695
+ "eval_vocab": [None, None, SINGING_SOLO_CLASS, None, None, None, None, None], # None means instrument-agnostic F1 for each dataset
696
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
697
+ "val_max_num_files": 20, # max 20 files per dataset
698
+ "test_max_num_files": None,
699
+ },
700
+ "all_cross_stem_rebal3": { # accomp stem for sub-task learning + rebalanced for cross-augment
701
+ "presets": [
702
+ "slakh", "musicnet_thickstun_em", "mir_st500_stem", "enstdrums_dtm",
703
+ "guitarset_pshift", "egmd", "urmp", "maestro"
704
+ ],
705
+ "weights": [0.265, 0.18, 0.21, 0.1, 0.025, 0.02, 0.1, 0.1],
706
+ "eval_vocab": [None, None, SINGING_SOLO_CLASS, None, None, None, None, None], # None means instrument-agnostic F1 for each dataset
707
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
708
+ "val_max_num_files": 20, # max 20 files per dataset
709
+ "test_max_num_files": None,
710
+ },
711
+ "all_cross_v6": { # +cmeida +idmt_smt_bass
712
+ "presets": [
713
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
714
+ "guitarset", "egmd", "urmp", "maestro", "idmt_smt_bass", "cmedia_voc",
715
+ ],
716
+ "weights": [0.295, 0.19, 0.19, 0.05, 0.01, 0.005, 0.1, 0.1, 0.01, 0.05],
717
+ "eval_vocab": [None, None, SINGING_SOLO_CLASS, None, None, None, None, None, BASS_SOLO_CLASS, SINGING_SOLO_CLASS], # None means instrument-agnostic F1 for each dataset
718
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
719
+ "val_max_num_files": 20, # max 20 files per dataset
720
+ "test_max_num_files": None,
721
+ },
722
+ "all_cross_v6_geerdes": { # +geerdes_half
723
+ "presets": [
724
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
725
+ "guitarset", "egmd", "urmp", "maestro", "idmt_smt_bass", "cmedia_voc",
726
+ "geerdes_half", "geerdes_half_sep"
727
+ ],
728
+ "weights": [0.295, 0.19, 0.19, 0.05, 0.01, 0.005, 0.075, 0.075, 0.01, 0.05, 0.025, 0.025],
729
+ "eval_vocab": [None, None, SINGING_SOLO_CLASS, None, None, None, None, None, BASS_SOLO_CLASS,
730
+ SINGING_SOLO_CLASS, GM_INSTR_CLASS_PLUS, GM_INSTR_CLASS_PLUS], # None means instrument-agnostic F1 for each dataset
731
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
732
+ "val_max_num_files": 20, # max 20 files per dataset
733
+ "test_max_num_files": None,
734
+ },
735
+ "all_cross_v6_geerdes_rebal": { # +geerdes_half
736
+ "presets": [
737
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
738
+ "guitarset", "egmd", "urmp", "maestro", "idmt_smt_bass", "cmedia_voc",
739
+ "geerdes_half", "geerdes_half_sep"
740
+ ],
741
+ "weights": [0.245, 0.175, 0.19, 0.05, 0.01, 0.005, 0.075, 0.05, 0.01, 0.05, 0.075, 0.075],
742
+ "eval_vocab": [None, None, SINGING_SOLO_CLASS, None, None, None, None, None, BASS_SOLO_CLASS,
743
+ SINGING_SOLO_CLASS, GM_INSTR_EXT_CLASS_PLUS, GM_INSTR_EXT_CLASS_PLUS], # None means instrument-agnostic F1 for each dataset
744
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
745
+ "val_max_num_files": 20, # max 20 files per dataset
746
+ "test_max_num_files": None,
747
+ },
748
+ "all_cross_v7": {
749
+ "presets": [
750
+ "slakh", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
751
+ "guitarset_progression_pshift", "egmd", "urmp", "maestro", "idmt_smt_bass", "cmedia_voc",
752
+ ],
753
+ "weights": [0.295, 0.19, 0.191, 0.05, 0.01, 0.004, 0.1, 0.1, 0.01, 0.05],
754
+ "eval_vocab": [None, None, SINGING_SOLO_CLASS, None, None, None, None, None, BASS_SOLO_CLASS, SINGING_SOLO_CLASS], # None means instrument-agnostic F1 for each dataset
755
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
756
+ "val_max_num_files": 20, # max 20 files per dataset
757
+ "test_max_num_files": None,
758
+ },
759
+ "all_cross_final": {
760
+ "presets": [
761
+ "slakh_final", "musicnet_thickstun_em", "mir_st500_voc", "enstdrums_dtp",
762
+ "guitarset_progression_pshift", "egmd", "urmp", "maestro_final", "idmt_smt_bass", "cmedia_voc",
763
+ ],
764
+ "weights": [0.295, 0.19, 0.191, 0.05, 0.01, 0.004, 0.1, 0.1, 0.01, 0.05],
765
+ "eval_vocab": [None, None, SINGING_SOLO_CLASS, None, None, None, None, None, BASS_SOLO_CLASS, SINGING_SOLO_CLASS], # None means instrument-agnostic F1 for each dataset
766
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
767
+ "val_max_num_files": 20, # max 20 files per dataset
768
+ "test_max_num_files": None,
769
+ },
770
+ "all_eval_final": { # The final evaluation set
771
+ "presets": [
772
+ "slakh", "musicnet_thickstun", "musicnet_thickstun_em", "musicnet_thickstun_ext",
773
+ "musicnet_thickstun_ext_em", "mir_st500_voc", "mir_st500", "enstdrums_dtp",
774
+ "enstdrums_dtm", "guitarset_progression_pshift", "rwc_pop_bass", "maestro", "urmp",
775
+ "maps_default", "rwc_pop_full", # "geerdes", "geerdes_sep",
776
+ ],
777
+ "eval_vocab": [
778
+ GM_INSTR_CLASS, MUSICNET_INSTR_CLASS, MUSICNET_INSTR_CLASS, MUSICNET_INSTR_CLASS,
779
+ MUSICNET_INSTR_CLASS, SINGING_SOLO_CLASS, SINGING_SOLO_CLASS, None,
780
+ None, None, BASS_SOLO_CLASS, PIANO_SOLO_CLASS, GM_INSTR_CLASS,
781
+ PIANO_SOLO_CLASS, GM_INSTR_CLASS_PLUS, # GM_INSTR_CLASS_PLUS, GM_INSTR_CLASS_PLUS
782
+ ],
783
+ "eval_drum_vocab": drum_vocab_presets["ksh"],
784
+ },
785
+ "geerdes_eval": { # Geerdes evaluation sets for models trained without Geerdes.
786
+ "presets": ["geerdes_sep", "geerdes"],
787
+ "eval_vocab": [GM_INSTR_CLASS_PLUS, GM_INSTR_CLASS_PLUS],
788
+ "eval_drum_vocab": drum_vocab_presets["gm"],
789
+ },
790
+ "geerdes_half_eval": { # Geerdes evaluation sets for models trained with Geerdes-half
791
+ "presets": ["geerdes_half_sep", "geerdes_half"],
792
+ "eval_vocab": [GM_INSTR_CLASS_PLUS, GM_INSTR_CLASS_PLUS],
793
+ "eval_drum_vocab": drum_vocab_presets["gm"],
794
+ },
795
+ "minimal": { # slakh + mir_st500 with spleeter
796
+ "presets": ["slakh", "mir_st500_voc"],
797
+ "weights": [0.8, 0.2],
798
+ "eval_vocab": [None, SINGING_SOLO_CLASS], # None means instrument-agnostic F1 for each dataset
799
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
800
+ "val_max_num_files": 20, # max 20 files per dataset
801
+ "test_max_num_files": None,
802
+ },
803
+ "singing_debug": { # slakh + mir_st500 with spleeter
804
+ "presets": ["mir_st500_voc_debug"],
805
+ "weights": [1.0],
806
+ "eval_vocab": [SINGING_SOLO_CLASS], # None means instrument-agnostic F1 for each dataset
807
+ "eval_drum_vocab": drum_vocab_presets["ksh"], # for drums, kick-snare-hihat metric
808
+ "val_max_num_files": 20, # max 20 files per dataset
809
+ "test_max_num_files": None,
810
+ },
811
+ }
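Editorial note: a minimal sketch of how a multi preset might be expanded into its weighted single-dataset presets. The helper below is hypothetical; the repository's loader may resolve presets differently:

def resolve_multi_preset(name: str):
    # Pair each referenced single preset with its sampling weight.
    multi = data_preset_multi_cfg[name]
    presets = multi["presets"]
    weights = multi.get("weights", [1.0 / len(presets)] * len(presets))
    return [(data_preset_single_cfg[p], w) for p, w in zip(presets, weights)]

# e.g. resolve_multi_preset("all_cross_final") pairs the "slakh_final" preset dict
# with weight 0.295, "musicnet_thickstun_em" with 0.19, and so on.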
config/vocabulary.py ADDED
@@ -0,0 +1,384 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ """vocabulary.py
11
+
12
+ Vocabulary for instrument classes. Vocabulary can be used as train_vocab
13
+ or test_vocab in data_presets.py or train.py arguments.
14
+
15
+ - When it is used as train_vocab, it maps the instrument classes to the first
16
+ program number of the class. For example, if you use 'GM_INSTR_CLASS' as
17
+ train_vocab, then the program number of 'Piano' is [0,1,2,3,4,5,6,7]. These
18
+ program numbers are trained as program [0] in the model.
19
+
20
+ - When it is used as eval_vocab, any program number in the instrument class
21
+ is considered as correct.
22
+
23
+
24
+ MUSICNET_INSTR_CLASS: 3 classes used for MusicNet benchmark
25
+ GM_INSTR_CLASS: equivalent to 'MIDI Class' defined by MT3.
26
+ GM_INSTR_CLASS_PLUS: GM_INSTR_CLASS + singing voice
27
+ GM_INSTR_FULL: 128 GM instruments, which is extended from 'MT3_FULL'
28
+ MT3_FULL: this matches the class names in Table 3 of MT3 paper
29
+ ENST_DRUM_NOTES: 20 drum notes used in ENST dataset
30
+ GM_DRUM_NOTES: 45 GM drum notes with percussions
31
+
32
+ Program 128 is reserved for 'drum' internally.
33
+ Program 129 is reserved for 'unannotated', internally.
34
+ Program 100 is reserved for 'singing voice (melody)' in GM_INSTR_CLASS_PLUS.
35
+ Program 101 is reserved for 'singing voice (chorus)' in GM_INSTR_CLASS_PLUS.
36
+
37
+
38
+ """
39
+ # yapf: disable
40
+ import numpy as np
41
+
42
+ PIANO_SOLO_CLASS = {
43
+ "Piano": np.arange(0, 8),
44
+ }
45
+
46
+ GUITAR_SOLO_CLASS = {
47
+ "Guitar": np.arange(24, 32),
48
+ }
49
+
50
+ SINGING_SOLO_CLASS = {
51
+ "Singing Voice": [100, 101],
52
+ }
53
+
54
+ SINGING_CHORUS_SEP_CLASS = {
55
+ "Singing Voice": [100],
56
+ "Singing Voice (chorus)": [101],
57
+ }
58
+
59
+ BASS_SOLO_CLASS = {
60
+ "Bass": np.arange(32, 40),
61
+ }
62
+
63
+ MUSICNET_INSTR_CLASS = {
64
+ "Piano": np.arange(0, 8),
65
+ "Strings": np.arange(40, 52), # Solo strings + ensemble strings
66
+ "Winds": np.arange(64, 80), # Reed + Pipe
67
+ }
68
+
69
+ GM_INSTR_CLASS = {
70
+ "Piano": np.arange(0, 8),
71
+ "Chromatic Percussion": np.arange(8, 16),
72
+ "Organ": np.arange(16, 24),
73
+ "Guitar": np.arange(24, 32),
74
+ "Bass": np.arange(32, 40),
75
+ "Strings": np.arange(40, 56), # Strings + Ensemble
76
+ # "Strings": np.arange(40, 48),
77
+ # "Ensemble": np.arange(48, 56),
78
+ "Brass": np.arange(56, 64),
79
+ "Reed": np.arange(64, 72),
80
+ "Pipe": np.arange(72, 80),
81
+ "Synth Lead": np.arange(80, 88),
82
+ "Synth Pad": np.arange(88, 96),
83
+ }
84
+
85
+ GM_INSTR_CLASS_PLUS = GM_INSTR_CLASS.copy()
86
+ GM_INSTR_CLASS_PLUS["Singing Voice"] = [100, 101]
87
+
88
+ GM_INSTR_EXT_CLASS = { # Best for enjoyable MIDI file generation
89
+ "Acoustic Piano": [0, 1, 3, 6, 7],
90
+ "Electric Piano": [2, 4, 5],
91
+ "Chromatic Percussion": np.arange(8, 16),
92
+ "Organ": np.arange(16, 24),
93
+ "Guitar (clean)": np.arange(24, 28),
94
+ "Guitar (distortion)": [30, 28, 29, 31], # np.arange(28, 32),
95
+ "Bass": [33, 32, 34, 35, 36, 37, 38, 39], # np.arange(32, 40),
96
+ "Strings": [48, 40, 41, 42, 43, 44, 45, 46, 47, 49, 50, 51, 52, 53, 54, 55], # np.arange(40, 56),
97
+ "Brass": np.arange(56, 64),
98
+ "Reed": np.arange(64, 72),
99
+ "Pipe": np.arange(72, 80),
100
+ "Synth Lead": np.arange(80, 88),
101
+ "Synth Pad": np.arange(88, 96),
102
+ }
103
+ GM_INSTR_EXT_CLASS_PLUS = GM_INSTR_EXT_CLASS.copy()
104
+ GM_INSTR_EXT_CLASS_PLUS["Singing Voice"] = [100]
105
+ GM_INSTR_EXT_CLASS_PLUS["Singing Voice (chorus)"] = [101]
106
+
107
+ GM_INSTR_FULL = {
108
+ "Acoustic Grand Piano": [0],
109
+ "Bright Acoustic Piano": [1],
110
+ "Electric Grand Piano": [2],
111
+ "Honky-tonk Piano": [3],
112
+ "Electric Piano 1": [4],
113
+ "Electric Piano 2": [5],
114
+ "Harpsichord": [6],
115
+ "Clavinet": [7],
116
+ "Celesta": [8],
117
+ "Glockenspiel": [9],
118
+ "Music Box": [10],
119
+ "Vibraphone": [11],
120
+ "Marimba": [12],
121
+ "Xylophone": [13],
122
+ "Tubular Bells": [14],
123
+ "Dulcimer": [15],
124
+ "Drawbar Organ": [16],
125
+ "Percussive Organ": [17],
126
+ "Rock Organ": [18],
127
+ "Church Organ": [19],
128
+ "Reed Organ": [20],
129
+ "Accordion": [21],
130
+ "Harmonica": [22],
131
+ "Tango Accordion": [23],
132
+ "Acoustic Guitar (nylon)": [24],
133
+ "Acoustic Guitar (steel)": [25],
134
+ "Electric Guitar (jazz)": [26],
135
+ "Electric Guitar (clean)": [27],
136
+ "Electric Guitar (muted)": [28],
137
+ "Overdriven Guitar": [29],
138
+ "Distortion Guitar": [30],
139
+ "Guitar Harmonics": [31],
140
+ "Acoustic Bass": [32],
141
+ "Electric Bass (finger)": [33],
142
+ "Electric Bass (pick)": [34],
143
+ "Fretless Bass": [35],
144
+ "Slap Bass 1": [36],
145
+ "Slap Bass 2": [37],
146
+ "Synth Bass 1": [38],
147
+ "Synth Bass 2": [39],
148
+ "Violin": [40],
149
+ "Viola": [41],
150
+ "Cello": [42],
151
+ "Contrabass": [43],
152
+ "Tremolo Strings": [44],
153
+ "Pizzicato Strings": [45],
154
+ "Orchestral Harp": [46],
155
+ "Timpani": [47],
156
+ "String Ensemble 1": [48],
157
+ "String Ensemble 2": [49],
158
+ "Synth Strings 1": [50],
159
+ "Synth Strings 2": [51],
160
+ "Choir Aahs": [52],
161
+ "Voice Oohs": [53],
162
+ "Synth Choir": [54],
163
+ "Orchestra Hit": [55],
164
+ "Trumpet": [56],
165
+ "Trombone": [57],
166
+ "Tuba": [58],
167
+ "Muted Trumpet": [59],
168
+ "French Horn": [60],
169
+ "Brass Section": [61],
170
+ "Synth Brass 1": [62],
171
+ "Synth Brass 2": [63],
172
+ "Soprano Sax": [64],
173
+ "Alto Sax": [65],
174
+ "Tenor Sax": [66],
175
+ "Baritone Sax": [67],
176
+ "Oboe": [68],
177
+ "English Horn": [69],
178
+ "Bassoon": [70],
179
+ "Clarinet": [71],
180
+ "Piccolo": [72],
181
+ "Flute": [73],
182
+ "Recorder": [74],
183
+ "Pan Flute": [75],
184
+ "Bottle Blow": [76],
185
+ "Shakuhachi": [77],
186
+ "Whistle": [78],
187
+ "Ocarina": [79],
188
+ "Lead 1 (square)": [80],
189
+ "Lead 2 (sawtooth)": [81],
190
+ "Lead 3 (calliope)": [82],
191
+ "Lead 4 (chiff)": [83],
192
+ "Lead 5 (charang)": [84],
193
+ "Lead 6 (voice)": [85],
194
+ "Lead 7 (fifths)": [86],
195
+ "Lead 8 (bass + lead)": [87],
196
+ "Pad 1 (new age)": [88],
197
+ "Pad 2 (warm)": [89],
198
+ "Pad 3 (polysynth)": [90],
199
+ "Pad 4 (choir)": [91],
200
+ "Pad 5 (bowed)": [92],
201
+ "Pad 6 (metallic)": [93],
202
+ "Pad 7 (halo)": [94],
203
+ "Pad 8 (sweep)": [95],
204
+ # "FX 1 (rain)": [96],
205
+ # "FX 2 (soundtrack)": [97],
206
+ # "FX 3 (crystal)": [98],
207
+ # "FX 4 (atmosphere)": [99],
208
+ # "FX 5 (brightness)": [100],
209
+ # "FX 6 (goblins)": [101],
210
+ # "FX 7 (echoes)": [102],
211
+ # "FX 8 (sci-fi)": [103],
212
+ # "Sitar": [104],
213
+ # "Banjo": [105],
214
+ # "Shamisen": [106],
215
+ # "Koto": [107],
216
+ # "Kalimba": [108],
217
+ # "Bagpipe": [109],
218
+ # "Fiddle": [110],
219
+ # "Shanai": [111],
220
+ # "Tinkle Bell": [112],
221
+ # "Agogo": [113],
222
+ # "Steel Drums": [114],
223
+ # "Woodblock": [115],
224
+ # "Taiko Drum": [116],
225
+ # "Melodic Tom": [117],
226
+ # "Synth Drum": [118],
227
+ # "Reverse Cymbal": [119],
228
+ # "Guitar Fret Noise": [120],
229
+ # "Breath Noise": [121],
230
+ # "Seashore": [122],
231
+ # "Bird Tweet": [123],
232
+ # "Telephone Ring": [124],
233
+ # "Helicopter": [125],
234
+ # "Applause": [126],
235
+ # "Gunshot": [127]
236
+ }
237
+
238
+ MT3_FULL = { # this matches the class names in Table 3 of MT3 paper
239
+ "Acoustic Piano": [0, 1, 3, 6, 7],
240
+ "Electric Piano": [2, 4, 5],
241
+ "Chromatic Percussion": np.arange(8, 16),
242
+ "Organ": np.arange(16, 24),
243
+ "Acoustic Guitar": np.arange(24, 26),
244
+ "Clean Electric Guitar": np.arange(26, 29),
245
+ "Distorted Electric Guitar": np.arange(29, 32),
246
+ "Acoustic Bass": [32, 35],
247
+ "Electric Bass": [33, 34, 36, 37, 38, 39],
248
+ "Violin": [40],
249
+ "Viola": [41],
250
+ "Cello": [42],
251
+ "Contrabass": [43],
252
+ "Orchestral Harp": [46],
253
+ "Timpani": [47],
254
+ "String Ensemble": [48, 49, 44, 45],
255
+ "Synth Strings": [50, 51],
256
+ "Choir and Voice": [52, 53, 54],
257
+ "Orchestra Hit": [55],
258
+ "Trumpet": [56, 59],
259
+ "Trombone": [57],
260
+ "Tuba": [58],
261
+ "French Horn": [60],
262
+ "Brass Section": [61, 62, 63],
263
+ "Soprano/Alto Sax": [64, 65],
264
+ "Tenor Sax": [66],
265
+ "Baritone Sax": [67],
266
+ "Oboe": [68],
267
+ "English Horn": [69],
268
+ "Bassoon": [70],
269
+ "Clarinet": [71],
270
+ "Pipe": [73, 72, 74, 75, 76, 77, 78, 79],
271
+ "Synth Lead": np.arange(80, 88),
272
+ "Synth Pad": np.arange(88, 96),
273
+ }
274
+
275
+ MT3_FULL_PLUS = MT3_FULL.copy()
276
+ MT3_FULL_PLUS["Singing Voice"] = [100]
277
+ MT3_FULL_PLUS["Singing Voice (chorus)"] = [101]
278
+
279
+ ENST_DRUM_NOTES = {
280
+ "bd": [36], # Kick Drum
281
+ "sd": [38], # Snare Drum
282
+ "sweep": [0], # Brush sweep
283
+ "sticks": [1], # Sticks
284
+ "rs": [2], # Rim shot
285
+ "cs": [37], # X-stick
286
+ "chh": [42], # Closed Hi-Hat
287
+ "ohh": [46], # Open Hi-Hat
288
+ "cb": [56], # Cowbell
289
+ "c": [3], # Other Cymbals
290
+ "lmt": [47], # Low Mid Tom
291
+ "mt": [48], # Mid Tom
292
+ "mtr": [58], # Mid Tom Rim
293
+ "lt": [45], # Low Tom
294
+ "ltr": [50], # Low Tom Rim
295
+ "lft": [41], # Low Floor Tom
296
+ "rc": [51], # Ride Cymbal
297
+ "ch": [52], # Chinese Cymbal
298
+ "cr": [49], # Crash Cymbal
299
+ "spl": [55], # Splash Cymbal
300
+ }
301
+
302
+ EGMD_DRUM_NOTES = {
303
+ "Kick Drum": [36], # Listed by order of most common annotation
304
+ "Snare X-stick": [37], # Snare X-Stick, https://youtu.be/a2KFrrKaoYU?t=80
305
+ "Snare Drum": [38], # Snare (head) and Electric Snare
306
+ "Closed Hi-Hat": [42, 44, 22], # 44 is pedal hi-hat
307
+ "Open Hi-Hat": [46, 26],
308
+ "Cowbell": [56],
309
+ "High Floor Tom": [43],
310
+ "Low Floor Tom": [41], # Lowest Tom
311
+ "Low Tom": [45],
312
+ "Low-Mid Tom": [47],
313
+ "Mid Tom": [48],
314
+ "Low Tom (Rim)": [50], # TD-17: 47, 50, 58
315
+ "Mid Tom (Rim)": [58],
316
+ # "Ride Cymbal": [51, 53, 59],
317
+ "Ride": [51],
318
+ "Ride (Bell)": [53], # https://youtu.be/b94hZoM5s3k?t=323
319
+ "Ride (Edge)": [59],
320
+ "Chinese Cymbal": [52],
321
+ "Crash Cymbal": [49, 57],
322
+ "Splash Cymbal": [55],
323
+ }
324
+
325
+ # Inspired by Roland TD-17 MIDI note map, https://rolandus.zendesk.com/hc/en-us/articles/360005173411-TD-17-Default-Factory-MIDI-Note-Map
326
+ GM_DRUM_NOTES = {
327
+ "Kick Drum": [36, 35], # Listed by order of most common annotation
328
+ "Snare X-stick": [37, 2], # Snare X-Stick, https://youtu.be/a2KFrrKaoYU?t=80
329
+ "Snare Drum": [38, 40], # Snare (head) and Electric Snare
330
+ "Closed Hi-Hat": [42, 44, 22], # 44 is pedal hi-hat
331
+ "Open Hi-Hat": [46, 26],
332
+ "Cowbell": [56],
333
+ "High Floor Tom": [43],
334
+ "Low Floor Tom": [41], # Lowest Tom
335
+ "Low Tom": [45],
336
+ "Low-Mid Tom": [47],
337
+ "Mid Tom": [48],
338
+ "Low Tom (Rim)": [50], # TD-17: 47, 50, 58
339
+ "Mid Tom (Rim)": [58],
340
+ # "Ride Cymbal": [51, 53, 59],
341
+ "Ride": [51],
342
+ "Ride (Bell)": [53], # https://youtu.be/b94hZoM5s3k?t=323
343
+ "Ride (Edge)": [59],
344
+ "Chinese Cymbal": [52],
345
+ "Crash Cymbal": [49, 57],
346
+ "Splash Cymbal": [55],
347
+ }
348
+
349
+ KICK_SNARE_HIHAT = {
350
+ "Kick Drum": [36, 35],
351
+ "Snare Drum": [38, 40],
352
+ # "Snare Drum + X-Stick": [38, 40, 37, 2],
353
+ # "Snare X-stick": [37, 2], # Snare X-Stick, https://youtu.be/a2KFrrKaoYU?t=80
354
+ "Hi-Hat": [42, 44, 46, 22, 26],
355
+ # "Ride Cymbal": [51, 53, 59],
356
+ # "Hi-Hat + Ride": [42, 44, 46, 22, 26, 51, 53, 59],
357
+ # "HiHat + all Cymbals": [42, 44, 46, 22, 26, 51, 53, 59, 52, 49, 57, 55],
358
+ # "Kick Drum + Low Tom": [36, 35, 45],
359
+ # "All Cymbal": [51, 53, 59, 52, 49, 57, 55]
360
+ # "all": np.arange(30, 60)
361
+ }
362
+
363
+ drum_vocab_presets = {
364
+ "gm": GM_DRUM_NOTES,
365
+ "egmd": EGMD_DRUM_NOTES,
366
+ "enst": ENST_DRUM_NOTES,
367
+ "ksh": KICK_SNARE_HIHAT,
368
+ "kshr": {
369
+ "Kick Drum": [36, 35],
370
+ "Snare Drum": [38, 40],
371
+ "Hi-Hat": [42, 44, 46, 22, 26, 51, 53, 59],
372
+ }
373
+ }
374
+
375
+ program_vocab_presets = {
376
+ "gm_full": GM_INSTR_FULL, # 96 classes (except drums)
377
+ "mt3_full": MT3_FULL, # 34 classes (except drums) as in MT3 paper
378
+ "mt3_midi": GM_INSTR_CLASS, # 11 classes (except drums) as in MT3 paper
379
+ "mt3_midi_plus": GM_INSTR_CLASS_PLUS, # 11 classes + singing (except drums)
380
+ "mt3_full_plus": MT3_FULL_PLUS, # 34 classes (except drums) mt3_full + singing (except drums)
381
+ "gm": GM_INSTR_CLASS, # 11 classes (except drums)
382
+ "gm_plus": GM_INSTR_CLASS_PLUS, # 11 classes + singing (except drums)
383
+ "gm_ext_plus": GM_INSTR_EXT_CLASS_PLUS, # 13 classes + singing + chorus (except drums)
384
+ }
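
To make the train/eval semantics described in the vocabulary.py docstring concrete, here is a minimal sketch (not part of the repository; the helper names and the import path are assumptions) of how a class preset such as `GM_INSTR_CLASS` can be turned into a training-time program map and an evaluation-time class check:

```python
from config.vocabulary import GM_INSTR_CLASS  # preset defined above; import path assumed


def build_train_program_map(vocab: dict) -> dict:
    """As train_vocab: every program in a class collapses onto the class's first program."""
    return {int(p): int(list(programs)[0]) for programs in vocab.values() for p in programs}


def is_correct_for_eval(vocab: dict, pred_program: int, ref_program: int) -> bool:
    """As eval_vocab: a prediction counts as correct if it lands in the reference's class."""
    for programs in vocab.values():
        members = {int(p) for p in programs}
        if ref_program in members:
            return pred_program in members
    return False


train_map = build_train_program_map(GM_INSTR_CLASS)
assert train_map[3] == 0                          # any Piano program (0-7) is trained as program 0
assert is_correct_for_eval(GM_INSTR_CLASS, 5, 2)  # both 5 and 2 are 'Piano', so this counts as correct
```
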
content/model_output/test.mid ADDED
Binary file (2.94 kB). View file
 
extras/.DS_Store ADDED
Binary file (10.2 kB). View file
 
extras/Dockerfile ADDED
@@ -0,0 +1,18 @@
1
+ FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-devel
2
+ LABEL maintainer="https://github.com/mimbres/YourMT3"
3
+
4
+ ENV TZ=Europe/London \
5
+ DEBIAN_FRONTEND=noninteractive
6
+ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
7
+
8
+ RUN apt-get update
9
+ ENV LANG=C.UTF-8 LC_ALL=C.UTF-8
10
+
11
+ RUN apt-get update --fix-missing && apt-get install -y wget curl \
12
+ nano git ffmpeg sox tmux htop
13
+ RUN pip3 install --upgrade pip
14
+ RUN pip3 install mirdata mido git+https://github.com/craffel/mir_eval.git \
15
+ matplotlib lightning>=2.0.2 pytest-timeout pytest deprecated librosa \
16
+ einops transformers wandb
17
+
18
+ CMD [ "/bin/bash" ]
extras/demo_cross_augmentation.py ADDED
@@ -0,0 +1,69 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ from typing import Dict, Tuple
11
+ from copy import deepcopy
12
+ import soundfile as sf
13
+ import torch
14
+ from utils.data_modules import AMTDataModule
15
+ from config.data_presets import data_preset_single_cfg, data_preset_multi_cfg
16
+ from utils.augment import intra_stem_augment_processor
17
+
18
+
19
+ def get_ds(data_preset_multi: Dict, train_num_samples_per_epoch: int = 90000):
20
+ dm = AMTDataModule(data_preset_multi=data_preset_multi, train_num_samples_per_epoch=train_num_samples_per_epoch)
21
+ dm.setup('fit')
22
+ dl = dm.train_dataloader()
23
+ ds = dl.flattened[0].dataset
24
+ return ds
25
+
26
+
27
+ def debug_func(num_segments: int = 10):
28
+ sampled_data, sampled_ids = ds._get_rand_segments_from_cache(num_segments)
29
+ ux_sampled_data, _ = ds._get_rand_segments_from_cache(ux_count_sum, False, sampled_ids)  # NOTE: `ux_count_sum` is not defined in this script
30
+ s = deepcopy(sampled_data)
31
+ intra_stem_augment_processor(sampled_data, submix_audio=False)
32
+
33
+
34
+ def gen_audio(index: int = 0):
35
+ # audio_arr: (b, 1, nframe), note_token_arr: (b, l), task_token_arr: (b, task_l)
36
+ audio_arr, note_token_arr, task_token_arr = ds.__getitem__(index)
37
+
38
+ # merge all the segments into one audio file
39
+ audio = audio_arr.permute(0, 2, 1).reshape(-1).squeeze().numpy()
40
+
41
+ # save the audio file
42
+ sf.write('xaug_demo_audio.wav', audio, 16000, subtype='PCM_16')
43
+
44
+
45
+ data_preset_multi = data_preset_multi_cfg["all_cross_rebal5"]
46
+ ds = get_ds(data_preset_multi)
47
+ ds.random_amp_range = [0.8, 1.1]
48
+ ds.stem_xaug_policy = {
49
+ "max_k": 5,
50
+ "tau": 0.3,
51
+ "alpha": 1.0,
52
+ "max_subunit_stems": 12,
53
+ "no_instr_overlap": True,
54
+ "no_drum_overlap": True,
55
+ "uhat_intra_stem_augment": True,
56
+ }
57
+ gen_audio(3)
58
+
59
+ # for k in ds.cache.keys():
60
+ # arr = ds.cache[k]['audio_array']
61
+ # arr = np.sum(arr, axis=1).reshape(-1)
62
+ # # sf.write(f'xxx/{k}.wav', arr, 16000, subtype='PCM_16')
63
+ # if np.min(arr) > -0.5:
64
+ # print(k)
65
+
66
+ # arr = ds.cache[52]['audio_array']
67
+ # for i in range(arr.shape[1]):
68
+ # a = arr[:, i, :].reshape(-1)
69
+ # sf.write(f'xxx52/52_{i}.wav', a, 16000, subtype='PCM_16')
extras/download_mirst500.py ADDED
@@ -0,0 +1,50 @@
1
+ import os
2
+ import json
3
+ import numpy as np
4
+ from pytube import YouTube
5
+
6
+
7
+ def downloadMp3(yt, idx, askPath=0):
8
+ # extract only audio
9
+ video = yt.streams.filter(only_audio=True).first()
10
+
11
+ destination = 'mp3File'
12
+ # check for destination to save file
13
+ if (askPath == 1):
14
+ print("Enter the destination (leave blank for default dir mp3File)")
15
+ destination = str(input(">> ")) or 'mp3File'
16
+
17
+ # download the file
18
+ out_file = video.download(output_path=destination)
19
+
20
+ # save the file
21
+ # base, ext = os.path.splitext(out_file)
22
+ dir_path, file_base = os.path.split(out_file)
23
+
24
+ new_file = os.path.join(dir_path, f'{idx}.mp3')
25
+ os.rename(out_file, new_file)
26
+ # result of success
27
+ print(yt.title + " has been successfully downloaded.")
28
+
29
+
30
+ MISSING_FILE_IDS = [
31
+ 16, 26, 33, 38, 40, 50, 53, 55, 60, 81, 82, 98, 107, 122, 126, 127, 129, 141, 145, 150, 172,
32
+ 201, 205, 206, 215, 216, 221, 226, 232, 240, 243, 245, 255, 257, 267, 273, 278, 279, 285, 287,
33
+ 291, 304, 312, 319, 321, 325, 329, 332, 333, 336, 337, 342, 359, 375, 402, 417, 438, 445, 454,
34
+ 498
35
+ ]
36
+
37
+ data_link_file = '../../../data/mir_St500_yourmt3_16k/MIR-ST500_20210206/MIR-ST500_link.json'
38
+ data_link = json.load(open(data_link_file, 'r'))
39
+ download_fail = []
40
+
41
+ for i in MISSING_FILE_IDS:
42
+ print(f'Downloading {i}...')
43
+ yt = YouTube(data_link[str(i)])
44
+ try:
45
+ downloadMp3(yt, idx=i)
46
+ except Exception:  # avoid a bare except so KeyboardInterrupt is not swallowed
47
+ download_fail.append(i)
48
+ print(f'Failed to download {i}.')
49
+
50
+ print(f'Failed to download {len(download_fail)} files: {download_fail}')
extras/examples/.DS_Store ADDED
Binary file (6.15 kB). View file
 
extras/examples/1733.mid ADDED
Binary file (16 kB). View file
 
extras/examples/2106.mid ADDED
Binary file (12.9 kB). View file
 
extras/examples/803_002_167s95.mid ADDED
Binary file (9.94 kB). View file
 
extras/examples/piano_converted.mid ADDED
Binary file (42.9 kB). View file
 
extras/inspecting_slakh_bass.py ADDED
@@ -0,0 +1,34 @@
1
+ import mirdata
2
+ from utils.mirdata_dev.datasets import slakh16k
3
+
4
+ ds = slakh16k.Dataset(data_home='../../data', version='2100-yourmt3-16k')
5
+ mtrack_ids = ds.mtrack_ids
6
+
7
+ # Collect plugin names
8
+ plugin_names = set()
9
+ cnt = 0
10
+ for mtrack_id in mtrack_ids:
11
+ mtrack = ds.multitrack(mtrack_id)
12
+ for track_id in mtrack.track_ids:
13
+ track = ds.track(track_id)
14
+ if track.instrument.lower() == 'bass':
15
+ if track.plugin_name == 'upright_bass.nkm':
16
+ print(f'{str(cnt)}: {track_id}: {track.plugin_name}')
17
+ # if track.plugin_name not in plugin_names:
18
+ # plugin_names.add(track.plugin_name)
19
+ # print(f'{str(cnt)}: {track_id}: {track.plugin_name}')
20
+ # cnt += 1
21
+ """
22
+ 0: Track00001-S03: scarbee_rickenbacker_bass_palm_muted.nkm
23
+ 1: Track00002-S01: classic_bass.nkm
24
+ 2: Track00004-S01: scarbee_rickenbacker_bass.nkm
25
+ 3: Track00005-S04: scarbee_jay_bass_both.nkm
26
+ 4: Track00006-S03: pop_bass.nkm
27
+ 5: Track00008-S00: scarbee_pre_bass.nkm
28
+ 6: Track00013-S00: jazz_upright.nkm
29
+ 7: Track00014-S01: funk_bass.nkm
30
+ 8: Track00016-S01: scarbee_mm_bass.nkm
31
+ 9: Track00024-S07: upright_bass.nkm
32
+ 10: Track00027-S03: scarbee_jay_bass_slap_both.nkm
33
+ 11: Track00094-S08: upright_bass2.nkm
34
+ """
extras/rotary_positional_embedding.py ADDED
@@ -0,0 +1,191 @@
1
+ """rotary_positional_embedding.py - Rotary Positional Embedding
2
+
3
+ code from github.com/lucidrains/rotary-embedding-torch
4
+
5
+ MIT License
6
+ """
7
+
8
+ from math import pi, log
9
+ import torch
10
+ from torch import nn, einsum
11
+ from einops import rearrange, repeat
12
+
13
+
14
+ def exists(val):
15
+ return val is not None
16
+
17
+
18
+ def broadcat(tensors, dim=-1):
19
+ num_tensors = len(tensors)
20
+ shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
21
+ assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
22
+ shape_len = list(shape_lens)[0]
23
+
24
+ dim = (dim + shape_len) if dim < 0 else dim
25
+ dims = list(zip(*map(lambda t: list(t.shape), tensors)))
26
+
27
+ expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
28
+ assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)
29
+ ]), 'invalid dimensions for broadcastable concatenation'
30
+ max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
31
+ expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
32
+ expanded_dims.insert(dim, (dim, dims[dim]))
33
+ expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
34
+ tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
35
+ return torch.cat(tensors, dim=dim)
36
+
37
+
38
+ # rotary embedding helper functions
39
+ def rotate_half(x):
40
+ x = rearrange(x, '... (d r) -> ... d r', r=2)
41
+ x1, x2 = x.unbind(dim=-1)
42
+ x = torch.stack((-x2, x1), dim=-1)
43
+ return rearrange(x, '... d r -> ... (d r)')
44
+
45
+
46
+ def apply_rotary_emb(freqs, t, start_index=0, scale=1.):
47
+ rot_dim, seq_len = freqs.shape[-1], t.shape[-2]
48
+ freqs = freqs[-seq_len:, :]
49
+
50
+ freqs = freqs.to(t)
51
+ end_index = start_index + rot_dim
52
+ assert rot_dim <= t.shape[
53
+ -1], f'feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}'
54
+ t_left, t, t_right = t[..., :start_index], t[..., start_index:end_index], t[..., end_index:]
55
+ t = (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
56
+ return torch.cat((t_left, t, t_right), dim=-1)
57
+
58
+
59
+ # learned rotation helpers
60
+ def apply_learned_rotations(rotations, t, start_index=0, freq_ranges=None):
61
+ if exists(freq_ranges):
62
+ rotations = einsum('..., f -> ... f', rotations, freq_ranges)
63
+ rotations = rearrange(rotations, '... r f -> ... (r f)')
64
+
65
+ rotations = repeat(rotations, '... n -> ... (n r)', r=2)
66
+ return apply_rotary_emb(rotations, t, start_index=start_index)
67
+
68
+
69
+ # classes
70
+ class RotaryEmbedding(nn.Module):
71
+
72
+ def __init__(self,
73
+ dim,
74
+ custom_freqs=None,
75
+ freqs_for='lang',
76
+ theta=10000,
77
+ max_freq=10,
78
+ num_freqs=1,
79
+ learned_freq=False,
80
+ use_xpos=False,
81
+ xpos_scale_base=512,
82
+ interpolate_factor=1.,
83
+ theta_rescale_factor=1.):
84
+ super().__init__()
85
+ # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
86
+ # has some connection to NTK literature
87
+ # https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
88
+ theta *= theta_rescale_factor**(dim / (dim - 2))
89
+
90
+ if exists(custom_freqs):
91
+ freqs = custom_freqs
92
+ elif freqs_for == 'lang':
93
+ freqs = 1. / (theta**(torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))
94
+ elif freqs_for == 'pixel':
95
+ freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi
96
+ elif freqs_for == 'constant':
97
+ freqs = torch.ones(num_freqs).float()
98
+ else:
99
+ raise ValueError(f'unknown modality {freqs_for}')
100
+
101
+ self.cache = dict()
102
+ self.cache_scale = dict()
103
+ self.freqs = nn.Parameter(freqs, requires_grad=learned_freq)
104
+
105
+ # interpolation factors
106
+
107
+ assert interpolate_factor >= 1.
108
+ self.interpolate_factor = interpolate_factor
109
+
110
+ # xpos
111
+
112
+ self.use_xpos = use_xpos
113
+ if not use_xpos:
114
+ self.register_buffer('scale', None)
115
+ return
116
+
117
+ scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
118
+ self.scale_base = xpos_scale_base
119
+ self.register_buffer('scale', scale)
120
+
121
+ def get_seq_pos(self, seq_len, device, dtype, offset=0):
122
+ return (torch.arange(seq_len, device=device, dtype=dtype) +
123
+ offset) / self.interpolate_factor
124
+
125
+ def rotate_queries_or_keys(self, t, seq_dim=-2, offset=0, freq_seq_len=None):
126
+ assert not self.use_xpos, 'you must use `.rotate_queries_and_keys` method instead and pass in both queries and keys, for length extrapolatable rotary embeddings'
127
+
128
+ device, dtype, seq_len = t.device, t.dtype, t.shape[seq_dim]
129
+
130
+ if exists(freq_seq_len):
131
+ assert freq_seq_len >= seq_len
132
+ seq_len = freq_seq_len
133
+
134
+ freqs = self.forward(
135
+ lambda: self.get_seq_pos(seq_len, device=device, dtype=dtype, offset=offset),
136
+ cache_key=f'freqs:{seq_len}|offset:{offset}')
137
+ return apply_rotary_emb(freqs, t)
138
+
139
+ def rotate_queries_with_cached_keys(self, q, k, seq_dim=-2):
140
+ q_len, k_len = q.shape[seq_dim], k.shape[seq_dim]
141
+ assert q_len <= k_len
142
+ q = self.rotate_queries_or_keys(q, seq_dim=seq_dim, freq_seq_len=k_len)
143
+ k = self.rotate_queries_or_keys(k, seq_dim=seq_dim)
144
+ return q, k
145
+
146
+ def rotate_queries_and_keys(self, q, k, seq_dim=-2):
147
+ assert self.use_xpos
148
+ device, dtype, seq_len = q.device, q.dtype, q.shape[seq_dim]
149
+ seq = self.get_seq_pos(seq_len, dtype=dtype, device=device)
150
+ freqs = self.forward(lambda: seq, cache_key=f'freqs:{seq_len}')
151
+ scale = self.get_scale(lambda: seq, cache_key=f'scale:{seq_len}').to(dtype)
152
+ rotated_q = apply_rotary_emb(freqs, q, scale=scale)
153
+ rotated_k = apply_rotary_emb(freqs, k, scale=scale**-1)
154
+ return rotated_q, rotated_k
155
+
156
+ def get_scale(self, t, cache_key=None):
157
+ assert self.use_xpos
158
+
159
+ if exists(cache_key) and cache_key in self.cache:
160
+ return self.cache[cache_key]
161
+
162
+ if callable(t):
163
+ t = t()
164
+
165
+ scale = 1.
166
+ if self.use_xpos:
167
+ power = (t - len(t) // 2) / self.scale_base
168
+ scale = self.scale**rearrange(power, 'n -> n 1')
169
+ scale = torch.cat((scale, scale), dim=-1)
170
+
171
+ if exists(cache_key):
172
+ self.cache[cache_key] = scale
173
+
174
+ return scale
175
+
176
+ def forward(self, t, cache_key=None):
177
+ if exists(cache_key) and cache_key in self.cache:
178
+ return self.cache[cache_key]
179
+
180
+ if callable(t):
181
+ t = t()
182
+
183
+ freqs = self.freqs
184
+
185
+ freqs = einsum('..., f -> ... f', t.type(freqs.dtype), freqs)
186
+ freqs = repeat(freqs, '... n -> ... (n r)', r=2)
187
+
188
+ if exists(cache_key):
189
+ self.cache[cache_key] = freqs
190
+
191
+ return freqs
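
A minimal usage sketch for the `RotaryEmbedding` module above (shapes are arbitrary and the import path is an assumption; only the default, non-xpos configuration is exercised):

```python
import torch
from extras.rotary_positional_embedding import RotaryEmbedding  # import path assumed

rotary = RotaryEmbedding(dim=32)        # rotates the first 32 features of each head
q = torch.randn(2, 8, 128, 64)          # (batch, heads, seq_len, head_dim)
k = torch.randn(2, 8, 128, 64)

q = rotary.rotate_queries_or_keys(q)    # positions are taken from the seq_len axis (dim -2)
k = rotary.rotate_queries_or_keys(k)
# q and k can now be used in standard scaled dot-product attention.
```
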
extras/run_spleeter_mirst500_cmedia.sh ADDED
@@ -0,0 +1,13 @@
1
+ #!/bin/bash
2
+ shopt -s globstar
3
+ for file in "$1"/**/*.wav; do
4
+ output_dir="${file%/*}"
5
+ input_file="$output_dir/converted_Mixture.wav"
6
+ spleeter separate -p spleeter:2stems -o "$output_dir" "$input_file" -f {instrument}.{codec}
7
+ ffmpeg -i "$output_dir/vocals.wav" -acodec pcm_s16le -ac 1 -ar 16000 -y "$output_dir/vocals_16k.wav"
8
+ ffmpeg -i "$output_dir/accompaniment.wav" -acodec pcm_s16le -ac 1 -ar 16000 -y "$output_dir/accompaniment_16k.wav"
9
+ rm "$output_dir/vocals.wav"
10
+ rm "$output_dir/accompaniment.wav"
11
+ mv "$output_dir/vocals_16k.wav" "$output_dir/vocals.wav"
12
+ mv "$output_dir/accompaniment_16k.wav" "$output_dir/accompaniment.wav"
13
+ done
extras/swap_channel.py ADDED
@@ -0,0 +1,122 @@
1
+ import numpy as np
2
+
3
+ a = np.arange(12).reshape(2, 3, 2) # (batch, channel, dim)
4
+ print(a)
5
+ # -> array([[[0, 1], [2, 3], [4, 5]], [[6, 7], [8, 9], [10, 11]]])
6
+
7
+ # Desired API (not implemented in this script): swap_mat = create_swap_channel_mat(input_shape, swap_channel=(1, 2))
8
+
9
+ # will swap channel 1 and 2 of batch 0 with channel 1 and 2 of batch 1
10
+ # b = a @ swap_mat
11
+ # print(b)
12
+ # expected output
13
+ # array([[[0, 1], [8, 9], [10, 11]], [[6, 7], [2, 3], [4, 5]]])
14
+
15
+ import torch
16
+
17
+
18
+ def swap_channels_between_batches(a_tensor, swap_channels):
19
+ # Copy the tensor to avoid modifying the original tensor
20
+ result_tensor = a_tensor.clone()
21
+
22
+ # Unpack the channels to be swapped
23
+ ch1, ch2 = swap_channels
24
+
25
+ # Swap the specified channels between batches
26
+ result_tensor[0, ch1, :], result_tensor[1, ch1, :] = a_tensor[1, ch1, :].clone(), a_tensor[0, ch1, :].clone()
27
+ result_tensor[0, ch2, :], result_tensor[1, ch2, :] = a_tensor[1, ch2, :].clone(), a_tensor[0, ch2, :].clone()
28
+
29
+ return result_tensor
30
+
31
+
32
+ # Define a sample tensor 'a_tensor'
33
+ a_tensor = torch.tensor([[[0, 1], [2, 3], [4, 5]], [[6, 7], [8, 9], [10, 11]]], dtype=torch.float32)
34
+
35
+ # Define channels to swap
36
+ swap_channels = (1, 2) # Channels to swap between batches
37
+
38
+ # Swap the channels between batches
39
+ swapped_tensor = swap_channels_between_batches(a_tensor, swap_channels)
40
+
41
+ # Print the original tensor and the tensor after swapping channels between batches
42
+ print("Original Tensor 'a_tensor':")
43
+ print(a_tensor)
44
+ print("\nTensor after swapping channels between batches:")
45
+ print(swapped_tensor)
46
+
47
+ #-------------------------------------------------
48
+
49
+ import torch
50
+ from einops import rearrange
51
+
52
+
53
+ def shift(arr, num, fill_value=np.nan):
54
+ result = np.empty_like(arr)
55
+ if num > 0:
56
+ result[:num] = fill_value
57
+ result[num:] = arr[:-num]
58
+ elif num < 0:
59
+ result[num:] = fill_value
60
+ result[:num] = arr[-num:]
61
+ else:
62
+ result[:] = arr
63
+ return result
64
+
65
+
66
+ def create_batch_swap_matrix(batch_size, channels, swap_channels):
67
+ swap_mat = np.eye(batch_size * channels)
68
+
69
+ for c in swap_channels:
70
+ idx1 = c # channel index to swap in the first batch
71
+ idx2 = c + channels # channel index to swap in the second batch
72
+
73
+ swap_mat[idx1, idx1], swap_mat[idx2, idx2] = 0, 0 # set the diagonal entries to 0
74
+ swap_mat[idx1, idx2], swap_mat[idx2, idx1] = 1, 1 # swap the corresponding channels
75
+ return swap_mat
76
+
77
+
78
+ # NOTE: redefines create_batch_swap_matrix above; this version swaps the given channels across all batches.
+ def create_batch_swap_matrix(batch_size, channels, swap_channels):
79
+ swap_mat = np.eye(batch_size * channels)
80
+
81
+ # Perform the swap for every listed channel
82
+ for c in swap_channels:
83
+ idx1 = np.arange(c, batch_size * channels, channels) # indices of this channel across all batches
84
+ idx2 = (idx1 + channels) % (batch_size * channels) # use modulo to wrap to the matching channel in the next batch
85
+
86
+ swap_mat[idx1, idx1] = 0
87
+ swap_mat[idx2, idx2] = 0
88
+ swap_mat[idx1, idx2] = 1
89
+ swap_mat[idx2, idx1] = 1
90
+
91
+ return swap_mat
92
+
93
+
94
+ # NOTE: shadows the clone-based swap_channels_between_batches defined earlier; this version expects a swap matrix.
+ def swap_channels_between_batches(input_tensor, swap_matrix):
95
+ reshaped_tensor = rearrange(input_tensor, 'b c d -> (b c) d')
96
+ swapped_tensor = swap_matrix @ reshaped_tensor
97
+ return rearrange(swapped_tensor, '(b c) d -> b c d', b=input_tensor.shape[0])
98
+
99
+
100
+ # Example parameters
101
+ batch_size = 2
102
+ channels = 3
103
+ # swap_info = {
104
+ # : [1, 2] # batch_index: [channel_indices]
105
+ # }
106
+ swap_channels = [1, 2] # channels to swap
107
+
108
+ # Create an example tensor
109
+ input_tensor = torch.tensor([[[0, 1], [2, 3], [4, 5]], [[6, 7], [8, 9], [10, 11]]], dtype=torch.float32)
110
+
111
+ # Build the swap matrix
112
+ swap_matrix = create_batch_swap_matrix(batch_size, channels, swap_channels)
113
+ swap_matrix = torch.Tensor(swap_matrix)
114
+
115
+ # Perform the channel swap
116
+ swapped_tensor = swap_channels_between_batches(input_tensor, swap_matrix)
117
+
118
+ # Print the results
119
+ print("Original Tensor:")
120
+ print(input_tensor)
121
+ print("\nSwapped Tensor:")
122
+ print(swapped_tensor)
extras/t5_dev.py ADDED
@@ -0,0 +1,41 @@
1
+ import torch
2
+ from transformers import T5Config
3
+ from model.t5mod import T5ForConditionalGeneration
4
+
5
+ a = {
6
+ "architectures": ["T5ForConditionalGeneration"],
7
+ "d_ff": 1024, # size of the intermediate feed forward layer in each T5Block
8
+ "d_kv": 64, # d_kv has to be equal to d_model // num_heads.
9
+ # "d_model": 512, # encoder hidden size, defined by model_cfg
10
+ "decoder_start_token_id": 0,
11
+ "dense_act_fn": "gelu_new",
12
+ # "dropout_rate": 0.05, # can be overwritten by args in ymt3
13
+ "eos_token_id": 1,
14
+ "feed_forward_proj": "gated-gelu",
15
+ "initializer_factor": 1.0,
16
+ # "is_encoder_decoder": True,
17
+ "is_gated_act": True,
18
+ "layer_norm_epsilon": 1e-06,
19
+ "model_type": "t5",
20
+ # "num_decoder_layers": 8,
21
+ "num_heads": 6,
22
+ "num_layers": 8,
23
+ "output_past": True,
24
+ "pad_token_id": 0,
25
+ "relative_attention_num_buckets": 32,
26
+ "use_cache": True,
27
+ "vocab_size": 1391 # vocab_size is automatically set by the task manager...
28
+ }
29
+ cfg = T5Config(**a)
30
+ cfg.num_decoder_layers = 4
31
+ cfg.num_layers = 0
32
+
33
+ model = T5ForConditionalGeneration(cfg)
34
+ print(model)
35
+
36
+ x = torch.rand((2, 256, 512))
37
+ out = model.encoder.forward(inputs_embeds=x)
38
+
39
+ enc_hs = torch.rand((2, 256, 512))
40
+ labels = torch.randint(0, 1391, (2, 256))
41
+ pred = model(encoder_outputs=(enc_hs,), labels=labels) # important (enc_hs,) comma!
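
The `(enc_hs,)` comment above deserves a word of explanation: as in the `forward` of t5perceiver.py further down, the model reads the encoder states as `encoder_outputs[0]`, so the 1-tuple is what keeps the batch dimension intact. A short sketch with the same objects as in the script:

```python
pred = model(encoder_outputs=(enc_hs,), labels=labels)  # (enc_hs,) is a 1-tuple; element 0 is (batch, seq, d_model)
# pred = model(encoder_outputs=enc_hs, labels=labels)   # a bare tensor would be indexed as enc_hs[0],
#                                                       # silently dropping the batch dimension
```
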
extras/t5perceiver.py ADDED
@@ -0,0 +1,443 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ """ Bare wrapper of HF PyTorch T5 and Perceiver with the following modifications:
11
+ - PerceiverTF encoder
12
+ - ResConv pre-encoder
13
+ - Projection layers for dynamic dimension matching
14
+ - Sinusoidal absolute positional embeddings
15
+ - Positional embeddings from Perceiver implementation
16
+ - Task conditioning on encoder and decoder by input tokens
17
+ """
18
+ import copy
19
+ import warnings
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import torch
23
+ from torch import nn
24
+ from torch.nn import CrossEntropyLoss
25
+ from torch.utils.checkpoint import checkpoint
26
+
27
+ from transformers.utils import logging
28
+ from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
29
+ from transformers.modeling_utils import PreTrainedModel
30
+ from transformers.models.t5.modeling_t5 import (T5LayerNorm, T5Block, PARALLELIZE_DOCSTRING, DEPARALLELIZE_DOCSTRING,
31
+ T5_START_DOCSTRING, T5_INPUTS_DOCSTRING, _CONFIG_FOR_DOC,
32
+ __HEAD_MASK_WARNING_MSG)
33
+ from transformers.modeling_outputs import (Seq2SeqLMOutput, BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions)
34
+ from transformers import T5Config #, T5PreTrainedModel
35
+ from model.ops import FixedSinusoidalPositionalEmbedding
36
+
37
+ # additional imports
38
+ from model.t5mod import T5Stack
39
+ from transformers.models.t5.modeling_t5 import (T5Model, T5ForConditionalGeneration, T5EncoderModel, T5DenseActDense,
40
+ T5DenseGatedActDense, T5Attention, load_tf_weights_in_t5,
41
+ is_torch_fx_proxy)
42
+
43
+ from transformers.utils import (DUMMY_INPUTS, DUMMY_MASK)
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+
48
+ class T5PerceiverPreTrainedModel(PreTrainedModel):
49
+ """
50
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
51
+ models.
52
+ """
53
+
54
+ config_class = None
55
+ load_tf_weights = load_tf_weights_in_t5
56
+ base_model_prefix = "transformer"
57
+ is_parallelizable = True
58
+ supports_gradient_checkpointing = True
59
+ _no_split_modules = ["T5Block"]
60
+ _keep_in_fp32_modules = ["wo"]
61
+
62
+ @property
63
+ def dummy_inputs(self):
64
+ input_ids = torch.tensor(DUMMY_INPUTS)
65
+ input_mask = torch.tensor(DUMMY_MASK)
66
+ dummy_inputs = {
67
+ "decoder_input_ids": input_ids,
68
+ "input_ids": input_ids,
69
+ "decoder_attention_mask": input_mask,
70
+ }
71
+ return dummy_inputs
72
+
73
+ def _init_weights(self, module):
74
+ """Initialize the weights"""
75
+ factor = self.config.initializer_factor # Used for testing weights initialization
76
+ if isinstance(module, T5LayerNorm):
77
+ module.weight.data.fill_(factor * 1.0)
78
+ elif isinstance(module, (T5Model, T5ForConditionalGeneration, T5EncoderModel)):
79
+ # Mesh TensorFlow embeddings initialization
80
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
81
+ module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
82
+ if hasattr(module, "lm_head") and not self.config.tie_word_embeddings:
83
+ module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
84
+ elif isinstance(module, T5DenseActDense):
85
+ # Mesh TensorFlow FF initialization
86
+ # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
87
+ # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
88
+ module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model)**-0.5))
89
+ if hasattr(module.wi, "bias") and module.wi.bias is not None:
90
+ module.wi.bias.data.zero_()
91
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff)**-0.5))
92
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
93
+ module.wo.bias.data.zero_()
94
+ elif isinstance(module, T5DenseGatedActDense):
95
+ module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model)**-0.5))
96
+ if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
97
+ module.wi_0.bias.data.zero_()
98
+ module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model)**-0.5))
99
+ if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
100
+ module.wi_1.bias.data.zero_()
101
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff)**-0.5))
102
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
103
+ module.wo.bias.data.zero_()
104
+ elif isinstance(module, T5Attention):
105
+ # Mesh TensorFlow attention initialization to avoid scaling before softmax
106
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
107
+ d_model = self.config.d_model
108
+ key_value_proj_dim = self.config.d_kv
109
+ n_heads = self.config.num_heads
110
+ module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim)**-0.5))
111
+ module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
112
+ module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
113
+ module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim)**-0.5))
114
+ if module.has_relative_attention_bias:
115
+ module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model)**-0.5))
116
+
117
+ def _set_gradient_checkpointing(self, module, value=False):
118
+ if isinstance(module, (T5Attention, T5Stack)):
119
+ module.gradient_checkpointing = value
120
+
121
+ def _shift_right(self, input_ids):
122
+ decoder_start_token_id = self.config.decoder_start_token_id
123
+ pad_token_id = self.config.pad_token_id
124
+
125
+ assert decoder_start_token_id is not None, (
126
+ "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id."
127
+ " See T5 docs for more information")
128
+
129
+ # shift inputs to the right
130
+ if is_torch_fx_proxy(input_ids):
131
+ # Item assignment is not supported natively for proxies.
132
+ shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
133
+ shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
134
+ else:
135
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
136
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
137
+ shifted_input_ids[..., 0] = decoder_start_token_id
138
+
139
+ assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
140
+ # replace possible -100 values in labels by `pad_token_id`
141
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
142
+
143
+ return shifted_input_ids
144
+
145
+
146
+ class T5PerceiverForConditionalGeneration(T5PerceiverPreTrainedModel):
147
+ config_class = None
148
+ load_tf_weights = load_tf_weights_in_t5
149
+ base_model_prefix = "transformer"
150
+ is_parallelizable = True
151
+ supports_gradient_checkpointing = True
152
+ _no_split_modules = ["T5Block"]
153
+ _keep_in_fp32_modules = ["wo"]
154
+
155
+ @property
156
+ def dummy_inputs(self):
157
+ input_ids = torch.tensor(DUMMY_INPUTS)
158
+ input_mask = torch.tensor(DUMMY_MASK)
159
+ dummy_inputs = {
160
+ "decoder_input_ids": input_ids,
161
+ "input_ids": input_ids,
162
+ "decoder_attention_mask": input_mask,
163
+ }
164
+ return dummy_inputs
165
+
166
+ def __init__(
167
+ self,
168
+ model_cfg: dict,
169
+ # config: T5Config,
170
+ # use_fixed_absolute_pe: bool = True,
171
+ # num_max_positions: int = 1025
172
+ ):
173
+ super().__init__(config)
174
+ self.model_dim = config.d_model
175
+ """ mod: absolute position embedding """
176
+ self.use_fixed_absolute_pe = use_fixed_absolute_pe
177
+
178
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
179
+
180
+ encoder_config = copy.deepcopy(config)
181
+ encoder_config.is_decoder = False
182
+ encoder_config.use_cache = False
183
+ encoder_config.is_encoder_decoder = False
184
+ self.encoder = T5Stack(encoder_config,
185
+ self.shared,
186
+ use_fixed_absolute_pe=use_fixed_absolute_pe,
187
+ num_max_positions=num_max_positions)
188
+
189
+ decoder_config = copy.deepcopy(config)
190
+ decoder_config.is_decoder = True
191
+ decoder_config.is_encoder_decoder = False
192
+ decoder_config.num_layers = config.num_decoder_layers
193
+ self.decoder = T5Stack(decoder_config,
194
+ self.shared,
195
+ use_fixed_absolute_pe=use_fixed_absolute_pe,
196
+ num_max_positions=num_max_positions)
197
+
198
+ self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
199
+
200
+ # Initialize weights and apply final processing
201
+ self.post_init()
202
+
203
+ # Model parallel
204
+ self.model_parallel = False
205
+ self.device_map = None
206
+
207
+ def get_input_embeddings(self):
208
+ return self.shared
209
+
210
+ def set_input_embeddings(self, new_embeddings):
211
+ self.shared = new_embeddings
212
+ self.encoder.set_input_embeddings(new_embeddings)
213
+ self.decoder.set_input_embeddings(new_embeddings)
214
+
215
+ def set_output_embeddings(self, new_embeddings):
216
+ self.lm_head = new_embeddings
217
+
218
+ def get_output_embeddings(self):
219
+ return self.lm_head
220
+
221
+ def get_encoder(self):
222
+ return self.encoder
223
+
224
+ def get_decoder(self):
225
+ return self.decoder
226
+
227
+ def forward(
228
+ self,
229
+ input_ids: Optional[torch.LongTensor] = None,
230
+ attention_mask: Optional[torch.FloatTensor] = None,
231
+ decoder_input_ids: Optional[torch.LongTensor] = None,
232
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
233
+ head_mask: Optional[torch.FloatTensor] = None,
234
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
235
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
236
+ encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
237
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
238
+ inputs_embeds: Optional[torch.FloatTensor] = None,
239
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
240
+ labels: Optional[torch.LongTensor] = None,
241
+ use_cache: Optional[bool] = None,
242
+ output_attentions: Optional[bool] = None,
243
+ output_hidden_states: Optional[bool] = None,
244
+ return_dict: Optional[bool] = None,
245
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
246
+ r"""
247
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
248
+ Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ...,
249
+ config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
250
+ labels in `[0, ..., config.vocab_size]`
251
+
252
+ Returns:
253
+
254
+ Examples:
255
+
256
+ ```python
257
+ >>> from transformers import AutoTokenizer, T5ForConditionalGeneration
258
+
259
+ >>> tokenizer = AutoTokenizer.from_pretrained("t5-small")
260
+ >>> model = T5ForConditionalGeneration.from_pretrained("t5-small")
261
+
262
+ >>> # training
263
+ >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids
264
+ >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids
265
+ >>> outputs = model(input_ids=input_ids, labels=labels)
266
+ >>> loss = outputs.loss
267
+ >>> logits = outputs.logits
268
+
269
+ >>> # inference
270
+ >>> input_ids = tokenizer(
271
+ ... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt"
272
+ ... ).input_ids # Batch size 1
273
+ >>> outputs = model.generate(input_ids)
274
+ >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
275
+ >>> # studies have shown that owning a dog is good for you.
276
+ ```"""
277
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
278
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
279
+
280
+ # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
281
+ if head_mask is not None and decoder_head_mask is None:
282
+ if self.config.num_layers == self.config.num_decoder_layers:
283
+ warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
284
+ decoder_head_mask = head_mask
285
+
286
+ # Encode if needed (training, first prediction pass)
287
+ if encoder_outputs is None:
288
+ # Convert encoder inputs in embeddings if needed
289
+ encoder_outputs = self.encoder(
290
+ input_ids=input_ids,
291
+ attention_mask=attention_mask,
292
+ inputs_embeds=inputs_embeds,
293
+ head_mask=head_mask,
294
+ output_attentions=output_attentions,
295
+ output_hidden_states=output_hidden_states,
296
+ return_dict=return_dict,
297
+ )
298
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
299
+ encoder_outputs = BaseModelOutput(
300
+ last_hidden_state=encoder_outputs[0],
301
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
302
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
303
+ )
304
+
305
+ hidden_states = encoder_outputs[0]
306
+
307
+ if self.model_parallel:
308
+ torch.cuda.set_device(self.decoder.first_device)
309
+
310
+ if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
311
+ # get decoder inputs from shifting lm labels to the right
312
+ decoder_input_ids = self._shift_right(labels)
313
+
314
+ # Set device for model parallelism
315
+ if self.model_parallel:
316
+ torch.cuda.set_device(self.decoder.first_device)
317
+ hidden_states = hidden_states.to(self.decoder.first_device)
318
+ if decoder_input_ids is not None:
319
+ decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
320
+ if attention_mask is not None:
321
+ attention_mask = attention_mask.to(self.decoder.first_device)
322
+ if decoder_attention_mask is not None:
323
+ decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
324
+
325
+ # Decode
326
+ decoder_outputs = self.decoder(
327
+ input_ids=decoder_input_ids,
328
+ attention_mask=decoder_attention_mask,
329
+ inputs_embeds=decoder_inputs_embeds,
330
+ past_key_values=past_key_values,
331
+ encoder_hidden_states=hidden_states,
332
+ encoder_attention_mask=attention_mask,
333
+ head_mask=decoder_head_mask,
334
+ cross_attn_head_mask=cross_attn_head_mask,
335
+ use_cache=use_cache,
336
+ output_attentions=output_attentions,
337
+ output_hidden_states=output_hidden_states,
338
+ return_dict=return_dict,
339
+ )
340
+
341
+ sequence_output = decoder_outputs[0]
342
+
343
+ # Set device for model parallelism
344
+ if self.model_parallel:
345
+ torch.cuda.set_device(self.encoder.first_device)
346
+ self.lm_head = self.lm_head.to(self.encoder.first_device)
347
+ sequence_output = sequence_output.to(self.lm_head.weight.device)
348
+
349
+ if self.config.tie_word_embeddings:
350
+ # Rescale output before projecting on vocab
351
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
352
+ sequence_output = sequence_output * (self.model_dim**-0.5)
353
+
354
+ lm_logits = self.lm_head(sequence_output)
355
+
356
+ loss = None
357
+ if labels is not None:
358
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
359
+ # move labels to correct device to enable PP
360
+ labels = labels.to(lm_logits.device)
361
+ loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
362
+ # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
363
+
364
+ if not return_dict:
365
+ output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
366
+ return ((loss,) + output) if loss is not None else output
367
+
368
+ return Seq2SeqLMOutput(
369
+ loss=loss,
370
+ logits=lm_logits,
371
+ past_key_values=decoder_outputs.past_key_values,
372
+ decoder_hidden_states=decoder_outputs.hidden_states,
373
+ decoder_attentions=decoder_outputs.attentions,
374
+ cross_attentions=decoder_outputs.cross_attentions,
375
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
376
+ encoder_hidden_states=encoder_outputs.hidden_states,
377
+ encoder_attentions=encoder_outputs.attentions,
378
+ )
379
+
380
+ def prepare_inputs_for_generation(
381
+ self,
382
+ input_ids,
383
+ past_key_values=None,
384
+ attention_mask=None,
385
+ head_mask=None,
386
+ decoder_head_mask=None,
387
+ cross_attn_head_mask=None,
388
+ use_cache=None,
389
+ encoder_outputs=None,
390
+ **kwargs,
391
+ ):
392
+ # cut decoder_input_ids if past is used
393
+ if past_key_values is not None:
394
+ input_ids = input_ids[:, -1:]
395
+
396
+ return {
397
+ "decoder_input_ids": input_ids,
398
+ "past_key_values": past_key_values,
399
+ "encoder_outputs": encoder_outputs,
400
+ "attention_mask": attention_mask,
401
+ "head_mask": head_mask,
402
+ "decoder_head_mask": decoder_head_mask,
403
+ "cross_attn_head_mask": cross_attn_head_mask,
404
+ "use_cache": use_cache,
405
+ }
406
+
407
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
408
+ return self._shift_right(labels)
409
+
410
+ def _reorder_cache(self, past_key_values, beam_idx):
411
+ # if decoder past is not included in output
412
+ # speedy decoding is disabled and no need to reorder
413
+ if past_key_values is None:
414
+ logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
415
+ return past_key_values
416
+
417
+ reordered_decoder_past = ()
418
+ for layer_past_states in past_key_values:
419
+ # get the correct batch idx from layer past batch dim
420
+ # batch dim of `past` is at 2nd position
421
+ reordered_layer_past_states = ()
422
+ for layer_past_state in layer_past_states:
423
+ # need to set correct `past` for each of the four key / value states
424
+ reordered_layer_past_states = reordered_layer_past_states + (layer_past_state.index_select(
425
+ 0, beam_idx.to(layer_past_state.device)),)
426
+
427
+ assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
428
+ assert len(reordered_layer_past_states) == len(layer_past_states)
429
+
430
+ reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
431
+ return reordered_decoder_past
432
+
433
+
434
+ from transformers import PreTrainedModel, PretrainedConfig
435
+ from transformers import AutoModel, AutoConfig, PerceiverConfig
436
+
437
+
438
+ class MyConfig(T5Config, PerceiverConfig):
439
+ model_type = 'mymodel'
440
+
441
+ def __init__(self, important_param=42, **kwargs):
442
+ super().__init__(**kwargs)
443
+ self.important_param = important_param
extras/unimax_sampler/README.md ADDED
@@ -0,0 +1,45 @@
1
+ # UniMax Language Dataset Sampler with DDP support
2
+
3
+ This repository contains an unofficial PyTorch implementation of the UniMax sampling algorithm from ["UniMax: Fairer and more Effective Language Sampling for Large-Scale Multilingual Pretraining" by HW Chung et al. (ICLR 2023)](https://arxiv.org/abs/2304.09151). UniMax generates a sampling distribution over languages from their character counts, a total character budget, and a maximum number of epochs per language, which is useful for training language models on datasets with an imbalanced language distribution.
4
+
5
+ ## Contents
6
+
7
+ 1. `unimax_sampler.py`: This Python file contains the `UnimaxSampler` class, a PyTorch `Sampler` that uses the UNIMAX algorithm.
8
+
9
+ 2. `test_unimax_sampler.py`: This Python file contains a unit test for the `UnimaxSampler` class to ensure its correct functionality.
10
+
11
+ ## Usage
12
+
13
+ ```python
14
+ from torch.utils.data import Dataset, DataLoader
15
+ from unimax_sampler import UnimaxSampler
16
+
17
+ # Define your parameters
18
+ language_character_counts = [100, 200, 300, 400, 500]
19
+ total_character_budget = 1000
20
+ num_epochs = 2
21
+
22
+ # Create the UnimaxSampler
23
+ unimax_sampler = UnimaxSampler(language_character_counts, total_character_budget, num_epochs)
24
+ ```
25
+
26
+ Then, use the sampler as the sampler argument when creating a DataLoader.
27
+
28
+ ```python
29
+ # Disable shuffle when using custom sampler...
30
+ data_loader = DataLoader(my_dataset, batch_size=2, shuffle=None, sampler=unimax_sampler)
31
+ ```
32
+
33
+ For DDP,
34
+ ```python
35
+ if torch.distributed.is_initialized():
36
+ sampler = DistributedUnimaxSampler(...)
37
+ else:
38
+ return unimax_sampler(...)
39
+ ```
40
+
41
+ ## Note
42
+ The initial version of this code was created by [Chat GPT-4](https://chat.openai.com/), based on the pseudocode provided in the [UNIMAX](https://arxiv.org/abs/2304.09151) paper. Subsequently, the code was manually revised for `PyTorch` Distributed Data Parallel ([DDP](https://pytorch.org/docs/stable/notes/ddp.html)) framework. The DistributedSamplerWrapper implementation is derived from an earlier version found in the [Catalyst](https://github.com/catalyst-team/catalyst) project.
43
+
44
+ ## License
45
+ This project is licensed under the MIT License.
extras/unimax_sampler/demo.py ADDED
@@ -0,0 +1,15 @@
1
+ import torch
+ from utils.unimax_sampler.unimax_sampler import UnimaxSampler
2
+
3
+ language_character_counts = [100, 200, 300, 400, 500]
4
+ total_character_budget = 1000
5
+ num_epochs = 2
6
+
7
+ # Create the UnimaxSampler.
8
+ sampler = UnimaxSampler(language_character_counts, total_character_budget, num_epochs)
9
+
10
+ # Define the expected output. This will depend on your specific implementation of Unimax.
11
+ expected_output = torch.tensor([0.1, 0.2, 0.3, 0.2, 0.2])
12
+
13
+ # Use PyTorch's allclose function to compare the computed and expected outputs.
14
+ # The absolute tolerance parameter atol specifies the maximum difference allowed for the test to pass.
15
+ assert torch.allclose(sampler.p, expected_output, atol=1e-6)
extras/unimax_sampler/unimax_sampler.py ADDED
@@ -0,0 +1,168 @@
1
+ import torch
2
+ from torch.utils.data import DistributedSampler
3
+ from torch.utils.data import Dataset, Sampler
4
+ from torch.utils.data import RandomSampler
5
+ from operator import itemgetter
6
+ from typing import List, Union, Iterator, Optional
7
+
8
+
9
+ class DatasetFromSampler(Dataset):
10
+ """Dataset to create indexes from `Sampler`. From catalyst library.
11
+
12
+ Args:
13
+ sampler: PyTorch sampler
14
+ """
15
+
16
+ def __init__(self, sampler: Sampler):
17
+ """Initialisation for DatasetFromSampler."""
18
+ self.sampler = sampler
19
+ self.sampler_list = None
20
+
21
+ def __getitem__(self, index: int):
22
+ """Gets element of the dataset.
23
+
24
+ Args:
25
+ index: index of the element in the dataset
26
+
27
+ Returns:
28
+ Single element by index
29
+ """
30
+ if self.sampler_list is None:
31
+ self.sampler_list = list(self.sampler)
32
+ return self.sampler_list[index]
33
+
34
+ def __len__(self) -> int:
35
+ """
36
+ Returns:
37
+ int: length of the dataset
38
+ """
39
+ return len(self.sampler)
40
+
41
+
42
+ class DistributedSamplerWrapper(DistributedSampler):
43
+ """
44
+ Wrapper over `Sampler` for distributed training.
45
+ Allows you to use any sampler in distributed mode.
46
+ From https://github.com/catalyst-team/catalyst/blob/master/catalyst/data/sampler.py
47
+
48
+ It is especially useful in conjunction with
49
+ `torch.nn.parallel.DistributedDataParallel`. In such case, each
50
+ process can pass a DistributedSamplerWrapper instance as a DataLoader
51
+ sampler, and load a subset of subsampled data of the original dataset
52
+ that is exclusive to it.
53
+
54
+ .. note::
55
+ Sampler is assumed to be of constant size.
56
+ """
57
+
58
+ def __init__(
59
+ self,
60
+ sampler,
61
+ num_replicas: Optional[int] = None,
62
+ rank: Optional[int] = None,
63
+ shuffle: bool = True,
64
+ ):
65
+ """
66
+
67
+ Args:
68
+ sampler: Sampler used for subsampling
69
+ num_replicas (int, optional): Number of processes participating in
70
+ distributed training
71
+ rank (int, optional): Rank of the current process
72
+ within ``num_replicas``
73
+ shuffle (bool, optional): If true (default),
74
+ sampler will shuffle the indices
75
+ """
76
+ super(DistributedSamplerWrapper, self).__init__(
77
+ DatasetFromSampler(sampler),
78
+ num_replicas=num_replicas,
79
+ rank=rank,
80
+ shuffle=shuffle,
81
+ )
82
+ self.sampler = sampler
83
+
84
+ def __iter__(self) -> Iterator[int]:
85
+ """Iterate over sampler.
86
+
87
+ Returns:
88
+ python iterator
89
+ """
90
+ self.dataset = DatasetFromSampler(self.sampler)
91
+ indexes_of_indexes = super().__iter__()
92
+ subsampler_indexes = self.dataset
93
+ return iter(itemgetter(*indexes_of_indexes)(subsampler_indexes))
94
+
95
+
96
+ class UnimaxSampler(Sampler):
97
+ # Initialize the sampler with the character counts for each language,
98
+ # the total character budget, and the number of epochs per language.
99
+ def __init__(self, language_character_counts: List[int], total_character_budget: int,
100
+ num_epochs: int) -> None:
101
+ self.language_character_counts = torch.tensor(language_character_counts)
102
+ self.total_character_budget = total_character_budget
103
+ self.num_epochs = num_epochs
104
+ # Compute the sampling distribution p.
105
+ self.p = self._unimax()
106
+
107
+ # Define how to iterate over the data. We'll use PyTorch's multinomial
108
+ # function to generate indices according to the distribution p.
109
+ def __iter__(self) -> Iterator[int]:
110
+ return iter(torch.multinomial(self.p, len(self.p), replacement=True).tolist())
111
+
112
+ # Define the length of the sampler as the number of languages.
113
+ def __len__(self) -> int:
114
+ return len(self.p)
115
+
116
+ # Implement the UNIMAX algorithm to compute the sampling distribution p.
117
+ def _unimax(self) -> torch.Tensor:
118
+ # Sort languages by character count.
119
+ L, indices = torch.sort(self.language_character_counts)
120
+ # Initialize the remaining budget to the total character budget.
121
+ B = float(self.total_character_budget)
122
+ i = 0
123
+ # Initialize the budget per language.
124
+ U = torch.zeros_like(L, dtype=torch.float)  # float: per-language budgets can be fractional
125
+ # For each language...
126
+ for idx in indices:
127
+ # Compute the remaining budget per-language.
128
+ bl = B / (len(L) - i)
129
+ cl = self.language_character_counts[idx]  # character count of this language (processed smallest first)
130
+ # If per-language budget exceeds N epochs of the language, use N epochs.
131
+ if bl > cl * self.num_epochs:
132
+ Ul = cl * self.num_epochs
133
+ # Otherwise use uniform per-language budget.
134
+ else:
135
+ Ul = bl
136
+ # Store the computed budget.
137
+ U[idx] = Ul
138
+ # Update the remaining budget.
139
+ B -= Ul
140
+ # Move to the next language.
141
+ i += 1
142
+ # Normalize the budget to create a distribution.
143
+ p = U / U.sum()
144
+ # Return the computed distribution.
145
+ return p
146
+
147
+
148
+ class DistributedUnimaxSampler(UnimaxSampler):
149
+
150
+ def __init__(self,
151
+ language_character_counts: List[int],
152
+ total_character_budget: int,
153
+ num_epochs: int,
154
+ num_replicas: Optional[int] = None,
155
+ rank: Optional[int] = None,
156
+ shuffle: bool = True) -> None:
157
+
158
+ super().__init__(language_character_counts, total_character_budget, num_epochs)
159
+ self.distributed_sampler = DistributedSamplerWrapper(self, num_replicas, rank, shuffle)
160
+
161
+ def __iter__(self):
162
+ return iter(self.distributed_sampler)
163
+
164
+ def __len__(self):
165
+ return len(self.distributed_sampler)
166
+
167
+ def set_epoch(self, epoch):
168
+ self.distributed_sampler.set_epoch(epoch)
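A minimal usage sketch for the samplers above (not part of the commit; the `TensorDataset` stand-in and the counts are illustrative, and the import assumes the repository root is on `PYTHONPATH`). Each sampled index selects which source ("language") to draw the next example from; under DDP, `DistributedUnimaxSampler` plays the same role per process.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from extras.unimax_sampler.unimax_sampler import UnimaxSampler

# One entry per source ("language"); sampler.p is the UNIMAX distribution over sources.
sampler = UnimaxSampler(language_character_counts=[100, 200, 300, 400, 500],
                        total_character_budget=1000,
                        num_epochs=2)
print(sampler.p)  # tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000])

# Stand-in dataset with one item per source; real code would map each index to a sub-dataset.
dataset = TensorDataset(torch.arange(len(sampler)))
loader = DataLoader(dataset, sampler=sampler, batch_size=1)
for (source_idx,) in loader:
    pass  # indices are drawn with replacement according to sampler.p
```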
model/__pycache__/conv_block.cpython-310.pyc ADDED
Binary file (6.5 kB). View file
 
model/__pycache__/ff_layer.cpython-310.pyc ADDED
Binary file (7.85 kB). View file
 
model/__pycache__/init_train.cpython-310.pyc ADDED
Binary file (7.53 kB). View file
 
model/__pycache__/lm_head.cpython-310.pyc ADDED
Binary file (1.39 kB). View file
 
model/__pycache__/lr_scheduler.cpython-310.pyc ADDED
Binary file (2.16 kB). View file
 
model/__pycache__/ops.cpython-310.pyc ADDED
Binary file (3.35 kB). View file
 
model/__pycache__/optimizers.cpython-310.pyc ADDED
Binary file (5.4 kB). View file
 
model/__pycache__/projection_layer.cpython-310.pyc ADDED
Binary file (8.87 kB). View file
 
model/__pycache__/spectrogram.cpython-310.pyc ADDED
Binary file (5.13 kB). View file
 
model/__pycache__/ymt3.cpython-310.pyc ADDED
Binary file (24.2 kB). View file
 
model/conformer_helper.py ADDED
@@ -0,0 +1,169 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ import math
11
+ from typing import Optional, Union
12
+
13
+ from torch import nn
14
+ from transformers.configuration_utils import PretrainedConfig
15
+ from transformers.modeling_utils import PreTrainedModel
16
+
17
+
18
+ class ConformerYMT3Config(PretrainedConfig):
19
+ r"""
20
+ This is the configuration class to store the configuration of a [`ConformerYMT3Encoder`]. It is used to
21
+ instantiate an ConformerYMT3Encoder according to the specified arguments, defining the model architecture.
22
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the Wav2Vec2Conformer
23
+ [facebook/wav2vec2-conformer-rel-pos-large](https://huggingface.co/facebook/wav2vec2-conformer-rel-pos-large)
24
+ architecture.
25
+
26
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
27
+ documentation from [`PretrainedConfig`] for more information.
28
+
29
+
30
+ Args:
31
+ d_model (`int`, *optional*, defaults to 512):
32
+ Dimensionality of the encoder layers and the pooler layer.
33
+ num_layers (`int`, *optional*, defaults to 8):
34
+ Number of hidden layers in the Transformer encoder.
35
+ num_heads (`int`, *optional*, defaults to 8):
36
+ Number of attention heads for each attention layer in the Transformer encoder.
37
+ intermediate_size (`int`, *optional*, defaults to 2048):
38
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
39
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
40
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
41
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
42
+ dropout_rate (`float`, *optional*, defaults to 0.1):
43
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
44
+ layerdrop (`float`, *optional*, defaults to 0.1):
45
+ The LayerDrop probability. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more
46
+ details.
47
+ initializer_range (`float`, *optional*, defaults to 0.02):
48
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
49
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
50
+ The epsilon used by the layer normalization layers.
51
+ conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
52
+ A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
53
+ feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
54
+ conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
55
+ A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
56
+ of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
57
+ conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 3, 3)`):
58
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
59
+ length of *conv_kernel* defines the number of convolutional layers and has to match the length of
60
+ *conv_dim*.
61
+ conv_bias (`bool`, *optional*, defaults to `False`):
62
+ Whether the 1D convolutional layers have a bias.
63
+ output_hidden_size (`int`, *optional*):
64
+ Dimensionality of the encoder output layer. If not defined, this defaults to *hidden-size*. Only relevant
65
+ if `add_adapter is True`.
66
+ position_encoding_type (`str`, *optional*, defaults to `"rotary"`):
67
+ Can be specified to `relative` or `rotary` for relative or rotary position embeddings respectively. If left
68
+ `None` no relative position embedding is applied.
69
+ rotary_embedding_base (`int`, *optional*, defaults to 10000):
70
+ If `"rotary"` position embeddings are used, defines the size of the embedding base.
71
+ num_max_positions (`int`, *optional*, defaults to 1024):
72
+ if `"relative"` position embeddings are used, defines the maximum source input positions.
73
+ conv_depthwise_kernel_size (`int`, defaults to 31):
74
+ Kernel size of convolutional depthwise 1D layer in Conformer blocks.
75
+
76
+ Example:
77
+
78
+ ```python
79
+ >>> from transformers import ConformerYMT3Config, ConformerYMT3Encoder
80
+
81
+ >>> # Initializing a ConformerYMT3Encoder configuration
82
+ >>> configuration = ConformerYMT3Config()
83
+
84
+ >>> # Initializing a model (with random weights) from the facebook/wav2vec2-conformer-rel-pos-large style configuration
85
+ >>> model = ConformerYMT3Encoder(configuration)
86
+
87
+ >>> # Accessing the model configuration
88
+ >>> configuration = model.config
89
+ ```"""
90
+ model_type = "conformer-ymt3"
91
+
92
+ def __init__(
93
+ self,
94
+ d_model=512, # 768
95
+ num_layers=8, # ConformerYMT3Encoder
96
+ num_heads=8, # ConformerYMT3SelfAttention
97
+ intermediate_size=2048, # 3072,# used in intermediate_dense of ConformerYMT3FeedForward
98
+ hidden_act="gelu", # used in intermediate_act_fn of ConformerYMT3FeedForward
99
+ dropout_rate=0.1,
100
+ layerdrop=0.1,
101
+ initializer_range=0.02,
102
+ layer_norm_eps=1e-5,
103
+ conv_dim=(512, 512, 512, 512, 512, 512, 512),
104
+ conv_stride=(5, 2, 2, 2, 2, 2, 2),
105
+ conv_kernel=(10, 3, 3, 3, 3, 3, 3),
106
+ conv_bias=False,
107
+ position_encoding_type="rotary",
108
+ rotary_embedding_base=10000,
109
+ num_max_positions=1024,
110
+ conv_depthwise_kernel_size=31,
111
+ **kwargs,
112
+ ):
113
+ super().__init__(**kwargs)
114
+ self.d_model = d_model
115
+ self.conv_dim = list(conv_dim)
116
+ self.conv_stride = list(conv_stride)
117
+ self.conv_kernel = list(conv_kernel)
118
+ self.conv_bias = conv_bias
119
+ self.num_layers = num_layers
120
+ self.intermediate_size = intermediate_size
121
+ self.hidden_act = hidden_act
122
+ self.num_heads = num_heads
123
+ self.dropout_rate = dropout_rate
124
+
125
+ self.layerdrop = layerdrop
126
+ self.layer_norm_eps = layer_norm_eps
127
+ self.initializer_range = initializer_range
128
+ self.num_max_positions = num_max_positions
129
+ self.position_encoding_type = position_encoding_type
130
+ self.rotary_embedding_base = rotary_embedding_base
131
+
132
+ # Conformer-block related
133
+ self.conv_depthwise_kernel_size = conv_depthwise_kernel_size
134
+
135
+
136
+ class ConformerYMT3PreTrainedModel(PreTrainedModel):
137
+ """
138
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
139
+ models.
140
+ """
141
+
142
+ config_class = ConformerYMT3Config
143
+ base_model_prefix = "wav2vec2_conformer"
144
+ main_input_name = "input_values"
145
+ supports_gradient_checkpointing = True
146
+
147
+ def _init_weights(self, module):
148
+ """Initialize the weights"""
149
+ if module.__class__.__name__ == "ConformerYMT3SelfAttention":
150
+ if hasattr(module, "pos_bias_u"):
151
+ nn.init.xavier_uniform_(module.pos_bias_u)
152
+ if hasattr(module, "pos_bias_v"):
153
+ nn.init.xavier_uniform_(module.pos_bias_v)
154
+ elif isinstance(module, nn.Linear):
155
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
156
+ if module.bias is not None:
157
+ module.bias.data.zero_()
158
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
159
+ module.bias.data.zero_()
160
+ module.weight.data.fill_(1.0)
161
+ elif isinstance(module, nn.Conv1d):
162
+ nn.init.kaiming_normal_(module.weight)
163
+ if module.bias is not None:
164
+ k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
165
+ nn.init.uniform_(module.bias, a=-k, b=k)
166
+
167
+ def _set_gradient_checkpointing(self, module, value=False):
168
+ if module.__class__.__name__ == "ConformerYMT3Encoder":
169
+ module.gradient_checkpointing = value
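A quick check of the defaults documented above (an illustrative sketch, assuming the repository root is on `PYTHONPATH`):

```python
from model.conformer_helper import ConformerYMT3Config

cfg = ConformerYMT3Config()
# These match the docstring defaults above.
print(cfg.num_layers, cfg.num_heads, cfg.dropout_rate)    # 8 8 0.1
print(cfg.position_encoding_type, cfg.num_max_positions)  # rotary 1024
```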
model/conformer_mod.py ADDED
@@ -0,0 +1,439 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ from typing import Tuple, Literal, Any, Optional
11
+ import math
12
+
13
+ import torch
14
+ from torch import nn
15
+ from transformers.activations import ACT2FN
16
+ from transformers.modeling_outputs import BaseModelOutput
17
+
18
+ from model.conformer_helper import ConformerYMT3Config, ConformerYMT3PreTrainedModel
19
+ from model.positional_encoding import (Wav2Vec2ConformerRelPositionalEmbedding,
20
+ Wav2Vec2ConformerRotaryPositionalEmbedding)
21
+
22
+
23
+ class ConformerYMT3FeedForward(nn.Module):
24
+
25
+ def __init__(self, config):
26
+ super().__init__()
27
+ self.intermediate_dropout = nn.Dropout(config.dropout_rate)
28
+
29
+ self.intermediate_dense = nn.Linear(config.d_model, config.intermediate_size)
30
+ if isinstance(config.hidden_act, str):
31
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
32
+ else:
33
+ self.intermediate_act_fn = config.hidden_act
34
+
35
+ self.output_dense = nn.Linear(config.intermediate_size, config.d_model)
36
+ self.output_dropout = nn.Dropout(config.dropout_rate)
37
+
38
+ def forward(self, hidden_states):
39
+ hidden_states = self.intermediate_dense(hidden_states)
40
+ hidden_states = self.intermediate_act_fn(hidden_states)
41
+ hidden_states = self.intermediate_dropout(hidden_states)
42
+
43
+ hidden_states = self.output_dense(hidden_states)
44
+ hidden_states = self.output_dropout(hidden_states)
45
+ return hidden_states
46
+
47
+
48
+ class ConformerYMT3ConvolutionModule(nn.Module):
49
+ """Convolution block used in the conformer block"""
50
+
51
+ def __init__(self, config):
52
+ super().__init__()
53
+ if (config.conv_depthwise_kernel_size - 1) % 2 == 1:
54
+ raise ValueError("`config.conv_depthwise_kernel_size` should be an odd number for 'SAME' padding")
55
+ self.layer_norm = nn.LayerNorm(config.d_model)
56
+ self.pointwise_conv1 = torch.nn.Conv1d(
57
+ config.d_model,
58
+ 2 * config.d_model,
59
+ kernel_size=1,
60
+ stride=1,
61
+ padding=0,
62
+ bias=False,
63
+ )
64
+ self.glu = torch.nn.GLU(dim=1)
65
+ self.depthwise_conv = torch.nn.Conv1d(
66
+ config.d_model,
67
+ config.d_model,
68
+ config.conv_depthwise_kernel_size,
69
+ stride=1,
70
+ padding=(config.conv_depthwise_kernel_size - 1) // 2,
71
+ groups=config.d_model,
72
+ bias=False,
73
+ )
74
+ self.batch_norm = torch.nn.BatchNorm1d(config.d_model)
75
+ self.activation = ACT2FN[config.hidden_act]
76
+ self.pointwise_conv2 = torch.nn.Conv1d(
77
+ config.d_model,
78
+ config.d_model,
79
+ kernel_size=1,
80
+ stride=1,
81
+ padding=0,
82
+ bias=False,
83
+ )
84
+ self.dropout = torch.nn.Dropout(config.dropout_rate)
85
+
86
+ def forward(self, hidden_states):
87
+ hidden_states = self.layer_norm(hidden_states)
88
+ # exchange the temporal dimension and the feature dimension
89
+ hidden_states = hidden_states.transpose(1, 2)
90
+
91
+ # GLU mechanism
92
+ # => (batch, 2*channel, dim)
93
+ hidden_states = self.pointwise_conv1(hidden_states)
94
+ # => (batch, channel, dim)
95
+ hidden_states = self.glu(hidden_states)
96
+
97
+ # 1D Depthwise Conv
98
+ hidden_states = self.depthwise_conv(hidden_states)
99
+ hidden_states = self.batch_norm(hidden_states)
100
+ hidden_states = self.activation(hidden_states)
101
+
102
+ hidden_states = self.pointwise_conv2(hidden_states)
103
+ hidden_states = self.dropout(hidden_states)
104
+ hidden_states = hidden_states.transpose(1, 2)
105
+ return hidden_states
106
+
107
+
108
+ class ConformerYMT3SelfAttention(nn.Module):
109
+ """Construct a ConformerSelfAttention object.
110
+ Can be enhanced with rotary or relative position embeddings.
111
+ """
112
+
113
+ def __init__(self, config):
114
+ super().__init__()
115
+
116
+ self.head_size = config.d_model // config.num_heads
117
+ self.num_heads = config.num_heads
118
+ self.position_encoding_type = config.position_encoding_type
119
+
120
+ self.linear_q = nn.Linear(config.d_model, config.d_model)
121
+ self.linear_k = nn.Linear(config.d_model, config.d_model)
122
+ self.linear_v = nn.Linear(config.d_model, config.d_model)
123
+ self.linear_out = nn.Linear(config.d_model, config.d_model)
124
+
125
+ self.dropout = nn.Dropout(p=config.dropout_rate)
126
+
127
+ if self.position_encoding_type == "relative":
128
+ # linear transformation for positional encoding
129
+ self.linear_pos = nn.Linear(config.d_model, config.d_model, bias=False)
130
+ # these two learnable biases are used in matrix c and matrix d
131
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
132
+ self.pos_bias_u = nn.Parameter(torch.zeros(self.num_heads, self.head_size))
133
+ self.pos_bias_v = nn.Parameter(torch.zeros(self.num_heads, self.head_size))
134
+
135
+ def forward(
136
+ self,
137
+ hidden_states: torch.Tensor,
138
+ attention_mask: Optional[torch.Tensor] = None,
139
+ relative_position_embeddings: Optional[torch.Tensor] = None,
140
+ output_attentions: bool = False,
141
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
142
+ # self-attention mechanism
143
+ batch_size, sequence_length, d_model = hidden_states.size()
144
+
145
+ # make sure query/key states can be != value states
146
+ query_key_states = hidden_states
147
+ value_states = hidden_states
148
+
149
+ if self.position_encoding_type == "rotary":
150
+ if relative_position_embeddings is None:
151
+ raise ValueError(
152
+ "`relative_position_embeddings` has to be defined when `self.position_encoding_type == 'rotary'`")
153
+ query_key_states = self._apply_rotary_embedding(query_key_states, relative_position_embeddings)
154
+
155
+ # project query_key_states and value_states
156
+ query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
157
+ key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
158
+ value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size)
159
+
160
+ # => (batch, head, time1, d_k)
161
+ query = query.transpose(1, 2)
162
+ key = key.transpose(1, 2)
163
+ value = value.transpose(1, 2)
164
+
165
+ if self.position_encoding_type == "relative":
166
+ if relative_position_embeddings is None:
167
+ raise ValueError("`relative_position_embeddings` has to be defined when `self.position_encoding_type =="
168
+ " 'relative'`")
169
+ # apply relative_position_embeddings to qk scores
170
+ # as proposed in Transformer_XL: https://arxiv.org/abs/1901.02860
171
+ scores = self._apply_relative_embeddings(query=query,
172
+ key=key,
173
+ relative_position_embeddings=relative_position_embeddings)
174
+ else:
175
+ scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size)
176
+
177
+ # apply attention_mask if necessary
178
+ if attention_mask is not None:
179
+ scores = scores + attention_mask
180
+
181
+ # => (batch, head, time1, time2)
182
+ probs = torch.softmax(scores, dim=-1)
183
+ probs = self.dropout(probs)
184
+
185
+ # => (batch, head, time1, d_k)
186
+ hidden_states = torch.matmul(probs, value)
187
+
188
+ # => (batch, time1, d_model)
189
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size)
190
+ hidden_states = self.linear_out(hidden_states)
191
+
192
+ return hidden_states, probs
193
+
194
+ def _apply_rotary_embedding(self, hidden_states, relative_position_embeddings):
195
+ batch_size, sequence_length, d_model = hidden_states.size()
196
+ hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads, self.head_size)
197
+
198
+ cos = relative_position_embeddings[0, :sequence_length, ...]
199
+ sin = relative_position_embeddings[1, :sequence_length, ...]
200
+
201
+ # rotate hidden_states with rotary embeddings
202
+ hidden_states = hidden_states.transpose(0, 1)
203
+ rotated_states_begin = hidden_states[..., :self.head_size // 2]
204
+ rotated_states_end = hidden_states[..., self.head_size // 2:]
205
+ rotated_states = torch.cat((-rotated_states_end, rotated_states_begin), dim=rotated_states_begin.ndim - 1)
206
+ hidden_states = (hidden_states * cos) + (rotated_states * sin)
207
+ hidden_states = hidden_states.transpose(0, 1)
208
+
209
+ hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads * self.head_size)
210
+
211
+ return hidden_states
212
+
213
+ def _apply_relative_embeddings(self, query, key, relative_position_embeddings):
214
+ # 1. project positional embeddings
215
+ # => (batch, head, 2*time1-1, d_k)
216
+ proj_relative_position_embeddings = self.linear_pos(relative_position_embeddings)
217
+ proj_relative_position_embeddings = proj_relative_position_embeddings.view(relative_position_embeddings.size(0),
218
+ -1, self.num_heads, self.head_size)
219
+ proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(1, 2)
220
+ proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(2, 3)
221
+
222
+ # 2. Add bias to query
223
+ # => (batch, head, time1, d_k)
224
+ query = query.transpose(1, 2)
225
+ q_with_bias_u = (query + self.pos_bias_u).transpose(1, 2)
226
+ q_with_bias_v = (query + self.pos_bias_v).transpose(1, 2)
227
+
228
+ # 3. attention score: first compute matrix a and matrix c
229
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
230
+ # => (batch, head, time1, time2)
231
+ scores_ac = torch.matmul(q_with_bias_u, key.transpose(-2, -1))
232
+
233
+ # 4. then compute matrix b and matrix d
234
+ # => (batch, head, time1, 2*time1-1)
235
+ scores_bd = torch.matmul(q_with_bias_v, proj_relative_position_embeddings)
236
+
237
+ # 5. shift matrix b and matrix d
238
+ zero_pad = torch.zeros((*scores_bd.size()[:3], 1), device=scores_bd.device, dtype=scores_bd.dtype)
239
+ scores_bd_padded = torch.cat([zero_pad, scores_bd], dim=-1)
240
+ scores_bd_padded_shape = scores_bd.size()[:2] + (scores_bd.shape[3] + 1, scores_bd.shape[2])
241
+ scores_bd_padded = scores_bd_padded.view(*scores_bd_padded_shape)
242
+ scores_bd = scores_bd_padded[:, :, 1:].view_as(scores_bd)
243
+ scores_bd = scores_bd[:, :, :, :scores_bd.size(-1) // 2 + 1]
244
+
245
+ # 6. sum matrices
246
+ # => (batch, head, time1, time2)
247
+ scores = (scores_ac + scores_bd) / math.sqrt(self.head_size)
248
+
249
+ return scores
250
+
251
+
252
+ class ConformerYMT3EncoderLayer(nn.Module):
253
+ """Conformer block based on https://arxiv.org/abs/2005.08100."""
254
+
255
+ def __init__(self, config):
256
+ super().__init__()
257
+ embed_dim = config.d_model
258
+ dropout = config.dropout_rate
259
+
260
+ # Feed-forward 1
261
+ self.ffn1_layer_norm = nn.LayerNorm(embed_dim)
262
+ self.ffn1 = ConformerYMT3FeedForward(config)
263
+
264
+ # Self-Attention
265
+ self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
266
+ self.self_attn_dropout = torch.nn.Dropout(dropout)
267
+ self.self_attn = ConformerYMT3SelfAttention(config)
268
+
269
+ # Conformer Convolution
270
+ self.conv_module = ConformerYMT3ConvolutionModule(config)
271
+
272
+ # Feed-forward 2
273
+ self.ffn2_layer_norm = nn.LayerNorm(embed_dim)
274
+ self.ffn2 = ConformerYMT3FeedForward(config)
275
+ self.final_layer_norm = nn.LayerNorm(embed_dim)
276
+
277
+ def forward(
278
+ self,
279
+ hidden_states,
280
+ attention_mask: Optional[torch.Tensor] = None,
281
+ relative_position_embeddings: Optional[torch.Tensor] = None,
282
+ output_attentions: bool = False,
283
+ ):
284
+ hidden_states = hidden_states
285
+
286
+ # 1. Feed-Forward 1 layer
287
+ residual = hidden_states
288
+ hidden_states = self.ffn1_layer_norm(hidden_states)
289
+ hidden_states = self.ffn1(hidden_states)
290
+ hidden_states = hidden_states * 0.5 + residual
291
+ residual = hidden_states
292
+
293
+ # 2. Self-Attention layer
294
+ hidden_states = self.self_attn_layer_norm(hidden_states)
295
+ hidden_states, attn_weights = self.self_attn(
296
+ hidden_states=hidden_states,
297
+ attention_mask=attention_mask,
298
+ relative_position_embeddings=relative_position_embeddings,
299
+ output_attentions=output_attentions,
300
+ )
301
+ hidden_states = self.self_attn_dropout(hidden_states)
302
+ hidden_states = hidden_states + residual
303
+
304
+ # 3. Convolutional Layer
305
+ residual = hidden_states
306
+ hidden_states = self.conv_module(hidden_states)
307
+ hidden_states = residual + hidden_states
308
+
309
+ # 4. Feed-Forward 2 Layer
310
+ residual = hidden_states
311
+ hidden_states = self.ffn2_layer_norm(hidden_states)
312
+ hidden_states = self.ffn2(hidden_states)
313
+ hidden_states = hidden_states * 0.5 + residual
314
+ hidden_states = self.final_layer_norm(hidden_states)
315
+
316
+ return hidden_states, attn_weights
317
+
318
+
319
+ class ConformerYMT3Encoder(nn.Module):
320
+
321
+ def __init__(self, config):
322
+ super().__init__()
323
+ self.config = config
324
+
325
+ if config.position_encoding_type == "relative":
326
+ self.embed_positions = Wav2Vec2ConformerRelPositionalEmbedding(config)
327
+ elif config.position_encoding_type == "rotary":
328
+ self.embed_positions = Wav2Vec2ConformerRotaryPositionalEmbedding(config)
329
+ else:
330
+ self.embed_positions = None
331
+
332
+ # self.pos_conv_embed = Wav2Vec2ConformerPositionalConvEmbedding(config)
333
+ self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
334
+ self.dropout = nn.Dropout(config.dropout_rate)
335
+ self.layers = nn.ModuleList([ConformerYMT3EncoderLayer(config) for _ in range(config.num_layers)])
336
+ self.gradient_checkpointing = False
337
+
338
+ def forward(
339
+ self,
340
+ inputs_embeds: torch.FloatTensor, # (B, T, D)
341
+ attention_mask: Optional[torch.FloatTensor] = None,
342
+ output_attentions: Optional[bool] = False,
343
+ output_hidden_states: Optional[bool] = False,
344
+ return_dict: Optional[bool] = True,
345
+ ):
346
+ if output_attentions is None:
347
+ output_attentions = self.config.output_attentions
348
+ if output_hidden_states is None:
349
+ output_hidden_states = self.config.output_hidden_states
350
+ if return_dict is None:
351
+ return_dict = self.config.use_return_dict
352
+ all_hidden_states = () if output_hidden_states else None
353
+ all_self_attentions = () if output_attentions else None
354
+
355
+ # inputs_embeds as hidden_states
356
+ hidden_states = inputs_embeds
357
+
358
+ if attention_mask is not None:
359
+ # make sure padded tokens output 0
360
+ hidden_states[~attention_mask] = 0.0
361
+
362
+ # extend attention_mask
363
+ attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
364
+ attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
365
+ attention_mask = attention_mask.expand(attention_mask.shape[0], 1, attention_mask.shape[-1],
366
+ attention_mask.shape[-1])
367
+
368
+ hidden_states = self.dropout(hidden_states)
369
+
370
+ if self.embed_positions is not None:
371
+ relative_position_embeddings = self.embed_positions(hidden_states)
372
+ else:
373
+ relative_position_embeddings = None
374
+
375
+ for i, layer in enumerate(self.layers):
376
+ if output_hidden_states:
377
+ all_hidden_states = all_hidden_states + (hidden_states,)
378
+
379
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
380
+ dropout_probability = torch.rand([])
381
+
382
+ skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
383
+ if not skip_the_layer:
384
+ # under deepspeed zero3 all gpus must run in sync
385
+ if self.gradient_checkpointing and self.training:
386
+ # create gradient checkpointing function
387
+ def create_custom_forward(module):
388
+
389
+ def custom_forward(*inputs):
390
+ return module(*inputs, output_attentions)
391
+
392
+ return custom_forward
393
+
394
+ layer_outputs = torch.utils.checkpoint.checkpoint(
395
+ create_custom_forward(layer),
396
+ hidden_states,
397
+ attention_mask,
398
+ relative_position_embeddings,
399
+ )
400
+ else:
401
+ layer_outputs = layer(
402
+ hidden_states,
403
+ attention_mask=attention_mask,
404
+ relative_position_embeddings=relative_position_embeddings,
405
+ output_attentions=output_attentions,
406
+ )
407
+ hidden_states = layer_outputs[0]
408
+
409
+ if skip_the_layer:
410
+ layer_outputs = (None, None)
411
+
412
+ if output_attentions:
413
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
414
+
415
+ hidden_states = self.layer_norm(hidden_states)
416
+ if output_hidden_states:
417
+ all_hidden_states = all_hidden_states + (hidden_states,)
418
+
419
+ if not return_dict:
420
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
421
+ return BaseModelOutput(
422
+ last_hidden_state=hidden_states,
423
+ hidden_states=all_hidden_states,
424
+ attentions=all_self_attentions,
425
+ )
426
+
427
+
428
+ def test():
429
+ import torch
430
+ from model.conformer_mod import ConformerYMT3Encoder
431
+ from model.conformer_helper import ConformerYMT3Config
432
+ from model.ops import count_parameters
433
+ config = ConformerYMT3Config()
434
+ encoder = ConformerYMT3Encoder(config)
435
+ encoder.eval()
436
+ # num params: 48,468,992 w/ intermediate_size=2048
437
+ # num params: 23,278,592 w/ intermediate_size=512
438
+ x = torch.randn(2, 256, 512) # (B, T, D)
439
+ enc_hs = encoder.forward(inputs_embeds=x)['last_hidden_state'] # (B, T, D)
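A standalone sketch that mirrors `test()` above and also calls `count_parameters` (imported there but never used; `count_parameters` is defined in `model/ops.py`, added below). It assumes the repository root is on `PYTHONPATH`:

```python
import torch
from model.conformer_helper import ConformerYMT3Config
from model.conformer_mod import ConformerYMT3Encoder
from model.ops import count_parameters

config = ConformerYMT3Config()
encoder = ConformerYMT3Encoder(config).eval()
num_trainable, num_total = count_parameters(encoder)
print(f"trainable params: {num_trainable:,}")  # ~48.5M with the default intermediate_size=2048 (see comment above)

x = torch.randn(2, 256, 512)  # (B, T, D)
with torch.no_grad():
    out = encoder(inputs_embeds=x)["last_hidden_state"]
print(out.shape)  # torch.Size([2, 256, 512]) -- the encoder preserves (B, T, d_model)
```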
model/ff_layer.py ADDED
@@ -0,0 +1,238 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ """ff_layer.py
11
+
12
+ This module contains the implementation of the feedforward layers.
13
+
14
+ Supported ff_layer_type:
15
+ 'mlp': Multi-Layer Perceptron
16
+ 'gmlp': Gated Multi-Layer Perceptron, simplified version of Mixtral Expert with num_experts=1 and top_k=1.
17
+ This is not the spatial gating MLP (https://arxiv.org/abs/2105.08050).
18
+ 'moe': Mixtral of Experts, modified from the original source code:
19
+ https://github.com/huggingface/transformers/blob/v4.38.2/src/transformers/models/mixtral/modeling_mixtral.py
20
+
21
+ Usage:
22
+ from model.ff_layer import get_ff_layer
23
+
24
+ config = PerceiverTFConfig() # or any type of PretrainedConfig()
25
+ config.ff_layer_type = 'moe' # or 'mlp'
26
+ config.moe_num_experts = 4
27
+ config.moe_topk = 2
28
+ config.hidden_act = 'gelu' # or any type of activation function, e.g., 'silu'
29
+
30
+ ff_layer = get_ff_layer(config, input_size, widening_factor)
31
+
32
+ What ff_layer returns:
33
+ - It returns (hidden_states, router_logits) for MoE and (hidden_states, None) for MLP.
34
+ - router_logits has the shape of (batch_size * sequence_length, n_experts) for MoE.
35
+
36
+
37
+ """
38
+ from typing import Any, Tuple
39
+ import torch
40
+ import torch.nn as nn
41
+ import torch.nn.functional as F
42
+ from transformers.configuration_utils import PretrainedConfig
43
+ from transformers.activations import ACT2FN
44
+ from model.ops import get_layer_norm
45
+ from model.ops import optional_compiler_disable, optional_compiler_dynamic
46
+
47
+
48
+ class MixtralBlockSparseTop2MLP(nn.Module):
49
+ """
50
+ The Gated Multilayer Perceptron (GMLP) used in Mixtral of Experts (MoE).
51
+
52
+ """
53
+
54
+ def __init__(self, config: PretrainedConfig, input_size: int, widening_factor: int):
55
+ super().__init__()
56
+ self.hidden_dim = input_size
57
+ self.ffn_dim = int(input_size * widening_factor)
58
+
59
+ self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
60
+ self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
61
+ self.gate = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
62
+ self.act_fn = ACT2FN[config.hidden_act]
63
+
64
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
65
+ current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.gate(hidden_states)
66
+ current_hidden_states = self.w2(current_hidden_states)
67
+ return current_hidden_states
68
+
69
+
70
+ class MixtralSparseMoeBlock(nn.Module):
71
+ """
72
+ This implementation is
73
+ strictly equivalent to standard MoE with full capacity (no
74
+ dropped tokens). It's faster since it formulates MoE operations
75
+ in terms of block-sparse operations to accomodate imbalanced
76
+ assignments of tokens to experts, whereas standard MoE either
77
+ (1) drop tokens at the cost of reduced performance or (2) set
78
+ capacity factor to number of experts and thus waste computation
79
+ and memory on padding.
80
+ """
81
+
82
+ def __init__(self, config, input_size: int, widening_factor: int):
83
+ super().__init__()
84
+ self.hidden_dim = input_size
85
+ self.widening_factor = widening_factor
86
+ self.num_experts = config.moe_num_experts
87
+ self.top_k = config.moe_topk
88
+
89
+ # gating
90
+ self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False)
91
+ self.experts = nn.ModuleList(
92
+ [MixtralBlockSparseTop2MLP(config, self.hidden_dim, self.widening_factor) for _ in range(self.num_experts)])
93
+
94
+ @optional_compiler_disable
95
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
96
+ """ """
97
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
98
+ hidden_states = hidden_states.view(-1, hidden_dim)
99
+ # router_logits: (batch * sequence_length, n_experts)
100
+ router_logits = self.gate(hidden_states)
101
+
102
+ routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
103
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
104
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
105
+ # we cast back to the input dtype
106
+ routing_weights = routing_weights.to(hidden_states.dtype)
107
+
108
+ final_hidden_states = torch.zeros((batch_size * sequence_length, hidden_dim),
109
+ dtype=hidden_states.dtype,
110
+ device=hidden_states.device)
111
+
112
+ # One hot encode the selected experts to create an expert mask
113
+ # this will be used to easily index which expert is going to be sollicitated
114
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
115
+
116
+ # Loop over all available experts in the model and perform the computation on each expert
117
+ for expert_idx in range(self.num_experts):
118
+ expert_layer = self.experts[expert_idx]
119
+ idx, top_x = torch.where(expert_mask[expert_idx])
120
+
121
+ if top_x.shape[0] == 0:
122
+ continue
123
+
124
+ # in torch it is faster to index using lists than torch tensors
125
+ top_x_list = top_x.tolist()
126
+ idx_list = idx.tolist()
127
+
128
+ # Index the correct hidden states and compute the expert hidden state for
129
+ # the current expert. We need to make sure to multiply the output hidden
130
+ # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
131
+ current_state = hidden_states[None, top_x_list].reshape(-1, hidden_dim)
132
+ current_hidden_states = expert_layer(current_state) * routing_weights[top_x_list, idx_list, None]
133
+
134
+ # However `index_add_` only support torch tensors for indexing so we'll use
135
+ # the `top_x` tensor here.
136
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
137
+ final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
138
+ return final_hidden_states, router_logits
139
+
140
+
141
+ class MLP(nn.Module):
142
+ """A Standard Transformer-style dense module to follow attention."""
143
+
144
+ def __init__(self, config: PretrainedConfig, input_size: int, widening_factor: int):
145
+ super().__init__()
146
+ self.dense1 = nn.Linear(input_size, widening_factor * input_size)
147
+ self.dense2 = nn.Linear(widening_factor * input_size, input_size)
148
+
149
+ if isinstance(config.hidden_act, str):
150
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
151
+ else:
152
+ self.intermediate_act_fn = config.hidden_act
153
+
154
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, Any]:
155
+ hidden_states = self.dense1(hidden_states)
156
+ hidden_states = self.intermediate_act_fn(hidden_states)
157
+ hidden_states = self.dense2(hidden_states)
158
+ return hidden_states, None
159
+
160
+
161
+ class SimpleGMLP(nn.Module):
162
+ """A Simple Gated Multilayer Perceptron (aka. 'gmlp'), without the spatial gating mechanism.
163
+
164
+ Note that this is not the spatial gating MLP (https://arxiv.org/abs/2105.08050).
165
+ - A simplified MLP w/ gating mechanism adapted from Mixtral Expert, as when
166
+ the number of experts and top_k are both set to 1.
167
+ - Added a dropout layer.
168
+ - This was also used in T5 v1.1.
169
+ """
170
+
171
+ def __init__(self, config: PretrainedConfig, input_size: int, widening_factor: int):
172
+ super().__init__()
173
+ self.hidden_dim = input_size
174
+ self.ffn_dim = int(input_size * widening_factor)
175
+
176
+ self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
177
+ self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
178
+ self.gate = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
179
+ self.act_fn = ACT2FN[config.hidden_act]
180
+ self.dropout1 = nn.Dropout(config.dropout_rate)
181
+ self.dropout2 = nn.Dropout(config.dropout_rate)
182
+
183
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
184
+ current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.gate(hidden_states)
185
+ current_hidden_states = self.dropout1(current_hidden_states)
186
+ current_hidden_states = self.w2(current_hidden_states)
187
+ current_hidden_states = self.dropout2(
188
+ current_hidden_states) # Residual connection is applied outside of this module.
189
+ return current_hidden_states, None
190
+
191
+
192
+ def get_ff_layer(config: PretrainedConfig, input_size: int, widening_factor: int):
193
+ if config.ff_layer_type == 'moe':
194
+ assert hasattr(config, 'moe_num_experts') and hasattr(config, 'moe_topk') and hasattr(config, 'hidden_act')
195
+ return MixtralSparseMoeBlock(config, input_size, widening_factor)
196
+ elif config.ff_layer_type == 'mlp':
197
+ assert hasattr(config, 'hidden_act')
198
+ return MLP(config, input_size, widening_factor)
199
+ elif config.ff_layer_type == 'gmlp':
200
+ assert hasattr(config, 'hidden_act')
201
+ return SimpleGMLP(config, input_size, widening_factor)
202
+ else:
203
+ raise ValueError(
204
+ f"Unsupported ff_layer_type: {config.ff_layer_type}. Supported types are 'moe', 'mlp' and 'gmlp'.")
205
+
206
+
207
+ def test_get_ff_layer():
208
+ from model.ff_layer import get_ff_layer
209
+ from model.perceiver_helper import PerceiverTFConfig
210
+ input_size = 32
211
+ widening_factor = 1
212
+
213
+ # Test for MoE
214
+ config = PerceiverTFConfig() # or any type of PretrainedConfig()
215
+ config.ff_layer_type = 'moe'
216
+ config.moe_num_experts = 4
217
+ config.moe_topk = 2
218
+ config.hidden_act = 'silu'
219
+
220
+ ff_layer = get_ff_layer(config, input_size, widening_factor)
221
+ x = torch.rand(2, 8, input_size)
222
+ hidden_states, router_logits = ff_layer(x)
223
+ print(hidden_states.shape, router_logits.shape) # (2, 8, 32), (2*8, 4)
224
+
225
+ # Test for MLP
226
+ config.ff_layer_type = 'mlp'
227
+ config.hidden_act = 'gelu'
228
+
229
+ ff_layer = get_ff_layer(config, input_size, widening_factor)
230
+ hidden_states, _ = ff_layer(x)
231
+ print(hidden_states.shape) # (2, 8, 32)
232
+
233
+ # Test for (simple)gMLP
234
+ config.ff_layer_type = 'gmlp'
235
+ config.hidden_act = 'silu'
236
+ ff_layer = get_ff_layer(config, input_size, widening_factor)
237
+ hidden_states, _ = ff_layer(x)
238
+ print(hidden_states.shape) # (2, 8, 32)
model/init_train.py ADDED
@@ -0,0 +1,281 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ """init_train.py"""
11
+ from typing import Tuple, Literal, Any
12
+ from copy import deepcopy
13
+ import os
14
+ import argparse
15
+ import pytorch_lightning as pl
16
+ from pytorch_lightning.loggers import WandbLogger
17
+ from pytorch_lightning.callbacks import ModelCheckpoint
18
+ from pytorch_lightning.callbacks import LearningRateMonitor
19
+ from pytorch_lightning.utilities import rank_zero_only
20
+ from config.config import shared_cfg as default_shared_cfg
21
+ from config.config import audio_cfg as default_audio_cfg
22
+ from config.config import model_cfg as default_model_cfg
23
+ from config.config import DEEPSPEED_CFG
24
+
25
+
26
+ def initialize_trainer(args: argparse.Namespace,
27
+ stage: Literal['train', 'test'] = 'train') -> Tuple[pl.Trainer, WandbLogger, dict]:
28
+ """Initialize trainer and logger"""
29
+ shared_cfg = deepcopy(default_shared_cfg)
30
+
31
+ # create save dir
32
+ os.makedirs(shared_cfg["WANDB"]["save_dir"], exist_ok=True)
33
+
34
+ # collecting specific checkpoint from exp_id with extension (@xxx where xxx is checkpoint name)
35
+ if "@" in args.exp_id:
36
+ args.exp_id, checkpoint_name = args.exp_id.split("@")
37
+ else:
38
+ checkpoint_name = "last.ckpt"
39
+
40
+ # checkpoint dir
41
+ lightning_dir = os.path.join(shared_cfg["WANDB"]["save_dir"], args.project, args.exp_id)
42
+
43
+ # create logger
44
+ if args.wandb_mode is not None:
45
+ shared_cfg["WANDB"]["mode"] = str(args.wandb_mode)
46
+ if shared_cfg["WANDB"].get("cache_dir", None) is not None:
47
+ os.environ["WANDB_CACHE_DIR"] = shared_cfg["WANDB"].get("cache_dir")
48
+ del shared_cfg["WANDB"]["cache_dir"] # remove cache_dir from shared_cfg
49
+ wandb_logger = WandbLogger(log_model="all",
50
+ project=args.project,
51
+ id=args.exp_id,
52
+ allow_val_change=True,
53
+ **shared_cfg['WANDB'])
54
+
55
+ # check if any checkpoint exists
56
+ last_ckpt_path = os.path.join(lightning_dir, "checkpoints", checkpoint_name)
57
+ if os.path.exists(os.path.join(last_ckpt_path)):
58
+ print(f'Resuming from {last_ckpt_path}')
59
+ elif stage == 'train':
60
+ print(f'No checkpoint found in {last_ckpt_path}. Starting from scratch')
61
+ last_ckpt_path = None
62
+ else:
63
+ raise ValueError(f'No checkpoint found in {last_ckpt_path}. Quit...')
64
+
65
+ # add info
66
+ dir_info = dict(lightning_dir=lightning_dir, last_ckpt_path=last_ckpt_path)
67
+
68
+ # define checkpoint callback
69
+ checkpoint_callback = ModelCheckpoint(**shared_cfg["CHECKPOINT"],)
70
+
71
+ # define lr scheduler monitor callback
72
+ lr_monitor = LearningRateMonitor(logging_interval='step')
73
+
74
+ # deepspeed strategy
75
+ if args.strategy == 'deepspeed':
76
+ strategy = pl.strategies.DeepSpeedStrategy(config=DEEPSPEED_CFG)
77
+
78
+ # validation interval
79
+ if stage == 'train' and args.val_interval is not None:
80
+ shared_cfg["TRAINER"]["check_val_every_n_epoch"] = None
81
+ shared_cfg["TRAINER"]["val_check_interval"] = int(args.val_interval)
82
+
83
+ # define trainer
84
+ sync_batchnorm = False
85
+ if stage == 'train':
86
+ # train batch size
87
+ if args.train_batch_size is not None:
88
+ train_sub_bsz = int(args.train_batch_size[0])
89
+ train_local_bsz = int(args.train_batch_size[1])
90
+ if train_local_bsz % train_sub_bsz == 0:
91
+ shared_cfg["BSZ"]["train_sub"] = train_sub_bsz
92
+ shared_cfg["BSZ"]["train_local"] = train_local_bsz
93
+ else:
94
+ raise ValueError(
95
+ f'Local batch size {train_local_bsz} must be divisible by sub batch size {train_sub_bsz}')
96
+
97
+ # ddp strategy
98
+ if args.strategy == 'ddp':
99
+ args.strategy = 'ddp_find_unused_parameters_true' # fix for conformer or pitchshifter having unused parameter issue
100
+
101
+ # sync-batchnorm
102
+ if args.sync_batchnorm is True:
103
+ sync_batchnorm = True
104
+
105
+ train_params = dict(**shared_cfg["TRAINER"],
106
+ devices=args.num_gpus if args.num_gpus == 'auto' else int(args.num_gpus),
107
+ num_nodes=int(args.num_nodes),
108
+ strategy=strategy if args.strategy == 'deepspeed' else args.strategy,
109
+ precision=args.precision,
110
+ max_epochs=args.max_epochs if stage == 'train' else None,
111
+ max_steps=args.max_steps if stage == 'train' else -1,
112
+ logger=wandb_logger,
113
+ callbacks=[checkpoint_callback, lr_monitor],
114
+ sync_batchnorm=sync_batchnorm)
115
+ trainer = pl.trainer.trainer.Trainer(**train_params)
116
+
117
+ # Update wandb logger (for DDP)
118
+ if trainer.global_rank == 0:
119
+ wandb_logger.experiment.config.update(args, allow_val_change=True)
120
+
121
+ return trainer, wandb_logger, dir_info, shared_cfg
122
+
123
+
124
+ def update_config(args, shared_cfg, stage: Literal['train', 'test'] = 'train'):
125
+ """Update audio/model/shared configurations with args"""
126
+ audio_cfg = default_audio_cfg
127
+ model_cfg = default_model_cfg
128
+
129
+ # Only update config when training
130
+ if stage == 'train':
131
+ # Augmentation parameters
132
+ if args.random_amp_range is not None:
133
+ shared_cfg["AUGMENTATION"]["train_random_amp_range"] = list(
134
+ (float(args.random_amp_range[0]), float(args.random_amp_range[1])))
135
+ if args.stem_iaug_prob is not None:
136
+ shared_cfg["AUGMENTATION"]["train_stem_iaug_prob"] = float(args.stem_iaug_prob)
137
+
138
+ if args.xaug_max_k is not None:
139
+ shared_cfg["AUGMENTATION"]["train_stem_xaug_policy"]["max_k"] = int(args.xaug_max_k)
140
+ if args.xaug_tau is not None:
141
+ shared_cfg["AUGMENTATION"]["train_stem_xaug_policy"]["tau"] = float(args.xaug_tau)
142
+ if args.xaug_alpha is not None:
143
+ shared_cfg["AUGMENTATION"]["train_stem_xaug_policy"]["alpha"] = float(args.xaug_alpha)
144
+ if args.xaug_no_instr_overlap is not None:
145
+ shared_cfg["AUGMENTATION"]["train_stem_xaug_policy"]["no_instr_overlap"] = bool(args.xaug_no_instr_overlap)
146
+ if args.xaug_no_drum_overlap is not None:
147
+ shared_cfg["AUGMENTATION"]["train_stem_xaug_policy"]["no_drum_overlap"] = bool(args.xaug_no_drum_overlap)
148
+ if args.uhat_intra_stem_augment is not None:
149
+ shared_cfg["AUGMENTATION"]["train_stem_xaug_policy"]["uhat_intra_stem_augment"] = bool(
150
+ args.uhat_intra_stem_augment)
151
+
152
+ if args.pitch_shift_range is not None:
153
+ if args.pitch_shift_range in [["0", "0"], [0, 0]]:
154
+ shared_cfg["AUGMENTATION"]["train_pitch_shift_range"] = None
155
+ else:
156
+ shared_cfg["AUGMENTATION"]["train_pitch_shift_range"] = list(
157
+ (int(args.pitch_shift_range[0]), int(args.pitch_shift_range[1])))
158
+
159
+ train_stem_iaug_prob = shared_cfg["AUGMENTATION"]["train_stem_iaug_prob"]
160
+ random_amp_range = shared_cfg["AUGMENTATION"]["train_random_amp_range"]
161
+ train_stem_xaug_policy = shared_cfg["AUGMENTATION"]["train_stem_xaug_policy"]
162
+ print(f'Random amp range: {random_amp_range}\n' +
163
+ f'Intra-stem augmentation probability: {train_stem_iaug_prob}\n' +
164
+ f'Stem augmentation policy: {train_stem_xaug_policy}\n' +
165
+ f'Pitch shift range: {shared_cfg["AUGMENTATION"]["train_pitch_shift_range"]}\n')
166
+
167
+ # Update audio config
168
+ if args.audio_codec != None:
169
+ assert args.audio_codec in ['spec', 'melspec']
170
+ audio_cfg["codec"] = str(args.audio_codec)
171
+ if args.hop_length != None:
172
+ audio_cfg["hop_length"] = int(args.hop_length)
173
+ if args.n_mels != None:
174
+ audio_cfg["n_mels"] = int(args.n_mels)
175
+ if args.input_frames != None:
176
+ audio_cfg["input_frames"] = int(args.input_frames)
177
+
178
+ # Update shared config
179
+ if shared_cfg["TOKENIZER"]["max_shift_steps"] == "auto":
180
+ shift_steps_ms = shared_cfg["TOKENIZER"]["shift_step_ms"]
181
+ input_frames = audio_cfg["input_frames"]
182
+ fs = audio_cfg["sample_rate"]
183
+ max_shift_steps = (input_frames / fs) // (shift_steps_ms / 1000) + 2 # 206 by default
184
+ shared_cfg["TOKENIZER"]["max_shift_steps"] = int(max_shift_steps)
185
+
186
+ # Update model config
187
+ if args.encoder_type != None:
188
+ model_cfg["encoder_type"] = str(args.encoder_type)
189
+ if args.decoder_type != None:
190
+ model_cfg["decoder_type"] = str(args.decoder_type)
191
+ if args.pre_encoder_type != "default":
192
+ model_cfg["pre_encoder_type"] = str(args.pre_encoder_type)
193
+ if args.pre_decoder_type != 'default':
194
+ model_cfg["pre_decoder_type"] = str(args.pre_decoder_type)
195
+ if args.conv_out_channels != None:
196
+ model_cfg["conv_out_channels"] = int(args.conv_out_channels)
197
+ assert isinstance(args.task_cond_decoder, bool) and isinstance(args.task_cond_encoder, bool)
198
+ model_cfg["use_task_conditional_encoder"] = args.task_cond_encoder
199
+ model_cfg["use_task_conditional_decoder"] = args.task_cond_decoder
200
+
201
+ if args.encoder_position_encoding_type != 'default':
202
+ if args.encoder_position_encoding_type in ['None', 'none', '0']:
203
+ model_cfg["encoder"][model_cfg["encoder_type"]]["position_encoding_type"] = None
204
+ elif args.encoder_position_encoding_type in [
205
+ 'sinusoidal', 'rope', 'trainable', 'alibi', 'alibit', 'tkd', 'td', 'tk', 'kdt'
206
+ ]:
207
+ model_cfg["encoder"][model_cfg["encoder_type"]]["position_encoding_type"] = str(
208
+ args.encoder_position_encoding_type)
209
+ else:
210
+ raise ValueError(f'Encoder PE type {args.encoder_position_encoding_type} not supported')
211
+ if args.decoder_position_encoding_type != 'default':
212
+ if args.decoder_position_encoding_type in ['None', 'none', '0']:
213
+ raise ValueError('Decoder PE type cannot be None')
214
+ elif args.decoder_position_encoding_type in ['sinusoidal', 'trainable']:
215
+ model_cfg["decoder"][model_cfg["decoder_type"]]["position_encoding_type"] = str(
216
+ args.decoder_position_encoding_type)
217
+ else:
218
+ raise ValueError(f'Decoder PE {args.decoder_position_encoding_type} not supported')
219
+
220
+ if args.tie_word_embedding is not None:
221
+ model_cfg["tie_word_embedding"] = bool(args.tie_word_embedding)
222
+
223
+ if args.d_feat != None:
224
+ model_cfg["d_feat"] = int(args.d_feat)
225
+ if args.d_latent != None:
226
+ model_cfg['encoder']['perceiver-tf']["d_latent"] = int(args.d_latent)
227
+ if args.num_latents != None:
228
+ model_cfg['encoder']['perceiver-tf']['num_latents'] = int(args.num_latents)
229
+ if args.perceiver_tf_d_model != None:
230
+ model_cfg['encoder']['perceiver-tf']['d_model'] = int(args.perceiver_tf_d_model)
231
+ if args.num_perceiver_tf_blocks != None:
232
+ model_cfg["encoder"]["perceiver-tf"]["num_blocks"] = int(args.num_perceiver_tf_blocks)
233
+ if args.num_perceiver_tf_local_transformers_per_block != None:
234
+ model_cfg["encoder"]["perceiver-tf"]["num_local_transformers_per_block"] = int(
235
+ args.num_perceiver_tf_local_transformers_per_block)
236
+ if args.num_perceiver_tf_temporal_transformers_per_block != None:
237
+ model_cfg["encoder"]["perceiver-tf"]["num_temporal_transformers_per_block"] = int(
238
+ args.num_perceiver_tf_temporal_transformers_per_block)
239
+ if args.attention_to_channel != None:
240
+ model_cfg["encoder"]["perceiver-tf"]["attention_to_channel"] = bool(args.attention_to_channel)
241
+ if args.sca_use_query_residual != None:
242
+ model_cfg["encoder"]["perceiver-tf"]["sca_use_query_residual"] = bool(args.sca_use_query_residual)
243
+ if args.layer_norm_type != None:
244
+ model_cfg["encoder"]["perceiver-tf"]["layer_norm"] = str(args.layer_norm_type)
245
+ if args.ff_layer_type != None:
246
+ model_cfg["encoder"]["perceiver-tf"]["ff_layer_type"] = str(args.ff_layer_type)
247
+ if args.ff_widening_factor != None:
248
+ model_cfg["encoder"]["perceiver-tf"]["ff_widening_factor"] = int(args.ff_widening_factor)
249
+ if args.moe_num_experts != None:
250
+ model_cfg["encoder"]["perceiver-tf"]["moe_num_experts"] = int(args.moe_num_experts)
251
+ if args.moe_topk != None:
252
+ model_cfg["encoder"]["perceiver-tf"]["moe_topk"] = int(args.moe_topk)
253
+ if args.hidden_act != None:
254
+ model_cfg["encoder"]["perceiver-tf"]["hidden_act"] = str(args.hidden_act)
255
+ if args.rotary_type != None:
256
+ assert len(
257
+ args.rotary_type
258
+ ) == 3, "rotary_type must be a 3-letter string (e.g. 'ppl': 'pixel' for SCA, 'pixel' for latent, 'lang' for temporal transformer)"
259
+ model_cfg["encoder"]["perceiver-tf"]["rotary_type_sca"] = str(args.rotary_type)[0]
260
+ model_cfg["encoder"]["perceiver-tf"]["rotary_type_latent"] = str(args.rotary_type)[1]
261
+ model_cfg["encoder"]["perceiver-tf"]["rotary_type_temporal"] = str(args.rotary_type)[2]
262
+ if args.rope_apply_to_keys != None:
263
+ model_cfg["encoder"]["perceiver-tf"]["rope_apply_to_keys"] = bool(args.rope_apply_to_keys)
264
+ if args.rope_partial_pe != None:
265
+ model_cfg["encoder"]["perceiver-tf"]["rope_partial_pe"] = bool(args.rope_partial_pe)
266
+
267
+ if args.decoder_ff_layer_type != None:
268
+ model_cfg["decoder"][model_cfg["decoder_type"]]["ff_layer_type"] = str(args.decoder_ff_layer_type)
269
+ if args.decoder_ff_widening_factor != None:
270
+ model_cfg["decoder"][model_cfg["decoder_type"]]["ff_widening_factor"] = int(args.decoder_ff_widening_factor)
271
+
272
+ if args.event_length != None:
273
+ model_cfg["event_length"] = int(args.event_length)
274
+
275
+ if stage == 'train':
276
+ if args.encoder_dropout_rate != None:
277
+ model_cfg["encoder"][model_cfg["encoder_type"]]["dropout_rate"] = float(args.encoder_dropout_rate)
278
+ if args.decoder_dropout_rate != None:
279
+ model_cfg["decoder"][model_cfg["decoder_type"]]["dropout_rate"] = float(args.decoder_dropout_rate)
280
+
281
+ return shared_cfg, audio_cfg, model_cfg # return updated configs
model/lm_head.py ADDED
@@ -0,0 +1,40 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ """lm_head.py"""
11
+ import torch
12
+ from torch import nn
13
+ from typing import Optional, Dict
14
+
15
+
16
+ class LMHead(nn.Module):
17
+ """Language Model Head with tied weights."""
18
+
19
+ def __init__(self, decoder_config: Dict, init_factor: float = 1.0, tie_word_embeddings: bool = True):
20
+
21
+ super().__init__()
22
+ self.d_model = decoder_config["d_model"]
23
+ self.init_factor = init_factor
24
+ self.tie_word_embeddings = tie_word_embeddings
25
+
26
+ self.lm_head = nn.Linear(decoder_config["d_model"], decoder_config["vocab_size"], bias=False)
27
+ self._init_weights()
28
+
29
+ def _init_weights(self):
30
+ if self.tie_word_embeddings is False:
31
+ self.lm_head.weight.data.normal_(mean=0.0, std=self.init_factor * 1.0)
32
+
33
+ def forward(self, decoder_hs: torch.FloatTensor) -> torch.FloatTensor:
34
+ if self.tie_word_embeddings is True:
35
+ # Rescale output before projecting on vocab
36
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
37
+ decoder_hs = decoder_hs * (self.d_model**-0.5)
38
+
39
+ lm_logits = self.lm_head(decoder_hs)
40
+ return lm_logits
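Because `LMHead` skips its own weight initialization when `tie_word_embeddings=True`, the projection is expected to share its weight with the decoder's token embedding, and hidden states are rescaled by `d_model**-0.5` before the projection (the T5/mesh-tf convention referenced above). A hedged usage sketch, with illustrative `decoder_config` values:

```python
# Usage sketch; assumes LMHead as defined above, with illustrative config values.
import torch
from torch import nn
from model.lm_head import LMHead

decoder_config = {"d_model": 512, "vocab_size": 1391}
lm_head = LMHead(decoder_config, init_factor=1.0, tie_word_embeddings=True)

# Tie the output projection to a token-embedding table (T5-style weight tying).
token_emb = nn.Embedding(decoder_config["vocab_size"], decoder_config["d_model"])
lm_head.lm_head.weight = token_emb.weight

decoder_hs = torch.randn(2, 16, decoder_config["d_model"])  # (B, T, d_model)
logits = lm_head(decoder_hs)                                # (B, T, vocab_size)
print(logits.shape)  # torch.Size([2, 16, 1391])
```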
model/ops.py ADDED
@@ -0,0 +1,111 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ """ op.py """
11
+ import math
12
+ from packaging.version import parse as VersionParse
13
+
14
+ import torch
15
+ import torch.nn as nn
16
+ import torch.nn.functional as F
17
+ from einops import rearrange
18
+ from transformers.models.t5.modeling_t5 import T5LayerNorm as RMSNorm
19
+
20
+
21
+ def get_layer_norm(dim: int, layer_norm_type: str = "layer_norm", layer_norm_eps: float = 1e-5):
22
+ """Get layer normalization layer.
23
+ Args:
24
+ dim (int): Feature dimension
25
+ layer_norm_type (str): "layer_norm" or "rms_norm"
26
+ layer_norm_eps (float): Epsilon value for numerical stability
27
+
28
+ Returns:
29
+ nn.Module: Layer normalization layer
30
+ """
31
+ if layer_norm_type == "rms_norm":
32
+ # T5LayerNorm is equivalent to RMSNorm. https://arxiv.org/abs/1910.07467
33
+ return RMSNorm(hidden_size=dim, eps=layer_norm_eps)
34
+ else:
35
+ return nn.LayerNorm(normalized_shape=dim, eps=layer_norm_eps)
36
+
37
+
38
+ def check_all_elements_equal(x: torch.Tensor) -> bool:
39
+ return x.eq(x[0]).all().item()
40
+
41
+
42
+ def minmax_normalize(x: torch.Tensor, eps: float = 0.008) -> torch.FloatTensor:
43
+ """Min-max normalization:
44
+
45
+ x_norm = (x - x_min) / (x_max - x_min + eps)
46
+
47
+ Args:
48
+ x (torch.Tensor): (B, T, F)
49
+ Returns:
50
+ torch.Tensor: (B, T, F) with output range of [0, 1]
51
+ """
52
+ x_max = rearrange(x, "b t f -> b (t f)").max(1, keepdim=True)[0]
53
+ x_min = rearrange(x, "b t f -> b (f t)").min(1, keepdim=True)[0]
54
+ x_max = x_max[:, None, :] # (B,1,1)
55
+ x_min = x_min[:, None, :] # (B,1,1)
56
+ return (x - x_min) / (x_max - x_min + eps)
57
+
58
+
59
+ def count_parameters(model):
60
+ num_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
61
+ num_params = sum(p.numel() for p in model.parameters())
62
+ return num_trainable_params, num_params
63
+
64
+
65
+ def adjust_b_to_gcd(a, b, min_gcd=16):
66
+ """
67
+ Adjust the value of b to ensure the GCD(a, b) is at least min_gcd with minimum change to b.
68
+
69
+ Parameters:
70
+ - a (int): A positive integer
71
+ - b (int): A positive integer
72
+ - min_gcd (int): The minimum desired GCD
73
+
74
+ Returns:
75
+ - int: The adjusted value of b
76
+ """
77
+ current_gcd = math.gcd(a, b)
78
+
79
+ # If current GCD is already greater than or equal to min_gcd, return b as it is.
80
+ if current_gcd >= min_gcd:
81
+ return b
82
+
83
+ # If a is less than min_gcd, then it's impossible to get a GCD of at least min_gcd.
84
+ if a < min_gcd:
85
+ raise ValueError("a must be at least as large as min_gcd.")
86
+
87
+ # Adjust b by trying increments and decrements, preferring the smallest absolute change.
88
+ adjusted_b_up = b
89
+ adjusted_b_down = b
90
+
91
+ while True:
92
+ adjusted_b_up += 1
93
+ adjusted_b_down -= 1
94
+
95
+ if math.gcd(a, adjusted_b_up) >= min_gcd:
96
+ return adjusted_b_up
97
+ elif math.gcd(a, adjusted_b_down) >= min_gcd:
98
+ return adjusted_b_down
99
+
100
+
101
+ def optional_compiler_disable(func):
102
+ if VersionParse(torch.__version__) >= VersionParse("2.1"):
103
+ # If the version is 2.1 or higher, apply the torch.compiler.disable decorator.
104
+ return torch.compiler.disable(func)
105
+ else:
106
+ # If the version is below 2.1, return the original function.
107
+ return func
108
+
109
+
110
+ def optional_compiler_dynamic(func):
111
+ return torch.compile(func, dynamic=True)
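A short sanity-check sketch for the helpers above (values are illustrative): `minmax_normalize` scales each example so its minimum is 0 and its maximum sits just below 1 because of `eps`, and `adjust_b_to_gcd` nudges `b` by the smallest amount that makes `gcd(a, b) >= min_gcd`.

```python
# Illustrative sanity check for the helpers defined above.
import torch
from model.ops import minmax_normalize, adjust_b_to_gcd

x = torch.randn(2, 10, 128)   # (B, T, F)
y = minmax_normalize(x)       # per-example scaling; min is 0.0, max slightly below 1.0 (eps)
print(float(y.min()), float(y.max()))

# gcd(128, 100) = 4 < 16, so b is nudged to the closest value whose gcd with a is >= 16.
print(adjust_b_to_gcd(a=128, b=100, min_gcd=16))  # -> 96, since gcd(128, 96) = 32
```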
model/perceiver_helper.py ADDED
@@ -0,0 +1,290 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ from dataclasses import dataclass
11
+ from typing import Optional, Tuple
12
+ import torch
13
+ from torch import nn
14
+ from transformers.utils import ModelOutput
15
+ from transformers.configuration_utils import PretrainedConfig
16
+ from transformers.modeling_utils import PreTrainedModel
17
+ # from transformers.models.perceiver.modeling_perceiver import (PerceiverAbstractPositionEncoding,
18
+ # PerceiverTrainablePositionEncoding,
19
+ # PerceiverFourierPositionEncoding)
20
+
21
+
22
+ class PerceiverTFConfig(PretrainedConfig):
23
+ r"""
24
+ This is the configuration class to store the configuration of a [`PerceiverTF`]. It is used to instantiate a
25
+ Perceiver model according to the specified arguments, defining the model architecture. Instantiating a
26
+ configuration with the defaults will yield a similar configuration to that of the Perceiver
27
+ [deepmind/language-perceiver](https://huggingface.co/deepmind/language-perceiver) architecture.
28
+
29
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
30
+ documentation from [`PretrainedConfig`] for more information.
31
+
32
+ Args:
33
+ num_latents (`int`, *optional*, defaults to 24):
34
+ The number of latents.
35
+ d_latents (`int`, *optional*, defaults to 128):
36
+ Dimension of the latent embeddings.
37
+ d_model (`int`, *optional*, defaults to 128):
38
+ Dimension of the inputs. Should only be provided in case [*PerceiverTextPreprocessor*] is used or no
39
+ preprocessor is provided.
40
+ kv_dim (`int`, *optional*, defaults to 128):
+ Dimension of the key/value inputs to the spectral cross-attention (the channel dimension of the input features, or the frequency dimension when `attention_to_channel` is enabled).
41
+ num_blocks (`int`, *optional*, defaults to 3):
42
+ Number of blocks in the Transformer encoder.
43
+ num_self_attention_heads (`int`, *optional*, defaults to 8):
44
+ Number of attention heads for each self-attention layer in the Transformer encoder.
45
+ num_cross_attention_heads (`int`, *optional*, defaults to 8):
46
+ Number of attention heads for each cross-attention layer in the Transformer encoder.
47
+ num_local_transformers_per_block (`int`, *optional*, defaults to 2):
48
+ Number of local Transformer layers per Transformer block in the Transformer encoder.
49
+ num_temporal_transformers_per_block (`int`, *optional*, defaults to 2):
50
+ Number of temporal Transformer layers per Transformer block in the Transformer encoder.
51
+ shared_parallel_temporal_transformers (`bool`, *optional*, defaults to `False`):
52
+ Whether to share the parameters across the K parallel temporal Transformers in each block.
53
+ qk_channels (`int`, *optional*):
54
+ Dimension to project the queries + keys before applying attention in the cross-attention and self-attention
55
+ layers of the encoder. Will default to preserving the dimension of the queries if not specified.
56
+ v_channels (`int`, *optional*):
57
+ Dimension to project the values before applying attention in the cross-attention and self-attention layers
58
+ of the encoder. Will default to preserving the dimension of the queries if not specified.
59
+ ** DEPRECATED ** cross_attention_shape_for_attention (`str`, *optional*, defaults to `'kv'`):
60
+ Dimension to use when downsampling the queries and keys in the cross-attention layer of the encoder.
61
+ ** DEPRECATED ** self_attention_widening_factor (`int`, *optional*, defaults to 1):
62
+ Dimension of the feed-forward layer in the cross-attention layer of the Transformer encoder.
63
+ cross_attention_widening_factor (`int`, *optional*, defaults to 1):
64
+ Dimension of the feed-forward layer in the self-attention layers of the Transformer encoder.
65
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
66
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
67
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
68
+ dropout_rate (`float`, *optional*, defaults to 0.1):
69
+ The dropout ratio for the attention probabilities.
70
+ initializer_range (`float`, *optional*, defaults to 0.02):
71
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
72
+ layer_norm_type (`str`, *optional*, defaults to `'layer_norm'`):
73
+ The type of layer normalization to use. Can be one of {'layer_norm', 'rms_norm'}.
74
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
75
+ The epsilon used by the layer normalization layers.
76
+ sca_use_query_residual (`bool`, *optional*, defaults to `True`):
77
+ Whether to add a query residual in the spectral cross attention (SCA) layer of the encoder.
78
+ use_query_residual (`float`, *optional*, defaults to `True`):
79
+ Whether to add a query residual in the cross-attention layer of the encoder.
80
+ position_encoding_type (`str`, *optional*, defaults to `'trainable'`):
81
+ Type of position encoding to use. Can be one of {'trainable', 'alibi', 'alibit', 'rope', None}.
82
+ num_max_positions (`int`, *optional*, defaults to 330):
83
+ Maximum number of positions to use for the position encoding.
84
+ vocab_size (`int`, *optional*, defaults to 1391):
85
+ Vocabulary size for the masked language modeling model.
86
+ attention_to_channel (`bool`, defaults to `False`):
87
+ Whether SCA should attend to the channel dimension. If False, SCA attends to the frequency-bin dimension.
88
+ ff_layer_type (`str`, *optional*, defaults to `'mlp'`):
89
+ Type of feed-forward layer to use. Can be one of {'mlp', 'moe'}.
90
+ ff_widening_factor (`int`, *optional*, defaults to 1):
91
+ Widening factor for the feed-forward layers in the MLP/MoE.
92
+ moe_num_experts (`int`, *optional*, defaults to 4):
93
+ Number of experts to use in the mixture of experts (MoE) feed-forward layer.
94
+ Only used if `ff_layer_type` is set to `'moe'`.
95
+ moe_topk (`int`, *optional*, defaults to 2):
96
+ Number of top experts to use in the mixture of experts (MoE) feed-forward layer.
97
+ Only used if `ff_layer_type` is set to `'moe'`.
98
+ rope_type_sca (`str`, *optional*, defaults to `pixel`): Can be one of {'l'|'lang', 'p'|'pixel', None}.
99
+ RoPE index type for SCA. Only used if `position_encoding_type` is set to `rope`.
100
+ rope_type_latent (`str`, *optional*, defaults to `pixel`): Can be one of {'l'|'lang', 'p'|'pixel', None}.
101
+ RoPE index type for Latent Transformer. Only used if `position_encoding_type` is set to `'rope'`.
102
+ rope_type_temporal (`str`, *optional*, defaults to `lang`): Can be one of {'l'|'lang', 'p'|'pixel', None}.
103
+ RoPE index type for Temporal Transformer. Only used if `position_encoding_type` is set to `'rope'`.
104
+ rope_apply_to_keys (`bool`, *optional*, defaults to `False`): Whether to apply RoPE to the keys in the
105
+ self/cross-attention layers. Only used if `position_encoding_type` is set to `'rope'`.
106
+ rope_partial_pe (`bool`, *optional*, defaults to `False`): Whether to use partial RoPE in the self/cross-attention.
107
+ Only used if `position_encoding_type` is set to `'rope'`.
108
+ rope_trainable (`bool`, *optional*, defaults to `False`): Whether to make the RoPE trainable. Only used if `position_encoding_type` is set to `'rope'`.
109
+
110
+ Example:
111
+
112
+ ```python
113
+ >>> from model.perceiver_mod import PerceiverTFEncoder, PerceiverTFConfig
114
+
115
+ >>> # Initializing a Perceiver deepmind/language-perceiver style configuration
116
+ >>> configuration = PerceiverTFConfig()
117
+
118
+ >>> # Initializing a model from the deepmind/language-perceiver style configuration
119
+ >>> model = PerceiverTFEncoder(configuration)
120
+
121
+ >>> # Accessing the model configuration
122
+ >>> configuration = model.config
123
+ ```"""
124
+ model_type = "perceivertf"
125
+
126
+ def __init__(
127
+ self,
128
+ num_latents=24,
129
+ d_latents=128,
130
+ d_model=128,
131
+ kv_dim=128,
132
+ num_blocks=3,
133
+ num_self_attention_heads=8,
134
+ num_cross_attention_heads=8,
135
+ num_local_transformers_per_block=2,
136
+ num_temporal_transformers_per_block=2,
137
+ qk_channels=128,
138
+ v_channels=128,
139
+ cross_attention_shape_for_attention="q",
140
+ # self_attention_widening_factor=1, ** DEPRECATED **
141
+ # cross_attention_widening_factor=1, ** DEPRECATED **
142
+ hidden_act="gelu",
143
+ dropout_rate=0.1,
144
+ initializer_range=0.02,
145
+ layer_norm_type="layer_norm",
146
+ layer_norm_eps=1e-5,
147
+ sca_use_query_residual=True,
148
+ use_query_residual=True,
149
+ position_encoding_type="trainable",
150
+ num_max_positions=330,
151
+ vocab_size=1391,
152
+ attention_to_channel=False,
153
+ ff_layer_type="mlp",
154
+ ff_widening_factor=1,
155
+ moe_num_experts=4,
156
+ moe_topk=2,
157
+ rope_type_sca="pixel",
158
+ rope_type_latent="pixel",
159
+ rope_type_temporal="lang",
160
+ rope_apply_to_keys=False,
161
+ rope_partial_pe=False,
162
+ rope_trainable=False,
163
+ **kwargs,
164
+ ):
165
+ super().__init__(**kwargs)
166
+
167
+ self.num_latents = num_latents
168
+ self.d_latents = d_latents
169
+ self.d_model = d_model
170
+ self.kv_dim = kv_dim
171
+ self.qk_channels = qk_channels
172
+ self.v_channels = v_channels
173
+
174
+ self.num_blocks = num_blocks
175
+ self.num_self_attention_heads = num_self_attention_heads
176
+ self.num_cross_attention_heads = num_cross_attention_heads
177
+ self.num_local_transformers_per_block = num_local_transformers_per_block
178
+ self.num_temporal_transformers_per_block = num_temporal_transformers_per_block
179
+ self.sca_use_query_residual = sca_use_query_residual
180
+ self.use_query_residual = use_query_residual
181
+ self.position_encoding_type = position_encoding_type
182
+ self.num_max_positions = num_max_positions
183
+ # self.self_attention_widening_factor = self_attention_widening_factor
184
+ # self.cross_attention_widening_factor = cross_attention_widening_factor
185
+ self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
186
+ self.attention_to_channel = attention_to_channel
187
+ self.ff_layer_type = ff_layer_type
188
+ self.ff_widening_factor = ff_widening_factor
189
+ self.moe_num_experts = moe_num_experts
190
+ self.moe_topk = moe_topk
191
+ self.rope_type_sca = rope_type_sca
192
+ self.rope_type_latent = rope_type_latent
193
+ self.rope_type_temporal = rope_type_temporal
194
+ self.rope_apply_to_keys = rope_apply_to_keys
195
+ self.rope_partial_pe = rope_partial_pe
196
+ self.rope_trainable = rope_trainable
197
+
198
+ self.hidden_act = hidden_act
199
+ self.dropout_rate = dropout_rate
200
+ self.initializer_range = initializer_range
201
+ self.layer_norm_type = layer_norm_type
202
+ self.layer_norm_eps = layer_norm_eps
203
+
204
+ # masked language modeling attributes
205
+ self.vocab_size = vocab_size
206
+
207
+
208
+ class PerceiverTFPreTrainedModel(PreTrainedModel):
209
+ """
210
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
211
+ models.
212
+ """
213
+
214
+ config_class = PerceiverTFConfig
215
+ base_model_prefix = "perceivertf"
216
+ main_input_name = "inputs"
217
+
218
+ def _init_weights(self, module):
219
+ """Initialize the weights"""
220
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
221
+ # Slightly different from the TF version which uses truncated_normal for initialization
222
+ # cf https://github.com/pytorch/pytorch/pull/5617
223
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
224
+ if module.bias is not None:
225
+ module.bias.data.zero_()
226
+ elif hasattr(module, "latents"):
227
+ module.latents.data.normal_(mean=0.0, std=self.config.initializer_range)
228
+ elif hasattr(module, "_pos_emb") and isinstance(module._pos_emb, nn.Parameter):
229
+ # initialize PerceiverTFTrainablePE
230
+ module._pos_emb.data.normal_(mean=0.0, std=self.config.initializer_range)
231
+ elif hasattr(module, "_pos_emb_temporal"):
232
+ # initialize PerceiverTFTrainablePE
233
+ module._pos_emb_temporal.data.normal_(mean=0.0, std=self.config.initializer_range)
234
+ elif hasattr(module, "slopes") and isinstance(module.slopes, nn.Parameter):
235
+ # initialize AlibiPositionalBias
236
+ module.reset_parameters()
237
+ elif isinstance(module, nn.ParameterDict):
238
+ for modality in module.keys():
239
+ module[modality].data.normal_(mean=0.0, std=self.config.initializer_range)
240
+ elif isinstance(module, nn.Embedding):
241
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
242
+ if module.padding_idx is not None:
243
+ module.weight.data[module.padding_idx].zero_()
244
+ elif isinstance(module, nn.LayerNorm):
245
+ module.bias.data.zero_()
246
+ module.weight.data.fill_(1.0)
247
+ # elif hasattr(module, "position_embeddings") and isinstance(
248
+ # module, PerceiverTrainablePositionEncoding):
249
+ # module.position_embeddings.data.normal_(mean=0.0, std=self.config.initializer_range)
250
+
251
+
252
+ # Replace the 'ModelOutputWithCrossAttentions' with 'MoEModelOutputWithCrossAttentions' for MoE
253
+ @dataclass
254
+ class MoEModelOutputWithCrossAttentions(ModelOutput):
255
+ """
256
+ Base class for model's outputs, with potential hidden states and attentions.
257
+ Plus, router_probs for Mixture of Experts models.
258
+
259
+ Args:
260
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
261
+ Sequence of hidden-states at the output of the last layer of the model.
262
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
263
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
264
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
265
+
266
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
267
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
268
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
269
+ sequence_length)`.
270
+
271
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
272
+ heads.
273
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
274
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
275
+ sequence_length)`.
276
+
277
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
278
+ weighted average in the cross-attention heads.
279
+ router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed):
280
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
281
+
282
+ Raw router logits computed by the MoE routers; these are used to compute the auxiliary
283
+ loss and the z_loss for Mixture of Experts models.
284
+ """
285
+
286
+ last_hidden_state: torch.FloatTensor = None
287
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
288
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
289
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
290
+ router_logits: Optional[Tuple[torch.FloatTensor]] = None
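As a configuration sketch, the MoE and RoPE options above can be combined as follows. The three per-attention `rope_type_*` fields mirror the 3-letter `rotary_type` convention from the argument-override fragment earlier ('p' for 'pixel', 'l' for 'lang'); the actual mapping from that string to these fields is presumably handled elsewhere in the model-construction code, so this is only an illustrative example.

```python
# Illustrative configuration: RoPE ('ppl') plus MoE feed-forward with 4 experts, top-2 routing.
from model.perceiver_helper import PerceiverTFConfig

config = PerceiverTFConfig(
    position_encoding_type="rope",
    rope_type_sca="pixel",      # 'p' -> spectral cross attention
    rope_type_latent="pixel",   # 'p' -> local latent transformer
    rope_type_temporal="lang",  # 'l' -> temporal transformer
    ff_layer_type="moe",
    moe_num_experts=4,
    moe_topk=2,
)
print(config.model_type, config.ff_layer_type, config.moe_topk)  # perceivertf moe 2
```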
model/perceiver_mod.py ADDED
@@ -0,0 +1,912 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ """perceiver_mod.py
11
+
12
+ Implementation of the PerceiverTF encoder with:
13
+ - AliBi positional bias
14
+ - Mixtral of Experts (MoE) feedforward layer
15
+
16
+ """
17
+ import math
18
+ from einops import rearrange
19
+ from typing import Optional, Tuple, Union, List, Dict, Literal
20
+
21
+ import torch
22
+ from torch import nn
23
+ from transformers.models.perceiver.modeling_perceiver import PerceiverSelfOutput
24
+ from transformers.pytorch_utils import (apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer)
25
+ from model.perceiver_helper import MoEModelOutputWithCrossAttentions
26
+ from model.perceiver_helper import PerceiverTFPreTrainedModel, PerceiverTFConfig
27
+ from model.positional_encoding import AlibiPositionalBias, get_rotary_emb
28
+ from model.ops import get_layer_norm
29
+ from model.ff_layer import get_ff_layer
30
+
31
+
32
+ class PerceiverEmbeddings(nn.Module):
33
+ """Construct the latent embeddings sharable with token embeddings in the decoder."""
34
+
35
+ def __init__(self, config, shared_emb: Optional[nn.Parameter] = None):
36
+ super().__init__()
37
+ if shared_emb is not None:
38
+ self.latents = shared_emb
39
+ assert self.latents.shape == (config.num_latents, config.d_latents)
40
+ else:
41
+ self.latents = nn.Parameter(torch.randn(config.num_latents, config.d_latents))
42
+
43
+ def forward(self, batch_size: int):
44
+ return self.latents.expand(batch_size, -1, -1)
45
+
46
+
47
+ class PerceiverTFTrainablePE(nn.Module):
48
+ """Construct the trainable absolute positional embeddings."""
49
+
50
+ def __init__(self, position_encoding_type: Literal['trainable', 'tkd', 'td', 'tk', 'kdt'], max_t: int, k: int,
51
+ d: int) -> None:
52
+ super().__init__()
53
+ self.position_encoding_type = position_encoding_type
54
+ self.max_t = max_t
55
+ self.k = k
56
+ self.d = d
57
+
58
+ if position_encoding_type in ['trainable', 'tkd']:
59
+ self._pos_emb = nn.Parameter(torch.randn(max_t, k, d))
60
+ elif position_encoding_type == 'td':
61
+ self._pos_emb = nn.Parameter(torch.randn(max_t, d))
62
+ elif position_encoding_type == 'tk':
63
+ self._pos_emb = nn.Parameter(torch.randn(max_t, k))
64
+ elif position_encoding_type == 'kdt':
65
+ self._pos_emb = nn.Parameter(torch.randn(k, d))
66
+ self._pos_emb_temporal = nn.Parameter(torch.randn(max_t, d))
67
+ else:
68
+ raise ValueError(f'unknown position encoding type {position_encoding_type}')
69
+
70
+ def forward(self):
71
+ pos_emb_temporal = None
72
+
73
+ if self.position_encoding_type in ['trainable', 'tkd']:
74
+ pos_emb = self._pos_emb
75
+ elif self.position_encoding_type == 'td':
76
+ pos_emb = self._pos_emb.unsqueeze(1).expand(-1, self.k, -1)
77
+ elif self.position_encoding_type == 'tk':
78
+ pos_emb = self._pos_emb.unsqueeze(-1).expand(-1, -1, self.d)
79
+ elif self.position_encoding_type == 'kdt':
80
+ pos_emb = self._pos_emb.unsqueeze(0).expand(self.max_t, -1, -1)
81
+ pos_emb_temporal = self._pos_emb_temporal
82
+
83
+ return pos_emb, pos_emb_temporal
84
+
85
+
86
+ class PerceiverAlibiSelfAttention(nn.Module):
87
+ """
88
+ Multi-headed {cross, self}-attention + Alibi/Rotary positional bias/emb:
89
+ - Can be used both in the encoder as well as in the decoder.
90
+ - Modified from PerceiverSelfAttention in modeling_perceiver.py to support Alibi positional bias
91
+
92
+ """
93
+
94
+ def __init__(
95
+ self,
96
+ config,
97
+ is_cross_attention=False,
98
+ qk_channels=None,
99
+ v_channels=None,
100
+ num_heads=1,
101
+ q_dim=None,
102
+ kv_dim=None,
103
+ rotary_emb=None,
104
+ ):
105
+ super().__init__()
106
+ self.num_heads = num_heads
107
+ # Q and K must have the same number of channels.
108
+ # Default to preserving Q's input's shape.
109
+ if qk_channels is None:
110
+ qk_channels = q_dim
111
+ # V's num_channels determines the shape of the output of QKV-attention.
112
+ # Default to the same number of channels used in the key-query operation.
113
+ if v_channels is None:
114
+ v_channels = qk_channels
115
+ if qk_channels % num_heads != 0:
116
+ raise ValueError(f"qk_channels ({qk_channels}) must be divisible by num_heads ({num_heads}).")
117
+ if v_channels % num_heads != 0:
118
+ raise ValueError(f"v_channels ({v_channels}) must be divisible by num_heads ({num_heads}).")
119
+
120
+ self.qk_channels = qk_channels
121
+ self.v_channels = v_channels
122
+ self.qk_channels_per_head = self.qk_channels // num_heads
123
+ self.v_channels_per_head = self.v_channels // num_heads
124
+
125
+ # Layer normalization
126
+ self.layernorm1 = get_layer_norm(q_dim, config.layer_norm_type, config.layer_norm_eps)
127
+ if is_cross_attention:
128
+ self.layernorm2 = get_layer_norm(kv_dim, config.layer_norm_type, config.layer_norm_eps)
129
+ else:
130
+ self.layernorm2 = nn.Identity()
131
+ # self.layernorm1 = nn.LayerNorm(q_dim)
132
+ # self.layernorm2 = nn.LayerNorm(kv_dim) if is_cross_attention else nn.Identity()
133
+
134
+ # Projection matrices
135
+ self.query = nn.Linear(q_dim, qk_channels)
136
+ self.key = nn.Linear(kv_dim, qk_channels)
137
+ self.value = nn.Linear(kv_dim, v_channels)
138
+
139
+ self.dropout = nn.Dropout(config.dropout_rate)
140
+
141
+ # (Modified) Alibi positional bias
142
+ if config.position_encoding_type == 'alibi':
143
+ self.alibi_bias = AlibiPositionalBias(heads=num_heads, total_heads=num_heads, trainable_slope=False)
144
+ elif config.position_encoding_type == 'alibit':
145
+ self.alibi_bias = AlibiPositionalBias(heads=num_heads, total_heads=num_heads, trainable_slope=True)
146
+ else:
147
+ self.alibi_bias = None
148
+ # (Modified) RoPE
149
+ if config.position_encoding_type == 'rope':
150
+ assert rotary_emb is not None, "rotary_emb must be provided for RoPE."
151
+ self.rotary_emb = rotary_emb
152
+ else:
153
+ self.rotary_emb = None
154
+ self.rope_apply_to_keys = config.rope_apply_to_keys # False by default
155
+
156
+ def transpose_for_scores(self, x, channels_per_head):
157
+ new_x_shape = x.size()[:-1] + (self.num_heads, channels_per_head)
158
+ x = x.view(*new_x_shape)
159
+ return x.permute(0, 2, 1, 3)
160
+
161
+ def forward(
162
+ self,
163
+ hidden_states: torch.Tensor,
164
+ attention_mask: Optional[torch.FloatTensor] = None,
165
+ head_mask: Optional[torch.FloatTensor] = None,
166
+ inputs: Optional[torch.FloatTensor] = None,
167
+ inputs_mask: Optional[torch.FloatTensor] = None,
168
+ output_attentions: Optional[bool] = False,
169
+ ) -> Tuple[torch.Tensor]:
170
+ hidden_states = self.layernorm1(hidden_states)
171
+ inputs = self.layernorm2(inputs)
172
+
173
+ # Project queries, keys and values to a common feature dimension. If this is instantiated as a cross-attention module,
174
+ # the keys and values come from the inputs; the attention mask needs to be such that the inputs's non-relevant tokens are not attended to.
175
+ is_cross_attention = inputs is not None
176
+ queries = self.query(hidden_states)
177
+
178
+ if is_cross_attention:
179
+ keys = self.key(inputs)
180
+ values = self.value(inputs)
181
+ attention_mask = inputs_mask
182
+ else:
183
+ keys = self.key(hidden_states)
184
+ values = self.value(hidden_states)
185
+
186
+ # Reshape channels for multi-head attention.
187
+ # We reshape from (batch_size, time, channels) to (batch_size, num_heads, time, channels per head)
188
+ queries = self.transpose_for_scores(queries, self.qk_channels_per_head)
189
+ keys = self.transpose_for_scores(keys, self.qk_channels_per_head)
190
+ values = self.transpose_for_scores(values, self.v_channels_per_head)
191
+
192
+ # (Modified) RoPE
193
+ if self.rotary_emb is not None:
194
+ queries = self.rotary_emb.apply_rotary_custom(queries)
195
+ if self.rope_apply_to_keys is True:
196
+ keys = self.rotary_emb.apply_rotary_custom(keys)
197
+
198
+ # Take the dot product between the queries and keys to get the raw attention scores.
199
+ attention_scores = torch.matmul(queries, keys.transpose(-1, -2))
200
+
201
+ # (Modified) Alibi positional bias
202
+ if self.alibi_bias is not None:
203
+ batch_size, num_heads, q_seq_len, k_seq_len = attention_scores.shape
204
+ attention_scores += self.alibi_bias(q_seq_len,
205
+ k_seq_len) # auto-broadcasting to (b, num_heads, q_seq_len, k_seq_len)
206
+
207
+ _, _, _, q_head_dim = queries.shape
208
+ _, _, _, v_head_dim = values.shape
209
+ hiddens = self.num_heads * v_head_dim
210
+
211
+ attention_scores = attention_scores / math.sqrt(q_head_dim)
212
+
213
+ if attention_mask is not None:
214
+ # Apply the attention mask (precomputed for all layers in PerceiverModel forward() function)
215
+ attention_scores = attention_scores + attention_mask
216
+
217
+ # Normalize the attention scores to probabilities.
218
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
219
+
220
+ # This is actually dropping out entire tokens to attend to, which might
221
+ # seem a bit unusual, but is taken from the original Transformer paper.
222
+ attention_probs = self.dropout(attention_probs)
223
+
224
+ # Mask heads if we want to
225
+ if head_mask is not None:
226
+ attention_probs = attention_probs * head_mask
227
+
228
+ context_layer = torch.matmul(attention_probs, values)
229
+
230
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
231
+ new_context_layer_shape = context_layer.size()[:-2] + (hiddens,)
232
+ context_layer = context_layer.view(*new_context_layer_shape)
233
+
234
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
235
+
236
+ return outputs
237
+
238
+
239
+ class PerceiverAlibiAttention(nn.Module):
240
+ """
241
+ Attention module, including a dense block + Alibi
242
+ : modified from PerceiverAttention in modeling_perceiver.py to support Alibi positional bias
243
+ """
244
+
245
+ def __init__(
246
+ self,
247
+ config,
248
+ is_cross_attention=False,
249
+ qk_channels=None,
250
+ v_channels=None,
251
+ num_heads=1,
252
+ q_dim=None,
253
+ kv_dim=None,
254
+ use_query_residual=True,
255
+ rotary_emb=None,
256
+ ):
257
+ super().__init__()
258
+ # MultiHead attention
259
+ if is_cross_attention and qk_channels is None:
260
+ if config.cross_attention_shape_for_attention == "q":
261
+ qk_channels = q_dim
262
+ elif config.cross_attention_shape_for_attention == "kv":
263
+ qk_channels = kv_dim
264
+ else:
265
+ raise ValueError(f"Unknown value {config.cross_attention_shape_for_attention} for "
266
+ "cross_attention_shape_for_attention.")
267
+ else:
268
+ if qk_channels is None:
269
+ qk_channels = q_dim
270
+ if v_channels is None:
271
+ v_channels = qk_channels
272
+ self.self = PerceiverAlibiSelfAttention(config,
273
+ is_cross_attention=is_cross_attention,
274
+ qk_channels=qk_channels,
275
+ v_channels=v_channels,
276
+ num_heads=num_heads,
277
+ q_dim=q_dim,
278
+ kv_dim=kv_dim,
279
+ rotary_emb=rotary_emb)
280
+ # dense block
281
+ output_channels = None
282
+ if is_cross_attention:
283
+ output_channels = q_dim
284
+ else:
285
+ if output_channels is None:
286
+ output_channels = v_channels
287
+ self.output = PerceiverSelfOutput(config, input_channels=self.self.v_channels, output_channels=output_channels)
288
+ self.use_query_residual = use_query_residual
289
+ self.pruned_heads = set()
290
+
291
+ def prune_heads(self, heads):
292
+ if len(heads) == 0:
293
+ return
294
+ heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads,
295
+ self.self.attention_head_size, self.pruned_heads)
296
+
297
+ # Prune linear layers
298
+ self.self.query = prune_linear_layer(self.self.query, index)
299
+ self.self.key = prune_linear_layer(self.self.key, index)
300
+ self.self.value = prune_linear_layer(self.self.value, index)
301
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
302
+
303
+ # Update hyper params and store pruned heads
304
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
305
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
306
+ self.pruned_heads = self.pruned_heads.union(heads)
307
+
308
+ def forward(
309
+ self,
310
+ hidden_states: torch.Tensor,
311
+ attention_mask: Optional[torch.FloatTensor] = None,
312
+ head_mask: Optional[torch.FloatTensor] = None,
313
+ inputs: Optional[torch.FloatTensor] = None,
314
+ inputs_mask: Optional[torch.FloatTensor] = None,
315
+ output_attentions: Optional[bool] = False,
316
+ ) -> Tuple[torch.Tensor]:
317
+ self_outputs = self.self(
318
+ hidden_states,
319
+ attention_mask,
320
+ head_mask,
321
+ inputs,
322
+ inputs_mask,
323
+ output_attentions,
324
+ )
325
+
326
+ # Output projection
327
+ attention_output = self.output(self_outputs[0])
328
+
329
+ # Optionally include a residual to the original queries.
330
+ # Consider omitting the residual if the semantics of query and output
331
+ # are different, e.g. if queries are positions and outputs are pixels.
332
+ if self.use_query_residual:
333
+ attention_output = attention_output + hidden_states
334
+
335
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
336
+ return outputs
337
+
338
+
339
+ class PerceiverAlibiLayer(nn.Module):
340
+ """Construct a single PerceiverTF layer with:
341
+ - Alibi positional bias
342
+ - RoPE
343
+ - Mixtral of Experts (MoE) feedforward layer
344
+
345
+ """
346
+
347
+ def __init__(
348
+ self,
349
+ config,
350
+ is_cross_attention=False,
351
+ qk_channels=None,
352
+ v_channels=None,
353
+ num_heads=1,
354
+ q_dim=None,
355
+ kv_dim=None,
356
+ widening_factor=1,
357
+ use_query_residual=True,
358
+ rotary_emb=None,
359
+ ):
360
+ super().__init__()
361
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
362
+ self.seq_len_dim = 1
363
+ self.attention = PerceiverAlibiAttention(config,
364
+ is_cross_attention=is_cross_attention,
365
+ qk_channels=qk_channels,
366
+ v_channels=v_channels,
367
+ num_heads=num_heads,
368
+ q_dim=q_dim,
369
+ kv_dim=kv_dim,
370
+ use_query_residual=use_query_residual,
371
+ rotary_emb=rotary_emb)
372
+ self.layernorm = get_layer_norm(q_dim, config.layer_norm_type, config.layer_norm_eps)
373
+ # self.layernorm = nn.LayerNorm(q_dim)
374
+ self.mlp = get_ff_layer(config, input_size=q_dim, widening_factor=widening_factor)
375
+
376
+ def forward(
377
+ self,
378
+ hidden_states: torch.Tensor,
379
+ attention_mask: Optional[torch.FloatTensor] = None,
380
+ head_mask: Optional[torch.FloatTensor] = None,
381
+ inputs: Optional[torch.FloatTensor] = None,
382
+ inputs_mask: Optional[torch.FloatTensor] = None,
383
+ output_attentions: Optional[bool] = False,
384
+ ) -> Tuple[torch.Tensor]:
385
+ attention_outputs = self.attention(
386
+ hidden_states,
387
+ attention_mask,
388
+ head_mask,
389
+ inputs,
390
+ inputs_mask,
391
+ output_attentions,
392
+ )
393
+ attention_output = attention_outputs[0]
394
+
395
+ outputs = attention_outputs[1:] # add attentions if we output attention weights
396
+ """apply_chunking_to_forward:
397
+ This function chunks the input_tensors into smaller input tensor parts of size
398
+ chunk_size over the dimension chunk_dim. It then applies a layer forward_fn to
399
+ each chunk independently to save memory.If the forward_fn is independent across
400
+ the chunk_dim this function will yield the same result as not applying it.
401
+ """
402
+ layer_output, router_logits = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward,
403
+ self.seq_len_dim, attention_output)
404
+
405
+ layer_output = layer_output + attention_output # residual connection
406
+ outputs = (layer_output,) + outputs + (router_logits,) # add router_logits to outputs
407
+ return outputs
408
+
409
+ def feed_forward_chunk(self, attention_output):
410
+ layer_output = self.layernorm(attention_output)
411
+ layer_output, router_logits = self.mlp(layer_output) # router_logits is returned only when using MoE.
412
+ return layer_output, router_logits
413
+
414
+
415
+ class PerceiverTFEncoderBlock(nn.Module):
416
+ """Construct a single block of PerceiverTF encoder:
417
+ - Spectral Cross Attention (SCA)
418
+ - Local latent transformer layers
419
+ - Temporal transformer layers
420
+ - added Alibi positional bias, RoPE, gMLP and MoE feedforward layer
421
+ """
422
+
423
+ def __init__(self,
424
+ config: PerceiverTFConfig,
425
+ kv_dim: Optional[int] = None,
426
+ sca_use_query_residual: bool = True,
427
+ rotary_emb_sca: Optional[nn.Module] = None,
428
+ rotary_emb_latent: Optional[nn.Module] = None,
429
+ rotary_emb_temporal: Optional[nn.Module] = None):
430
+ super().__init__()
431
+ self.config = config
432
+
433
+ # Check that we can use multihead-attention with these shapes.
434
+ if config.d_latents % config.num_self_attention_heads != 0:
435
+ raise ValueError(f"num_z_channels ({config.d_latents}) must be divisible by"
436
+ f" num_self_attend_heads ({config.num_self_attention_heads}).")
437
+ if config.d_latents % config.num_cross_attention_heads != 0:
438
+ raise ValueError(f"num_z_channels ({config.d_latents}) must be divisible by"
439
+ f" num_cross_attend_heads ({config.num_cross_attention_heads}).")
440
+
441
+ if kv_dim is None:
442
+ kv_dim = config.kv_dim
443
+ if sca_use_query_residual is None:
444
+ sca_use_query_residual = config.sca_use_query_residual
445
+
446
+ # Spectral Cross Attention (SCA) layer.
447
+ self.sca_attention_to_channel = config.attention_to_channel
448
+ self.spectral_cross_attention = PerceiverAlibiAttention(config,
449
+ is_cross_attention=True,
450
+ qk_channels=config.qk_channels,
451
+ v_channels=config.v_channels,
452
+ num_heads=config.num_cross_attention_heads,
453
+ q_dim=config.d_latents,
454
+ kv_dim=kv_dim,
455
+ use_query_residual=sca_use_query_residual,
456
+ rotary_emb=rotary_emb_sca) # (Modified) RoPE
457
+
458
+ # Local latent transformer layers.
459
+ local_transformer_layers = []
460
+ for _ in range(config.num_local_transformers_per_block):
461
+ layer = PerceiverAlibiLayer(
462
+ config,
463
+ is_cross_attention=False,
464
+ qk_channels=config.qk_channels, # projection dim for q and k.
465
+ v_channels=config.v_channels, # projection dim for v.
466
+ num_heads=config.num_self_attention_heads,
467
+ q_dim=config.d_model,
468
+ kv_dim=config.d_model,
469
+ widening_factor=config.ff_widening_factor,
470
+ use_query_residual=config.use_query_residual,
471
+ rotary_emb=rotary_emb_latent # (Modified) RoPE
472
+ )
473
+ local_transformer_layers.append(layer)
474
+ self.local_transformer = nn.ModuleList(local_transformer_layers)
475
+
476
+ # Temporal transformer layers.
477
+ temporal_transformer_layers = []
478
+ for _ in range(config.num_temporal_transformers_per_block):
479
+ layer = PerceiverAlibiLayer(
480
+ config,
481
+ is_cross_attention=False,
482
+ qk_channels=config.qk_channels, # projection dim for q and k.
483
+ v_channels=config.v_channels, # projection dim for v.
484
+ num_heads=config.num_self_attention_heads,
485
+ q_dim=config.d_model,
486
+ kv_dim=config.d_model,
487
+ widening_factor=config.ff_widening_factor,
488
+ use_query_residual=config.use_query_residual,
489
+ rotary_emb=rotary_emb_temporal # (Modified) RoPE
490
+ )
491
+ temporal_transformer_layers.append(layer)
492
+ self.temporal_transformer = nn.ModuleList(temporal_transformer_layers)
493
+
494
+ def forward(
495
+ self,
496
+ hidden_states: torch.Tensor,
497
+ inputs: Optional[torch.FloatTensor] = None,
498
+ inputs_mask: Optional[torch.FloatTensor] = None,
499
+ local_attention_mask: Optional[torch.FloatTensor] = None,
500
+ temporal_attention_mask: Optional[torch.FloatTensor] = None,
501
+ local_head_mask: Optional[torch.FloatTensor] = None,
502
+ temporal_head_mask: Optional[torch.FloatTensor] = None,
503
+ pos_emb_temporal: Optional[torch.FloatTensor] = None,
504
+ output_attentions: Optional[bool] = False,
505
+ output_hidden_states: Optional[bool] = False,
506
+ output_router_logits: Optional[bool] = False, # Only used for MoE.
507
+ return_dict: Optional[bool] = True,
508
+ ) -> Union[Tuple, MoEModelOutputWithCrossAttentions]:
509
+ """
510
+ Inputs:
511
+ hidden_states: (B, T, K, D)
512
+ inputs: (B, T, F, C)
513
+ Returns:
514
+ hidden_states: (B, T, K, D)
515
+
516
+ Args:
517
+ hidden_states:
518
+ latent_array (B, T, num_latents, d_latents) for SCA. The latent array
519
+ with shape (B, K, D) is expanded by t, and positional embeddings are
520
+ added to it.
521
+ inputs: torch.FloatTensor
522
+ The input sequence of shape (B, T, F, C).
523
+ inputs_mask: torch.FloatTensor
524
+ Only used for SCA. By default, None.
525
+ local_attention_mask:
526
+ Used for local self-attention. By default, None.
527
+ temporal_attention_mask:
528
+ Used for temporal self-attention. By default, None.
529
+ local_head_mask:
530
+ By default, None.
531
+ temporal_head_mask:
532
+ By default, None.
533
+ pos_emb_temporal:
534
+ Optional. Used for temporal self-attention. By default, None. Shape: (max_t, d_latents)
535
+ output_attentions: bool
536
+ Whether to return attentions weights.
537
+ output_hidden_states: bool
538
+ Whether to return all hidden states. If False, only last hidden
539
+ state is returned.
540
+ output_router_logits: bool
541
+ Whether to return router logits for MoE. If False, only last hidden
542
+ state is returned.
543
+ return_dict: bool
544
+ Whether to return a MoEModelOutputWithCrossAttentions instead of a tuple.
545
+ """
546
+
547
+ all_hidden_states = () if output_hidden_states else None
548
+ all_self_attentions = () if output_attentions else None
549
+ all_cross_attentions = () if output_attentions else None
550
+ all_router_logits = () if output_router_logits else None
551
+
552
+ # Collect dimension info
553
+ batch_size, t, num_latents, d_latents = hidden_states.size() # (B, T, K, D)
554
+
555
+ # if self.sca_attention_to_channel:
556
+ # _, _, _, f = inputs.size() # (B, T, C, F)
557
+ # assert d_latents == f, "d_latents must be equal to kv_dim, which is input frequency dim."
558
+ # else:
559
+ # _, _, _, c = inputs.size() # (B, T, F, C)
560
+ # assert d_latents == c, "d_latents must be equal to kv_dim, which is input channels."
561
+
562
+ # Reshape (B, T, _, _) to (B*T, _, _) for SCA and local transformer.
563
+ hidden_states = rearrange(hidden_states, "b t k d -> (b t) k d")
564
+ inputs = rearrange(inputs, "b t f c -> (b t) f c")
565
+
566
+ # Apply the SCA between the latents (hidden_states) and inputs:
567
+ layer_outputs = self.spectral_cross_attention(
568
+ hidden_states,
569
+ attention_mask=None, # Input_mask is used instead for cross-attention
570
+ inputs=inputs,
571
+ inputs_mask=inputs_mask,
572
+ output_attentions=output_attentions,
573
+ )
574
+ hidden_states = layer_outputs[0] # (B*T, K, D)
575
+
576
+ if output_attentions:
577
+ all_cross_attentions = all_cross_attentions + (layer_outputs[1],)
578
+
579
+ # Apply the block of local latent transformer layers.
580
+ for i, layer_module in enumerate(self.local_transformer):
581
+ if output_hidden_states:
582
+ all_hidden_states = all_hidden_states + (hidden_states,)
583
+
584
+ layer_head_mask = local_head_mask[i] if local_head_mask is not None else None
585
+ layer_outputs = layer_module(
586
+ hidden_states,
587
+ attention_mask=local_attention_mask,
588
+ head_mask=layer_head_mask,
589
+ output_attentions=output_attentions,
590
+ )
591
+ hidden_states = layer_outputs[0] # (B*T, K, D)
592
+ if output_attentions:
593
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
594
+ if output_router_logits:
595
+ all_router_logits = all_router_logits + (layer_outputs[2],)
596
+
597
+ if output_hidden_states:
598
+ all_hidden_states = all_hidden_states + (hidden_states,)
599
+
600
+ # Reshape (B*T, K, D) to (B*K, T, D) for the temporal transformer.
601
+ hidden_states = rearrange(hidden_states, "(b t) k d -> (b k) t d", b=batch_size)
602
+
603
+ # Apply the block of temporal transformer layers.
604
+ for i, layer_module in enumerate(self.temporal_transformer):
605
+ if output_hidden_states:
606
+ all_hidden_states = all_hidden_states + (hidden_states,)
607
+
608
+ layer_head_mask = temporal_head_mask[i] if temporal_head_mask is not None else None
609
+
610
+ if i == 0 and pos_emb_temporal is not None:
611
+ # Add temporal positional embeddings to the hidden_states.
612
+ hidden_states = hidden_states + pos_emb_temporal[:t] # pos_emb_temporal: (T, D)
613
+
614
+ layer_outputs = layer_module(
615
+ hidden_states,
616
+ attention_mask=temporal_attention_mask,
617
+ head_mask=layer_head_mask,
618
+ output_attentions=output_attentions,
619
+ )
620
+ hidden_states = layer_outputs[0]
621
+ if output_attentions:
622
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
623
+ if output_router_logits:
624
+ all_router_logits = all_router_logits + (layer_outputs[2],)
625
+
626
+ if output_hidden_states:
627
+ all_hidden_states = all_hidden_states + (hidden_states,)
628
+
629
+ last_hidden_state = hidden_states
630
+ # Reshape (B*K, T, D) to (B, T, K, D) for the next block.
631
+ last_hidden_state = rearrange(last_hidden_state, "(b k) t d -> b t k d", b=batch_size)
632
+
633
+ # Prepare the outputs.
634
+ if not return_dict:
635
+ return tuple(
636
+ v for v in
637
+ [last_hidden_state, all_hidden_states, all_self_attentions, all_cross_attentions, all_router_logits]
638
+ if v is not None)
639
+ return MoEModelOutputWithCrossAttentions(
640
+ last_hidden_state=last_hidden_state,
641
+ hidden_states=all_hidden_states,
642
+ attentions=all_self_attentions,
643
+ cross_attentions=all_cross_attentions,
644
+ router_logits=all_router_logits,
645
+ )
646
+
647
+
648
+ class PerceiverTFEncoder(PerceiverTFPreTrainedModel):
649
+ """PerceiverTFEncoder is an encoder model based on the Perceiver and Spectral Cross Attention (SCA).
650
+
651
+ position_encoding_type: str
652
+ The type of positional encoding to use. One of the following:
653
+ - 'trainable': trainable positional embeddings
654
+ - 'alibi': ALiBi positional bias
655
+ - 'alibit': ALiBi positional bias with trainable slopes for each head
656
+ - 'rope': RoPE (Rotary Positional Encoding)
657
+ (experimental w/ 'trainable')
658
+ - 'tkd': trainable PE (T,K,D) on latent (default for 'trainable')
659
+ - 'td': trainable PE (T,D) on latent
660
+ - 'tk': trainable PE (T,K) on latent
661
+ - 'kdt': trainable PE (K,D) on latent, and (T,) on temporal transformer
662
+
663
+ """
664
+
665
+ def __init__(self,
666
+ config: PerceiverTFConfig,
667
+ sca_use_query_residual: Optional[bool] = None,
668
+ shared_emb: Optional[nn.Embedding] = None):
669
+ super().__init__(config)
670
+ self.config = config
671
+
672
+ if sca_use_query_residual is None:
673
+ self.sca_use_query_residual = config.sca_use_query_residual # True by default
674
+ self.position_encoding_type = config.position_encoding_type
675
+ self.sca_attention_to_channel = config.attention_to_channel
676
+
677
+ # Construct a latent array.
678
+ self.latent_array = PerceiverEmbeddings(config) # (num_latents, d_latents)
679
+
680
+ # Positional embeddings for the latent array.
681
+ if self.position_encoding_type == 'rope':
682
+ # (Modified) RoPE
683
+ self.rotary_emb_sca = get_rotary_emb(config.num_cross_attention_heads, config.rope_type_sca,
684
+ config.rope_partial_pe, config.rope_trainable)
685
+ self.rotary_emb_latent = get_rotary_emb(config.num_cross_attention_heads, config.rope_type_latent,
686
+ config.rope_partial_pe, config.rope_trainable)
687
+ self.rotary_emb_temporal = get_rotary_emb(config.num_cross_attention_heads, config.rope_type_temporal,
688
+ config.rope_partial_pe, config.rope_trainable)
689
+ else:
690
+ self.rotary_emb_sca = None
691
+ self.rotary_emb_latent = None
692
+ self.rotary_emb_temporal = None
693
+
694
+ if self.position_encoding_type in ['alibi', 'alibit', 'rope', None]:
695
+ # alibi is implemented within PerceiverAlibiSelfAttention, and activated by config.
696
+ # RoPE is implemented without using self.pos_emb.
697
+ self.pos_emb = None
698
+ else:
699
+ k, d = self.latent_array.latents.size()
700
+ max_t = int(config.num_max_positions) + 10 # 10 is headroom for future task tokens...
701
+ self.pos_emb = PerceiverTFTrainablePE(self.position_encoding_type, max_t, k, d)
702
+ """
703
+ self.pos_emb() returns:
704
+ pos_emb: (max_t, K, D)
705
+ pos_emb_temporal: (max_t, D)
706
+ """
707
+
708
+ # Construct the encoder blocks.
709
+ blocks = []
710
+ for _ in range(config.num_blocks):
711
+ block = PerceiverTFEncoderBlock(
712
+ config,
713
+ kv_dim=config.kv_dim,
714
+ sca_use_query_residual=sca_use_query_residual,
715
+ rotary_emb_sca=self.rotary_emb_sca, # (Modified) RoPE
716
+ rotary_emb_latent=self.rotary_emb_latent,
717
+ rotary_emb_temporal=self.rotary_emb_temporal)
718
+ blocks.append(block)
719
+ self.blocks = nn.ModuleList(blocks)
720
+
721
+ # Initialize weights and apply final processing
722
+ self.post_init()
723
+
724
+ def get_input_embeddings(self):
725
+ return self.latent_array.latents
726
+
727
+ def set_input_embeddings(self, value):
728
+ self.latent_array.latents = value
729
+
730
+ """temporary fix for torch.compile issue"""
731
+
732
+ def forward(self, **kwargs):
733
+ if self.training is True:
734
+ return self._forward_compile(**kwargs)
735
+ else:
736
+ return self._forward_no_compile(**kwargs)
737
+
738
+ def _forward_no_compile(self, **kwargs):
739
+ return self._forward(**kwargs)
740
+
741
+ @torch.compile
742
+ def _forward_compile(self, **kwargs):
743
+ return self._forward(**kwargs)
744
+
745
+ def _forward(
746
+ self,
747
+ inputs: Optional[torch.FloatTensor] = None, # (B, T, F, kv_dim)
748
+ inputs_embeds: Optional[torch.FloatTensor] = None, # (B, T, F, kv_dim)
749
+ inputs_mask: Optional[torch.FloatTensor] = None, # (B, F) Mask freq. of inputs in SCA.
750
+ local_attention_mask: Optional[torch.FloatTensor] = None, # (B, K)
751
+ temporal_attention_mask: Optional[torch.FloatTensor] = None, # (B, T)
752
+ local_head_mask: Optional[torch.FloatTensor] = None,
753
+ temporal_head_mask: Optional[torch.FloatTensor] = None,
754
+ output_attentions: Optional[bool] = None,
755
+ output_hidden_states: Optional[bool] = None,
756
+ output_router_logits: Optional[bool] = None,
757
+ return_dict: Optional[bool] = None,
758
+ ) -> Union[Tuple, MoEModelOutputWithCrossAttentions]:
759
+ # Inputs and inputs_embeds are tied, and actually the same. (following T5 convention)
760
+ # Inputs are convolutional features computed from audio.
761
+ # Don't be confused with the latent embeddings (`self.latent_array.latents`), which are
763
+ # used as the hidden_states of each block.
763
+ if inputs is None and inputs_embeds is not None:
764
+ inputs = inputs_embeds
765
+ elif inputs is None and inputs_embeds is None:
766
+ raise ValueError("You must provide 'inputs' or 'inputs_embeds' argument.")
767
+
768
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
769
+ output_hidden_states = (output_hidden_states
770
+ if output_hidden_states is not None else self.config.output_hidden_states)
771
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
772
+
773
+ batch_size, t, _f, _c = inputs.size()
774
+ device = inputs.device
775
+
776
+ # SCA attention to channels of inputs, instead of frequency bins.
777
+ if self.sca_attention_to_channel is True:
778
+ inputs = rearrange(inputs, "b t f c -> b t c f")
779
+
780
+ # Prepare head mask if needed
781
+ # 1.0 in head_mask indicate we keep the head
782
+ # attention_probs has shape bsz x n_heads x N x N
783
+ # input head_mask has shape [num_heads] or [num_blocks x num_heads]
784
+ # and head_mask is converted to shape [num_blocks x batch x num_heads x N x N]
785
+ local_head_mask = self.get_head_mask(local_head_mask,
786
+ self.config.num_blocks * self.config.num_local_transformers_per_block)
787
+ temporal_head_mask = self.get_head_mask(
788
+ temporal_head_mask, self.config.num_blocks * self.config.num_temporal_transformers_per_block)
789
+
790
+ # Prepare attention mask: not implemented
791
+
792
+ # Expand the latent embeddings by t: (B, K, D) --> (B, T, K, D)
793
+ latent_embeddings = self.latent_array(batch_size=batch_size) # (B, num_latents, d_latents)
794
+ expanded_latent_embeddings = latent_embeddings.unsqueeze(1).expand(-1, t, -1, -1)
795
+
796
+ # Add positional embeddings to the expanded latent embeddings: (B, T, K, D)
797
+ if self.pos_emb is not None:
798
+ pos_emb_latent, pos_emb_temporal = self.pos_emb.forward()
799
+ expanded_latent_embeddings = expanded_latent_embeddings + pos_emb_latent[:t]
800
+ # (max_t, K, D) -> (T, K, D) -> (B, T, K, D) auto-broadcasting
801
+ else:
802
+ pos_emb_temporal = None
803
+
804
+ # Lists to store intermediate outputs if required
805
+ all_hidden_states = []
806
+ all_attentions = []
807
+ all_cross_attentions = []
808
+ all_router_logits = []
809
+
810
+ hidden_states = expanded_latent_embeddings
811
+
812
+ # Forward-pass
813
+ for i, block in enumerate(self.blocks):
814
+ block_output = block(hidden_states=hidden_states,
815
+ inputs=inputs,
816
+ inputs_mask=inputs_mask,
817
+ local_attention_mask=local_attention_mask,
818
+ temporal_attention_mask=temporal_attention_mask,
819
+ local_head_mask=local_head_mask,
820
+ temporal_head_mask=temporal_head_mask,
821
+ pos_emb_temporal=pos_emb_temporal if i == 0 else None,
822
+ output_attentions=output_attentions,
823
+ output_hidden_states=output_hidden_states,
824
+ output_router_logits=output_router_logits,
825
+ return_dict=True)
826
+
827
+ # Update the hidden_states for the next block
828
+ hidden_states = block_output.last_hidden_state
829
+
830
+ # Append to lists if required
831
+ if output_hidden_states:
832
+ all_hidden_states.append(hidden_states)
833
+ if output_attentions:
834
+ all_attentions.append(block_output.attentions)
835
+ all_cross_attentions.append(block_output.cross_attentions)
836
+ if output_router_logits:
837
+ all_router_logits.append(block_output.router_logits)
838
+ last_hidden_states = hidden_states
839
+
840
+ # Prepare outputs
841
+ if not return_dict:
842
+ # Convert lists to tuples
843
+ return (last_hidden_states, tuple(all_hidden_states) if all_hidden_states else None,
844
+ tuple(all_attentions) if all_attentions else None,
845
+ tuple(all_cross_attentions) if all_cross_attentions else None,
846
+ tuple(all_router_logits) if all_router_logits else None)
847
+
848
+ return MoEModelOutputWithCrossAttentions(
849
+ last_hidden_state=last_hidden_states,
850
+ hidden_states=tuple(all_hidden_states) if all_hidden_states else None,
851
+ attentions=tuple(all_attentions) if all_attentions else None,
852
+ cross_attentions=tuple(all_cross_attentions) if all_cross_attentions else None,
853
+ router_logits=tuple(all_router_logits) if all_router_logits else None)
854
+
855
+
856
+ def test():
857
+ # In HuggingFace's Perceiver implementation:
858
+ # `q_dim` is the latent array dimension d_latents of ((B), num_latents, d_latents).
859
+ # `kv_dim` is the actual input dimension D of (B, T, D)
860
+ # `qk_channels`, `v_channels`: are projection dimensions for attention, (B, T, C)
861
+ # (B, T, D) --> projection --> (B, T, C)
862
+ # However, PerceiverTF does not require projection:
863
+ # It takes as input a latent tensor (B, num_latents, d_latents) and a conv_feat tensor (B, T, F, C).
864
+ # The `spectral-cross-attention` and `local-self-attention-transformer` take as input (B*T, F, C),
865
+ # and C=D=d_latents.
866
+ from model.ops import count_parameters
867
+
868
+ # Test input
869
+ b = 2 # batch
870
+ t = 10 # time steps (330 for 6s in paper)
871
+ f = 128 # freq of conv_feat
872
+ c = 128 # channels of conv_feat
873
+ k = 24 # num_latents
874
+ d = 128 # d_latents
875
+ conv_feat = torch.randn(b, t, f, c)
876
+
877
+ # construct PerceiverTFEncoder
878
+ config = PerceiverTFConfig()
879
+ pe_types = ['alibi', 'alibit', 'trainable', 'tkd', 'td', 'tk', 'kdt', None]
880
+ config.ff_layer_type = 'moe'
881
+ config.moe_num_experts = 4
882
+ config.moe_topk = 2
883
+
884
+ for pe_type in pe_types:
885
+ config.position_encoding_type = pe_type # 'alibi', 'alibit', 'trainable', 'tkd', 'td', 'tk', 'kdt', None
886
+ config.num_latents = k
887
+ config.d_latents = d
888
+ config.kv_dim = c
889
+ config.qk_channels = d
890
+ config.v_channels = d
891
+ encoder = PerceiverTFEncoder(config)
892
+ encoder.eval()
893
+ assert encoder.latent_array.latents.size() == (k, d)
894
+ # forward
895
+ enc_hidden_state = encoder.forward(inputs_embeds=conv_feat).last_hidden_state
896
+ # print(enc_hidden_state.shape) # [2, 10, 24, 128] = [B, T, K, D]
897
+ n_param = count_parameters(encoder)[1] // 1000
898
+ print(config.position_encoding_type, f'num_param: {n_param}K')
899
+ """
900
+ PE type | num. param.
901
+ None | 1397K
902
+ alibi | 1397K
903
+ alibit (train slope) | 1397K
904
+ tkd | 2442K
905
+ td | 1441K
906
+ tk | 1405K
907
+ kdt | 1444K
908
+
909
+ MLP | 2637K
910
+ MoE (4 experts) | 4411K
911
+ MoE (6 experts) | 5594K
912
+ """
model/projection_layer.py ADDED
@@ -0,0 +1,331 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ """ projection_layer.py """
11
+ from typing import Tuple
12
+
13
+ import math
14
+ import torch
15
+ from torch import nn
16
+ import torch.nn.functional as F
17
+ from torch.nn import Linear, LayerNorm
18
+
19
+ from einops import rearrange
20
+ from model.ops import count_parameters
21
+
22
+
23
+ class GroupLinearFlatten(nn.Module):
24
+ """
25
+ Implements a grouped linear layer with a flattened output.
26
+
27
+ This module applies individual linear transformations for each group in the input tensor
28
+ and then flattens the group dimension to produce the final output. It's useful when you
29
+ have distinct groups in the input tensor and you want separate linear transformations for
30
+ each of these groups.
31
+
32
+ Args:
33
+ - in_features (int): The number of input features per group.
34
+ - flatten_out_features (int): The total number of flattened output features. This value must
35
+ be divisible by num_groups. The actual number of output features
36
+ per group is computed as flatten_out_features/num_groups.
37
+ - num_groups (int): The number of distinct groups in the input tensor.
38
+ - use_bmm (bool, optional): Whether to use batch matrix multiplication for computation.
39
+ Default is True.
40
+
41
+ Shape:
42
+ - Input: (batch_size, sequence_length, num_groups, in_features)
43
+ - Output: (batch_size, sequence_length, flatten_out_features)
44
+
45
+ Examples:
46
+ >>> m = GroupLinearFlatten(128, 512, 24)  # (in_features, flatten_out_features, num_groups)
47
+ >>> input = torch.randn(16, 10, 24, 128) # (B, T, C, F)
48
+ >>> output = m(input)
49
+ >>> output.size()
50
+ torch.Size([16, 10, 512]) # (B, T, D)
51
+ """
52
+
53
+ def __init__(self, in_features, flatten_out_features, num_groups, use_bmm=True):
54
+ super().__init__()
55
+ self.in_features = in_features
56
+ self.flatten_out_features = flatten_out_features
57
+ self.num_groups = num_groups
58
+ self.use_bmm = use_bmm
59
+
60
+ # Assuming flatten_out_features is divisible by num_groups
61
+ self.out_features_per_group = self.flatten_out_features // self.num_groups
62
+
63
+ # Each group gets its own weights
64
+ self.weight = nn.Parameter(torch.Tensor(num_groups, self.out_features_per_group, in_features))
65
+ self.reset_parameters()
66
+
67
+ def reset_parameters(self):
68
+ nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
69
+
70
+ def forward(self, input):
71
+ # input shape: (batch, seq_length, groups, in_features)
72
+ # weight shape: (groups, out_features_per_group, in_features)
73
+
74
+ batch_size, t, k, source_d = input.size()
75
+
76
+ if self.use_bmm:
77
+ # Reshape input for bmm operation
78
+ input_reshaped = rearrange(input, 'b t k d -> k d (b t)')
79
+
80
+ # Matrix multiplication: dot((k, out_features_per_group, d), (k, d, b*t)) -> (k, out_features_per_group, b*t)
81
+ output_bmm = torch.bmm(self.weight, input_reshaped)
82
+
83
+ # Reshape back to original shape and flatten the group dimension
84
+ output = rearrange(output_bmm, 'k d_out (b t) -> b t (k d_out)', b=batch_size, t=t, k=k)
85
+ else:
86
+ output = torch.einsum('bsgi,goi->bsgo', input, self.weight)
87
+ output = rearrange(output, 'b t k d_out -> b t (k d_out)')
88
+
89
+ return output
90
+
91
+
92
+ # class MultiChannelGroupLinear(nn.Module):
93
+ # """ Not Implemented Yet """
94
+ # def __init__(self, in_ch=26, in_dim=128, out_ch=13, out_dim=512):
95
+ # super().__init__()
96
+
97
+ # self.in_ch = in_ch
98
+ # self.in_dim = in_dim
99
+ # self.out_ch = out_ch
100
+ # self.out_dim = out_dim
101
+ # self.in_ch_per_group = in_ch // out_ch
102
+
103
+ # self.layer = GroupLinearFlatten(in_features=)
104
+
105
+
106
+ class MultiChannelLinearProjection(nn.Module):
107
+
108
+ def __init__(self, in_ch=26, in_dim=128, out_ch=13, out_dim=512):
109
+ super().__init__()
110
+ self.in_ch = in_ch
111
+ self.in_dim = in_dim
112
+ self.out_ch = out_ch
113
+ self.out_dim = out_dim
114
+
115
+ self.in_ch_per_group = in_ch // out_ch
116
+ self.linear_in_ch = in_ch // self.in_ch_per_group
117
+ self.linear_in_dim = in_dim * self.in_ch_per_group
118
+
119
+ # Reshaped input shape: (b, in_ch//in_ch_per_group, t, in_dim*in_ch_per_group)
120
+ # Output shape: (b, out_ch, t, out_dim)
121
+ if in_dim * self.in_ch_per_group == out_dim:
122
+ self.linear = nn.Identity()
123
+ else:
124
+ self.linear = nn.Linear(in_features=self.linear_in_dim, out_features=out_dim, bias=False)
125
+
126
+ def forward(self, x):
127
+ """
128
+ Args:
129
+ x: (B, T, C, D)
130
+
131
+ Returns:
132
+ x: (B, C_target, T, D_target)
133
+ """
134
+ x = rearrange(x, 'b t (c1 c2) d -> b c1 t (c2 d)', c1=self.linear_in_ch, c2=self.in_ch_per_group)
135
+ return self.linear(x)
136
+
137
+
138
+ def get_multi_channel_projection_layer(input_shape: Tuple[int], output_shape: Tuple[int], proj_type: str) -> nn.Module:
139
+ """ This function returns one of the projection layers for multi-channel models."""
140
+ in_ch = input_shape[-2]
141
+ in_dim = input_shape[-1]
142
+ out_ch = output_shape[-2]
143
+ out_dim = output_shape[-1]
144
+
145
+ if proj_type == 'mc_shared_linear':
146
+ return MultiChannelLinearProjection(in_ch, in_dim, out_ch, out_dim)
147
+
148
+
149
+ def test_multi_channel_linear_projection():
150
+ x = torch.randn(2, 10, 26, 128) # (b, t, c, d)
151
+ mclp = MultiChannelLinearProjection(in_ch=26, in_dim=128, out_ch=13, out_dim=256) # actually nn.Identity()
152
+ assert type(nn.Identity()) == type(mclp.linear)
153
+ assert mclp(x).shape == (2, 13, 10, 256) # (b, _c, t, _d)
154
+
155
+ x = torch.randn(2, 10, 26, 128) # (b, t, c, d)
156
+ mclp = MultiChannelLinearProjection(in_ch=26, in_dim=128, out_ch=13, out_dim=512) # this one is a real nn.Linear(256, 512)
157
+ assert torch.nn.modules.linear.Linear == type(mclp.linear)
158
+ assert mclp(x).shape == (2, 13, 10, 512) # (b, _c, t, _d)
159
+
160
+
161
+ class FlattenMLP(nn.Module):
162
+
163
+ def __init__(self, in_features, flatten_out_features, num_groups, hidden_dim=None, activation=None):
164
+ super().__init__()
165
+
166
+ self.in_features = in_features
167
+ self.num_groups = num_groups
168
+
169
+ # Calculate flattened input dimension
170
+ self.flat_in_dim = in_features * num_groups
171
+ if hidden_dim is None:
172
+ hidden_dim = self.flat_in_dim // 2
173
+ self.hidden_dim = hidden_dim
174
+
175
+ # Check if flatten_out_features is divisible by in_features
176
+ assert flatten_out_features % in_features == 0, "flatten_out_features should be divisible by in_features."
177
+
178
+ # Define layers
179
+ self.layers = nn.Sequential(nn.Flatten(2, 3), nn.Linear(self.flat_in_dim, hidden_dim), nn.LayerNorm(hidden_dim),
180
+ activation() if activation else nn.Identity(), nn.Linear(hidden_dim, hidden_dim),
181
+ nn.LayerNorm(hidden_dim),
182
+ activation() if activation else nn.Identity(),
183
+ nn.Linear(hidden_dim, flatten_out_features))
184
+
185
+ def forward(self, x):
186
+ # x shape: (batch, seq, num_groups, in_features)
187
+ return self.layers(x)
188
+
189
+
190
+ class LinearProjection(nn.Module):
191
+
192
+ def __init__(self, in_features, flatten_out_features, num_groups):
193
+ super().__init__()
194
+
195
+ # Calculate flattened input dimension
196
+ self.flat_in_dim = in_features * num_groups
197
+ self.projection_layer = nn.Linear(in_features=self.flat_in_dim, out_features=flatten_out_features, bias=False)
198
+
199
+ def forward(self, x):
200
+ # x shape: (batch, seq, num_groups, in_features)
201
+ batch_size, t, _, _ = x.size()
202
+ x_flattened = x.reshape(batch_size, t, -1) # Flattening num_groups and in_features
203
+ return self.projection_layer(x_flattened)
204
+
205
+
206
+ class DepthwiseConvProjection(nn.Module):
207
+
208
+ def __init__(self, in_features, flatten_out_features, num_groups, depth):
209
+ super().__init__()
210
+ d_out = flatten_out_features // in_features
211
+
212
+ self.conv = nn.Conv2d(in_channels=num_groups,
213
+ out_channels=num_groups * d_out,
214
+ kernel_size=(1, depth),
215
+ groups=num_groups)
216
+
217
+ self.fc = nn.Linear(num_groups * d_out * (in_features - depth + 1), flatten_out_features)
218
+
219
+ def forward(self, x):
220
+ # Swap the dimensions of k and t to match expected input for depthwise convolution
221
+ x = x.permute(0, 2, 1, 3) # shape: (b, k, t, d)
222
+
223
+ # Convolutional layer
224
+ x = self.conv(x) # shape: (b, k*d_out, t, d-depth+1)
225
+
226
+ # Reshape the tensor for the Linear layer
227
+ batch_size, _, t, _ = x.size()
228
+ x = x.reshape(batch_size, t, -1)
229
+ return self.fc(x)
230
+
231
+
232
+ def get_projection_layer(input_shape: Tuple[int], output_shape: Tuple[int], proj_type: str) -> nn.Module:
233
+ """ This function returns one of the projection layers defined below. """
234
+ if len(input_shape) == 2:
235
+ _, d_source = input_shape
236
+ elif len(input_shape) == 3:
237
+ _, k_source, d_source = input_shape
238
+ if len(output_shape) == 2:
239
+ _, d_target = output_shape
240
+ elif len(output_shape) == 3:
241
+ _, k_target, d_target = output_shape
242
+
243
+ if 'linear' == proj_type:
244
+ return LinearProjection(in_features=d_source, flatten_out_features=d_target, num_groups=k_source)
245
+ elif 'mlp' in proj_type:
246
+ if 'gelu' in proj_type:
247
+ return FlattenMLP(in_features=d_source,
248
+ flatten_out_features=d_target,
249
+ num_groups=k_source,
250
+ activation=nn.GELU)
251
+ elif 'relu' in proj_type:
252
+ return FlattenMLP(in_features=d_source,
253
+ flatten_out_features=d_target,
254
+ num_groups=k_source,
255
+ activation=nn.ReLU)
256
+ else:
257
+ return FlattenMLP(in_features=d_source, flatten_out_features=d_target, num_groups=k_source, activation=None)
258
+ elif 'conv' in proj_type:
259
+ if 'conv4' == proj_type:
260
+ return DepthwiseConvProjection(in_features=d_source,
261
+ flatten_out_features=d_target,
262
+ num_groups=k_source,
263
+ depth=4)
264
+ elif 'conv16' == proj_type:
265
+ return DepthwiseConvProjection(in_features=d_source,
266
+ flatten_out_features=d_target,
267
+ num_groups=k_source,
268
+ depth=16)
269
+ elif 'conv32' == proj_type:
270
+ return DepthwiseConvProjection(in_features=d_source,
271
+ flatten_out_features=d_target,
272
+ num_groups=k_source,
273
+ depth=32)
274
+ elif 'conv64' == proj_type:
275
+ return DepthwiseConvProjection(in_features=d_source,
276
+ flatten_out_features=d_target,
277
+ num_groups=k_source,
278
+ depth=64)
279
+ else: # conv depth 1
280
+ return DepthwiseConvProjection(in_features=d_source,
281
+ flatten_out_features=d_target,
282
+ num_groups=k_source,
283
+ depth=1)
284
+ elif 'group_linear' == proj_type:
285
+ assert d_source % k_source == 0, "d_source and k_source must be divisible for group_linear projection."
286
+ return GroupLinearFlatten(in_features=d_source,
287
+ flatten_out_features=d_target,
288
+ num_groups=k_source,
289
+ use_bmm=True)
290
+ else:
291
+ raise ValueError(f"Invalid projection type: {proj_type}")
292
+
293
+
294
+ def test_projection_layers():
295
+ # encoder hidden states: (B, T, K, D)
296
+ b = 2
297
+ t = 110 #10
298
+ k = 24 #16
299
+ d = 128
300
+ enc_hs = torch.randn(b, t, k, d)
301
+
302
+ # target shape: (B, T, K, D//4)
303
+ target_flatten_d = 512
304
+
305
+ # GroupLinear
306
+ gl = GroupLinearFlatten(in_features=d, flatten_out_features=target_flatten_d, num_groups=k, use_bmm=True)
307
+ enc_hs_hat = gl(enc_hs)
308
+ assert enc_hs_hat.shape == (b, t, target_flatten_d)
309
+ print('GroupLinear: ', f'{count_parameters(gl)//1000}k') # 65k
310
+
311
+ # FlattenMLP
312
+ fm = FlattenMLP(in_features=d,
313
+ flatten_out_features=target_flatten_d,
314
+ num_groups=k,
315
+ hidden_dim=None,
316
+ activation=nn.GELU)
317
+ enc_hs_hat = fm(enc_hs)
318
+ assert enc_hs_hat.shape == (b, t, target_flatten_d)
319
+ print('FlattenMLP: ', f'{count_parameters(fm)//1000}k') # 3.6M
320
+
321
+ # LinearProjection
322
+ lp = LinearProjection(in_features=d, flatten_out_features=target_flatten_d, num_groups=k)
323
+ enc_hs_hat = lp(enc_hs)
324
+ assert enc_hs_hat.shape == (b, t, target_flatten_d)
325
+ print('LinearProjection: ', f'{count_parameters(lp)//1000}k') # 1M
326
+
327
+ # DepthwiseConvProjection
328
+ dc = DepthwiseConvProjection(in_features=d, flatten_out_features=target_flatten_d, num_groups=k, depth=16)
329
+ enc_hs_hat = dc(enc_hs)
330
+ assert enc_hs_hat.shape == (b, t, target_flatten_d)
331
+ print('DepthwiseConvProjection: ', f'{count_parameters(dc)//1000}k') # 4M
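These projection layers are what ymt3.py (the next file) installs as its `pre_decoder`, bridging the PerceiverTF encoder output and the T5 decoder input. A minimal, shape-only sketch of that handoff, using a random tensor in place of real encoder output (illustrative; the import path assumes the repository layout used elsewhere in this commit):

    import torch
    from model.projection_layer import get_projection_layer

    enc_hs = torch.randn(2, 10, 24, 128)                    # stand-in for encoder output (B, T, K, D)
    proj = get_projection_layer(input_shape=(10, 24, 128),  # (T, K, D_source)
                                output_shape=(10, 512),     # (T, D_target)
                                proj_type='linear')
    dec_in = proj(enc_hs)                                   # (B, T, 512), fed to the T5-style decoder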
model/ymt3.py ADDED
@@ -0,0 +1,967 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ """ymt3.py"""
11
+ import os
12
+ from typing import Union, Optional, Tuple, Dict, List, Any
13
+ from collections import Counter
14
+
15
+ import torch
16
+ import torch.nn as nn
17
+ from torch.nn import CrossEntropyLoss
18
+ import torchaudio # for debugging audio
19
+ import pytorch_lightning as pl
20
+ import numpy as np
21
+ import wandb
22
+ from einops import rearrange
23
+
24
+ from transformers import T5Config
25
+ from model.t5mod import T5EncoderYMT3, T5DecoderYMT3, MultiChannelT5Decoder
26
+ from model.t5mod_helper import task_cond_dec_generate
27
+ from model.perceiver_mod import PerceiverTFEncoder
28
+ from model.perceiver_helper import PerceiverTFConfig
29
+ from model.conformer_mod import ConformerYMT3Encoder
30
+ from model.conformer_helper import ConformerYMT3Config
31
+ from model.lm_head import LMHead
32
+ from model.pitchshift_layer import PitchShiftLayer
33
+ from model.spectrogram import get_spectrogram_layer_from_audio_cfg
34
+ from model.conv_block import PreEncoderBlockRes3B
35
+ from model.conv_block import PreEncoderBlockHFTT, PreEncoderBlockRes3BHFTT # added for hFTT-like pre-encoder
36
+ from model.projection_layer import get_projection_layer, get_multi_channel_projection_layer
37
+ from model.optimizers import get_optimizer
38
+ from model.lr_scheduler import get_lr_scheduler
39
+
40
+ from utils.note_event_dataclasses import Note
41
+ from utils.note2event import mix_notes
42
+ from utils.event2note import merge_zipped_note_events_and_ties_to_notes, DECODING_ERR_TYPES
43
+ from utils.metrics import compute_track_metrics
44
+ from utils.metrics import AMTMetrics
45
+ # from utils.utils import write_model_output_as_npy
46
+ from utils.utils import write_model_output_as_midi, create_inverse_vocab, write_err_cnt_as_json
47
+ from utils.utils import Timer
48
+ from utils.task_manager import TaskManager
49
+
50
+ from config.config import audio_cfg as default_audio_cfg
51
+ from config.config import model_cfg as default_model_cfg
52
+ from config.config import shared_cfg as default_shared_cfg
53
+ from config.config import T5_BASE_CFG
54
+
55
+
56
+ class YourMT3(pl.LightningModule):
57
+ """YourMT3:
58
+
59
+ Lightning wrapper for multi-task music transcription Transformer.
60
+
61
+ """
62
+
63
+ def __init__(
64
+ self,
65
+ audio_cfg: Optional[Dict] = None,
66
+ model_cfg: Optional[Dict] = None,
67
+ shared_cfg: Optional[Dict] = None,
68
+ pretrained: bool = False,
69
+ optimizer_name: str = 'adamwscale',
70
+ scheduler_name: str = 'cosine',
71
+ base_lr: float = None, # None: 'auto' for AdaFactor, 1e-3 for constant, 1e-2 for cosine
72
+ max_steps: Optional[int] = None,
73
+ weight_decay: float = 0.0,
74
+ init_factor: Optional[Union[str, float]] = None,
75
+ task_manager: TaskManager = TaskManager(),
76
+ eval_subtask_key: Optional[str] = "default",
77
+ eval_vocab: Optional[Dict] = None,
78
+ eval_drum_vocab: Optional[Dict] = None,
79
+ write_output_dir: Optional[str] = None,
80
+ write_output_vocab: Optional[Dict] = None,
81
+ onset_tolerance: float = 0.05,
82
+ add_pitch_class_metric: Optional[List[str]] = None,
83
+ add_melody_metric_to_singing: bool = True,
84
+ test_optimal_octave_shift: bool = False,
85
+ test_pitch_shift_layer: Optional[str] = None,
86
+ **kwargs: Any) -> None:
87
+ super().__init__()
88
+ if pretrained is True:
89
+ raise NotImplementedError("Pretrained model is not supported in this version.")
90
+ self.test_pitch_shift_layer = test_pitch_shift_layer # debug only
91
+
92
+ # Config
93
+ if model_cfg is None:
94
+ model_cfg = default_model_cfg # default config, not overwritten by args of trainer
95
+ if audio_cfg is None:
96
+ audio_cfg = default_audio_cfg # default config, not overwritten by args of trainer
97
+ if shared_cfg is None:
98
+ shared_cfg = default_shared_cfg # default config, not overwritten by args of trainer
99
+
100
+ # Spec Layer (need to define here to infer max token length)
101
+ self.spectrogram, spec_output_shape = get_spectrogram_layer_from_audio_cfg(
102
+ audio_cfg) # can be spec or melspec; output_shape is (T, F)
103
+ model_cfg["feat_length"] = spec_output_shape[0] # T of (T, F)
104
+
105
+ # Task manger and Tokens
106
+ self.task_manager = task_manager
107
+ self.max_total_token_length = self.task_manager.max_total_token_length
108
+
109
+ # Task Conditioning
110
+ self.use_task_cond_encoder = bool(model_cfg["use_task_conditional_encoder"])
111
+ self.use_task_cond_decoder = bool(model_cfg["use_task_conditional_decoder"])
112
+
113
+ # Select Encoder type, Model-specific Config
114
+ assert model_cfg["encoder_type"] in ["t5", "perceiver-tf", "conformer"]
115
+ assert model_cfg["decoder_type"] in ["t5", "multi-t5"]
116
+ self.encoder_type = model_cfg["encoder_type"] # {"t5", "perceiver-tf", "conformer"}
117
+ self.decoder_type = model_cfg["decoder_type"] # {"t5", "multi-t5"}
118
+ encoder_config = model_cfg["encoder"][self.encoder_type] # mutable
119
+ decoder_config = model_cfg["decoder"][self.decoder_type] # mutable
120
+
121
+ # Positional Encoding
122
+ if isinstance(model_cfg["num_max_positions"], str) and model_cfg["num_max_positions"] == 'auto':
123
+ encoder_config["num_max_positions"] = int(model_cfg["feat_length"] +
124
+ self.task_manager.max_task_token_length + 10)
125
+ decoder_config["num_max_positions"] = int(self.max_total_token_length + 10)
126
+ else:
127
+ assert isinstance(model_cfg["num_max_positions"], int)
128
+ encoder_config["num_max_positions"] = model_cfg["num_max_positions"]
129
+ decoder_config["num_max_positions"] = model_cfg["num_max_positions"]
130
+
131
+ # Select Pre-Encoder and Pre-Decoder type
132
+ if model_cfg["pre_encoder_type"] == "default":
133
+ model_cfg["pre_encoder_type"] = model_cfg["pre_encoder_type_default"].get(model_cfg["encoder_type"], None)
134
+ elif model_cfg["pre_encoder_type"] in [None, "none", "None", "0"]:
135
+ model_cfg["pre_encoder_type"] = None
136
+ if model_cfg["pre_decoder_type"] == "default":
137
+ model_cfg["pre_decoder_type"] = model_cfg["pre_decoder_type_default"].get(model_cfg["encoder_type"]).get(
138
+ model_cfg["decoder_type"], None)
139
+ elif model_cfg["pre_decoder_type"] in [None, "none", "None", "0"]:
140
+ model_cfg["pre_decoder_type"] = None
141
+ self.pre_encoder_type = model_cfg["pre_encoder_type"]
142
+ self.pre_decoder_type = model_cfg["pre_decoder_type"]
143
+
144
+ # Pre-encoder
145
+ self.pre_encoder = nn.Sequential()
146
+ if self.pre_encoder_type in ["conv", "conv1d_t", "conv1d_f"]:
147
+ kernel_size = (3, 3)
148
+ avp_kernel_size = (1, 2)
149
+ if self.pre_encoder_type == "conv1d_t":
150
+ kernel_size = (3, 1)
151
+ elif self.pre_encoder_type == "conv1d_f":
152
+ kernel_size = (1, 3)
153
+ self.pre_encoder.append(
154
+ PreEncoderBlockRes3B(1,
155
+ model_cfg["conv_out_channels"],
156
+ kernel_size=kernel_size,
157
+ avp_kernerl_size=avp_kernel_size,
158
+ activation="relu"))
159
+ pre_enc_output_shape = (spec_output_shape[0], spec_output_shape[1] // 2**3, model_cfg["conv_out_channels"]
160
+ ) # (T, F, C) excluding batch dim
161
+ elif self.pre_encoder_type == "hftt":
162
+ self.pre_encoder.append(PreEncoderBlockHFTT())
163
+ pre_enc_output_shape = (spec_output_shape[0], spec_output_shape[1], 128) # (T, F, C) excluding batch dim
164
+ elif self.pre_encoder_type == "res3b_hftt":
165
+ self.pre_encoder.append(PreEncoderBlockRes3BHFTT())
166
+ pre_enc_output_shape = (spec_output_shape[0], spec_output_shape[1] // 2**3, 128)
167
+ else:
168
+ pre_enc_output_shape = spec_output_shape # (T, F) excluding batch dim
169
+
170
+ # Auto-infer `d_feat` and `d_model`, `vocab_size`, and `num_max_positions`
171
+ if isinstance(model_cfg["d_feat"], str) and model_cfg["d_feat"] == 'auto':
172
+ if self.encoder_type == "perceiver-tf" and encoder_config["attention_to_channel"] is True:
173
+ model_cfg["d_feat"] = pre_enc_output_shape[-2] # TODO: better readablity
174
+ else:
175
+ model_cfg["d_feat"] = pre_enc_output_shape[-1] # C of (T, F, C) or F or (T, F)
176
+
177
+ if self.encoder_type == "perceiver-tf" and isinstance(encoder_config["d_model"], str):
178
+ if encoder_config["d_model"] == 'q':
179
+ encoder_config["d_model"] = encoder_config["d_latent"]
180
+ elif encoder_config["d_model"] == 'kv':
181
+ encoder_config["d_model"] = model_cfg["d_feat"]
182
+ else:
183
+ raise ValueError(f"Unknown d_model: {encoder_config['d_model']}")
184
+
185
+ # # required for PerceiverTF with attention_to_channel option
186
+ # if self.encoder_type == "perceiver-tf":
187
+ # if encoder_config["attention_to_channel"] is True:
188
+ # encoder_config["kv_dim"] = model_cfg["d_feat"] # TODO: better readablity
189
+ # else:
190
+ # encoder_config["kv_dim"] = model_cfg["conv_out_channels"]
191
+
192
+ if isinstance(model_cfg["vocab_size"], str) and model_cfg["vocab_size"] == 'auto':
193
+ model_cfg["vocab_size"] = task_manager.num_tokens
194
+
195
+ if isinstance(model_cfg["num_max_positions"], str) and model_cfg["num_max_positions"] == 'auto':
196
+ model_cfg["num_max_positions"] = int(
197
+ max(model_cfg["feat_length"], model_cfg["event_length"]) + self.task_manager.max_task_token_length + 10)
198
+
199
+ # Pre-decoder
200
+ self.pre_decoder = nn.Sequential()
201
+ if self.encoder_type == "perceiver-tf" and self.decoder_type == "t5":
202
+ t, f, c = pre_enc_output_shape # perceiver-tf: (110, 128, 128) for 2s
203
+ encoder_output_shape = (t, encoder_config["num_latents"], encoder_config["d_latent"]) # (T, K, D_source)
204
+ decoder_input_shape = (t, decoder_config["d_model"]) # (T, D_target)
205
+ proj_layer = get_projection_layer(input_shape=encoder_output_shape,
206
+ output_shape=decoder_input_shape,
207
+ proj_type=self.pre_decoder_type)
208
+ self.pre_encoder_output_shape = pre_enc_output_shape
209
+ self.encoder_output_shape = encoder_output_shape
210
+ self.decoder_input_shape = decoder_input_shape
211
+ self.pre_decoder.append(proj_layer)
212
+ elif self.encoder_type in ["t5", "conformer"] and self.decoder_type == "t5":
213
+ pass
214
+ elif self.encoder_type == "perceiver-tf" and self.decoder_type == "multi-t5":
215
+ # NOTE: this is experimental, only for multi-channel decoding with 13 classes
216
+ assert encoder_config["num_latents"] % decoder_config["num_channels"] == 0
217
+ encoder_output_shape = (encoder_config["num_latents"], encoder_config["d_model"])
218
+ decoder_input_shape = (decoder_config["num_channels"], decoder_config["d_model"])
219
+ proj_layer = get_multi_channel_projection_layer(input_shape=encoder_output_shape,
220
+ output_shape=decoder_input_shape,
221
+ proj_type=self.pre_decoder_type)
222
+ self.pre_decoder.append(proj_layer)
223
+ else:
224
+ raise NotImplementedError(
225
+ f"Encoder type {self.encoder_type} and decoder type {self.decoder_type} is not implemented yet.")
226
+
227
+ # Positional Encoding, Vocab, etc.
228
+ if self.encoder_type in ["t5", "conformer"]:
229
+ encoder_config["num_max_positions"] = decoder_config["num_max_positions"] = model_cfg["num_max_positions"]
230
+ else: # perceiver-tf uses separate positional encoding
231
+ encoder_config["num_max_positions"] = model_cfg["feat_length"]
232
+ decoder_config["num_max_positions"] = model_cfg["num_max_positions"]
233
+ encoder_config["vocab_size"] = decoder_config["vocab_size"] = model_cfg["vocab_size"]
234
+
235
+ # Print and save updated configs
236
+ self.audio_cfg = audio_cfg
237
+ self.model_cfg = model_cfg
238
+ self.shared_cfg = shared_cfg
239
+ self.save_hyperparameters()
240
+ if self.global_rank == 0:
241
+ print(self.hparams)
242
+
243
+ # Encoder and Decoder and LM-head
244
+ self.encoder = None
245
+ self.decoder = None
246
+ self.lm_head = LMHead(decoder_config, 1.0, model_cfg["tie_word_embeddings"])
247
+ self.embed_tokens = nn.Embedding(decoder_config["vocab_size"], decoder_config["d_model"])
248
+ self.embed_tokens.weight.data.normal_(mean=0.0, std=1.0)
249
+ self.shift_right_fn = None
250
+ self.set_encoder_decoder() # shift_right_fn is also set here
251
+
252
+ # Model as ModuleDict
253
+ # self.model = nn.ModuleDict({
254
+ # "pitchshift": self.pitchshift, # no grad; created in setup() only for training,
255
+ # and called by training_step()
256
+ # "spectrogram": self.spectrogram, # no grad
257
+ # "pre_encoder": self.pre_encoder,
258
+ # "encoder": self.encoder,
259
+ # "pre_decoder": self.pre_decoder,
260
+ # "decoder": self.decoder,
261
+ # "embed_tokens": self.embed_tokens,
262
+ # "lm_head": self.lm_head,
263
+ # })
264
+
265
+ # Tables (for logging)
266
+ columns = ['Ep', 'Track ID', 'Pred Events', 'Actual Events', 'Pred Notes', 'Actual Notes']
267
+ self.sample_table = wandb.Table(columns=columns)
268
+
269
+ # Output MIDI
270
+ if write_output_dir is not None:
271
+ if write_output_vocab is None:
272
+ from config.vocabulary import program_vocab_presets
273
+ self.midi_output_vocab = program_vocab_presets["gm_ext_plus"]
274
+ else:
275
+ self.midi_output_vocab = write_output_vocab
276
+ self.midi_output_inverse_vocab = create_inverse_vocab(self.midi_output_vocab)
277
+
278
+ def set_encoder_decoder(self) -> None:
279
+ """Set encoder, decoder, lm_head and emb_tokens from self.model_cfg"""
280
+
281
+ # Generate and update T5Config
282
+ t5_basename = self.model_cfg["t5_basename"]
283
+ if t5_basename in T5_BASE_CFG.keys():
284
+ # Load from pre-defined config in config.py
285
+ t5_config = T5Config(**T5_BASE_CFG[t5_basename])
286
+ else:
287
+ # Load from HuggingFace hub
288
+ t5_config = T5Config.from_pretrained(t5_basename)
289
+
290
+ # Create encoder, decoder, lm_head and embed_tokens
291
+ if self.encoder_type == "t5":
292
+ self.encoder = T5EncoderYMT3(self.model_cfg["encoder"]["t5"], t5_config)
293
+ elif self.encoder_type == "perceiver-tf":
294
+ perceivertf_config = PerceiverTFConfig()
295
+ perceivertf_config.update(self.model_cfg["encoder"]["perceiver-tf"])
296
+ self.encoder = PerceiverTFEncoder(perceivertf_config)
297
+ elif self.encoder_type == "conformer":
298
+ conformer_config = ConformerYMT3Config()
299
+ conformer_config.update(self.model_cfg["encoder"]["conformer"])
300
+ self.encoder = ConformerYMT3Encoder(conformer_config)
301
+
302
+ if self.decoder_type == "t5":
303
+ self.decoder = T5DecoderYMT3(self.model_cfg["decoder"]["t5"], t5_config)
304
+ elif self.decoder_type == "multi-t5":
305
+ self.decoder = MultiChannelT5Decoder(self.model_cfg["decoder"]["multi-t5"], t5_config)
306
+
307
+ # `shift_right` function for decoding
308
+ self.shift_right_fn = self.decoder._shift_right
309
+
310
+ def setup(self, stage: str) -> None:
311
+ # Defining metrics
312
+ if self.hparams.eval_vocab is None:
313
+ extra_classes_per_dataset = [None]
314
+ else:
315
+ extra_classes_per_dataset = [
316
+ list(v.keys()) if v is not None else None for v in self.hparams.eval_vocab
317
+ ] # e.g. [['Piano'], ['Guitar'], ['Piano'], ['Piano', 'Strings', 'Winds'], None]
318
+
319
+ # For direct addition of extra metrics using full metric name
320
+ extra_metrics = None
321
+ if self.hparams.add_melody_metric_to_singing is True:
322
+ extra_metrics = ["melody_rpa_Singing Voice", "melody_rca_Singing Voice", "melody_oa_Singing Voice"]
323
+
324
+ # Add pitch class metric
325
+ if self.hparams.add_pitch_class_metric is not None:
326
+ for sublist in extra_classes_per_dataset:
327
+ for name in self.hparams.add_pitch_class_metric:
328
+ if sublist is not None and name in sublist:
329
+ sublist += [name + "_pc"]
330
+
331
+ extra_classes_unique = list(
332
+ set(item for sublist in extra_classes_per_dataset if sublist is not None
333
+ for item in sublist)) # e.g. ['Strings', 'Winds', 'Guitar', 'Piano']
334
+ dm = self.trainer.datamodule
335
+
336
+ # Train/Validation-only
337
+ if stage == "fit":
338
+ self.val_metrics_macro = AMTMetrics(prefix=f'validation/macro_', extra_classes=extra_classes_unique)
339
+ self.val_metrics = nn.ModuleList() # val_metric is a list of AMTMetrics objects
340
+ for i in range(dm.num_val_dataloaders):
341
+ self.val_metrics.append(
342
+ AMTMetrics(prefix=f'validation/({dm.get_val_dataset_name(i)})',
343
+ extra_classes=extra_classes_per_dataset[i],
344
+ error_types=DECODING_ERR_TYPES))
345
+
346
+ # Add pitchshift layer
347
+ if self.shared_cfg["AUGMENTATION"]["train_pitch_shift_range"] in [None, [0, 0]]:
348
+ self.pitchshift = None
349
+ else:
350
+ # torchaudio pitchshifter requires a dummy input for initialization in DDP
351
+ input_shape = (self.shared_cfg["BSZ"]["train_local"], 1, self.audio_cfg["input_frames"])
352
+ self.pitchshift = PitchShiftLayer(
353
+ pshift_range=self.shared_cfg["AUGMENTATION"]["train_pitch_shift_range"],
354
+ expected_input_shape=input_shape,
355
+ device=self.device)
356
+
357
+ # Test-only
358
+ elif stage == "test":
359
+ # self.test_metrics_macro = AMTMetrics(
360
+ # prefix=f'test/macro_', extra_classes=extra_classes_unique)
361
+ self.test_metrics = nn.ModuleList()
362
+ for i in range(dm.num_test_dataloaders):
363
+ self.test_metrics.append(
364
+ AMTMetrics(prefix=f'test/({dm.get_test_dataset_name(i)})',
365
+ extra_classes=extra_classes_per_dataset[i],
366
+ extra_metrics=extra_metrics,
367
+ error_types=DECODING_ERR_TYPES))
368
+
369
+ # Test pitch shift layer: debug only
370
+ if self.test_pitch_shift_layer is not None:
371
+ self.test_pitch_shift_semitone = int(self.test_pitch_shift_layer)
372
+ self.pitchshift = PitchShiftLayer(
373
+ pshift_range=[self.test_pitch_shift_semitone, self.test_pitch_shift_semitone])
374
+
375
+ def configure_optimizers(self) -> None:
376
+ """Configure optimizer and scheduler"""
377
+ optimizer, base_lr = get_optimizer(models_dict=self.named_parameters(),
378
+ optimizer_name=self.hparams.optimizer_name,
379
+ base_lr=self.hparams.base_lr,
380
+ weight_decay=self.hparams.weight_decay)
381
+
382
+ if self.hparams.optimizer_name.lower() == 'adafactor' and self.hparams.base_lr == None:
383
+ print("Using AdaFactor with auto learning rate and no scheduler")
384
+ return [optimizer]
385
+ if self.hparams.optimizer_name.lower() == 'dadaptadam':
386
+ print("Using dAdaptAdam with auto learning rate and no scheduler")
387
+ return [optimizer]
388
+ elif self.hparams.base_lr == None:
389
+ print(f"Using default learning rate {base_lr} of {self.hparams.optimizer_name} as base learning rate.")
390
+ self.hparams.base_lr = base_lr
391
+
392
+ scheduler_cfg = self.shared_cfg["LR_SCHEDULE"]
393
+ if self.hparams.max_steps != -1:
394
+ # overwrite total_steps
395
+ scheduler_cfg["total_steps"] = self.hparams.max_steps
396
+ _lr_scheduler = get_lr_scheduler(optimizer,
397
+ scheduler_name=self.hparams.scheduler_name,
398
+ base_lr=base_lr,
399
+ scheduler_cfg=scheduler_cfg)
400
+
401
+ lr_scheduler = {'scheduler': _lr_scheduler, 'interval': 'step', 'frequency': 1}
402
+ return [optimizer], [lr_scheduler]
403
+
404
+ def forward(
405
+ self,
406
+ x: torch.FloatTensor,
407
+ target_tokens: torch.LongTensor,
408
+ # task_tokens: Optional[torch.LongTensor] = None,
409
+ **kwargs) -> Dict:
410
+ """ Forward pass with teacher-forcing for training and validation.
411
+ Args:
412
+ x: (B, 1, T) waveform with default T=32767
413
+ target_tokens: (B, C, N) tokenized sequence of length N=event_length
414
+ task_tokens: (B, C, task_len) tokenized task
415
+
416
+ Returns:
417
+ {
418
+ 'logits': (B, N + task_len + 1, vocab_size)
419
+ 'loss': (1, )
420
+ }
421
+
422
+ NOTE: the commented shapes correspond to the original MT3 setup.
423
+ """
424
+ x = self.spectrogram(x) # mel-/spectrogram: (b, 256, 512) or (B, T, F)
425
+ x = self.pre_encoder(x) # projection to d_model: (B, 256, 512)
426
+
427
+ # TODO: task_cond_encoder would not work properly because of 3-d task_tokens
428
+ # if task_tokens is not None and task_tokens.numel() > 0 and self.use_task_cond_encoder is True:
429
+ # # append task embedding to encoder input
430
+ # task_embed = self.embed_tokens(task_tokens) # (B, task_len, 512)
431
+ # x = torch.cat([task_embed, x], dim=1) # (B, task_len + 256, 512)
432
+ enc_hs = self.encoder(inputs_embeds=x)["last_hidden_state"] # (B, T', D)
433
+ enc_hs = self.pre_decoder(enc_hs) # (B, T', D) or (B, K, T, D)
434
+
435
+ # if task_tokens is not None and task_tokens.numel() > 0 and self.use_task_cond_decoder is True:
436
+ # # append task token to decoder input and output label
437
+ # labels = torch.cat([task_tokens, target_tokens], dim=2) # (B, C, task_len + N)
438
+ # else:
439
+ # labels = target_tokens # (B, C, N)
440
+ labels = target_tokens # (B, C, N)
441
+ if labels.shape[1] == 1: # for single-channel decoders, e.g. t5.
442
+ labels = labels.squeeze(1) # (B, N)
443
+
444
+ dec_input_ids = self.shift_right_fn(labels) # t5:(B, N), multi-t5:(B, C, N)
445
+ dec_inputs_embeds = self.embed_tokens(dec_input_ids) # t5:(B, N, D), multi-t5:(B, C, N, D)
446
+ dec_hs, _ = self.decoder(inputs_embeds=dec_inputs_embeds, encoder_hidden_states=enc_hs, return_dict=False)
447
+
448
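+ # T5 convention: with tied input/output embeddings, decoder states are rescaled by d_model**-0.5 before the LM head.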
+ if self.model_cfg["tie_word_embeddings"] is True:
449
+ dec_hs = dec_hs * (self.model_cfg["decoder"][self.decoder_type]["d_model"]**-0.5)
450
+
451
+ logits = self.lm_head(dec_hs)
452
+
453
+ loss = None
454
+ labels = labels.masked_fill(labels == 0, value=-100) # ignore pad tokens for loss
455
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
456
+ loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
457
+ return {"logits": logits, "loss": loss}
458
+
459
+ def inference(self,
460
+ x: torch.FloatTensor,
461
+ task_tokens: Optional[torch.LongTensor] = None,
462
+ max_token_length: Optional[int] = None,
463
+ **kwargs: Any) -> torch.Tensor:
464
+ """ Inference from audio batch by cached autoregressive decoding.
465
+ Args:
466
+ x: (b, 1, t) waveform with t=32767
467
+ task_tokens: (b, c, task_len) tokenized task. If None, task embeddings are not appended to the input.
468
+ max_token_length: Maximum length of the generated sequence. If None, defaults to self.max_total_token_length.
469
+ **kwargs: https://huggingface.co/docs/transformers/v4.27.2/en/main_classes/text_generation#transformers.GenerationMixin.generate
470
+
471
+ Returns:
472
+ res_tokens: (b, n) resulting tokenized sequence of variable length < max_length
473
+ """
474
+ if self.test_pitch_shift_layer is not None:
475
+ x_ps = self.pitchshift(x, self.test_pitch_shift_semitone)
476
+ x = x_ps
477
+
478
+ # From spectrogram to pre-decoder is the same pipeline as in forward()
479
+ x = self.spectrogram(x) # mel-/spectrogram: (b, 256, 512) or (B, T, F)
480
+ x = self.pre_encoder(x) # projection to d_model: (B, 256, 512)
481
+ if task_tokens is not None and task_tokens.numel() > 0 and self.use_task_cond_encoder is True:
482
+ # append task embedding to encoder input
483
+ task_embed = self.embed_tokens(task_tokens) # (B, task_len, 512)
484
+ x = torch.cat([task_embed, x], dim=1) # (B, task_len + 256, 512)
485
+ enc_hs = self.encoder(inputs_embeds=x)["last_hidden_state"] # (B, task_len + 256, 512)
486
+ enc_hs = self.pre_decoder(enc_hs) # (B, task_len + 256, 512)
487
+
488
+ # Cached-autoregressive decoding with task token (can be None) as prefix
489
+ if max_token_length is None:
490
+ max_token_length = self.max_total_token_length
491
+
492
+ pred_ids = task_cond_dec_generate(decoder=self.decoder,
493
+ decoder_type=self.decoder_type,
494
+ embed_tokens=self.embed_tokens,
495
+ lm_head=self.lm_head,
496
+ encoder_hidden_states=enc_hs,
497
+ shift_right_fn=self.shift_right_fn,
498
+ prefix_ids=task_tokens,
499
+ max_length=max_token_length) # (B, task_len + N) or (B, C, task_len + N)
500
+ if pred_ids.dim() == 2:
501
+ pred_ids = pred_ids.unsqueeze(1) # (B, 1, task_len + N)
502
+
503
+ if self.test_pitch_shift_layer is None:
504
+ return pred_ids
505
+ else:
506
+ return pred_ids, x_ps
507
+
508
+ def inference_file(
509
+ self,
510
+ bsz: int,
511
+ audio_segments: torch.FloatTensor, # (n_items, 1, segment_len): from a single file
512
+ note_token_array: Optional[torch.LongTensor] = None,
513
+ task_token_array: Optional[torch.LongTensor] = None,
514
+ # subtask_key: Optional[str] = "default"
515
+ ) -> Tuple[List[np.ndarray], Optional[torch.Tensor]]:
516
+ """ Inference from audio batch by autoregressive decoding:
517
+ Args:
518
+ bsz: batch size
519
+ audio_segments: (n_items, 1, segment_len): segmented audio from a single file
520
+ note_token_array: (n_items, max_token_len): Optional. If token_array is None, will not return loss.
521
+ subtask_key: (str): If None, not using subtask prefix. By default, using "default" defined in task manager.
522
+ """
523
+ # if subtask_key is not None:
524
+ # _subtask_token = torch.LongTensor(
525
+ # self.task_manager.get_eval_subtask_prefix_dict()[subtask_key]).to(self.device)
526
+
527
+ n_items = audio_segments.shape[0]
528
+ loss = 0.
529
+ pred_token_array_file = [] # each element is (B, C, L) np.ndarray
530
+ x_ps_concat = []
531
+
532
+ for i in range(0, n_items, bsz):
533
+ if i + bsz > n_items: # last batch can be smaller
534
+ x = audio_segments[i:n_items].to(self.device)
535
+ # if subtask_key is not None:
536
+ # b = n_items - i # bsz for the last batch
537
+ # task_tokens = _subtask_token.expand((b, -1)) # (b, task_len)
538
+ if note_token_array is not None:
539
+ target_tokens = note_token_array[i:n_items].to(self.device)
540
+ if task_token_array is not None and task_token_array.numel() > 0:
541
+ task_tokens = task_token_array[i:n_items].to(self.device)
542
+ else:
543
+ task_tokens = None
544
+ else:
545
+ x = audio_segments[i:i + bsz].to(self.device) # (bsz, 1, segment_len)
546
+ # if subtask_key is not None:
547
+ # task_tokens = _subtask_token.expand((bsz, -1)) # (bsz, task_len)
548
+ if note_token_array is not None:
549
+ target_tokens = note_token_array[i:i + bsz].to(self.device) # (bsz, token_len)
550
+ if task_token_array is not None and task_token_array.numel() > 0:
551
+ task_tokens = task_token_array[i:i + bsz].to(self.device)
552
+ else:
553
+ task_tokens = None
554
+
555
+ # token prediction (fast-autoregressive decoding)
556
+ # if subtask_key is not None:
557
+ # preds = self.inference(x, task_tokens).detach().cpu().numpy()
558
+ # else:
559
+ # preds = self.inference(x).detach().cpu().numpy()
560
+
561
+ if self.test_pitch_shift_layer is not None: # debug only
562
+ preds, x_ps = self.inference(x, task_tokens)
563
+ preds = preds.detach().cpu().numpy()
564
+ x_ps_concat.append(x_ps.detach().cpu())
565
+ else:
566
+ preds = self.inference(x, task_tokens).detach().cpu().numpy()
567
+ if len(preds) != len(x):
568
+ raise ValueError(f'preds: {len(preds)}, x: {len(x)}')
569
+ pred_token_array_file.append(preds)
570
+
571
+ # validation loss (by teacher forcing)
572
+ if note_token_array is not None:
573
+ loss_weight = x.shape[0] / n_items
574
+ loss += self(x, target_tokens)['loss'] * loss_weight
575
+ # loss += self(x, target_tokens, task_tokens)['loss'] * loss_weight
576
+ else:
577
+ loss = None
578
+
579
+ if self.test_pitch_shift_layer is not None: # debug only
580
+ if self.hparams.write_output_dir is not None:
581
+ x_ps_concat = torch.cat(x_ps_concat, dim=0)
582
+ return pred_token_array_file, loss, x_ps_concat.flatten().unsqueeze(0)
583
+ else:
584
+ return pred_token_array_file, loss
585
+
586
+ def training_step(self, batch, batch_idx) -> torch.Tensor:
587
+ # batch: {
588
+ # 'dataset1': [Tuple[audio_segments(b, 1, t), tokens(b, max_token_len), ...]]
589
+ # 'dataset2': [Tuple[audio_segments(b, 1, t), tokens(b, max_token_len), ...]]
590
+ # 'dataset3': ...
591
+ # }
592
+ audio_segments, note_tokens, pshift_steps = [torch.cat(t, dim=0) for t in zip(*batch.values())]
593
+
594
+ if self.pitchshift is not None:
595
+ # Pitch shift
596
+ n_groups = len(batch)
597
+ audio_segments = torch.chunk(audio_segments, n_groups, dim=0)
598
+ pshift_steps = torch.chunk(pshift_steps, n_groups, dim=0)
599
+ for p in pshift_steps:
600
+ assert p.eq(p[0]).all().item()
601
+
602
+ audio_segments = torch.cat([self.pitchshift(a, p[0].item()) for a, p in zip(audio_segments, pshift_steps)],
603
+ dim=0)
604
+
605
+ loss = self(audio_segments, note_tokens)['loss']
606
+ self.log('train_loss',
607
+ loss,
608
+ on_step=True,
609
+ on_epoch=True,
610
+ prog_bar=True,
611
+ batch_size=note_tokens.shape[0],
612
+ sync_dist=True)
613
+ # print('lr', self.trainer.optimizers[0].param_groups[0]['lr'])
614
+ return loss
615
+
616
+ def validation_step(self, batch, batch_idx, dataloader_idx=0) -> Dict:
617
+ # File-wise validation
618
+ if self.task_manager.num_decoding_channels == 1:
619
+ bsz = self.shared_cfg["BSZ"]["validation"]
620
+ else:
621
+ bsz = self.shared_cfg["BSZ"]["validation"] // self.task_manager.num_decoding_channels * 3
622
+ # audio_segments, notes_dict, note_token_array, task_token_array = batch
623
+ audio_segments, notes_dict, note_token_array = batch
624
+ task_token_array = None
625
+
626
+ # Loop through the tensor in chunks of bsz (=subbsz actually)
627
+ n_items = audio_segments.shape[0]
628
+ start_secs_file = [32767 * i / 16000 for i in range(n_items)]
629
+ with Timer() as t:
630
+ pred_token_array_file, loss = self.inference_file(bsz, audio_segments, note_token_array, task_token_array)
631
+ """
632
+ notes_dict: # Ground truth notes
633
+ {
634
+ 'mtrack_id': int,
635
+ 'program': List[int],
636
+ 'is_drum': bool,
637
+ 'duration_sec': float,
638
+ 'notes': List[Note],
639
+ }
640
+ """
641
+ # Process a list of channel-wise token arrays for a file
642
+ num_channels = self.task_manager.num_decoding_channels
643
+ pred_notes_in_file = []
644
+ n_err_cnt = Counter()
645
+ for ch in range(num_channels):
646
+ pred_token_array_ch = [arr[:, ch, :] for arr in pred_token_array_file] # (B, L)
647
+ zipped_note_events_and_tie, list_events, ne_err_cnt = self.task_manager.detokenize_list_batches(
648
+ pred_token_array_ch, start_secs_file, return_events=True)
649
+ pred_notes_ch, n_err_cnt_ch = merge_zipped_note_events_and_ties_to_notes(zipped_note_events_and_tie)
650
+ pred_notes_in_file.append(pred_notes_ch)
651
+ n_err_cnt += n_err_cnt_ch
652
+ pred_notes = mix_notes(pred_notes_in_file) # This is the mixed notes from all channels
653
+
654
+ if self.hparams.write_output_dir is not None:
655
+ track_info = [notes_dict[k] for k in notes_dict.keys() if k.endswith("_id")][0]
656
+ dataset_info = [k for k in notes_dict.keys() if k.endswith('_id')][0][:-3]
657
+ # write_model_output_as_npy(zipped_note_events_and_tie, self.hparams.write_output_dir,
658
+ # track_info)
659
+ write_model_output_as_midi(pred_notes,
660
+ self.hparams.write_output_dir,
661
+ track_info,
662
+ self.midi_output_inverse_vocab,
663
+ output_dir_suffix=str(dataset_info) + '_' +
664
+ str(self.hparams.eval_subtask_key))
665
+ # generate sample text to display in log table
666
+ # pred_events_text = [str([list_events[0][:200]])]
667
+ # pred_notes_text = [str([pred_notes[:200]])]
668
+
669
+ # this is local GPU metric per file, not global metric in DDP
670
+ drum_metric, non_drum_metric, instr_metric = compute_track_metrics(
671
+ pred_notes,
672
+ notes_dict['notes'],
673
+ eval_vocab=self.hparams.eval_vocab[dataloader_idx],
674
+ eval_drum_vocab=self.hparams.eval_drum_vocab,
675
+ onset_tolerance=self.hparams.onset_tolerance,
676
+ add_pitch_class_metric=self.hparams.add_pitch_class_metric)
677
+ self.val_metrics[dataloader_idx].bulk_update(drum_metric)
678
+ self.val_metrics[dataloader_idx].bulk_update(non_drum_metric)
679
+ self.val_metrics[dataloader_idx].bulk_update(instr_metric)
680
+ self.val_metrics_macro.bulk_update(drum_metric)
681
+ self.val_metrics_macro.bulk_update(non_drum_metric)
682
+ self.val_metrics_macro.bulk_update(instr_metric)
683
+
684
+ # Log sample table: predicted notes and ground truth notes
685
+ # if batch_idx in (0, 1) and self.global_rank == 0:
686
+ # actual_notes_text = [str([notes_dict['notes'][:200]])]
687
+ # actual_tokens = token_array[0, :200].detach().cpu().numpy().tolist()
688
+ # actual_events_text = [str(self.tokenizer._decode(actual_tokens))]
689
+ # track_info = [notes_dict[k] for k in notes_dict.keys() if k.endswith("_id")]
690
+ # self.sample_table.add_data(self.current_epoch, track_info, pred_events_text,
691
+ # actual_events_text, pred_notes_text, actual_notes_text)
692
+ # self.logger.log_table('Samples', self.sample_table.columns, self.sample_table.data)
693
+
694
+ decoding_time_sec = t.elapsed_time()
695
+ self.log('val_loss', loss, prog_bar=True, batch_size=n_items, sync_dist=True)
696
+ # self.val_metrics[dataloader_idx].bulk_update_errors({'decoding_time': decoding_time_sec})
697
+
698
+ def on_validation_epoch_end(self) -> None:
699
+ for val_metrics in self.val_metrics:
700
+ self.log_dict(val_metrics.bulk_compute(), sync_dist=True)
701
+ val_metrics.bulk_reset()
702
+ self.log_dict(self.val_metrics_macro.bulk_compute(), sync_dist=True)
703
+ self.val_metrics_macro.bulk_reset()
704
+
705
+ def test_step(self, batch, batch_idx, dataloader_idx=0) -> Dict:
706
+ # File-wise evaluation
707
+ if self.task_manager.num_decoding_channels == 1:
708
+ bsz = self.shared_cfg["BSZ"]["validation"]
709
+ else:
710
+ bsz = self.shared_cfg["BSZ"]["validation"] // self.task_manager.num_decoding_channels * 3
711
+ # audio_segments, notes_dict, note_token_array, task_token_array = batch
712
+ audio_segments, notes_dict, note_token_array = batch
713
+ task_token_array = None
714
+
715
+ # Test pitch shift layer: debug only
716
+ if self.test_pitch_shift_layer is not None and self.test_pitch_shift_semitone != 0:
717
+ for n in notes_dict['notes']:
718
+ if n.is_drum == False:
719
+ n.pitch = n.pitch + self.test_pitch_shift_semitone
720
+
721
+ # Loop through the tensor in chunks of bsz (=subbsz actually)
722
+ n_items = audio_segments.shape[0]
723
+ start_secs_file = [32767 * i / 16000 for i in range(n_items)]
724
+
725
+ if self.test_pitch_shift_layer is not None and self.hparams.write_output_dir is not None:
726
+ pred_token_array_file, loss, x_ps = self.inference_file(bsz, audio_segments, None, None)
727
+ else:
728
+ pred_token_array_file, loss = self.inference_file(bsz, audio_segments, None, None)
729
+ if len(pred_token_array_file) > 0:
730
+
731
+ # Process a list of channel-wise token arrays for a file
732
+ num_channels = self.task_manager.num_decoding_channels
733
+ pred_notes_in_file = []
734
+ n_err_cnt = Counter()
735
+ for ch in range(num_channels):
736
+ pred_token_array_ch = [arr[:, ch, :] for arr in pred_token_array_file] # (B, L)
737
+ zipped_note_events_and_tie, list_events, ne_err_cnt = self.task_manager.detokenize_list_batches(
738
+ pred_token_array_ch, start_secs_file, return_events=True)
739
+ pred_notes_ch, n_err_cnt_ch = merge_zipped_note_events_and_ties_to_notes(zipped_note_events_and_tie)
740
+ pred_notes_in_file.append(pred_notes_ch)
741
+ n_err_cnt += n_err_cnt_ch
742
+ pred_notes = mix_notes(pred_notes_in_file) # This is the mixed notes from all channels
743
+
744
+ if self.test_pitch_shift_layer is not None and self.hparams.write_output_dir is not None:
745
+ # debug only
746
+ wav_output_dir = os.path.join(self.hparams.write_output_dir, f"model_output_{dataset_info}")
747
+ os.makedirs(wav_output_dir, exist_ok=True)
748
+ wav_output_file = os.path.join(wav_output_dir, f"{track_info}_ps_{self.test_pitch_shift_semitone}.wav")
749
+ torchaudio.save(wav_output_file, x_ps.squeeze(1), 16000, bits_per_sample=16)
750
+
751
+ drum_metric, non_drum_metric, instr_metric = compute_track_metrics(
752
+ pred_notes,
753
+ notes_dict['notes'],
754
+ eval_vocab=self.hparams.eval_vocab[dataloader_idx],
755
+ eval_drum_vocab=self.hparams.eval_drum_vocab,
756
+ onset_tolerance=self.hparams.onset_tolerance,
757
+ add_pitch_class_metric=self.hparams.add_pitch_class_metric,
758
+ add_melody_metric=['Singing Voice'] if self.hparams.add_melody_metric_to_singing else None,
759
+ add_frame_metric=True,
760
+ add_micro_metric=True,
761
+ add_multi_f_metric=True)
762
+
763
+ if self.hparams.write_output_dir is not None and self.global_rank == 0:
764
+ # write model output to file
765
+ track_info = [notes_dict[k] for k in notes_dict.keys() if k.endswith("_id")][0]
766
+ dataset_info = [k for k in notes_dict.keys() if k.endswith('_id')][0][:-3]
767
+ f_score = f"OnF{non_drum_metric['onset_f']:.2f}_MulF{instr_metric['multi_f']:.2f}"
768
+ write_model_output_as_midi(pred_notes,
769
+ self.hparams.write_output_dir,
770
+ track_info,
771
+ self.midi_output_inverse_vocab,
772
+ output_dir_suffix=str(dataset_info) + '_' +
773
+ str(self.hparams.eval_subtask_key) + '_' + f_score)
774
+ write_err_cnt_as_json(track_info, self.hparams.write_output_dir,
775
+ str(dataset_info) + '_' + str(self.hparams.eval_subtask_key) + '_' + f_score,
776
+ n_err_cnt, ne_err_cnt)
777
+
778
+ # Test with optimal octave shift
779
+ if self.hparams.test_optimal_octave_shift:
780
+ track_info = [notes_dict[k] for k in notes_dict.keys() if k.endswith("_id")][0]
781
+ dataset_info = [k for k in notes_dict.keys() if k.endswith('_id')][0][:-3]
782
+ score = [instr_metric['onset_f_Bass']]
783
+ ref_notes_plus = []
784
+ ref_notes_minus = []
785
+ for note in notes_dict['notes']:
786
+ if note.is_drum == True:
787
+ ref_notes_plus.append(note)
788
+ ref_notes_minus.append(note)
789
+ else:
790
+ ref_notes_plus.append(
791
+ Note(is_drum=note.is_drum,
792
+ program=note.program,
793
+ onset=note.onset,
794
+ offset=note.offset,
795
+ pitch=note.pitch + 12,
796
+ velocity=note.velocity))
797
+ ref_notes_minus.append(
798
+ Note(is_drum=note.is_drum,
799
+ program=note.program,
800
+ onset=note.onset,
801
+ offset=note.offset,
802
+ pitch=note.pitch - 12,
803
+ velocity=note.velocity))
804
+
805
+ drum_metric_plus, non_drum_metric_plus, instr_metric_plus = compute_track_metrics(
806
+ pred_notes,
807
+ ref_notes_plus,
808
+ eval_vocab=self.hparams.eval_vocab[dataloader_idx],
809
+ eval_drum_vocab=self.hparams.eval_drum_vocab,
810
+ onset_tolerance=self.hparams.onset_tolerance,
811
+ add_pitch_class_metric=self.hparams.add_pitch_class_metric)
812
+ drum_metric_minus, non_drum_metric_minus, instr_metric_minus = compute_track_metrics(
813
+ pred_notes,
814
+ ref_notes_minus,
815
+ eval_vocab=self.hparams.eval_vocab[dataloader_idx],
816
+ eval_drum_vocab=self.hparams.eval_drum_vocab,
817
+ onset_tolerance=self.hparams.onset_tolerance,
818
+ add_pitch_class_metric=self.hparams.add_pitch_class_metric)
819
+
820
+ score.append(instr_metric_plus['onset_f_Bass'])
821
+ score.append(instr_metric_minus['onset_f_Bass'])
822
+ max_index = score.index(max(score))
823
+ if max_index == 0:
824
+ print(f"ZERO: {track_info}, z/p/m: {score[0]:.2f}/{score[1]:.2f}/{score[2]:.2f}")
825
+ elif max_index == 1:
826
+ # plus
827
+ instr_metric['onset_f_Bass'] = instr_metric_plus['onset_f_Bass']
828
+ print(f"PLUS: {track_info}, z/p/m: {score[0]:.2f}/{score[1]:.2f}/{score[2]:.2f}")
829
+ write_model_output_as_midi(ref_notes_plus,
830
+ self.hparams.write_output_dir,
831
+ track_info + '_ref_octave_plus',
832
+ self.midi_output_inverse_vocab,
833
+ output_dir_suffix=str(dataset_info) + '_' +
834
+ str(self.hparams.eval_subtask_key))
835
+ else:
836
+ # minus
837
+ instr_metric['onset_f_Bass'] = instr_metric_minus['onset_f_Bass']
838
+ print(f"MINUS: {track_info}, z/p/m: {score[0]:.2f}/{score[1]:.2f}/{score[2]:.2f}")
839
+ write_model_output_as_midi(ref_notes_minus,
840
+ self.hparams.write_output_dir,
841
+ track_info + '_ref_octave_minus',
842
+ self.midi_output_inverse_vocab,
843
+ output_dir_suffix=str(dataset_info) + '_' +
844
+ str(self.hparams.eval_subtask_key))
845
+
846
+ self.test_metrics[dataloader_idx].bulk_update(drum_metric)
847
+ self.test_metrics[dataloader_idx].bulk_update(non_drum_metric)
848
+ self.test_metrics[dataloader_idx].bulk_update(instr_metric)
849
+ # self.test_metrics_macro.bulk_update(drum_metric)
850
+ # self.test_metrics_macro.bulk_update(non_drum_metric)
851
+ # self.test_metrics_macro.bulk_update(instr_metric)
852
+
853
+ def on_test_epoch_end(self) -> None:
854
+ # all_gather is done seamlessly by torchmetrics
855
+ for test_metrics in self.test_metrics:
856
+ self.log_dict(test_metrics.bulk_compute(), sync_dist=True)
857
+ test_metrics.bulk_reset()
858
+ # self.log_dict(self.test_metrics_macro.bulk_compute(), sync_dist=True)
859
+ # self.test_metrics_macro.bulk_reset()
860
+
861
+
862
+ def test_case_forward_mt3():
863
+ import torch
864
+ from config.config import audio_cfg, model_cfg, shared_cfg
865
+ from model.ymt3 import YourMT3
866
+ model = YourMT3()
867
+ model.eval()
868
+ x = torch.randn(2, 1, 32767)
869
+ labels = torch.randint(0, 596, (2, 1, 1024), requires_grad=False) # (B, C=1, T)
870
+ task_tokens = torch.LongTensor([])
871
+ output = model.forward(x, labels, task_tokens)
872
+ logits, loss = output['logits'], output['loss']
873
+ assert logits.shape == (2, 1024, 596) # (B, N, vocab_size)
874
+
875
+
876
+ def test_case_inference_mt3():
877
+ import torch
878
+ from config.config import audio_cfg, model_cfg, shared_cfg
879
+ from model.ymt3 import YourMT3
880
+ model_cfg["num_max_positions"] = 1024 + 3 + 1
881
+ model = YourMT3(model_cfg=model_cfg)
882
+ model.eval()
883
+ x = torch.randn(2, 1, 32767)
884
+ task_tokens = torch.randint(0, 596, (2, 3), requires_grad=False)
885
+ pred_ids = model.inference(x, task_tokens, max_token_length=10) # (2, 3, 9) (B, C, L-task_len)
886
+ # TODO: need to check the length of pred_ids when task_tokens is not None
887
+
888
+
889
+ def test_case_forward_enc_perceiver_tf_dec_t5():
890
+ import torch
891
+ from model.ymt3 import YourMT3
892
+ from config.config import audio_cfg, model_cfg, shared_cfg
893
+ model_cfg["encoder_type"] = "perceiver-tf"
894
+ audio_cfg["codec"] = "spec"
895
+ audio_cfg["hop_length"] = 300
896
+
897
+ model = YourMT3(audio_cfg=audio_cfg, model_cfg=model_cfg)
898
+ model.eval()
899
+
900
+ x = torch.randn(2, 1, 32767)
901
+ labels = torch.randint(0, 596, (2, 1, 1024), requires_grad=False)
902
+
903
+ # forward
904
+ output = model.forward(x, labels)
905
+ logits, loss = output['logits'], output['loss'] # logits: (2, 1024, 596) (B, N, vocab_size)
906
+
907
+ # inference
908
+ pred_ids = model.inference(x, None, max_token_length=3) # (2, 1, 3) (B, C, L)
909
+
910
+
911
+ def test_case_forward_enc_conformer_dec_t5():
912
+ import torch
913
+ from model.ymt3 import YourMT3
914
+ from config.config import audio_cfg, model_cfg, shared_cfg
915
+ model_cfg["encoder_type"] = "conformer"
916
+ audio_cfg["codec"] = "melspec"
917
+ audio_cfg["hop_length"] = 128
918
+ model = YourMT3(audio_cfg=audio_cfg, model_cfg=model_cfg)
919
+ model.eval()
920
+
921
+ x = torch.randn(2, 1, 32767)
922
+ labels = torch.randint(0, 596, (2, 1024), requires_grad=False)
923
+
924
+ # forward
925
+ output = model.forward(x, labels)
926
+ logits, loss = output['logits'], output['loss'] # logits: (2, 1024, 596) (B, N, vocab_size)
927
+
928
+ # inference
929
+ pred_ids = model.inference(x, None, 20) # (2, 1, 20) (B, C, L)
930
+
931
+
932
+ def test_case_enc_perceiver_tf_dec_multi_t5():
933
+ import torch
934
+ from model.ymt3 import YourMT3
935
+ from config.config import audio_cfg, model_cfg, shared_cfg
936
+ model_cfg["encoder_type"] = "perceiver-tf"
937
+ model_cfg["decoder_type"] = "multi-t5"
938
+ model_cfg["encoder"]["perceiver-tf"]["attention_to_channel"] = True
939
+ model_cfg["encoder"]["perceiver-tf"]["num_latents"] = 26
940
+ audio_cfg["codec"] = "spec"
941
+ audio_cfg["hop_length"] = 300
942
+ model = YourMT3(audio_cfg=audio_cfg, model_cfg=model_cfg)
943
+ model.eval()
944
+
945
+ x = torch.randn(2, 1, 32767)
946
+ labels = torch.randint(0, 596, (2, 13, 200), requires_grad=False) # (B, C, T)
947
+
948
+ # x = model.spectrogram(x)
949
+ # x = model.pre_encoder(x) # (2, 110, 128, 128) (B, T, C, D)
950
+ # enc_hs = model.encoder(inputs_embeds=x)["last_hidden_state"] # (2, 110, 128, 128) (B, T, C, D)
951
+ # enc_hs = model.pre_decoder(enc_hs) # (2, 13, 110, 512) (B, C, T, D)
952
+
953
+ # dec_input_ids = model.shift_right_fn(labels) # (2, 13, 200) (B, C, T)
954
+ # dec_inputs_embeds = model.embed_tokens(dec_input_ids) # (2, 13, 200, 512) (B, C, T, D)
955
+ # dec_hs, _ = model.decoder(
956
+ # inputs_embeds=dec_inputs_embeds, encoder_hidden_states=enc_hs, return_dict=False)
957
+ # logits = model.lm_head(dec_hs) # (2, 13, 200, 596) (B, C, T, vocab_size)
958
+
959
+ # forward
960
+ x = torch.randn(2, 1, 32767)
961
+ labels = torch.randint(0, 596, (2, 13, 200), requires_grad=False) # (B, C, T)
962
+ output = model.forward(x, labels)
963
+ logits, loss = output['logits'], output['loss'] # (2, 13, 200, 596) (B, C, T, vocab_size)
964
+
965
+ # inference
966
+ model.max_total_token_length = 123 # to save time..
967
+ pred_ids = model.inference(x, None) # (2, 13, 123) (B, C, L)
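
The `test_optimal_octave_shift` branch in `test_step` above scores the prediction against the reference shifted by 0, +12, and -12 semitones and keeps whichever octave gives the best bass onset F-score. The snippet below is a minimal, self-contained toy sketch of that selection idea, not the repository implementation; `ToyNote`, `naive_onset_f1`, and `best_octave_shift` are illustrative stand-ins for the actual `Note` dataclass and `compute_track_metrics`.

```python
from dataclasses import dataclass
from typing import List


@dataclass
class ToyNote:
    onset: float  # seconds
    pitch: int    # MIDI pitch


def naive_onset_f1(pred: List[ToyNote], ref: List[ToyNote], tol: float = 0.05) -> float:
    """Greedy onset+pitch matching; a crude stand-in for a proper transcription metric."""
    if not pred or not ref:
        return 0.0
    matched, used = 0, set()
    for p in pred:
        for i, r in enumerate(ref):
            if i not in used and p.pitch == r.pitch and abs(p.onset - r.onset) <= tol:
                matched += 1
                used.add(i)
                break
    if matched == 0:
        return 0.0
    precision, recall = matched / len(pred), matched / len(ref)
    return 2 * precision * recall / (precision + recall)


def best_octave_shift(pred: List[ToyNote], ref: List[ToyNote]) -> int:
    """Return the reference octave shift (0, +12, or -12) that best matches the prediction."""
    scores = {
        shift: naive_onset_f1(pred, [ToyNote(n.onset, n.pitch + shift) for n in ref])
        for shift in (0, 12, -12)
    }
    return max(scores, key=scores.get)


pred = [ToyNote(0.0, 48), ToyNote(0.5, 50)]
ref = [ToyNote(0.0, 36), ToyNote(0.5, 38)]  # annotated one octave lower than the prediction
print(best_octave_shift(pred, ref))  # -> 12
```

The real code delegates the comparison to `compute_track_metrics` with a configurable `onset_tolerance` and per-instrument vocabularies rather than this naive matcher.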
tests/model/spectrogram_test.py ADDED
@@ -0,0 +1,29 @@
1
+ import torch
2
+ import unittest
3
+ from model.spectrogram import Melspectrogram
4
+
5
+
6
+ class TestMelspectrogram(unittest.TestCase):
7
+
8
+ def test_melspectrogram(self):
9
+ # Create a Melspectrogram instance with default parameters
10
+ melspec = Melspectrogram()
11
+
12
+ # Create a random input tensor (B, C, T) with T = 32767 samples for 2048 ms
13
+ x = torch.randn(2, 1, 32767)
14
+
15
+ # Compute the Melspectrogram
16
+ y = melspec(x)
17
+
18
+ # Check the output shape
19
+ self.assertEqual(y.shape, (2, 256, 512))
20
+
21
+ # Check if the output contains NaN values
22
+ self.assertFalse(torch.isnan(y).any())
23
+
24
+ # Check if the output contains infinite values
25
+ self.assertFalse(torch.isinf(y).any())
26
+
27
+
28
+ if __name__ == "__main__":
29
+ unittest.main()
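
The test module above can also be driven programmatically with the standard-library loader; a minimal sketch, assuming the repository root is on `sys.path` so that `model.spectrogram` and `tests.model.spectrogram_test` are importable:

```python
import unittest

# Load the TestMelspectrogram case by dotted name and run it with the stdlib text runner.
suite = unittest.defaultTestLoader.loadTestsFromName("tests.model.spectrogram_test")
unittest.TextTestRunner(verbosity=2).run(suite)
```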
utils/README.md ADDED
@@ -0,0 +1,22 @@
1
+ # YourMT3: Utils
2
+
3
+
4
+ ## CachedAudioDataset
5
+
6
+ ```mermaid
7
+ graph TB
8
+ A[Call __getitem__]:::main --> B1(Update cache):::process
9
+ A --> B2(Get segments from cache):::process
10
+ B1 --> C1[Load & cut audio]:::subprocess
11
+ C1 --> C2[Load & cut note events]:::subprocess
12
+ C2 --> C3[Augment data]:::subprocess
13
+ C3 --> C4[Tokenize & pad events]:::subprocess
14
+ C4 --> C5[Save to cache]:::subprocess
15
+ B2 --> D1[Return audio segments]:::output
16
+ B2 --> D2[Return tokens]:::output
17
+
18
+ classDef main fill:#FED7E2,stroke:#000000;
19
+ classDef process fill:#FEE2E2,stroke:#000000;
20
+ classDef subprocess fill:#E0F0F4,stroke:#000000;
21
+ classDef output fill:#F0E6EF,stroke:#000000;
22
+ ```
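
The diagram above can be read as a cache-first `__getitem__`: on a miss, the segment is prepared (load, cut, augment, tokenize, pad) and stored; on a hit, the cached audio and tokens are returned directly. Below is a minimal sketch of that flow for a torch-style map dataset; the names `ToyCachedAudioDataset`, `_update_cache`, and `_get_segments_from_cache` and the FIFO eviction policy are illustrative assumptions, not the actual implementation.

```python
from collections import OrderedDict
from typing import Any, Dict, Tuple


class ToyCachedAudioDataset:
    """Illustrative cache-backed dataset following the diagram above."""

    def __init__(self, file_list, cache_size: int = 8):
        self.file_list = file_list
        self.cache_size = cache_size
        self.cache: "OrderedDict[int, Dict[str, Any]]" = OrderedDict()

    def _update_cache(self, index: int) -> None:
        # Placeholder for: load & cut audio -> load & cut note events
        # -> augment -> tokenize & pad -> save to cache.
        src = self.file_list[index % len(self.file_list)]
        self.cache[index] = {"audio": f"audio segment of {src}", "tokens": f"tokens of {src}"}
        if len(self.cache) > self.cache_size:  # simple FIFO eviction
            self.cache.popitem(last=False)

    def _get_segments_from_cache(self, index: int) -> Tuple[Any, Any]:
        entry = self.cache[index]
        return entry["audio"], entry["tokens"]

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        if index not in self.cache:
            self._update_cache(index)
        return self._get_segments_from_cache(index)


ds = ToyCachedAudioDataset(["a.wav", "b.wav"])
print(ds[0])  # ('audio segment of a.wav', 'tokens of a.wav')
```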
utils/__pycache__/event2note.cpython-310.pyc ADDED
Binary file (6.76 kB). View file
 
utils/__pycache__/midi.cpython-310.pyc ADDED
Binary file (9.32 kB). View file
 
utils/__pycache__/note_event_dataclasses.cpython-310.pyc ADDED
Binary file (2.59 kB). View file
 
utils/audio.py ADDED
@@ -0,0 +1,309 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ """audio.py"""
11
+ import os
12
+ import subprocess
13
+ import numpy as np
14
+ import wave
15
+ import math
16
+ from typing import Tuple, List
17
+ from numpy.lib.stride_tricks import as_strided
18
+
19
+
20
+ def load_audio_file(filename: str,
21
+ seg_start_sec: float = 0.,
22
+ seg_length_sec: float = 0.,
23
+ fs: int = 16000,
24
+ dtype: np.dtype = np.float64) -> np.ndarray:
25
+ """Load audio file and return the segment of audio."""
26
+ start_frame_idx = int(np.floor(seg_start_sec * fs))
27
+ seg_length_frame = int(np.floor(seg_length_sec * fs))
28
+ end_frame_idx = start_frame_idx + seg_length_frame
29
+
30
+ file_ext = filename[-3:]
31
+
32
+ if file_ext == 'wav':
33
+ with wave.open(filename, 'r') as f:
34
+ f.setpos(start_frame_idx)
35
+ if seg_length_sec == 0:
36
+ x = f.readframes(f.getnframes())
37
+ else:
38
+ x = f.readframes(end_frame_idx - start_frame_idx)
39
+
40
+ if dtype == np.float64:
41
+ x = np.frombuffer(x, dtype=np.int16) / 2**15
42
+ elif dtype == np.float32:
43
+ x = np.frombuffer(x, dtype=np.int16) / 2**15
44
+ x = x.astype(np.float32)
45
+ elif dtype == np.int16:
46
+ x = np.frombuffer(x, dtype=np.int16)
47
+ elif dtype is None:
48
+ pass
49
+ else:
50
+ raise NotImplementedError(f"Unsupported dtype: {dtype}")
51
+ else:
52
+ raise NotImplementedError(f"Unsupported file extension: {file_ext}")
53
+
54
+ return x
55
+
56
+
57
+ def get_audio_file_info(filename: str) -> Tuple[int, int, int]:
58
+ """Get audio file info.
59
+
60
+ Args:
61
+ filename: path to the audio file
62
+ Returns:
63
+ fs: sampling rate
64
+ n_frames: number of frames
65
+ n_channels: number of channels
66
+
67
+ """
68
+ file_ext = filename[-3:]
69
+
70
+ if file_ext == 'wav':
71
+ with wave.open(filename, 'r') as f:
72
+ fs = f.getframerate()
73
+ n_frames = f.getnframes()
74
+ n_channels = f.getnchannels()
75
+ else:
76
+ raise NotImplementedError(f"Unsupported file extension: {file_ext}")
77
+
78
+ return fs, n_frames, n_channels
79
+
80
+
81
+ def get_segments_from_numpy_array(arr: np.ndarray,
82
+ slice_length: int,
83
+ start_frame_indices: List[int],
84
+ dtype: np.dtype = np.float32) -> np.ndarray:
85
+ """Get random audio slices from numpy array.
86
+
87
+ Args:
88
+ arr: numpy array of shape (c, n_frames)
89
+ slice_length: length of the slice
90
+ start_frame_indices: list of m start frames
91
+ Returns:
92
+ slices: numpy array of shape (m, c, slice_length)
93
+ """
94
+ c, max_length = arr.shape
95
+ max_length = arr.shape[1]
96
+ m = len(start_frame_indices)
97
+
98
+ slices = np.zeros((m, c, slice_length), dtype=dtype)
99
+ for i, start_frame in enumerate(start_frame_indices):
100
+ end_frame = start_frame + slice_length
101
+ assert (end_frame <= max_length - 1)
102
+ slices[i, :, :] = arr[:, start_frame:end_frame].astype(dtype)
103
+ return slices
104
+
105
+
106
+ def slice_padded_array(x: np.ndarray, slice_length: int, slice_hop: int, pad: bool = True) -> np.ndarray:
107
+ """
108
+ Slices the input array into overlapping windows based on the given slice length and slice hop.
109
+
110
+ Args:
111
+ x: The input array to be sliced.
112
+ slice_length: The length of each slice.
113
+ slice_hop: The number of elements between the start of each slice.
114
+ pad: If True, the last slice will be padded with zeros if necessary.
115
+
116
+ Returns:
117
+ A numpy array with shape (n_slices, slice_length) containing the slices.
118
+ """
119
+ num_slices = (x.shape[1] - slice_length) // slice_hop + 1
120
+ remaining = (x.shape[1] - slice_length) % slice_hop
121
+
122
+ if pad and remaining > 0:
123
+ padding = np.zeros((x.shape[0], slice_length - remaining))
124
+ x = np.hstack((x, padding))
125
+ num_slices += 1
126
+
127
+ shape: Tuple[int, int] = (num_slices, slice_length)
128
+ strides: Tuple[int, int] = (slice_hop * x.strides[1], x.strides[1])
129
+ sliced_x = as_strided(x, shape=shape, strides=strides)
130
+
131
+ return sliced_x
132
+
133
+
134
+ def slice_padded_array_for_subbatch(x: np.ndarray,
135
+ slice_length: int,
136
+ slice_hop: int,
137
+ pad: bool = True,
138
+ sub_batch_size: int = 1,
139
+ dtype: np.dtype = np.float32) -> np.ndarray:
140
+ """
141
+ Slices the input array into overlapping windows based on the given slice length and slice hop,
142
+ and pads it to make the output divisible by the sub_batch_size.
143
+
144
+ NOTE: This method is currently not used.
145
+
146
+ Args:
147
+ x: The input array to be sliced, such as (1, n_frames).
148
+ slice_length: The length of each slice.
149
+ slice_hop: The number of elements between the start of each slice.
150
+ pad: If True, the last slice will be padded with zeros if necessary.
151
+ sub_batch_size: The desired number of slices to be divisible by.
152
+
153
+ Returns:
154
+ A numpy array with shape (n_slices, slice_length) containing the slices.
155
+ """
156
+ num_slices = (x.shape[1] - slice_length) // slice_hop + 1
157
+ remaining = (x.shape[1] - slice_length) % slice_hop
158
+
159
+ if pad and remaining > 0:
160
+ padding = np.zeros((x.shape[0], slice_length - remaining), dtype=dtype)
161
+ x = np.hstack((x, padding))
162
+ num_slices += 1
163
+
164
+ # Adjust the padding to make n_slices divisible by sub_batch_size
165
+ if pad and num_slices % sub_batch_size != 0:
166
+ additional_padding_needed = (sub_batch_size - (num_slices % sub_batch_size)) * slice_hop
167
+ additional_padding = np.zeros((x.shape[0], additional_padding_needed), dtype=dtype)
168
+ x = np.hstack((x, additional_padding))
169
+ num_slices += (sub_batch_size - (num_slices % sub_batch_size))
170
+
171
+ shape: Tuple[int, int] = (num_slices, slice_length)
172
+ strides: Tuple[int, int] = (slice_hop * x.strides[1], x.strides[1])
173
+ sliced_x = as_strided(x, shape=shape, strides=strides)
174
+
175
+ return sliced_x
176
+
177
+
178
+ def pitch_shift_audio(src_audio_file: os.PathLike,
179
+ min_pitch_shift: int = -5,
180
+ max_pitch_shift: int = 6,
181
+ random_microshift_range: tuple[int, int] = (-10, 11)):
182
+ """
183
+ Pitch shift audio file using the Sox command-line tool.
184
+
185
+ NOTE: This method is currently not used. Previously, we used this for
186
+ offline augmentation for GuitarSet.
187
+
188
+ Args:
189
+ src_audio_file: Path to the input audio file.
190
+ min_pitch_shift: Minimum pitch shift in semitones.
191
+ max_pitch_shift: Maximum pitch shift in semitones.
192
+ random_microshift_range: Range of random microshifts to apply in tenths of a semitone.
193
+
194
+ Returns:
195
+ None
196
+
197
+ Raises:
198
+ CalledProcessError: If the Sox command fails to execute.
199
+
200
+ """
201
+
202
+ # files
203
+ src_audio_dir = os.path.dirname(src_audio_file)
204
+ src_audio_filename = os.path.basename(src_audio_file).split('.')[0]
205
+
206
+ # load source audio
207
+ try:
208
+ audio = load_audio_file(src_audio_file, dtype=np.int16)
209
+ audio = audio / 2**15
210
+ audio = audio.astype(np.float16)
211
+ except Exception as e:
212
+ print(f"Failed to load audio file: {src_audio_file}. {e}")
213
+ return
214
+
215
+ # pitch shift audio for each semitone in the range
216
+ for pitch_shift in range(min_pitch_shift, max_pitch_shift):
217
+ if pitch_shift == 0:
218
+ continue
219
+
220
+ # pitch shift audio by sox
221
+ dst_audio_file = os.path.join(src_audio_dir, f'{src_audio_filename}_pshift{pitch_shift}.wav')
222
+ shift_semitone = 100 * pitch_shift + np.random.randint(*random_microshift_range)
223
+
224
+ # build Sox command
225
+ command = ['sox', src_audio_file, '-r', '16000', dst_audio_file, 'pitch', str(shift_semitone)]
226
+
227
+ try:
228
+ # execute Sox command and check for errors
229
+ subprocess.run(command, check=True)
230
+ print(f"Created {dst_audio_file}")
231
+ except subprocess.CalledProcessError as e:
232
+ print(f"Failed to pitch shift audio file: {src_audio_file}, pitch_shift: {pitch_shift}. {e}")
233
+
234
+
235
+ def write_wav_file(filename: str, x: np.ndarray, samplerate: int = 16000) -> None:
236
+ """
237
+ Write a mono PCM WAV file from a NumPy array of audio samples.
238
+
239
+ Args:
240
+ filename (str): The name of the WAV file to be created.
241
+ x (np.ndarray): A 1D NumPy array containing the audio samples to be written to the WAV file.
242
+ The audio samples should be in the range [-1, 1].
243
+ samplerate (int): The sample rate (in Hz) of the audio samples.
244
+
245
+ Returns:
246
+ None
247
+ """
248
+ # Set the WAV file parameters
249
+ nchannels = 1 # Mono
250
+ sampwidth = 2 # 16-bit
251
+ framerate = samplerate
252
+ nframes = len(x)
253
+
254
+ # Scale the audio samples to the range [-32767, 32767]
255
+ x_scaled = np.array(x * 32767, dtype=np.int16)
256
+
257
+ # Set the buffer size for writing the WAV file
258
+ BUFFER_SIZE = 1024
259
+
260
+ # Open the WAV file for writing
261
+ with wave.open(filename, "wb") as wav_file:
262
+ # Set the WAV file parameters
263
+ wav_file.setparams((nchannels, sampwidth, framerate, nframes, "NONE", "NONE"))
264
+
265
+ # Write the audio samples to the file in chunks
266
+ for i in range(0, len(x_scaled), BUFFER_SIZE):
267
+ # Get the next chunk of audio samples
268
+ chunk = x_scaled[i:i + BUFFER_SIZE]
269
+
270
+ # Convert the chunk of audio samples to a byte string and write it to the WAV file
271
+ wav_file.writeframes(chunk.tobytes())
272
+
273
+ # Close the WAV file
274
+ wav_file.close()
275
+
276
+
277
+ def guess_onset_offset_by_amp_envelope(x, fs=16000, onset_threshold=0.05, offset_threshold=0.02, frame_size=256):
278
+ """ Guess onset/offset from audio signal x """
279
+ amp_env = []
280
+ num_frames = math.floor(len(x) / frame_size)
281
+ for t in range(num_frames):
282
+ lower = t * frame_size
283
+ upper = (t + 1) * frame_size - 1
284
+ # Find maximum of each frame and add it to our array
285
+ amp_env.append(np.max(x[lower:upper]))
286
+ amp_env = np.array(amp_env)
287
+ # Find the first index where the amplitude envelope is greater than the threshold
288
+ onset = np.where(amp_env > onset_threshold)[0][0] * frame_size
289
+ offset = (len(amp_env) - 1 - np.where(amp_env[::-1] > offset_threshold)[0][0]) * frame_size
290
+ return onset, offset, amp_env
291
+
292
+
293
+ # from pydub import AudioSegment
294
+ # def convert_flac_to_wav(input_path, output_path):
295
+ # # Load FLAC file using Pydub
296
+ # sound = AudioSegment.from_file(input_path, format="flac")
297
+
298
+ # # Set the parameters for the output WAV file
299
+ # channels = 1 # mono
300
+ # sample_width = 2 # 16-bit
301
+ # frame_rate = 16000
302
+
303
+ # # Convert the input sound to the specified format
304
+ # sound = sound.set_frame_rate(frame_rate)
305
+ # sound = sound.set_channels(channels)
306
+ # sound = sound.set_sample_width(sample_width)
307
+
308
+ # # Save the output WAV file to the specified path
309
+ # sound.export(output_path, format="wav")
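
A usage sketch for the helpers in this module, assuming the repository root is on `PYTHONPATH` and a 16 kHz mono WAV file named `example.wav` exists in the working directory:

```python
import numpy as np

from utils.audio import (get_audio_file_info, load_audio_file,
                         slice_padded_array, write_wav_file)

# Inspect the file, load a 2-second segment, slice it into 32767-sample windows,
# and write the first window back to disk as 16-bit PCM.
fs, n_frames, n_channels = get_audio_file_info("example.wav")
x = load_audio_file("example.wav", seg_start_sec=0., seg_length_sec=2., fs=fs,
                    dtype=np.float32)                    # 1-D array in [-1, 1]
segments = slice_padded_array(x[np.newaxis, :],          # expects a 2-D (1, n_frames) array
                              slice_length=32767, slice_hop=32767, pad=True)
write_wav_file("first_slice.wav", segments[0], samplerate=fs)
```

With `slice_hop` equal to `slice_length` the windows are non-overlapping and the final window is zero-padded to full length by `slice_padded_array`.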