import os

from trainer import Trainer, TrainerArgs

from TTS.tts.configs.shared_configs import BaseDatasetConfig, CharactersConfig
from TTS.tts.configs.vits_config import VitsConfig
from TTS.tts.datasets import load_tts_samples
from TTS.tts.models.vits import Vits, VitsAudioConfig
from TTS.tts.utils.text.tokenizer import TTSTokenizer
from TTS.utils.audio import AudioProcessor

output_path = os.path.dirname(os.path.abspath(__file__))
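
# Dataset: a NeMo-style manifest (typically one JSON record per line carrying
# the audio path and transcript), read by the "nemo" formatter.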
TRAIN_PATH = "/home/aymen/Tamazight-NLP/Speech/media_ipsapps/shi_tls/cut_22kHz_offset/manifests/"

dataset_config = BaseDatasetConfig(
    formatter="nemo", meta_file_train="manifest_processed.json", path=TRAIN_PATH
)
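
# Audio settings; the sample rate must match the 22.05 kHz clips in the dataset.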
audio_config = VitsAudioConfig(
    sample_rate=22050, win_length=1024, hop_length=256, num_mels=80, mel_fmin=0, mel_fmax=None
)
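
# Character set: the Tifinagh letters used for Tachelhit plus basic
# punctuation. The model trains directly on these graphemes.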
character_config = CharactersConfig(
    characters_class="TTS.tts.models.vits.VitsCharacters",
    characters="ⴰⴱⴳⴷⴹⴻⴼⴽⵀⵃⵄⵅⵇⵉⵊⵍⵎⵏⵓⵔⵕⵖⵙⵚⵛⵜⵟⵡⵢⵣⵥⵯ",
    punctuations=" !,.:?",
    pad="<PAD>",
    eos="<EOS>",
    bos="<BOS>",
    blank="<BLNK>",
)
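
# Training configuration. text_cleaner="no_cleaners" passes the Tifinagh text
# through unchanged, and use_phonemes=False keeps the model grapheme-based.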
config = VitsConfig(
    audio=audio_config,
    characters=character_config,
    run_name="vits_shi_male",
    batch_size=16,
    eval_batch_size=4,
    batch_group_size=5,
    num_loader_workers=4,
    num_eval_loader_workers=4,
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1000,
    save_step=5000,
    text_cleaner="no_cleaners",
    use_phonemes=False,
    compute_input_seq_cache=True,
    print_step=25,
    print_eval=True,
    mixed_precision=True,
    output_path=output_path,
    datasets=[dataset_config],
    cudnn_benchmark=False,
    test_sentences=[
        ["ⴰⵣⵓⵍ. ⵎⴰⵏⵣⴰⴽⵉⵏ?"],
        ["ⵡⴰ ⵜⴰⵎⵖⴰⵔⵜ ⵎⴰ ⴷ ⵓⴽⴰⵏ ⵜⵙⴽⵔⵜ?"],
        ["ⴳⵏ! ⴰⴷ ⴰⴽ ⵉⵙⵙⴳⵏ ⵕⴱⴱⵉ ⵉⵜⵜⵓ ⴽ."],
        ["ⴰⵔⵔⴰⵡ ⵏ ⵍⵀⵎⵎ ⵢⵓⴽⵔ ⴰⵖ ⵉⵀⴷⵓⵎⵏ ⵏⵏⵖ!"],
    ],
)
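
# Initialize the audio processor, which handles feature extraction and audio
# I/O for the data loader and the training loggers.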
ap = AudioProcessor.init_from_config(config)
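
# Initialize the tokenizer, which converts text into sequences of token IDs;
# the config comes back updated with the character set defined above.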
tokenizer, config = TTSTokenizer.init_from_config(config)
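
# Load the data samples. Each sample holds the transcript, the audio file
# path, and the speaker name; an eval split is carved out of the training
# manifest according to the config settings.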
train_samples, eval_samples = load_tts_samples(
    dataset_config,
    eval_split=True,
    eval_split_max_size=config.eval_split_max_size,
    eval_split_size=config.eval_split_size,
)
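
# Initialize the model. This is a single-speaker run, so no speaker manager
# is passed.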
model = Vits(config, ap, tokenizer, speaker_manager=None)
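
# Initialize the trainer and start the training loop.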
trainer = Trainer(
    TrainerArgs(),
    config,
    output_path,
    model=model,
    train_samples=train_samples,
    eval_samples=eval_samples,
)
trainer.fit()