Spaces:
Build error
awesome-paulw
committed on
Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
- .gitignore +163 -0
- LICENSE +21 -0
- README.md +2 -8
- config.py +66 -0
- functions/core_functions.py +214 -0
- functions/core_functions1.py +327 -0
- functions/logging_utils.py +56 -0
- functions/slice_utils.py +115 -0
- go-webui.bat +4 -0
- gradio_utils.py +171 -0
- i18n/locale/en_US.json +299 -0
- i18n/locale/es_ES.json +284 -0
- i18n/locale/fr_FR.json +284 -0
- i18n/locale/it_IT.json +276 -0
- i18n/locale/ja_JP.json +283 -0
- i18n/locale/ko_KR.json +285 -0
- i18n/locale/pt_BR.json +93 -0
- i18n/locale/ru_RU.json +135 -0
- i18n/locale/tr_TR.json +135 -0
- i18n/locale/zh_CN.json +287 -0
- i18n/locale/zh_HK.json +135 -0
- i18n/locale/zh_SG.json +135 -0
- i18n/locale/zh_TW.json +135 -0
- old/main.py +659 -0
- old/webui.py +188 -0
- old/xtts_demo.py +973 -0
- openvoice_cli/__init__.py +0 -0
- openvoice_cli/__main__.py +125 -0
- openvoice_cli/api.py +137 -0
- openvoice_cli/attentions.py +465 -0
- openvoice_cli/commons.py +160 -0
- openvoice_cli/downloader.py +41 -0
- openvoice_cli/mel_processing.py +182 -0
- openvoice_cli/models.py +498 -0
- openvoice_cli/modules.py +598 -0
- openvoice_cli/se_extractor.py +151 -0
- openvoice_cli/transforms.py +209 -0
- openvoice_cli/utils.py +193 -0
- requirements.txt +19 -0
- tools/__init__.py +0 -0
- tools/asr/config.py +31 -0
- tools/asr/fasterwhisper_asr.py +110 -0
- tools/asr/funasr_asr.py +76 -0
- tools/asr/models/.gitignore +2 -0
- tools/cmd-denoise.py +29 -0
- tools/denoise-model/.gitignore +2 -0
- tools/i18n/i18n.py +36 -0
- tools/i18n/locale/en_US.json +135 -0
- tools/i18n/locale/es_ES.json +135 -0
- tools/i18n/locale/fr_FR.json +135 -0
.gitignore
ADDED
@@ -0,0 +1,163 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+base_models
+finetune_models
LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 awesome-paulw
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
README.md
CHANGED
@@ -1,12 +1,6 @@
 ---
-title:
-
-colorFrom: gray
-colorTo: red
+title: xtts_awesome
+app_file: webui.py
 sdk: gradio
 sdk_version: 4.22.0
-app_file: app.py
-pinned: false
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
config.py
ADDED
@@ -0,0 +1,66 @@
+import sys,os
+
+import torch
+
+# Specified models for inference
+sovits_path = ""
+gpt_path = ""
+is_half_str = os.environ.get("is_half", "True")
+is_half = True if is_half_str.lower() == 'true' else False
+is_share_str = os.environ.get("is_share","False")
+is_share= True if is_share_str.lower() == 'true' else False
+
+cnhubert_path = "GPT_SoVITS/pretrained_models/chinese-hubert-base"
+bert_path = "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"
+pretrained_sovits_path = "GPT_SoVITS/pretrained_models/s2G488k.pth"
+pretrained_gpt_path = "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"
+
+exp_root = "logs"
+python_exec = sys.executable or "python"
+if torch.cuda.is_available():
+    infer_device = "cuda"
+else:
+    infer_device = "cpu"
+
+webui_port_main = 9874
+webui_port_uvr5 = 9873
+webui_port_infer_tts = 9872
+webui_port_subfix = 9871
+
+api_port = 9880
+
+if infer_device == "cuda":
+    gpu_name = torch.cuda.get_device_name(0)
+    if (
+        ("16" in gpu_name and "V100" not in gpu_name.upper())
+        or "P40" in gpu_name.upper()
+        or "P10" in gpu_name.upper()
+        or "1060" in gpu_name
+        or "1070" in gpu_name
+        or "1080" in gpu_name
+    ):
+        is_half=False
+
+if(infer_device=="cpu"):is_half=False
+
+class Config:
+    def __init__(self):
+        self.sovits_path = sovits_path
+        self.gpt_path = gpt_path
+        self.is_half = is_half
+
+        self.cnhubert_path = cnhubert_path
+        self.bert_path = bert_path
+        self.pretrained_sovits_path = pretrained_sovits_path
+        self.pretrained_gpt_path = pretrained_gpt_path
+
+        self.exp_root = exp_root
+        self.python_exec = python_exec
+        self.infer_device = infer_device
+
+        self.webui_port_main = webui_port_main
+        self.webui_port_uvr5 = webui_port_uvr5
+        self.webui_port_infer_tts = webui_port_infer_tts
+        self.webui_port_subfix = webui_port_subfix
+
+        self.api_port = api_port
functions/core_functions.py
ADDED
@@ -0,0 +1,214 @@
+import os
+import sys
+root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.append(root_dir)
+
+import tempfile
+import logging
+from pathlib import Path
+from datetime import datetime
+from pydub import AudioSegment
+import pysrt
+import torch
+import torchaudio
+from TTS.tts.configs.xtts_config import XttsConfig
+from TTS.tts.models.xtts import Xtts
+from openvoice_cli.downloader import download_checkpoint
+from openvoice_cli.api import ToneColorConverter
+import openvoice_cli.se_extractor as se_extractor
+from functions.logging_utils import setup_logger, read_logs
+
+setup_logger("logs/core_functions.log")
+logger = logging.getLogger(__name__)
+
+def clear_gpu_cache():
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
+
+XTTS_MODEL = None
+def load_model(xtts_checkpoint, xtts_config, xtts_vocab,xtts_speaker):
+    global XTTS_MODEL
+    clear_gpu_cache()
+    if not xtts_checkpoint or not xtts_config or not xtts_vocab:
+        return "You need to run the previous steps or manually set the `XTTS checkpoint path`, `XTTS config path`, and `XTTS vocab path` fields !!"
+    config = XttsConfig()
+    config.load_json(xtts_config)
+    XTTS_MODEL = Xtts.init_from_config(config)
+    print("Loading XTTS model! ")
+    XTTS_MODEL.load_checkpoint(config, checkpoint_path=xtts_checkpoint, vocab_path=xtts_vocab,speaker_file_path=xtts_speaker, use_deepspeed=False)
+    if torch.cuda.is_available():
+        XTTS_MODEL.cuda()
+
+    print("Model Loaded!")
+    return "Model Loaded!"
+
+def run_tts(lang, tts_text, speaker_audio_file, output_file_path, temperature, length_penalty, repetition_penalty, top_k, top_p, sentence_split, use_config):
+    if XTTS_MODEL is None:
+        raise Exception("XTTS_MODEL is not loaded. Please load the model before running TTS.")
+    if not tts_text.strip():
+        raise ValueError("Text for TTS is empty.")
+    if not os.path.exists(speaker_audio_file):
+        raise FileNotFoundError(f"Speaker audio file not found: {speaker_audio_file}")
+
+    gpt_cond_latent, speaker_embedding = XTTS_MODEL.get_conditioning_latents(audio_path=speaker_audio_file, gpt_cond_len=XTTS_MODEL.config.gpt_cond_len, max_ref_length=XTTS_MODEL.config.max_ref_len, sound_norm_refs=XTTS_MODEL.config.sound_norm_refs)
+
+    if use_config:
+        out = XTTS_MODEL.inference(
+            text=tts_text,
+            language=lang,
+            gpt_cond_latent=gpt_cond_latent,
+            speaker_embedding=speaker_embedding,
+            temperature=XTTS_MODEL.config.temperature,
+            length_penalty=XTTS_MODEL.config.length_penalty,
+            repetition_penalty=XTTS_MODEL.config.repetition_penalty,
+            top_k=XTTS_MODEL.config.top_k,
+            top_p=XTTS_MODEL.config.top_p,
+            enable_text_splitting = True
+        )
+    else:
+        out = XTTS_MODEL.inference(
+            text=tts_text,
+            language=lang,
+            gpt_cond_latent=gpt_cond_latent,
+            speaker_embedding=speaker_embedding,
+            temperature=temperature,
+            length_penalty=length_penalty,
+            repetition_penalty=float(repetition_penalty),
+            top_k=top_k,
+            top_p=top_p,
+            enable_text_splitting = sentence_split
+        )
+
+    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
+        out["wav"] = torch.tensor(out["wav"]).unsqueeze(0)
+        out_path = fp.name
+        torchaudio.save(out_path, out["wav"], 24000)
+
+    return "Speech generated !", out_path, speaker_audio_file
+
+
+def process_srt_and_generate_audio(
+    srt_file,
+    lang,
+    speaker_reference_audio,
+    temperature,
+    length_penalty,
+    repetition_penalty,
+    top_k,
+    top_p,
+    sentence_split,
+    use_config ):
+    try:
+        subtitles = pysrt.open(srt_file)
+        audio_files = []
+        output_dir = create_output_dir(parent_dir='/content/drive/MyDrive/Voice Conversion Result')
+
+        for index, subtitle in enumerate(subtitles):
+            audio_filename = f"audio_{index+1:03d}.wav"
+            audio_file_path = os.path.join(output_dir, audio_filename)
+
+            subtitle_text=remove_endperiod(subtitle.text)
+
+            run_tts(lang, subtitle_text, speaker_reference_audio, audio_file_path,
+                    temperature, length_penalty, repetition_penalty, top_k, top_p,
+                    sentence_split, use_config)
+            logger.info(f"Generated audio file: {audio_file_path}")
+            audio_files.append(audio_file_path)
+
+        output_audio_path = merge_audio_with_srt_timing(subtitles, audio_files, output_dir)
+        return output_audio_path
+    except Exception as e:
+        logger.error(f"Error in process_srt_and_generate_audio: {e}")
+        raise
+
+
+def create_output_dir(parent_dir):
+    try:
+        folder_name = datetime.now().strftime("audio_outputs_%Y-%m-%d_%H-%M-%S")
+        output_dir = os.path.join(parent_dir, folder_name)
+
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+            logger.info(f"Created output directory at: {output_dir}")
+
+        return output_dir
+    except Exception as e:
+        logger.error(f"Failed to create output directory: {e}")
+        raise
+
+
+def srt_time_to_ms(srt_time):
+    return (srt_time.hours * 3600 + srt_time.minutes * 60 + srt_time.seconds) * 1000 + srt_time.milliseconds
+
+
+def merge_audio_with_srt_timing(subtitles, audio_files, output_dir):
+    try:
+        combined = AudioSegment.silent(duration=0)
+        last_position_ms = 0
+
+        for subtitle, audio_file in zip(subtitles, audio_files):
+            start_time_ms = srt_time_to_ms(subtitle.start)
+            if last_position_ms < start_time_ms:
+                silence_duration = start_time_ms - last_position_ms
+                combined += AudioSegment.silent(duration=silence_duration)
+                last_position_ms = start_time_ms
+
+            audio = AudioSegment.from_file(audio_file, format="wav")
+
+            combined += audio
+            last_position_ms += len(audio)
+
+        output_path = os.path.join(output_dir, "combined_audio_with_timing.wav")
+        combined.export(output_path, format="wav")
+        logger.info(f"Exported combined audio to: {output_path}")
+
+        return output_path
+    except Exception as e:
+        logger.error(f"Error merging audio files: {e}")
+        raise
+
+
+def remove_endperiod(subtitle):
+    if subtitle.endswith('.'):
+        subtitle = subtitle[:-1]
+    return subtitle
+
+def convert_voice(reference_audio, audio_to_convert):
+
+    device = "cuda:0" if torch.cuda.is_available() else "cpu"
+    base_name, ext = os.path.splitext(os.path.basename(audio_to_convert))
+    new_file_name = base_name + 'convertedvoice' + ext
+    output_path = os.path.join(os.path.dirname(audio_to_convert), new_file_name)
+
+    tune_one(input_file=audio_to_convert, ref_file=reference_audio, output_file=output_path, device=device)
+
+    return output_path
+
+def tune_one(input_file,ref_file,output_file,device):
+    current_dir = os.path.dirname(os.path.realpath(__file__))
+    checkpoints_dir = os.path.join(current_dir, 'checkpoints')
+    ckpt_converter = os.path.join(checkpoints_dir, 'converter')
+
+    if not os.path.exists(ckpt_converter):
+        os.makedirs(ckpt_converter, exist_ok=True)
+        download_checkpoint(ckpt_converter)
+
+    device = device
+
+    tone_color_converter = ToneColorConverter(os.path.join(ckpt_converter, 'config.json'), device=device)
+    tone_color_converter.load_ckpt(os.path.join(ckpt_converter, 'checkpoint.pth'))
+
+    source_se, _ = se_extractor.get_se(input_file, tone_color_converter, vad=True)
+    target_se, _ = se_extractor.get_se(ref_file, tone_color_converter, vad=True)
+
+    output_dir = os.path.dirname(output_file)
+    if output_dir:
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir, exist_ok=True)
+
+    tone_color_converter.convert(
+        audio_src_path=input_file,
+        src_se=source_se,
+        tgt_se=target_se,
+        output_path=output_file,
+    )
functions/core_functions1.py
ADDED
@@ -0,0 +1,327 @@
+import os
+import sys
+# Get the parent directory of the directory containing this file
+root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+# Add the root directory to the system path
+sys.path.append(root_dir)
+
+import tempfile
+import logging
+from pathlib import Path
+from datetime import datetime
+from pydub import AudioSegment
+import pysrt
+import torch
+import torchaudio
+import traceback
+from utils.formatter import format_audio_list, find_latest_best_model
+from utils.gpt_train import train_gpt
+from TTS.tts.configs.xtts_config import XttsConfig
+from TTS.tts.models.xtts import Xtts
+from openvoice_cli.downloader import download_checkpoint
+from openvoice_cli.api import ToneColorConverter
+import openvoice_cli.se_extractor as se_extractor
+from functions.logging_utils import setup_logger, read_logs
+
+# Set up the log handler
+setup_logger("logs/core_functions.log")
+logger = logging.getLogger(__name__)
+
+def clear_gpu_cache():
+    # clear the GPU cache
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
+
+XTTS_MODEL = None
+def load_model(xtts_checkpoint, xtts_config, xtts_vocab,xtts_speaker):
+    global XTTS_MODEL
+    clear_gpu_cache()
+    if not xtts_checkpoint or not xtts_config or not xtts_vocab:
+        return "You need to run the previous steps or manually set the `XTTS checkpoint path`, `XTTS config path`, and `XTTS vocab path` fields !!"
+    config = XttsConfig()
+    config.load_json(xtts_config)
+    XTTS_MODEL = Xtts.init_from_config(config)
+    print("Loading XTTS model! ")
+    XTTS_MODEL.load_checkpoint(config, checkpoint_path=xtts_checkpoint, vocab_path=xtts_vocab,speaker_file_path=xtts_speaker, use_deepspeed=False)
+    if torch.cuda.is_available():
+        XTTS_MODEL.cuda()
+
+    print("Model Loaded!")
+    return "Model Loaded!"
+
+def run_tts(lang, tts_text, speaker_audio_file, output_file_path, temperature, length_penalty, repetition_penalty, top_k, top_p, speed, sentence_split, use_config):
+    if XTTS_MODEL is None:
+        raise Exception("XTTS_MODEL is not loaded. Please load the model before running TTS.")
+    if not tts_text.strip():
+        raise ValueError("Text for TTS is empty.")
+    if not os.path.exists(speaker_audio_file):
+        raise FileNotFoundError(f"Speaker audio file not found: {speaker_audio_file}")
+
+    gpt_cond_latent, speaker_embedding = XTTS_MODEL.get_conditioning_latents(audio_path=speaker_audio_file, gpt_cond_len=XTTS_MODEL.config.gpt_cond_len, max_ref_length=XTTS_MODEL.config.max_ref_len, sound_norm_refs=XTTS_MODEL.config.sound_norm_refs)
+
+    if use_config:
+        out = XTTS_MODEL.inference(
+            text=tts_text,
+            language=lang,
+            gpt_cond_latent=gpt_cond_latent,
+            speaker_embedding=speaker_embedding,
+            temperature=XTTS_MODEL.config.temperature, # Add custom parameters here
+            length_penalty=XTTS_MODEL.config.length_penalty,
+            repetition_penalty=XTTS_MODEL.config.repetition_penalty,
+            top_k=XTTS_MODEL.config.top_k,
+            top_p=XTTS_MODEL.config.top_p,
+            speed=speed,
+            enable_text_splitting = True
+        )
+    else:
+        out = XTTS_MODEL.inference(
+            text=tts_text,
+            language=lang,
+            gpt_cond_latent=gpt_cond_latent,
+            speaker_embedding=speaker_embedding,
+            temperature=temperature, # Add custom parameters here
+            length_penalty=length_penalty,
+            repetition_penalty=float(repetition_penalty),
+            top_k=top_k,
+            top_p=top_p,
+            speed=speed,
+            enable_text_splitting = sentence_split
+        )
+
+    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
+        out["wav"] = torch.tensor(out["wav"]).unsqueeze(0)
+        out_path = fp.name
+        torchaudio.save(out_path, out["wav"], 24000)
+
+    return "Speech generated !", out_path, speaker_audio_file
+
+
+def load_params_tts(out_path,version):
+
+    out_path = Path(out_path)
+
+    # base_model_path = Path.cwd() / "models" / version
+
+    # if not base_model_path.exists():
+    #     return "Base model not found !","","",""
+
+    ready_model_path = out_path / "ready"
+
+    vocab_path = ready_model_path / "vocab.json"
+    config_path = ready_model_path / "config.json"
+    speaker_path = ready_model_path / "speakers_xtts.pth"
+    reference_path = ready_model_path / "reference.wav"
+
+    model_path = ready_model_path / "model.pth"
+
+    if not model_path.exists():
+        model_path = ready_model_path / "unoptimize_model.pth"
+        if not model_path.exists():
+            return "Params for TTS not found", "", "", ""
+
+    return "Params for TTS loaded", model_path, config_path, vocab_path,speaker_path, reference_path
+
+
+def process_srt_and_generate_audio(
+    srt_file,
+    lang,
+    speaker_reference_audio,
+    temperature,
+    length_penalty,
+    repetition_penalty,
+    top_k,
+    top_p,
+    speed,
+    sentence_split,
+    use_config ):
+    try:
+        subtitles = pysrt.open(srt_file)
+        audio_files = []
+        output_dir = create_output_dir(parent_dir='/content/drive/MyDrive/Voice Conversion Result')
+
+        for index, subtitle in enumerate(subtitles):
+            audio_filename = f"audio_{index+1:03d}.wav"
+            audio_file_path = os.path.join(output_dir, audio_filename)
+
+            subtitle_text=remove_endperiod(subtitle.text)
+
+            run_tts(lang, subtitle_text, speaker_reference_audio, audio_file_path,
+                    temperature, length_penalty, repetition_penalty, top_k, top_p,
+                    speed, sentence_split, use_config)
+            logger.info(f"Generated audio file: {audio_file_path}")
+            audio_files.append(audio_file_path)
+
+        output_audio_path = merge_audio_with_srt_timing(subtitles, audio_files, output_dir)
+        return output_audio_path
+    except Exception as e:
+        logger.error(f"Error in process_srt_and_generate_audio: {e}")
+        raise
+
+
+def create_output_dir(parent_dir):
+    try:
+        # Define a folder name based on the current date and time
+        folder_name = datetime.now().strftime("audio_outputs_%Y-%m-%d_%H-%M-%S")
+
+        # Define the parent directory; here it is assumed to live under the Colab root
+        #parent_dir = "/content/drive/MyDrive/Voice Conversion Result"
+
+        # Full folder path
+        output_dir = os.path.join(parent_dir, folder_name)
+
+        # Create the folder
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+            logger.info(f"Created output directory at: {output_dir}")
+
+        return output_dir
+    except Exception as e:
+        logger.error(f"Failed to create output directory: {e}")
+        raise
+
+
+def srt_time_to_ms(srt_time):
+    return (srt_time.hours * 3600 + srt_time.minutes * 60 + srt_time.seconds) * 1000 + srt_time.milliseconds
+
+
+def merge_audio_with_srt_timing(subtitles, audio_files, output_dir):
+    try:
+        combined = AudioSegment.silent(duration=0)
+        last_position_ms = 0
+
+        for subtitle, audio_file in zip(subtitles, audio_files):
+            start_time_ms = srt_time_to_ms(subtitle.start)
+            if last_position_ms < start_time_ms:
+                silence_duration = start_time_ms - last_position_ms
+                combined += AudioSegment.silent(duration=silence_duration)
+                last_position_ms = start_time_ms
+
+            audio = AudioSegment.from_file(audio_file, format="wav")
+
+            combined += audio
+            last_position_ms += len(audio)
+
+        output_path = os.path.join(output_dir, "combined_audio_with_timing.wav")
+        #combined_with_set_frame_rate = combined.set_frame_rate(24000)
+        #combined_with_set_frame_rate.export(output_path, format="wav")
+        combined.export(output_path, format="wav")
+        logger.info(f"Exported combined audio to: {output_path}")
+
+        return output_path
+    except Exception as e:
+        logger.error(f"Error merging audio files: {e}")
+        raise
+
+
+def remove_endperiod(subtitle):
+    """Removes the period (.) at the end of a subtitle.
+    """
+    if subtitle.endswith('.'):
+        subtitle = subtitle[:-1]
+    return subtitle
+
+def convert_voice(reference_audio, audio_to_convert):
+
+    device = "cuda:0" if torch.cuda.is_available() else "cpu"
+    # Define the input and output audio paths
+    #input_audio_path = audio_to_convert
+    base_name, ext = os.path.splitext(os.path.basename(audio_to_convert))
+    new_file_name = base_name + 'convertedvoice' + ext
+    output_path = os.path.join(os.path.dirname(audio_to_convert), new_file_name)
+
+    tune_one(input_file=audio_to_convert, ref_file=reference_audio, output_file=output_path, device=device)
+
+    return output_path
+
+def tune_one(input_file,ref_file,output_file,device):
+    current_dir = os.path.dirname(os.path.realpath(__file__))
+    checkpoints_dir = os.path.join(current_dir, 'checkpoints')
+    ckpt_converter = os.path.join(checkpoints_dir, 'converter')
+
+    if not os.path.exists(ckpt_converter):
+        os.makedirs(ckpt_converter, exist_ok=True)
+        download_checkpoint(ckpt_converter)
+
+    device = device
+
+    tone_color_converter = ToneColorConverter(os.path.join(ckpt_converter, 'config.json'), device=device)
+    tone_color_converter.load_ckpt(os.path.join(ckpt_converter, 'checkpoint.pth'))
+
+    source_se, _ = se_extractor.get_se(input_file, tone_color_converter, vad=True)
+    target_se, _ = se_extractor.get_se(ref_file, tone_color_converter, vad=True)
+
+    # Ensure output directory exists and is writable
+    output_dir = os.path.dirname(output_file)
+    if output_dir:
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir, exist_ok=True)
+
+    # Run the tone color converter
+    tone_color_converter.convert(
+        audio_src_path=input_file,
+        src_se=source_se,
+        tgt_se=target_se,
+        output_path=output_file,
+    )
+
+
+'''
+def tune_batch(input_dir, ref_file, output_dir=None, device='cpu', output_format='.wav'):
+    current_dir = os.path.dirname(os.path.realpath(__file__))
+    checkpoints_dir = os.path.join(current_dir, 'checkpoints')
+    ckpt_converter = os.path.join(checkpoints_dir, 'converter')
+
+    if not os.path.exists(ckpt_converter):
+        os.makedirs(ckpt_converter, exist_ok=True)
+        download_checkpoint(ckpt_converter)
+
+    tone_color_converter = ToneColorConverter(os.path.join(ckpt_converter, 'config.json'), device=device)
+    tone_color_converter.load_ckpt(os.path.join(ckpt_converter, 'checkpoint.pth'))
+
+    target_se, _ = se_extractor.get_se(ref_file, tone_color_converter, vad=True)
+
+    # Use default output directory 'out' if not provided
+    if output_dir is None:
+        output_dir = os.path.join(current_dir, 'out')
+    os.makedirs(output_dir, exist_ok=True)
+
+    # Check for any audio files in the input directory (wav, mp3, flac) using glob
+    audio_extensions = ('*.wav', '*.mp3', '*.flac')
+    audio_files = []
+    for extension in audio_extensions:
+        audio_files.extend(glob.glob(os.path.join(input_dir, extension)))
+
+    for audio_file in tqdm(audio_files,"Tune file",len(audio_files)):
+        # Extract source SE from audio file
+        source_se, _ = se_extractor.get_se(audio_file, tone_color_converter, vad=True)
+
+        # Run the tone color converter
+        filename_without_extension = os.path.splitext(os.path.basename(audio_file))[0]
+        output_filename = f"{filename_without_extension}_tuned{output_format}"
+        output_file = os.path.join(output_dir, output_filename)
+
+        tone_color_converter.convert(
+            audio_src_path=audio_file,
+            src_se=source_se,
+            tgt_se=target_se,
+            output_path=output_file,
+        )
+        print(f"Converted {audio_file} to {output_file}")
+
+    return output_dir
+
+def main_single(args):
+    tune_one(input_file=args.input, ref_file=args.ref, output_file=args.output, device=args.device)
+
+def main_batch(args):
+    output_dir = tune_batch(
+        input_dir=args.input_dir,
+        ref_file=args.ref_file,
+        output_dir=args.output_dir,
+        device=args.device,
+        output_format=args.output_format
+    )
+    print(f"Batch processing complete. Converted files are saved in {output_dir}")
+'''
+
+
functions/logging_utils.py
ADDED
@@ -0,0 +1,56 @@
+import sys
+import logging
+from pathlib import Path
+
+import sys
+import logging
+from pathlib import Path
+
+# Log redirection class
+class Logger:
+    def __init__(self, filename="log.out"):
+        self.log_file = filename
+        self.terminal = sys.stdout
+        self.log = open(self.log_file, "w")
+
+    def write(self, message):
+        self.terminal.write(message)
+        self.log.write(message)
+
+    def flush(self):
+        self.terminal.flush()
+        self.log.flush()
+
+    def isatty(self):
+        return False
+
+# Set up the log handler
+def setup_logger(log_file_path):
+    # Create the log file
+    log_file = Path(log_file_path)
+    log_file.parent.mkdir(parents=True, exist_ok=True)
+
+    # Redirect stdout and stderr to the log file
+    sys.stdout = Logger(str(log_file))
+    sys.stderr = sys.stdout
+
+    # Configure the log format
+    logging.basicConfig(
+        level=logging.INFO,
+        format="%(asctime)s [%(levelname)s] %(message)s",
+        handlers=[
+            logging.StreamHandler(sys.stdout)
+        ]
+    )
+
+def read_logs(log_file_path):
+    sys.stdout.flush()
+    with open(log_file_path, "r") as f:
+        return f.read()
+
+# Clear logs
+def remove_log_file(file_path):
+    log_file = Path(file_path)
+
+    if log_file.exists() and log_file.is_file():
+        log_file.unlink()
functions/slice_utils.py
ADDED
@@ -0,0 +1,115 @@
+import os
+import shutil
+import sys
+import warnings
+import platform
+import psutil
+import torch
+import signal
+import site
+import traceback
+from subprocess import Popen
+from config import python_exec
+
+now_dir = os.getcwd()
+sys.path.insert(0, now_dir)
+warnings.filterwarnings("ignore")
+torch.manual_seed(233333)
+tmp = os.path.join(now_dir, "TEMP")
+os.makedirs(tmp, exist_ok=True)
+os.environ["TEMP"] = tmp
+if(os.path.exists(tmp)):
+    for name in os.listdir(tmp):
+        if(name=="jieba.cache"):continue
+        path="%s/%s"%(tmp,name)
+        delete=os.remove if os.path.isfile(path) else shutil.rmtree
+        try:
+            delete(path)
+        except Exception as e:
+            print(str(e))
+            pass
+import site
+site_packages_roots = []
+for path in site.getsitepackages():
+    if "packages" in path:
+        site_packages_roots.append(path)
+if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages" % now_dir]
+#os.environ["OPENBLAS_NUM_THREADS"] = "4"
+os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
+os.environ["all_proxy"] = ""
+for site_packages_root in site_packages_roots:
+    if os.path.exists(site_packages_root):
+        try:
+            with open("%s/users.pth" % (site_packages_root), "w") as f:
+                f.write(
+                    "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5"
+                    % (now_dir, now_dir, now_dir, now_dir, now_dir)
+                )
+            break
+        except PermissionError:
+            pass
+
+def kill_proc_tree(pid, including_parent=True):
+    try:
+        parent = psutil.Process(pid)
+    except psutil.NoSuchProcess:
+        # Process already terminated
+        return
+
+    children = parent.children(recursive=True)
+    for child in children:
+        try:
+            os.kill(child.pid, signal.SIGTERM)  # or signal.SIGKILL
+        except OSError:
+            pass
+    if including_parent:
+        try:
+            os.kill(parent.pid, signal.SIGTERM)  # or signal.SIGKILL
+        except OSError:
+            pass
+
+system=platform.system()
+def kill_process(pid):
+    if(system=="Windows"):
+        cmd = "taskkill /t /f /pid %s" % pid
+        os.system(cmd)
+    else:
+        kill_proc_tree(pid)
+
+ps_slice=[]
+def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_parts):
+    global ps_slice
+    inp = my_utils.clean_path(inp)
+    opt_root = my_utils.clean_path(opt_root)
+    if(os.path.exists(inp)==False):
+        yield "输入路径不存在",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
+        return
+    if os.path.isfile(inp):n_parts=1
+    elif os.path.isdir(inp):pass
+    else:
+        yield "输入路径存在但既不是文件也不是文件夹",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
+        return
+    if (ps_slice == []):
+        for i_part in range(n_parts):
+            cmd = '"%s" tools/slice_audio.py "%s" "%s" %s %s %s %s %s %s %s %s %s''' % (python_exec,inp, opt_root, threshold, min_length, min_interval, hop_size, max_sil_kept, _max, alpha, i_part, n_parts)
+            print(cmd)
+            p = Popen(cmd, shell=True)
+            ps_slice.append(p)
+        yield "切割执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+        for p in ps_slice:
+            p.wait()
+        ps_slice=[]
+        yield "切割结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
+    else:
+        yield "已有正在进行的切割任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+
+def close_slice():
+    global ps_slice
+    if (ps_slice != []):
+        for p_slice in ps_slice:
+            try:
+                kill_process(p_slice.pid)
+            except:
+                traceback.print_exc()
+        ps_slice=[]
+    return "已终止所有切割进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
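Note: open_slice above is a generator (it yields status and button-visibility updates for Gradio) and spawns tools/slice_audio.py subprocesses. A minimal consumption sketch (not part of the commit), assuming the referenced my_utils module and tools/slice_audio.py script exist in the full repo; the numeric arguments are placeholder slicer settings.

    from functions.slice_utils import open_slice, close_slice

    for status, btn_open, btn_stop in open_slice(
            "input_audio", "output/slices", -34, 4000, 300, 10, 500, 0.9, 0.25, n_parts=1):
        print(status)            # progress message while slicing runs, final message when done
    print(close_slice()[0])      # force-stops any slicing subprocesses that are still alive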
go-webui.bat
ADDED
@@ -0,0 +1,4 @@
+@echo off
+set PYTHONIOENCODING=utf-8
+"C:\Users\paulw\AppData\Local\Programs\Python\Python38\python.exe" main.py
+pause
gradio_utils.py
ADDED
@@ -0,0 +1,171 @@
+
+import os
+import shutil
+import torch
+import traceback
+from pathlib import Path
+from multiprocessing import cpu_count
+from functions.core_functions1 import clear_gpu_cache
+from functions.logging_utils import remove_log_file
+from functions.slice_utils import open_slice, close_slice
+from utils.formatter import format_audio_list
+from utils.gpt_train import train_gpt
+
+def get_audio_files_from_folder(folder_path):
+    audio_files = []
+    for root, dirs, files in os.walk(folder_path):
+        for file in files:
+            if file.endswith(".wav") or file.endswith(".mp3") or file.endswith(".flac") or file.endswith(".m4a") or file.endswith(".webm"):
+                audio_files.append(os.path.join(root, file))
+    return audio_files
+
+def preprocess_dataset(audio_path, audio_folder, language, whisper_model, out_path, train_csv, eval_csv, progress):
+    out_path = os.path.join(out_path, "dataset")
+    os.makedirs(out_path, exist_ok=True)
+
+    if audio_path is not None and audio_path != []:
+        try:
+            train_meta, eval_meta, audio_total_size = format_audio_list(audio_path, whisper_model=whisper_model, target_language=language, out_path=out_path, gradio_progress=progress)
+        except:
+            traceback.print_exc()
+            error = traceback.format_exc()
+            return f"The data processing was interrupted due to an error! Please check the console to verify the full error message! \n Error summary: {error}", "", ""
+    elif audio_folder is not None:
+        audio_files = get_audio_files_from_folder(audio_folder)
+        try:
+            train_meta, eval_meta, audio_total_size = format_audio_list(audio_files, whisper_model=whisper_model, target_language=language, out_path=out_path, gradio_progress=progress)
+        except:
+            traceback.print_exc()
+            error = traceback.format_exc()
+            return f"The data processing was interrupted due to an error! Please check the console to verify the full error message! \n Error summary: {error}", "", ""
+    else:
+        return "You should provide either audio files or a folder containing audio files!", "", ""
+
+    if audio_total_size < 120:
+        message = "The sum of the duration of the audios that you provided should be at least 2 minutes!"
+        print(message)
+        return message, "", ""
+
+    print("Dataset Processed!")
+    return "Dataset Processed!", train_meta, eval_meta
+
+def train_model(custom_model, version, language, train_csv, eval_csv, num_epochs, batch_size, grad_accum, output_path, max_audio_length):
+    run_dir = Path(output_path) / "run"
+
+    if run_dir.exists():
+        os.remove(run_dir)
+
+    if not train_csv or not eval_csv:
+        return "You need to run the data processing step or manually set `Train CSV` and `Eval CSV` fields!", "", "", "", "", ""
+    try:
+        max_audio_length = int(max_audio_length * 22050)
+        speaker_xtts_path, config_path, original_xtts_checkpoint, vocab_file, exp_path, speaker_wav = train_gpt(custom_model, version, language, num_epochs, batch_size, grad_accum, train_csv, eval_csv, output_path=output_path, max_audio_length=max_audio_length)
+    except:
+        traceback.print_exc()
+        error = traceback.format_exc()
+        return f"The training was interrupted due to an error! Please check the console to check the full error message! \n Error summary: {error}", "", "", "", "", ""
+
+    ready_dir = Path(output_path) / "ready"
+
+    ft_xtts_checkpoint = os.path.join(exp_path, "best_model.pth")
+
+    shutil.copy(ft_xtts_checkpoint, ready_dir / "unoptimize_model.pth")
+
+    ft_xtts_checkpoint = os.path.join(ready_dir, "unoptimize_model.pth")
+
+    speaker_reference_path = Path(speaker_wav)
+    speaker_reference_new_path = ready_dir / "reference.wav"
+    shutil.copy(speaker_reference_path, speaker_reference_new_path)
+
+    print("Model training done!")
+    return "Model training done!", config_path, vocab_file, ft_xtts_checkpoint, speaker_xtts_path, speaker_reference_new_path
+
+def optimize_model(out_path, clear_train_data):
+    out_path = Path(out_path)
+    ready_dir = out_path / "ready"
+    run_dir = out_path / "run"
+    dataset_dir = out_path / "dataset"
+
+    if clear_train_data in {"run", "all"} and run_dir.exists():
+        try:
+            shutil.rmtree(run_dir)
+        except PermissionError as e:
+            print(f"An error occurred while deleting {run_dir}: {e}")
+
+    if clear_train_data in {"dataset", "all"} and dataset_dir.exists():
+        try:
+            shutil.rmtree(dataset_dir)
+        except PermissionError as e:
+            print(f"An error occurred while deleting {dataset_dir}: {e}")
+
+    model_path = ready_dir / "unoptimize_model.pth"
+
+    if not model_path.is_file():
+        return "Unoptimized model not found in ready folder", ""
+
+    checkpoint = torch.load(model_path, map_location=torch.device("cpu"))
+    del checkpoint["optimizer"]
+
+    for key in list(checkpoint["model"].keys()):
+        if "dvae" in key:
+            del checkpoint["model"][key]
+
+    os.remove(model_path)
+
+    optimized_model_file_name = "model.pth"
+    optimized_model = ready_dir / optimized_model_file_name
+
+    torch.save(checkpoint, optimized_model)
+    ft_xtts_checkpoint = str(optimized_model)
+
+    return f"Model optimized and saved at {ft_xtts_checkpoint}!", ft_xtts_checkpoint
+
+def load_params(out_path):
+    path_output = Path(out_path)
+
+    dataset_path = path_output / "dataset"
+
+    if not dataset_path.exists():
+        return "The output folder does not exist!", "", "", ""
+
+    eval_train = dataset_path / "metadata_train.csv"
+    eval_csv = dataset_path / "metadata_eval.csv"
+
+    lang_file_path = dataset_path / "lang.txt"
+
+    current_language = None
+    if os.path.exists(lang_file_path):
+        with open(lang_file_path, 'r', encoding='utf-8') as existing_lang_file:
+            current_language = existing_lang_file.read().strip()
+
+    print(current_language)
+    return "The data has been updated", eval_train, eval_csv, current_language
+
+def load_params_tts(out_path, version):
+    path_output = Path(out_path)
+
+    ready_dir = path_output / "ready"
+
+    xtts_config_path = ready_dir / "config.json"
+    xtts_vocab_path = ready_dir / "vocab.json"
+    xtts_checkpoint_path = ready_dir / "model.pth"
+    xtts_speaker_path = ready_dir / "speaker.pth"
+    speaker_reference_path = ready_dir / "reference.wav"
+
+    missing_files = []
+    if not xtts_config_path.exists():
+        missing_files.append(str(xtts_config_path))
+    if not xtts_vocab_path.exists():
+        missing_files.append(str(xtts_vocab_path))
+    if not xtts_checkpoint_path.exists():
+        missing_files.append(str(xtts_checkpoint_path))
+    if not xtts_speaker_path.exists():
+        missing_files.append(str(xtts_speaker_path))
+    if not speaker_reference_path.exists():
+        missing_files.append(str(speaker_reference_path))
+
+    if missing_files:
+        return f"The following files are missing from the ready folder: {', '.join(missing_files)}", "", "", "", "", ""
+
+    print("Loaded parameters for TTS.")
+    return "Loaded parameters for TTS.", str(xtts_checkpoint_path), str(xtts_config_path), str(xtts_vocab_path), str(xtts_speaker_path), str(speaker_reference_path)
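Note: a minimal sketch (not part of the commit) of the fine-tuning pipeline order these helpers imply; every argument value below is a placeholder, and the Whisper model name and version string are assumptions.

    from gradio_utils import preprocess_dataset, train_model, optimize_model, load_params_tts

    # 1) transcribe and format the reference audio into train/eval CSVs
    msg, train_csv, eval_csv = preprocess_dataset(
        ["speaker.wav"], None, "en", "large-v2", "finetune_models", "", "", progress=None)
    # 2) fine-tune the GPT part of XTTS on that dataset
    msg, config_path, vocab, ckpt, speaker, ref = train_model(
        "", "v2.0.2", "en", train_csv, eval_csv,
        num_epochs=6, batch_size=2, grad_accum=1,
        output_path="finetune_models", max_audio_length=11)
    # 3) strip optimizer/DVAE weights to shrink the checkpoint, then reload the ready folder
    msg, optimized_ckpt = optimize_model("finetune_models", clear_train_data="none")
    msg, ckpt, cfg, vocab, speaker, ref = load_params_tts("finetune_models", "v2.0.2")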
i18n/locale/en_US.json
ADDED
@@ -0,0 +1,299 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"很遗憾您这没有能用的显卡来支持您训练": "Unfortunately, there is no compatible GPU available to support your training.",
|
3 |
+
"UVR5已开启": "UVR5 opened ",
|
4 |
+
"UVR5已关闭": "UVR5 closed",
|
5 |
+
"输入文件夹路径": "Input folder path",
|
6 |
+
"输出文件夹路径": "Output folder path",
|
7 |
+
"ASR 模型": "ASR model",
|
8 |
+
"ASR 模型尺寸": "ASR model size",
|
9 |
+
"ASR 语言设置": "ASR language",
|
10 |
+
"模型切换": "Model switch",
|
11 |
+
"是否开启dpo训练选项(实验性)": "Enable DPO training (experimental feature)",
|
12 |
+
"开启无参考文本模式。不填参考文本亦相当于开启。": "Enable no reference mode. If you don't fill 'Text for reference audio', no reference mode will be enabled.",
|
13 |
+
"使用无参考文本模式时建议使用微调的GPT": "Please use your trained GPT model if you don't use reference audio.",
|
14 |
+
"后续将支持转音素、手工修改音素、语音合成分步执行。": " Step-to-step phoneme transformation and modification coming soon!",
|
15 |
+
"gpt采样参数(无参考文本时不要太低):": "GPT parameters:",
|
16 |
+
"按标点符号切": "Slice by every punct",
|
17 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "This software is open source under the MIT license. The author does not have any control over the software. Users who use the software and distribute the sounds exported by the software are solely responsible. <br>If you do not agree with this clause, you cannot use or reference any codes and files within the software package. See the root directory <b>Agreement-LICENSE</b> for details.",
|
18 |
+
"0-前置数据集获取工具": "0-Fetch dataset",
|
19 |
+
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 webui (for vocal separation, deecho, dereverb and denoise)",
|
20 |
+
"是否开启UVR5-WebUI": "Open UVR5-WebUI",
|
21 |
+
"UVR5进程输出信息": "UVR5 process output log",
|
22 |
+
"0b-语音切分工具": "0b-Audio slicer",
|
23 |
+
".list标注文件的路径": ".list annotation file path",
|
24 |
+
"GPT模型列表": "GPT weight list",
|
25 |
+
"SoVITS模型列表": "SoVITS weight list",
|
26 |
+
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。": "Fill in the directory of segmented audio. The complete path of the read audio file is equal to the directory concatenated with the waveform's corresponding filename from the list file (not the full path).",
|
27 |
+
"音频自动切分输入路径,可文件可文件夹": "Audio slicer input (file or folder)",
|
28 |
+
"切分后的子音频的输出根目录": "Audio slicer output folder",
|
29 |
+
"怎么切": "How to slice the sentence",
|
30 |
+
"不切": "No slice",
|
31 |
+
"凑四句一切": "Slice once every 4 sentences",
|
32 |
+
"按英文句号.切": "Slice by English punct",
|
33 |
+
"threshold:音量小于这个值视作静音的备选切割点": "Noise gate threshold (loudness below this value will be treated as noise",
|
34 |
+
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "Minimum length",
|
35 |
+
"min_interval:最短切割间隔": "Minumum interval for audio cutting",
|
36 |
+
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: FO hop size, the smaller the value, the higher the accuracy)",
|
37 |
+
"max_sil_kept:切完后静音最多留多长": "Maximum length for silence to be kept",
|
38 |
+
"开启语音切割": "Start audio slicer",
|
39 |
+
"终止语音切割": "Stop audio cutting",
|
40 |
+
"max:归一化后最大值多少": "Loudness multiplier after normalized",
|
41 |
+
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix: proportion of normalized audio merged into dataset",
|
42 |
+
"切割使用的进程数": "CPU threads used for audio slicing",
|
43 |
+
"语音切割进程输出信息": "Audio slicer output log",
|
44 |
+
"0c-中文批量离线ASR工具": "0c-Chinese ASR tool",
|
45 |
+
"开启离线批量ASR": "Start batch ASR",
|
46 |
+
"终止ASR进程": "Stop ASR task",
|
47 |
+
"批量ASR(中文only)输入文件夹路径": "Batch ASR (Chinese only) input folder",
|
48 |
+
"ASR进程输出信息": "ASR output log",
|
49 |
+
"0d-语音文本校对标注工具": "0d-Speech to text proofreading tool",
|
50 |
+
"是否开启打标WebUI": "Open labelling WebUI",
|
51 |
+
"打标数据标注文件路径": "path to proofreading text file",
|
52 |
+
"打标工具进程输出信息": "Proofreading tool output log",
|
53 |
+
"1-GPT-SoVITS-TTS": "1-GPT-SOVITS-TTS",
|
54 |
+
"*实验/模型名": "*Experiment/model name",
|
55 |
+
"显卡信息": "GPU Information",
|
56 |
+
"预训练的SoVITS-G模型路径": "Pretrained SoVITS-G model path",
|
57 |
+
"预训练的SoVITS-D模型路径": "Pretrained SoVITS-D model path",
|
58 |
+
"预训练的GPT模型路径": "Pretrained GPT model path",
|
59 |
+
"1A-训练集格式化工具": "1A-Dataset formatting",
|
60 |
+
"输出logs/实验名目录下应有23456开头的文件和文件夹": "output folder (logs/{experiment name}) should have files and folders starts with 23456.",
|
61 |
+
"*文本标注文件": "*Text labelling file",
|
62 |
+
"*训练集音频文件目录": "*Audio dataset folder",
|
63 |
+
"训练集音频文件目录 拼接 list文件里波形对应的文件名。": "Training the file name corresponding to the waveform of the waveform in the List file of the audio file",
|
64 |
+
"1Aa-文本内容": "1Aa-Text",
|
65 |
+
"GPU卡号以-分割,每个卡号一个进程": "GPU number is separated by -, each GPU will run one process ",
|
66 |
+
"预训练的中文BERT模型路径": " Pretrained BERT model path",
|
67 |
+
"开启文本获取": "Start speech-to-text",
|
68 |
+
"终止文本获取进程": "Stop speech-to-text",
|
69 |
+
"文本进程输出信息": "Text processing output",
|
70 |
+
"1Ab-SSL自监督特征提取": "1Ab-SSL self-supervised feature extraction",
|
71 |
+
"预训练的SSL模型路径": "Pretrained SSL model path",
|
72 |
+
"开启SSL提取": "Start SSL extracting",
|
73 |
+
"终止SSL提取进程": "Stop SSL extraction",
|
74 |
+
"SSL进程输出信息": "SSL output log",
|
75 |
+
"1Ac-语义token提取": "1Ac-semantics token extraction",
|
76 |
+
"开启语义token提取": "Start semantics token extraction",
|
77 |
+
"终止语义token提取进程": "Stop semantics token extraction",
|
78 |
+
"语义token提取进程输出信息": "Sematics token extraction output log",
|
79 |
+
"1Aabc-训练集格式化一键三连": "1Aabc-One-click formatting",
|
80 |
+
"开启一键三连": "Start one-click formatting",
|
81 |
+
"终止一键三连": "Stop one-click formatting",
|
82 |
+
"一键三连进程输出信息": "One-click formatting output",
|
83 |
+
"1B-微调训练": "1B-Fine-tuned training",
|
84 |
+
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS training. The model is located in SoVITS_weights.",
|
85 |
+
"每张显卡的batch_size": "Batch size per GPU:",
|
86 |
+
"总训练轮数total_epoch,不建议太高": "Total epochs, do not increase to a value that is too high",
|
87 |
+
"文本模块学习率权重": "Text model learning rate weighting",
|
88 |
+
"保存频率save_every_epoch": "Save frequency (save_every_epoch):",
|
89 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "Save only the latest '.ckpt' file to save disk space:",
|
90 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Save a small final model to the 'weights' folder at each save point:",
|
91 |
+
"开启SoVITS训练": "Start SoVITS training",
|
92 |
+
"终止SoVITS训练": "Stop SoVITS training",
|
93 |
+
"SoVITS训练进程输出信息": "SoVITS training output log",
|
94 |
+
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT training. The model is located in GPT_weights.",
|
95 |
+
"总训练轮数total_epoch": "Total training epochs (total_epoch):",
|
96 |
+
"开启GPT训练": "Start GPT training",
|
97 |
+
"终止GPT训练": "Stop GPT training",
|
98 |
+
"GPT训练进程输出信息": "GPT training output log",
|
99 |
+
"1C-推理": "1C-inference",
|
100 |
+
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Choose the models from SoVITS_weights and GPT_weights. The default one is a pretrain, so you can experience zero shot TTS.",
|
101 |
+
"*GPT模型列表": "*GPT models list",
|
102 |
+
"*SoVITS模型列表": "*SoVITS models list",
|
103 |
+
"GPU卡号,只能填1个整数": "GPU number, can only input ONE integer",
|
104 |
+
"刷新模型路径": "refreshing model paths",
|
105 |
+
"是否开启TTS推理WebUI": "Open TTS inference WEBUI",
|
106 |
+
"TTS推理WebUI进程输出信息": "TTS inference webui output log",
|
107 |
+
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-Voice Changer",
|
108 |
+
"施工中,请静候佳音": "In construction, please wait",
|
109 |
+
"参考音频在3~10秒范围外,请更换!": "Reference audio is outside the 3-10 second range, please choose another one!",
|
110 |
+
"请上传3~10秒内参考音频,超过会报错!": "Please upload a reference audio within the 3-10 second range; if it exceeds this duration, it will raise errors.",
|
111 |
+
"TTS推理进程已开启": "TTS inference process is opened",
|
112 |
+
"TTS推理进程已关闭": "TTS inference process closed",
|
113 |
+
"打标工具WebUI已开启": "proofreading tool webui is opened",
|
114 |
+
"打标工具WebUI已关闭": "proofreading tool webui is closed",
|
115 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "This software is under MIT licence. The author does not have any control for this software. Users are solely reponsible for all voices thats being converted and/or distributed. If you disagree with this Terms and Conditions, you cannot use or cite any files or code in this file. Please check LICENSE. for more info.",
|
116 |
+
"*请上传并填写参考信息": "*Please upload and fill reference information",
|
117 |
+
"*请填写需要合成的目标文本。中英混合选中文,日英混合选日文,中日混合暂不支持,非目标语言文本自动遗弃。": "*Please fill the text that needs inference. Select Chinese for mixed Chinese and English text, choose Japanese for mixed Japanese and English text. Mixed Chinese and Japanese is currently not supported; non-target language text will be automatically discarded.",
|
118 |
+
"ASR任务开启:%s": "ASR training started: %s",
|
119 |
+
"GPT训练完成": "Finished GPT training",
|
120 |
+
"GPT训练开始:%s": "GPT training started: %s",
|
121 |
+
"SSL提取进程执行中": "SSL extracting",
|
122 |
+
"SSL提取进程结束": "SSL extraction finished",
|
123 |
+
"SoVITS训练完成": "SoVITS training finished",
|
124 |
+
"SoVITS训练开始:%s": "SoVITS training started:%s",
|
125 |
+
"一键三连中途报错": "An error has occured during One-click formatting",
|
126 |
+
"一键三连进程结束": "Finished one-click formatting",
|
127 |
+
"中文": "Chinese",
|
128 |
+
"凑50字一切": "Cut per 50 characters",
|
129 |
+
"凑五句一切": "Cut per 5 sentences",
|
130 |
+
"切分后文本": "Text after sliced",
|
131 |
+
"切割执行中": "Slicing audio",
|
132 |
+
"切割结束": "finished audio slicing",
|
133 |
+
"参考音频的文本": "Text for reference audio",
|
134 |
+
"参考音频的语种": "Language for reference audio",
|
135 |
+
"合成语音": "Start inference",
|
136 |
+
"后续将支持混合语种编码文本输入。": "Mixed languages input will be supported soon.",
|
137 |
+
"已有正在进行的ASR任务,需先终止才能开启下一次任务": " An ASR task is already in progress, please stop before starting the next task",
|
138 |
+
"已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "A GPT training task is already in progress, please stop before starting the next task",
|
139 |
+
"已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "A SSL extraction task is already in progress, please stop before starting the next task",
|
140 |
+
"已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "A SoVITS training task is already in progress, please stop before starting the next task",
|
141 |
+
"已有正在进行的一键三连任务,需先终止才能开启下一次任务": "An ASR task is already in progress, please stop before starting the next task",
|
142 |
+
"已有正在进行的切割任务,需先终止才能开启下一次任务": "An audio slicing task is already in progress, please stop before starting the next task",
|
143 |
+
"已有正在进行的文本任务,需先终止才能开启下一次任务": "A TTS proofreading task is already in progress, please stop before starting the next task",
|
144 |
+
"已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "A semantics token extraction task is already in progress, please stop before starting the next task",
|
145 |
+
"已终止ASR进程": "ASR task has been stopped",
|
146 |
+
"已终止GPT训练": "GPT training has been stopped",
|
147 |
+
"已终止SoVITS训练": "SoVITS training has been stopped",
|
148 |
+
"已终止所有1a进程": "All 1a tasks has been stopped",
|
149 |
+
"已终止所有1b进程": "All 1b tasks has been stopped",
|
150 |
+
"已终止所有一键三连进程": "All one-clicking formatting tasks has been stopped",
|
151 |
+
"已终止所有切割进程": "All audio slicing tasks has been stopped",
|
152 |
+
"已终止所有语义token进程": "All semantics token tasks has been stopped",
|
153 |
+
"按中文句号。切": "Slice by Chinese punct",
|
154 |
+
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Text slicer tool, since there will be issues when infering long texts, so it is advised to cut first. When infering, it will infer respectively then combined together.",
|
155 |
+
"文本进程执行中": "Text processing",
|
156 |
+
"文本进程结束": "Finished text processing",
|
157 |
+
"日文": "Japanese",
|
158 |
+
"英文": "English",
|
159 |
+
"语义token提取进程执行中": "Semantics token extracting",
|
160 |
+
"语义token提取进程结束": "Finished semantics token extraction",
|
161 |
+
"请上传参考音频": "Please upload reference audio",
|
162 |
+
"输入路径不存在": "No input file or directory",
|
163 |
+
"输入路径存在但既不是文件也不是文件夹": "Input directory exists, but it is not a file or a folder",
|
164 |
+
"输出的语音": "Inference Result",
|
165 |
+
"进度:1a-done": "Progress:1a-done",
|
166 |
+
"进度:1a-done, 1b-ing": "Progress:1a-done, 1b-ing",
|
167 |
+
"进度:1a-ing": "Progress:1a-ing",
|
168 |
+
"进度:1a1b-done": "Progress:1a1b-done",
|
169 |
+
"进度:1a1b-done, 1cing": "Progress:1a1b-done, 1cing",
|
170 |
+
"进度:all-done": "Progress:all-done",
|
171 |
+
"需要合成的切分前文本": "Inference text that needs to be sliced",
|
172 |
+
"需要合成的文本": "Inference text",
|
173 |
+
"需要合成的语种": "Inference text language",
|
174 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.",
|
175 |
+
"A模型权重": "Weight (w) for Model A:",
|
176 |
+
"A模型路径": "Path to Model A:",
|
177 |
+
"B模型路径": "Path to Model B:",
|
178 |
+
"E:\\语音音频+标注\\米津玄师\\src": "C:\\Users\\Desktop\\src",
|
179 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:",
|
180 |
+
"Index Rate": "Index Rate",
|
181 |
+
"Onnx导出": "Export Onnx",
|
182 |
+
"Onnx输出路径": "Onnx Export Path:",
|
183 |
+
"RVC模型路径": "RVC Model Path:",
|
184 |
+
"ckpt处理": "ckpt Processing",
|
185 |
+
"harvest进程数": "Number of CPU processes used for harvest pitch algorithm",
|
186 |
+
"index文件路径不可包含中文": "index文件路径不可包含中文",
|
187 |
+
"pth文件路径不可包含中文": "pth文件路径不可包含中文",
|
188 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Enter the GPU index(es) separated by '-', e.g., 0-0-1 to use 2 processes in GPU0 and 1 process in GPU1",
|
189 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Step 1: Fill in the experimental configuration. Experimental data is stored in the 'logs' folder, with each experiment having a separate folder. Manually enter the experiment name path, which contains the experimental configuration, logs, and trained model files.",
|
190 |
+
"step1:正在处理数据": "Step 1: Processing data",
|
191 |
+
"step2:正在提取音高&正在提取特征": "step2:Pitch extraction & feature extraction",
|
192 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Step 2a: Automatically traverse all files in the training folder that can be decoded into audio and perform slice normalization. Generates 2 wav folders in the experiment directory. Currently, only single-singer/speaker training is supported.",
|
193 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Step 2b: Use CPU to extract pitch (if the model has pitch), use GPU to extract features (select GPU index):",
|
194 |
+
"step3: 填写训练设置, 开始训练模型和索引": "Step 3: Fill in the training settings and start training the model and index",
|
195 |
+
"step3a:正在训练模型": "Step 3a: Model training started",
|
196 |
+
"一键训练": "One-click training",
|
197 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "Multiple audio files can also be imported. If a folder path exists, this input is ignored.",
|
198 |
+
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br>\u2003\u2003(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Batch processing for vocal accompaniment separation using the UVR5 model.<br>Example of a valid folder path format: D:\\path\\to\\input\\folder (copy it from the file manager address bar).<br>The model is divided into three categories:<br>1. Preserve vocals: Choose this option for audio without harmonies. It preserves vocals better than HP5. It includes two built-in models: HP2 and HP3. HP3 may slightly leak accompaniment but preserves vocals slightly better than HP2.<br>2. Preserve main vocals only: Choose this option for audio with harmonies. It may weaken the main vocals. It includes one built-in model: HP5.<br>3. De-reverb and de-delay models (by FoxJoy):<br>\u2003\u2003(1) MDX-Net: The best choice for stereo reverb removal but cannot remove mono reverb;<br> (234) DeEcho: Removes delay effects. Aggressive mode removes more thoroughly than Normal mode. DeReverb additionally removes reverb and can remove mono reverb, but not very effectively for heavily reverberated high-frequency content.<br>De-reverb/de-delay notes:<br>1. The processing time for the DeEcho-DeReverb model is approximately twice as long as the other two DeEcho models.<br>2. The MDX-Net-Dereverb model is quite slow.<br>3. The recommended cleanest configuration is to apply MDX-Net first and then DeEcho-Aggressive.",
|
199 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2:",
|
200 |
+
"伴奏人声分离&去混响&去回声": "Vocals/Accompaniment Separation & Reverberation Removal",
|
201 |
+
"使用模型采样率": "使用模型采样率",
|
202 |
+
"使用设备采样率": "使用设备采样率",
|
203 |
+
"保存名": "Save name:",
|
204 |
+
"保存的文件名, 默认空为和源文件同名": "Save file name (default: same as the source file):",
|
205 |
+
"保存的模型名不带后缀": "Saved model name (without extension):",
|
206 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:",
|
207 |
+
"修改": "Modify",
|
208 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modify model information (only supported for small model files extracted from the 'weights' folder)",
|
209 |
+
"停止音频转换": "Stop audio conversion",
|
210 |
+
"全流程结束!": "All processes have been completed!",
|
211 |
+
"刷新音色列表和索引路径": "Refresh voice list and index path",
|
212 |
+
"加载模型": "Load model",
|
213 |
+
"加载预训练底模D路径": "Load pre-trained base model D path:",
|
214 |
+
"加载预训练底模G路径": "Load pre-trained base model G path:",
|
215 |
+
"单次推理": "Single Inference",
|
216 |
+
"卸载音色省显存": "Unload voice to save GPU memory:",
|
217 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):",
|
218 |
+
"后处理重采样至最终采样率,0为不进行重采样": "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:",
|
219 |
+
"否": "No",
|
220 |
+
"启用相位声码器": "启用相位声码器",
|
221 |
+
"响应阈值": "Response threshold",
|
222 |
+
"响度因子": "loudness factor",
|
223 |
+
"处理数据": "Process data",
|
224 |
+
"导出Onnx模型": "Export Onnx Model",
|
225 |
+
"导出文件格式": "Export file format",
|
226 |
+
"常见问题解答": "FAQ (Frequently Asked Questions)",
|
227 |
+
"常规设置": "General settings",
|
228 |
+
"开始音频转换": "Start audio conversion",
|
229 |
+
"性能设置": "Performance settings",
|
230 |
+
"批量推理": "Batch Inference",
|
231 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').",
|
232 |
+
"指定输出主人声文件夹": "Specify the output folder for vocals:",
|
233 |
+
"指定输出文件夹": "Specify output folder:",
|
234 |
+
"指定输出非主人声文件夹": "Specify the output folder for accompaniment:",
|
235 |
+
"推理时间(ms):": "Inference time (ms):",
|
236 |
+
"推理音色": "Inferencing voice:",
|
237 |
+
"提取": "Extract",
|
238 |
+
"提取音高和处理数据使用的CPU进程数": "Number of CPU processes used for pitch extraction and data processing:",
|
239 |
+
"是": "Yes",
|
240 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement:",
|
241 |
+
"查看": "View",
|
242 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "View model information (only supported for small model files extracted from the 'weights' folder)",
|
243 |
+
"检索特征占比": "Search feature ratio (controls accent strength, too high has artifacting):",
|
244 |
+
"模型": "Model",
|
245 |
+
"模型推理": "Model Inference",
|
246 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:",
|
247 |
+
"模型是否带音高指导": "Whether the model has pitch guidance:",
|
248 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "Whether the model has pitch guidance (required for singing, optional for speech):",
|
249 |
+
"模型是否带音高指导,1是0否": "Whether the model has pitch guidance (1: yes, 0: no):",
|
250 |
+
"模型版本型号": "Model architecture version:",
|
251 |
+
"模型融合, 可用于测试音色融合": "Model fusion, can be used to test timbre fusion",
|
252 |
+
"模型路径": "Path to Model:",
|
253 |
+
"淡入淡出长度": "Fade length",
|
254 |
+
"版本": "Version",
|
255 |
+
"特征提取": "Feature extraction",
|
256 |
+
"特征检索库文件路径,为空则使用下拉的选择结果": "Path to the feature index file. Leave blank to use the selected result from the dropdown:",
|
257 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.",
|
258 |
+
"目标采样率": "Target sample rate:",
|
259 |
+
"算法延迟(ms):": "Algorithmic delays(ms):",
|
260 |
+
"自动检测index路径,下拉式选择(dropdown)": "Auto-detect index path and select from the dropdown:",
|
261 |
+
"融合": "Fusion",
|
262 |
+
"要改的模型信息": "Model information to be modified:",
|
263 |
+
"要置入的模型信息": "Model information to be placed:",
|
264 |
+
"训练": "Train",
|
265 |
+
"训练模型": "Train model",
|
266 |
+
"训练特征索引": "Train feature index",
|
267 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Training complete. You can check the training logs in the console or the 'train.log' file under the experiment folder.",
|
268 |
+
"请指定说话人id": "Please specify the speaker/singer ID:",
|
269 |
+
"请选择index文件": "Please choose the .index file",
|
270 |
+
"请选择pth文件": "Please choose the .pth file",
|
271 |
+
"请选择说话人id": "Select Speaker/Singer ID:",
|
272 |
+
"转换": "Convert",
|
273 |
+
"输入实验名": "Enter the experiment name:",
|
274 |
+
"输入待处理音频文件夹路径": "Enter the path of the audio folder to be processed:",
|
275 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):",
|
276 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "Enter the path of the audio file to be processed (default is the correct format example):",
|
277 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Adjust the volume envelope scaling. Closer to 0, the more it mimicks the volume of the original vocals. Can help mask noise and make volume sound more natural when set relatively low. Closer to 1 will be more of a consistently loud volume:",
|
278 |
+
"输入监听": "Input voice monitor",
|
279 |
+
"输入训练文件夹路径": "Enter the path of the training folder:",
|
280 |
+
"输入设备": "Input device",
|
281 |
+
"输入降噪": "Input noise reduction",
|
282 |
+
"输出信息": "Output information",
|
283 |
+
"输出变声": "Output converted voice",
|
284 |
+
"输出设备": "Output device",
|
285 |
+
"输出降噪": "Output noise reduction",
|
286 |
+
"输出音频(右下角三个点,点了可以下载)": "Export audio (click on the three dots in the lower right corner to download)",
|
287 |
+
"选择.index文件": "Select the .index file",
|
288 |
+
"选择.pth文件": "Select the .pth file",
|
289 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
|
290 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Select the pitch extraction algorithm ('pm': faster extraction but lower-quality speech; 'harvest': better bass but extremely slow; 'crepe': better quality but GPU intensive), 'rmvpe': best quality, and little GPU requirement",
|
291 |
+
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Select the pitch extraction algorithm: when extracting singing, you can use 'pm' to speed up. For high-quality speech with fast performance, but worse CPU usage, you can use 'dio'. 'harvest' results in better quality but is slower. 'rmvpe' has the best results and consumes less CPU/GPU",
|
292 |
+
"采样率:": "采样率:",
|
293 |
+
"采样长度": "Sample length",
|
294 |
+
"重载设备列表": "Reload device list",
|
295 |
+
"音调设置": "Pitch settings",
|
296 |
+
"音频设备(请使用同种类驱动)": "Audio device (please use the same type of driver)",
|
297 |
+
"音高算法": "pitch detection algorithm",
|
298 |
+
"额外推理时长": "Extra inference time"
|
299 |
+
}
|
i18n/locale/es_ES.json
ADDED
@@ -0,0 +1,284 @@
{
|
2 |
+
"很遗憾您这没有能用的显卡来支持您训练": "Lamentablemente, no tiene una tarjeta gráfica compatible para admitir su entrenamiento.",
|
3 |
+
"UVR5已开启": "UVR5 está habilitado",
|
4 |
+
"UVR5已关闭": "UVR5 está deshabilitado",
|
5 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Este software es de código abierto bajo la licencia MIT. El autor no tiene control sobre el software. El usuario que lo utilice o distribuya, y el que genere sonidos a partir del software, asume toda la responsabilidad. <br>Si no acepta estos términos, no puede utilizar ni hacer referencia a ningún código o archivo dentro del paquete de software. Consulte el archivo <b>LICENSE</b> en el directorio raíz para obtener más detalles.",
|
6 |
+
"0-前置数据集获取工具": "0-Herramienta de obtención de conjunto de datos previo",
|
7 |
+
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-Herramienta de separación de voz y acompañamiento UVR5 y eliminación de reverberación y retardo",
|
8 |
+
"是否开启UVR5-WebUI": "¿Habilitar UVR5-WebUI?",
|
9 |
+
"UVR5进程输出信息": "Información de salida del proceso UVR5",
|
10 |
+
"0b-语音切分工具": "0b-Herramienta de división de voz",
|
11 |
+
".list标注文件的路径": "Ruta del archivo de anotación .list",
|
12 |
+
"GPT模型列表": "Lista de modelos GPT",
|
13 |
+
"SoVITS模型列表": "Lista de modelos SoVITS",
|
14 |
+
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。": "Directorio donde se guardan los archivos de audio después del corte! Ruta completa del archivo de audio a leer = este directorio - nombre de archivo correspondiente a la forma de onda en el archivo de lista (no la ruta completa).",
|
15 |
+
"音频自动切分输入路径,可文件可文件夹": "Ruta de entrada para la división automática de audio, puede ser un archivo o una carpeta",
|
16 |
+
"切分后的子音频的输出根目录": "Directorio raíz de salida de los sub-audios después de la división",
|
17 |
+
"怎么切": "Cómo cortar",
|
18 |
+
"不切": "No cortar",
|
19 |
+
"凑四句一切": "Completa cuatro oraciones para rellenar todo",
|
20 |
+
"按英文句号.切": "Cortar por puntos en inglés.",
|
21 |
+
"threshold:音量小于这个值视作静音的备选切割点": "umbral: puntos de corte alternativos considerados como silencio si el volumen es menor que este valor",
|
22 |
+
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: duración mínima de cada segmento, si el primer segmento es demasiado corto, se conecta continuamente con los siguientes hasta que supera este valor",
|
23 |
+
"min_interval:最短切割间隔": "min_interval: intervalo mínimo de corte",
|
24 |
+
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: cómo calcular la curva de volumen, cuanto más pequeño, mayor precisión pero mayor carga computacional (mayor precisión no significa mejor rendimiento)",
|
25 |
+
"max_sil_kept:切完后静音最多留多长": "max_sil_kept: duración máxima del silencio después del corte",
|
26 |
+
"开启语音切割": "Habilitar la división de voz",
|
27 |
+
"终止语音切割": "Terminar la división de voz",
|
28 |
+
"max:归一化后最大值多少": "max: valor máximo después de la normalización",
|
29 |
+
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix: proporción de mezcla de audio normalizado que entra",
|
30 |
+
"切割使用的进程数": "Número de procesos utilizados para la división",
|
31 |
+
"语音切割进程输出信息": "Información de salida del proceso de división de voz",
|
32 |
+
"0c-中文批量离线ASR工具": "0c-Herramienta de ASR en lote fuera de línea en chino",
|
33 |
+
"开启离线批量ASR": "¿Habilitar ASR en lote fuera de línea?",
|
34 |
+
"终止ASR进程": "Terminar el proceso ASR",
|
35 |
+
"批量ASR(中文only)输入文件夹路径": "Ruta de la carpeta de entrada para ASR en lote (solo en chino)",
|
36 |
+
"ASR进程输出信息": "Información de salida del proceso ASR",
|
37 |
+
"0d-语音文本校对标注工具": "0d-Herramienta de corrección y etiquetado de texto de voz",
|
38 |
+
"是否开启打标WebUI": "¿Habilitar la interfaz web de etiquetado?",
|
39 |
+
"打标数据标注文件路径": "Ruta del archivo de etiquetado de datos",
|
40 |
+
"打标工具进程输出信息": "Información de salida del proceso de la herramienta de etiquetado",
|
41 |
+
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
|
42 |
+
"*实验/模型名": "*Nombre del experimento/modelo",
|
43 |
+
"显卡信息": "Información de la tarjeta gráfica",
|
44 |
+
"预训练的SoVITS-G模型路径": "Ruta del modelo SoVITS-G preentrenado",
|
45 |
+
"预训练的SoVITS-D模型路径": "Ruta del modelo SoVITS-D preentrenado",
|
46 |
+
"预训练的GPT模型路径": "Ruta del modelo GPT preentrenado",
|
47 |
+
"1A-训练集格式化工具": "1A-Herramienta de formateo del conjunto de datos de entrenamiento",
|
48 |
+
"输出logs/实验名目录下应有23456开头的文件和文件夹": "Debe haber archivos y carpetas que comiencen con 23456 en el directorio logs/nombre del experimento",
|
49 |
+
"*文本标注文件": "*Archivo de etiquetado de texto",
|
50 |
+
"*训练集音频文件目录": "*Directorio de archivos de audio de entrenamiento",
|
51 |
+
"训练集音频文件目录 拼接 list文件里波形对应的文件名。": "Directorio de archivos de audio de entrenamiento, concatenar con los nombres de archivo correspondientes en el archivo list.",
|
52 |
+
"1Aa-文本内容": "1Aa-Contenido del texto",
|
53 |
+
"GPU卡号以-分割,每个卡号一个进程": "Número de tarjeta GPU separado por '-', cada número de tarjeta es un proceso",
|
54 |
+
"预训练的中文BERT模型路径": "Ruta del modelo BERT en chino preentrenado",
|
55 |
+
"开启文本获取": "¿Habilitar la obtención de texto?",
|
56 |
+
"终止文本获取进程": "Terminar el proceso de obtención de texto",
|
57 |
+
"文本进程输出信息": "Información de salida del proceso de obtención de texto",
|
58 |
+
"1Ab-SSL自监督特征提取": "1Ab-Extracción de características auto-supervisada SSL",
|
59 |
+
"预训练的SSL模型路径": "Ruta del modelo SSL preentrenado",
|
60 |
+
"开启SSL提取": "¿Habilitar la extracción SSL?",
|
61 |
+
"终止SSL提取进程": "Terminar el proceso de extracción SSL",
|
62 |
+
"SSL进程输出信息": "Información de salida del proceso SSL",
|
63 |
+
"1Ac-语义token提取": "1Ac-Extracción de tokens semánticos",
|
64 |
+
"开启语义token提取": "¿Habilitar la extracción de tokens semánticos?",
|
65 |
+
"终止语义token提取进程": "Terminar el proceso de extracción de tokens semánticos",
|
66 |
+
"语义token提取进程输出信息": "Información de salida del proceso de extracción de tokens semánticos",
|
67 |
+
"1Aabc-训练集格式化一键三连": "1Aabc-Formateo del conjunto de datos de entrenamiento en un solo paso",
|
68 |
+
"开启一键三连": "¿Habilitar un solo paso de formateo?",
|
69 |
+
"终止一键三连": "Terminar el proceso de un solo paso de formateo",
|
70 |
+
"一键三连进程输出信息": "Información de salida del proceso de triple acción",
|
71 |
+
"1B-微调训练": "1B-Entrenamiento de ajuste fino",
|
72 |
+
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-Entrenamiento de SoVITS. Los archivos de modelo para compartir se encuentran en SoVITS_weights.",
|
73 |
+
"每张显卡的batch_size": "Tamaño de lote por tarjeta gráfica",
|
74 |
+
"总训练轮数total_epoch,不建议太高": "Número total de épocas de entrenamiento, no se recomienda demasiado alto",
|
75 |
+
"文本模块学习率权重": "Peso de la tasa de aprendizaje del módulo de texto",
|
76 |
+
"保存频率save_every_epoch": "Frecuencia de guardado (cada epoch)",
|
77 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "¿Guardar solo el último archivo ckpt para ahorrar espacio en disco?",
|
78 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "¿Guardar el modelo final pequeño en la carpeta de pesos en cada punto de guardado?",
|
79 |
+
"开启SoVITS训练": "Iniciar entrenamiento de SoVITS",
|
80 |
+
"终止SoVITS训练": "Detener entrenamiento de SoVITS",
|
81 |
+
"SoVITS训练进程输出信息": "Información de salida del proceso de entrenamiento de SoVITS",
|
82 |
+
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-Entrenamiento de GPT. Los archivos de modelo para compartir se encuentran en GPT_weights.",
|
83 |
+
"总训练轮数total_epoch": "Número total de épocas de entrenamiento",
|
84 |
+
"开启GPT训练": "Iniciar entrenamiento de GPT",
|
85 |
+
"终止GPT训练": "Detener entrenamiento de GPT",
|
86 |
+
"GPT训练进程输出信息": "Información de salida del proceso de entrenamiento de GPT",
|
87 |
+
"1C-推理": "1C-Inferencia",
|
88 |
+
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Seleccione el modelo almacenado en SoVITS_weights y GPT_weights después del entrenamiento. Uno de ellos es el modelo base, útil para experimentar con TTS de 5 segundos sin entrenamiento.",
|
89 |
+
"*GPT模型列表": "*Lista de modelos GPT",
|
90 |
+
"*SoVITS模型列表": "*Lista de modelos SoVITS",
|
91 |
+
"GPU卡号,只能填1个整数": "Número de tarjeta GPU, solo se puede ingresar un número entero",
|
92 |
+
"刷新模型路径": "Actualizar la ruta del modelo",
|
93 |
+
"是否开启TTS推理WebUI": "¿Habilitar la interfaz web de inferencia TTS?",
|
94 |
+
"TTS推理WebUI进程输出信息": "Información de salida del proceso de interfaz web de inferencia TTS",
|
95 |
+
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-Cambio de voz",
|
96 |
+
"施工中,请静候佳音": "En construcción, por favor espere pacientemente",
|
97 |
+
"TTS推理进程已开启": "Proceso de inferencia TTS iniciado",
|
98 |
+
"TTS推理进程已关闭": "Proceso de inferencia TTS cerrado",
|
99 |
+
"打标工具WebUI已开启": "Interfaz web de la herramienta de etiquetado iniciada",
|
100 |
+
"打标工具WebUI已关闭": "Interfaz web de la herramienta de etiquetado cerrada",
|
101 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Este software es de código abierto bajo la licencia MIT. El autor no tiene control sobre el software. El usuario que lo utilice o distribuya, y el que genere sonidos a partir del software, asume toda la responsabilidad. Si no acepta estos términos, no puede utilizar ni hacer referencia a ningún código o archivo dentro del paquete de software. Consulte el archivo LICENSE en el directorio raíz para obtener más detalles.",
|
102 |
+
"*请上传并填写参考信息": "*Por favor, suba y complete la información de referencia",
|
103 |
+
"*请填写需要合成的目标文本": "*Por favor, complete el texto objetivo que necesita ser sintetizado",
|
104 |
+
"ASR任务开启:%s": "Tarea ASR iniciada: %s",
|
105 |
+
"GPT训练完成": "Entrenamiento de GPT completado",
|
106 |
+
"GPT训练开始:%s": "Entrenamiento de GPT iniciado: %s",
|
107 |
+
"SSL提取进程执行中": "Proceso de extracción SSL en ejecución",
|
108 |
+
"SSL提取进程结束": "Proceso de extracción SSL finalizado",
|
109 |
+
"SoVITS训练完成": "Entrenamiento de SoVITS completado",
|
110 |
+
"SoVITS训练开始:%s": "Entrenamiento de SoVITS iniciado: %s",
|
111 |
+
"一键三连中途报错": "Error intermedio en triple acción",
|
112 |
+
"一键三连进程结束": "Proceso de triple acción finalizado",
|
113 |
+
"中文": "Chino",
|
114 |
+
"凑50字一切": "Todo para alcanzar las 50 palabras",
|
115 |
+
"凑五句一切": "Todo para alcanzar las cinco frases",
|
116 |
+
"切分后文本": "Texto después de la división",
|
117 |
+
"切割执行中": "División en proceso",
|
118 |
+
"切割结束": "División finalizada",
|
119 |
+
"参考音频的文本": "Texto de referencia del audio",
|
120 |
+
"参考音频的语种": "Idioma del audio de referencia",
|
121 |
+
"合成语音": "Síntesis de voz",
|
122 |
+
"后续将支持混合语种编码文本输入。": "En el futuro, se admitirá la entrada de texto con codificación de idiomas mixtos.",
|
123 |
+
"已有正在进行的ASR任务,需先终止才能开启下一次任务": "Ya hay una tarea ASR en curso, debe detenerla antes de comenzar la siguiente tarea",
|
124 |
+
"已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "Ya hay una tarea de entrenamiento de GPT en curso, debe detenerla antes de comenzar la siguiente tarea",
|
125 |
+
"已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "Ya hay una tarea de extracción SSL en curso, debe detenerla antes de comenzar la siguiente tarea",
|
126 |
+
"已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "Ya hay una tarea de entrenamiento de SoVITS en curso, debe detenerla antes de comenzar la siguiente tarea",
|
127 |
+
"已有正在进行的一键三连任务,需先终止才能开启下一次任务": "Ya hay una tarea de triple acción en curso, debe detenerla antes de comenzar la siguiente tarea",
|
128 |
+
"已有正在进行的切割任务,需先终止才能开启下一次任务": "Ya hay una tarea de división en curso, debe detenerla antes de comenzar la siguiente tarea",
|
129 |
+
"已有正在进行的文本任务,需先终止才能开启下一次任务": "Ya hay una tarea de texto en curso, debe detenerla antes de comenzar la siguiente tarea",
|
130 |
+
"已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "Ya hay una tarea de extracción de tokens semánticos en curso, debe detenerla antes de comenzar la siguiente tarea",
|
131 |
+
"已终止ASR进程": "Proceso ASR terminado",
|
132 |
+
"已终止GPT训练": "Entrenamiento de GPT terminado",
|
133 |
+
"已终止SoVITS训练": "Entrenamiento de SoVITS terminado",
|
134 |
+
"已终止所有1a进程": "Se han terminado todos los procesos 1a",
|
135 |
+
"已终止所有1b进程": "Se han terminado todos los procesos 1b",
|
136 |
+
"已终止所有一键三连进程": "Se han terminado todos los procesos de triple acción",
|
137 |
+
"已终止所有切割进程": "Proceso de corte terminado",
|
138 |
+
"已终止所有语义token进程": "Proceso de extracción de tokens semánticos terminado",
|
139 |
+
"按中文句号。切": "Cortar según puntos en chino",
|
140 |
+
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Herramienta de división de texto. El resultado de la síntesis puede no ser bueno para textos demasiado largos, por lo que se recomienda dividirlos primero. La síntesis se realiza separando el texto según los saltos de línea y luego uniendo los fragmentos.",
|
141 |
+
"文本进程执行中": "Proceso de texto en ejecución",
|
142 |
+
"文本进程结束": "Proceso de texto finalizado",
|
143 |
+
"日文": "Japonés",
|
144 |
+
"英文": "Inglés",
|
145 |
+
"语义token提取进程执行中": "Proceso de extracción de tokens semánticos en ejecución",
|
146 |
+
"语义token提取进程结束": "Proceso de extracción de tokens semánticos finalizado",
|
147 |
+
"请上传参考音频": "Por favor, suba el audio de referencia",
|
148 |
+
"输入路径不存在": "La ruta de entrada no existe",
|
149 |
+
"输入路径存在但既不是文件也不是文件夹": "La ruta de entrada existe pero no es ni un archivo ni una carpeta",
|
150 |
+
"输出的语音": "Audio de salida",
|
151 |
+
"进度:1a-done": "Progreso: 1a-hecho",
|
152 |
+
"进度:1a-done, 1b-ing": "Progreso: 1a-hecho, 1b-en proceso",
|
153 |
+
"进度:1a-ing": "Progreso: 1a-en proceso",
|
154 |
+
"进度:1a1b-done": "Progreso: 1a1b-hecho",
|
155 |
+
"进度:1a1b-done, 1cing": "Progreso: 1a1b-hecho, 1c-en proceso",
|
156 |
+
"进度:all-done": "Progreso: todo hecho",
|
157 |
+
"需要合成的切分前文本": "Texto a sintetizar antes de la división",
|
158 |
+
"需要合成的文本": "Texto a sintetizar",
|
159 |
+
"需要合成的语种": "Idioma para la síntesis",
|
160 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Si es >=3, se utiliza la mediana para filtrar los resultados del reconocimiento de altura tonal de harvest, el valor es el radio del filtro. Su uso puede debilitar los sonidos sordos.",
|
161 |
+
"A模型权重": "Peso del modelo A",
|
162 |
+
"A模型路径": "Ruta del modelo A",
|
163 |
+
"B模型路径": "Ruta del modelo B",
|
164 |
+
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
|
165 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Archivo de curva F0, opcional, una línea por altura tonal, en lugar de F0 y cambio de tono predeterminados",
|
166 |
+
"Index Rate": "Tasa de índice",
|
167 |
+
"Onnx导出": "Exportar a Onnx",
|
168 |
+
"Onnx输出路径": "Ruta de salida de Onnx",
|
169 |
+
"RVC模型路径": "Ruta del modelo RVC",
|
170 |
+
"ckpt处理": "Procesamiento de ckpt",
|
171 |
+
"harvest进程数": "Número de procesos de harvest",
|
172 |
+
"index文件路径不可包含中文": "La ruta del archivo de índice no puede contener caracteres chinos",
|
173 |
+
"pth文件路径不可包含中文": "La ruta del archivo pth no puede contener caracteres chinos",
|
174 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Configuración de números de tarjeta rmvpe: usando - para separar los números de tarjeta de diferentes procesos de entrada, por ejemplo, 0-0-1 para ejecutar 2 procesos en la tarjeta 0 y 1 proceso en la tarjeta 1",
|
175 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Paso 1: Completa la configuración del experimento. Los datos del experimento se encuentran en logs, cada experimento en una carpeta, debe ingresar manualmente la ruta del nombre del experimento, que incluye la configuración del experimento, el registro y los archivos del modelo entrenado.",
|
176 |
+
"step1:正在处理数据": "Paso 1: Procesando datos",
|
177 |
+
"step2:正在提取音高&正在提取特征": "Paso 2: Extrayendo tono y características",
|
178 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Paso 2a: Recorre automáticamente todos los archivos en la carpeta de entrenamiento que se pueden decodificar en archivos de audio y realiza la normalización de segmentos. Genera 2 carpetas de audio en el directorio del experimento; por ahora, solo es compatible con el entrenamiento de una sola persona.",
|
179 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Paso 2b: Extraer tono con CPU (si el modelo incluye tono) y extraer características con GPU (seleccionar número de tarjeta)",
|
180 |
+
"step3: 填写训练设置, 开始训练模型和索引": "Paso 3: Completa la configuración de entrenamiento y comienza a entrenar el modelo e indexar",
|
181 |
+
"step3a:正在训练模型": "Paso 3a: Entrenando el modelo",
|
182 |
+
"一键训练": "Entrenamiento con un clic",
|
183 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "También se pueden ingresar archivos de audio por lotes, seleccionar uno, prioridad para leer carpetas",
|
184 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Usar - para separar los números de tarjeta utilizados como entrada, por ejemplo, 0-1-2 para usar las tarjetas 0, 1 y 2",
|
185 |
+
"伴奏人声分离&去混响&去回声": "Separación de acompañamiento y voz principal y eliminación de reverberación y eco",
|
186 |
+
"使用模型采样率": "Usar tasa de muestreo del modelo",
|
187 |
+
"使用设备采样率": "Usar tasa de muestreo del dispositivo",
|
188 |
+
"保存名": "Nombre de guardado",
|
189 |
+
"保存的文件名, 默认空为和源文件同名": "Nombre de archivo guardado, vacío por defecto para tener el mismo nombre que el archivo fuente",
|
190 |
+
"保存的模型名不带后缀": "Nombre del modelo guardado sin extensión",
|
191 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降��索引效果": "Proteger las consonantes claras y los sonidos de respiración, evitando artefactos como el desgarro eléctrico. No activar al tirar hasta 0.5, reducir para aumentar la protección, pero puede disminuir la efectividad del índice",
|
192 |
+
"修改": "Modificar",
|
193 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modificar información del modelo (solo compatible con archivos de modelo pequeños extraídos en la carpeta weights)",
|
194 |
+
"停止音频转换": "Detener la conversión de audio",
|
195 |
+
"全流程结束!": "¡Proceso completo!",
|
196 |
+
"刷新音色列表和索引路径": "Actualizar lista de tonos e índice de ruta",
|
197 |
+
"加载模型": "Cargar modelo",
|
198 |
+
"加载预训练底模D路径": "Cargar ruta del modelo D preentrenado",
|
199 |
+
"加载预训练底模G路径": "Cargar ruta del modelo G preentrenado",
|
200 |
+
"单次推理": "Inferencia única",
|
201 |
+
"卸载音色省显存": "Descargar tono para ahorrar memoria de video",
|
202 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "Cambiar tono (número entero, cantidad de semitonos, subir octava 12 bajar octava -12)",
|
203 |
+
"后处理重采样至最终采样率,0为不进行重采样": "Reprocesar y remuestrear a la tasa de muestreo final, 0 para no remuestrear",
|
204 |
+
"否": "No",
|
205 |
+
"启用相位声码器": "Activar codificador de fase",
|
206 |
+
"响应阈值": "Umbral de respuesta",
|
207 |
+
"响度因子": "Factor de sonoridad",
|
208 |
+
"处理数据": "Procesar datos",
|
209 |
+
"导出Onnx模型": "Exportar modelo Onnx",
|
210 |
+
"导出文件格式": "Formato de archivo de exportación",
|
211 |
+
"常见问题解答": "Preguntas frecuentes",
|
212 |
+
"常规设置": "Configuración general",
|
213 |
+
"开始音频转换": "Iniciar conversión de audio",
|
214 |
+
"性能设置": "Configuración de rendimiento",
|
215 |
+
"批量推理": "Inferencia por lotes",
|
216 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversión por lotes, ingrese la carpeta de audio a convertir o cargue varios archivos de audio, la salida se realiza en la carpeta especificada (opt por defecto). ",
|
217 |
+
"指定输出主人声文件夹": "Especificar carpeta de salida de voz principal",
|
218 |
+
"指定输出文件夹": "Especificar carpeta de salida",
|
219 |
+
"指定输出非主人声文件夹": "Especificar carpeta de salida de no voz principal",
|
220 |
+
"推理时间(ms):": "Tiempo de inferencia (ms):",
|
221 |
+
"推理音色": "Tono de inferencia",
|
222 |
+
"提取": "Extraer",
|
223 |
+
"提取音高和处理数据使用的CPU进程数": "Número de procesadores de CPU utilizados para extraer tono y procesar datos",
|
224 |
+
"是": "Sí",
|
225 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Almacenar en caché todos los conjuntos de entrenamiento en la memoria de video. Pequeños conjuntos de datos menores a 10 minutos pueden almacenarse en caché para acelerar el entrenamiento; almacenar en caché grandes conjuntos de datos puede saturar la memoria de video y no acelerará mucho.",
|
226 |
+
"查看": "Ver",
|
227 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Ver información del modelo (solo compatible con archivos pequeños extraídos en la carpeta weights)",
|
228 |
+
"检索特征占比": "Proporción de características de búsqueda",
|
229 |
+
"模型": "Modelo",
|
230 |
+
"模型推理": "Inferencia de modelo",
|
231 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extracción de modelo (ingresar la ruta del modelo grande en la carpeta logs), útil cuando se quiere dejar de entrenar a la mitad y el modelo no ha extraído automáticamente un modelo pequeño guardado, o para probar la situación del modelo intermedio",
|
232 |
+
"模型是否带音高指导": "¿El modelo incluye guía de altura tonal?",
|
233 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "¿El modelo incluye guía de altura tonal? (Necesario para cantar, opcional para voz)",
|
234 |
+
"模型是否带音高指导,1是0否": "¿El modelo incluye guía de altura tonal? 1 para sí, 0 para no",
|
235 |
+
"模型版本型号": "Versión y modelo del modelo",
|
236 |
+
"模型融合, 可用于测试音色融合": "Fusión de modelos, útil para probar la mezcla de tonos",
|
237 |
+
"模型路径": "Ruta del modelo",
|
238 |
+
"淡入淡出长度": "Longitud de desvanecimiento",
|
239 |
+
"版本": "Versión",
|
240 |
+
"特征提取": "Extracción de características",
|
241 |
+
"特征检索库文件路径,为空则使用下拉的选择结果": "Ruta del archivo de la biblioteca de búsqueda de características, si está vacío, se utiliza el resultado seleccionado en el menú desplegable",
|
242 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Recomendación para cambiar de hombre a mujer +12 teclas, cambiar de mujer a hombre -12 teclas. Si la amplitud del rango tonal causa distorsión del tono, también puede ajustarse manualmente al rango tonal adecuado. ",
|
243 |
+
"目标采样率": "Tasa de muestreo objetivo",
|
244 |
+
"算法延迟(ms):": "Retardo del algoritmo (ms):",
|
245 |
+
"自动检测index路径,下拉式选择(dropdown)": "Detectar automáticamente la ruta del índice, seleccionar en menú desplegable",
|
246 |
+
"融合": "Fusión",
|
247 |
+
"要改的模型信息": "Información del modelo a cambiar",
|
248 |
+
"要置入的模型信息": "Información del modelo a insertar",
|
249 |
+
"训练": "Entrenar",
|
250 |
+
"训练模型": "Entrenar modelo",
|
251 |
+
"训练特征索引": "Entrenar índice de características",
|
252 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entrenamiento terminado, puede ver registros de entrenamiento en la consola o en el archivo train.log en la carpeta del experimento",
|
253 |
+
"请指定说话人id": "Por favor, especifique el ID del hablante",
|
254 |
+
"请选择index文件": "Seleccione el archivo index, por favor",
|
255 |
+
"请选择pth文件": "Seleccione el archivo pth, por favor",
|
256 |
+
"请选择说话人id": "Seleccione el ID del hablante, por favor",
|
257 |
+
"转换": "Convertir",
|
258 |
+
"输入实验名": "Ingrese el nombre del experimento",
|
259 |
+
"输入待处理音频文件夹路径": "Ingrese la ruta de la carpeta de audio a procesar",
|
260 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Ingrese la ruta de la carpeta de audio a procesar (puede copiarla desde la barra de direcciones del administrador de archivos)",
|
261 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "Ingrese la ruta del archivo de audio a procesar (el formato predeterminado es un ejemplo correcto)",
|
262 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Ingrese la proporción de fusión para reemplazar el sobre de volumen de origen con el sobre de volumen de salida; cuanto más cercano a 1, más se utiliza el sobre de salida",
|
263 |
+
"输入监听": "Entrada de monitoreo",
|
264 |
+
"输入训练文件夹路径": "Ingrese la ruta de la carpeta de entrenamiento",
|
265 |
+
"输入设备": "Dispositivo de entrada",
|
266 |
+
"输入降噪": "Entrada de reducción de ruido",
|
267 |
+
"输出信息": "Información de salida",
|
268 |
+
"输出变声": "Salida de cambio de voz",
|
269 |
+
"输出设备": "Dispositivo de salida",
|
270 |
+
"输出降噪": "Salida de reducción de ruido",
|
271 |
+
"输出音频(右下角三个点,点了可以下载)": "Salida de audio (los tres puntos en la esquina inferior derecha, haga clic para descargar)",
|
272 |
+
"选择.index文件": "Seleccione el archivo .index, por favor",
|
273 |
+
"选择.pth文件": "Seleccione el archivo .pth, por favor",
|
274 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Seleccione el algoritmo de extracción de tono; para voz, pm acelera, harvest es lento pero tiene buenos bajos, crepe tiene buen efecto pero consume GPU",
|
275 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Seleccione el algoritmo de extracción de tono; para voz, pm acelera, harvest es lento pero tiene buenos bajos, crepe tiene buen efecto pero consume GPU, rmvpe tiene el mejor efecto y consume poco GPU",
|
276 |
+
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Seleccione el algoritmo de extracción de tono: para voz, pm acelera con buena calidad de audio pero CPU deficiente, dio acelera pero harvest tiene mejor calidad aunque es más lento, rmvpe tiene el mejor efecto y consume poco CPU/GPU",
|
277 |
+
"采样率:": "Tasa de muestreo:",
|
278 |
+
"采样长度": "Longitud de muestreo",
|
279 |
+
"重载设备列表": "Recargar lista de dispositivos",
|
280 |
+
"音调设置": "Configuración de tono",
|
281 |
+
"音频设备(请使用同种类驱动)": "Dispositivo de audio (utilice controladores del mismo tipo)",
|
282 |
+
"音高算法": "Algoritmo de tono",
|
283 |
+
"额外推理时长": "Tiempo adicional de inferencia"
|
284 |
+
}
|
i18n/locale/fr_FR.json
ADDED
@@ -0,0 +1,284 @@
{
|
2 |
+
"很遗憾您这没有能用的显卡来支持您训练": "Malheureusement, votre carte graphique n'est pas compatible avec l'entraînement.",
|
3 |
+
"UVR5已开启": "UVR5 est activé",
|
4 |
+
"UVR5已关闭": "UVR5 est désactivé",
|
5 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Ce logiciel est open source sous la licence MIT. L'auteur n'a aucun contrôle sur le logiciel. Les utilisateurs et les diffuseurs du son exporté par le logiciel en assument l'entière responsabilité. <br>Si vous n'acceptez pas ces termes, vous ne pouvez ni utiliser ni citer aucun code ou fichier à l'intérieur du package. Voir <b>LICENSE</b> dans le répertoire racine pour plus de détails.",
|
6 |
+
"0-前置数据集获取工具": "0-Outil de récupération de jeu de données préalable",
|
7 |
+
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-Outil de séparation de la voix humaine et de l'accompagnement UVR5 & suppression de la réverbération et du retard",
|
8 |
+
"是否开启UVR5-WebUI": "Activer UVR5-WebUI",
|
9 |
+
"UVR5进程输出信息": "Informations de processus UVR5",
|
10 |
+
"0b-语音切分工具": "0b-Outil de découpage vocal",
|
11 |
+
".list标注文件的路径": "Chemin du fichier d'annotation .list",
|
12 |
+
"GPT模型列表": "Liste des modèles GPT",
|
13 |
+
"SoVITS模型列表": "Liste des modèles SoVITS",
|
14 |
+
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。": "Répertoire où sont enregistrés les fichiers audio après la découpe ! Chemin complet du fichier audio à lire = ce répertoire - nom du fichier correspondant à la forme d'onde dans le fichier liste (pas le chemin complet).",
|
15 |
+
"音频自动切分输入路径,可文件可文件夹": "Chemin d'entrée automatique de découpage audio, peut être un fichier ou un dossier",
|
16 |
+
"切分后的子音频的输出根目录": "Répertoire racine de sortie des sous-audios après découpage",
|
17 |
+
"怎么切": "Comment découper",
|
18 |
+
"不切": "Pas de découpe",
|
19 |
+
"凑四句一切": "Composez quatre phrases pour tout remplir",
|
20 |
+
"按英文句号.切": "Découpez par des points en anglais",
|
21 |
+
"threshold:音量小于这个值视作静音的备选切割点": "seuil: le volume inférieur à cette valeur est considéré comme un point de coupe silencieux alternatif",
|
22 |
+
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: longueur minimale de chaque segment, si le premier segment est trop court, il est continué avec le segment suivant jusqu'à dépasser cette valeur",
|
23 |
+
"min_interval:最短切割间隔": "min_interval: intervalle de coupe minimum",
|
24 |
+
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: comment calculer la courbe de volume, plus petit pour une précision plus élevée mais une charge de calcul plus élevée (ce n'est pas une meilleure précision)",
|
25 |
+
"max_sil_kept:切完后静音最多留多长": "max_sil_kept: durée maximale de silence après la coupe",
|
26 |
+
"开启语音切割": "Activer le découpage vocal",
|
27 |
+
"终止语音切割": "Arrêter le découpage vocal",
|
28 |
+
"max:归一化后最大值多少": "max: valeur maximale après normalisation",
|
29 |
+
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix: proportion d'audio normalisé mélangé",
|
30 |
+
"切割使用的进程数": "Nombre de processus utilisés pour le découpage",
|
31 |
+
"语音切割进程输出信息": "Informations de processus de découpage vocal",
|
32 |
+
"0c-中文批量离线ASR工具": "0c-Outil chinois de transcription automatique hors ligne en masse",
|
33 |
+
"开启离线批量ASR": "Activer la transcription automatique hors ligne en masse",
|
34 |
+
"终止ASR进程": "Arrêter le processus ASR",
|
35 |
+
"批量ASR(中文only)输入文件夹路径": "Chemin du dossier d'entrée pour la transcription automatique hors ligne en masse (chinois uniquement)",
|
36 |
+
"ASR进程输出信息": "Informations de processus ASR",
|
37 |
+
"0d-语音文本校对标注工具": "0d-Outil de correction et d'annotation de texte vocal",
|
38 |
+
"是否开启打标WebUI": "Activer l'interface Web d'annotation",
|
39 |
+
"打标数据标注文件路径": "Chemin du fichier d'annotation des données annotées",
|
40 |
+
"打标工具进程输出信息": "Informations de processus de l'outil d'annotation",
|
41 |
+
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
|
42 |
+
"*实验/模型名": "*Nom de l'expérience/modèle",
|
43 |
+
"显卡信息": "Informations sur la carte graphique",
|
44 |
+
"预训练的SoVITS-G模型路径": "Chemin du modèle SoVITS-G pré-entraîné",
|
45 |
+
"预训练的SoVITS-D模型路径": "Chemin du modèle SoVITS-D pré-entraîné",
|
46 |
+
"预训练的GPT模型路径": "Chemin du modèle GPT pré-entraîné",
|
47 |
+
"1A-训练集格式化工具": "1A-Outil de formatage du jeu de données d'entraînement",
|
48 |
+
"输出logs/实验名目录下应有23456开头的文件和文件夹": "Les fichiers et dossiers commençant par 23456 devraient être présents dans le répertoire logs/nom de l'expérience",
|
49 |
+
"*文本标注文件": "*Fichier d'annotation de texte",
|
50 |
+
"*训练集音频文件目录": "*Répertoire des fichiers audio d'entraînement",
|
51 |
+
"训练集音频文件目录 拼接 list文件里波形对应的文件名。": "Répertoire des fichiers audio d'entraînement - concaténer avec les noms de fichiers correspondants dans le fichier de liste",
|
52 |
+
"1Aa-文本内容": "1Aa-Contenu du texte",
|
53 |
+
"GPU卡号以-分割,每个卡号一个进程": "Numéro de carte GPU séparé par des tirets, un processus par numéro de carte",
|
54 |
+
"预训练的中文BERT模型路径": "Chemin du modèle BERT chinois pré-entraîné",
|
55 |
+
"开启文本获取": "Activer l'extraction de texte",
|
56 |
+
"终止文本获取进程": "Arrêter le processus d'extraction de texte",
|
57 |
+
"文本进程输出信息": "Informations de processus de texte",
|
58 |
+
"1Ab-SSL自监督特征提取": "1Ab-Extraction de caractéristiques auto-supervisée SSL",
|
59 |
+
"预训练的SSL模型路径": "Chemin du modèle SSL pré-entraîné",
|
60 |
+
"开启SSL提取": "Activer l'extraction SSL",
|
61 |
+
"终止SSL提取进程": "Arrêter le processus d'extraction SSL",
|
62 |
+
"SSL进程输出信息": "Informations de processus SSL",
|
63 |
+
"1Ac-语义token提取": "1Ac-Extraction de jetons sémantiques",
|
64 |
+
"开启语义token提取": "Activer l'extraction de jetons sémantiques",
|
65 |
+
"终止语义token提取进程": "Arrêter le processus d'extraction de jetons sémantiques",
|
66 |
+
"语义token提取进程输出信息": "Informations de processus d'extraction de jetons sémantiques",
|
67 |
+
"1Aabc-训练集格式化一键三连": "1Aabc-Formatage en un clic du jeu de données d'entraînement",
|
68 |
+
"开启一键三连": "Activer l'un clic trois connexions",
|
69 |
+
"终止一键三连": "Arrêter l'un clic trois connexions",
|
70 |
+
"一键三连进程输出信息": "Informations de processus de l'un clic trois connexions",
|
71 |
+
"1B-微调训练": "1B-Entraînement fin",
|
72 |
+
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-Entraînement SoVITS. Les fichiers de modèle destinés au partage sont enregistrés sous SoVITS_weights.",
|
73 |
+
"每张显卡的batch_size": "Taille de lot par carte graphique",
|
74 |
+
"总训练轮数total_epoch,不建议太高": "Nombre total d'époques d'entraînement, pas recommandé d'être trop élevé",
|
75 |
+
"文本模块学习率权重": "Poids du taux d'apprentissage du module de texte",
|
76 |
+
"保存频率save_every_epoch": "Fréquence de sauvegarde (sauvegarder à chaque époque)",
|
77 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "Sauvegarder uniquement le dernier fichier ckpt pour économiser de l'espace disque",
|
78 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Sauvegarder le petit modèle final dans le dossier weights à chaque point de sauvegarde",
|
79 |
+
"开启SoVITS训练": "Activer l'entraînement SoVITS",
|
80 |
+
"终止SoVITS训练": "Arrêter l'entraînement SoVITS",
|
81 |
+
"SoVITS训练进程输出信息": "Informations de processus d'entraînement SoVITS",
|
82 |
+
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-Entraînement GPT. Les fichiers de modèle destinés au partage sont enregistrés sous GPT_weights.",
|
83 |
+
"总训练轮数total_epoch": "Nombre total d'époques d'entraînement",
|
84 |
+
"开启GPT训练": "Activer l'entraînement GPT",
|
85 |
+
"终止GPT训练": "Arrêter l'entraînement GPT",
|
86 |
+
"GPT训练进程输出信息": "Informations de processus d'entraînement GPT",
|
87 |
+
"1C-推理": "1C-Inférence",
|
88 |
+
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Choisissez le modèle entraîné stocké sous SoVITS_weights et GPT_weights. Par défaut, l'un d'eux est un modèle de base pour l'expérience de TTS Zero Shot de 5 secondes.",
|
89 |
+
"*GPT模型列表": "*Liste des modèles GPT",
|
90 |
+
"*SoVITS模型列表": "*Liste des modèles SoVITS",
|
91 |
+
"GPU卡号,只能填1个整数": "Numéro de carte GPU, ne peut contenir qu'un seul entier",
|
92 |
+
"刷新模型路径": "Actualiser le chemin du modèle",
|
93 |
+
"是否开启TTS推理WebUI": "Activer l'interface Web d'inférence TTS",
|
94 |
+
"TTS推理WebUI进程输出信息": "Informations de processus de l'interface Web d'inférence TTS",
|
95 |
+
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-Modification de la voix",
|
96 |
+
"施工中,请静候佳音": "En construction, veuillez attendre patiemment",
|
97 |
+
"TTS推理进程已开启": "Le processus d'inférence TTS est en cours",
|
98 |
+
"TTS推理进程已关闭": "Le processus d'inférence TTS est terminé",
|
99 |
+
"打标工具WebUI已开启": "L'interface Web de l'outil d'annotation est en cours",
|
100 |
+
"打标工具WebUI已关闭": "L'interface Web de l'outil d'annotation est terminée",
|
101 |
+
"本软件以MIT协���开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Ce logiciel est open source sous la licence MIT. L'auteur n'a aucun contrôle sur le logiciel. Les utilisateurs et les diffuseurs du son exporté par le logiciel en assument l'entière responsabilité. Si vous n'acceptez pas ces termes, vous ne pouvez ni utiliser ni citer aucun code ou fichier à l'intérieur du package. Voir LICENSE dans le répertoire racine pour plus de détails.",
|
102 |
+
"*请上传并填写参考信息": "*Veuillez télécharger et remplir les informations de référence",
|
103 |
+
"*请填写需要合成的目标文本": "*Veuillez remplir le texte cible à synthétiser",
|
104 |
+
"ASR任务开启:%s": "Tâche ASR activée : %s",
|
105 |
+
"GPT训练完成": "Entraînement GPT terminé",
|
106 |
+
"GPT训练开始:%s": "Entraînement GPT commencé : %s",
|
107 |
+
"SSL提取进程执行中": "Processus d'extraction SSL en cours",
|
108 |
+
"SSL提取进程结束": "Processus d'extraction SSL terminé",
|
109 |
+
"SoVITS训练完成": "Entraînement SoVITS terminé",
|
110 |
+
"SoVITS训练开始:%s": "Entraînement SoVITS commencé : %s",
|
111 |
+
"一键三连中途报错": "Erreur intermédiaire dans la séquence d'un clic trois connexions",
|
112 |
+
"一键三连进程结束": "Processus de séquence d'un clic trois connexions terminé",
|
113 |
+
"中文": "Chinois",
|
114 |
+
"凑50字一切": "Assembler 50 mots tout",
|
115 |
+
"凑五句一切": "Assembler cinq phrases tout",
|
116 |
+
"切分后文本": "Texte après découpage",
|
117 |
+
"切割执行中": "Découpage en cours",
|
118 |
+
"切割结束": "Découpage terminé",
|
119 |
+
"参考音频的文本": "Texte de l'audio de référence",
|
120 |
+
"参考音频的语种": "Langue de l'audio de référence",
|
121 |
+
"合成语音": "Synthèse vocale",
|
122 |
+
"后续将支持混合语种编码文本输入。": "Prise en charge ultérieure du codage de texte avec des langues mixtes.",
|
123 |
+
"已有正在进行的ASR任务,需先终止才能开启下一次任务": "Une tâche ASR est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.",
|
124 |
+
"已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "Une tâche d'entraînement GPT est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.",
|
125 |
+
"已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "Une tâche d'extraction SSL est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.",
|
126 |
+
"已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "Une tâche d'entraînement SoVITS est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.",
|
127 |
+
"已有正在进行的一键三连任务,需先终止才能开启下一次任务": "Une tâche d'une séquence d'un clic trois connexions est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.",
|
128 |
+
"已有正在进行的切割任务,需先终止才能开启下一次任务": "Une tâche de découpage est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.",
|
129 |
+
"已有正在进行的文本任务,需先终止才能开启下一次任务": "Une tâche de texte est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.",
|
130 |
+
"已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "Une tâche d'extraction de jetons sémantiques est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.",
|
131 |
+
"已终止ASR进程": "Processus ASR arrêté",
|
132 |
+
"已终止GPT训练": "Entraînement GPT arrêté",
|
133 |
+
"已终止SoVITS训练": "Entraînement SoVITS arrêté",
|
134 |
+
"已终止所有1a进程": "Tous les processus 1a ont été arrêtés",
|
135 |
+
"已终止所有1b进程": "Tous les processus 1b ont été arrêtés",
|
136 |
+
"已终止所有一键三连进程": "Tous les processus d'une séquence d'un clic trois connexions ont été arrêtés",
|
137 |
+
"已终止所有切割进程": "Tous les processus de découpage ont été arrêtés",
|
138 |
+
"已终止所有语义token进程": "Tous les processus de jetons sémantiques ont été arrêtés",
|
139 |
+
"按中文句号。切": "Couper selon les points en chinois.",
|
140 |
+
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Outil de découpage de texte. Un texte trop long peut ne pas donner un bon résultat, donc il est recommandé de le couper d'abord s'il est trop long. La synthèse se fera en séparant le texte par les sauts de ligne puis en les assemblant.",
|
141 |
+
"文本进程执行中": "Processus de texte en cours",
|
142 |
+
"文本进程结束": "Processus de texte terminé",
|
143 |
+
"日文": "Japonais",
|
144 |
+
"英文": "Anglais",
|
145 |
+
"语义token提取进程执行中": "Processus d'extraction de jetons s��mantiques en cours",
|
146 |
+
"语义token提取进程结束": "Processus d'extraction de jetons sémantiques terminé",
|
147 |
+
"请上传参考音频": "Veuillez télécharger l'audio de référence",
|
148 |
+
"输入路径不存在": "Le chemin d'entrée n'existe pas",
|
149 |
+
"输入路径存在但既不是文件也不是文件夹": "Le chemin d'entrée existe mais n'est ni un fichier ni un dossier",
|
150 |
+
"输出的语音": "Audio de sortie",
|
151 |
+
"进度:1a-done": "Progression : 1a-done",
|
152 |
+
"进度:1a-done, 1b-ing": "Progression : 1a-done, 1b-ing",
|
153 |
+
"进度:1a-ing": "Progression : 1a-ing",
|
154 |
+
"进度:1a1b-done": "Progression : 1a1b-done",
|
155 |
+
"进度:1a1b-done, 1cing": "Progression : 1a1b-done, 1cing",
|
156 |
+
"进度:all-done": "Progression : all-done",
|
157 |
+
"需要合成的切分前文本": "Texte préalable à la synthèse",
|
158 |
+
"需要合成的文本": "Texte à synthétiser",
|
159 |
+
"需要合成的语种": "Langue de synthèse requise",
|
160 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Si >= 3, utilisez le résultat de la reconnaissance de hauteur de récolte avec un filtre médian, la valeur est le rayon du filtre, son utilisation peut atténuer les sons sourds",
|
161 |
+
"A模型权重": "Poids du modèle A",
|
162 |
+
"A模型路径": "Chemin du modèle A",
|
163 |
+
"B模型路径": "Chemin du modèle B",
|
164 |
+
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
|
165 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Fichier de courbe F0, optionnel, une ligne par hauteur de ton, remplace F0 et la hauteur de ton par défaut",
|
166 |
+
"Index Rate": "Taux d'index",
|
167 |
+
"Onnx导出": "Exportation Onnx",
|
168 |
+
"Onnx输出路径": "Chemin d'exportation Onnx",
|
169 |
+
"RVC模型路径": "Chemin du modèle RVC",
|
170 |
+
"ckpt处理": "Traitement des points de contrôle",
|
171 |
+
"harvest进程数": "Nombre de processus de récolte",
|
172 |
+
"index文件路径不可包含中文": "Le chemin du fichier d'index ne peut pas contenir de caractères chinois",
|
173 |
+
"pth文件路径不可包含中文": "Le chemin du fichier pth ne peut pas contenir de caractères chinois",
|
174 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Configuration des numéros de carte rmvpe : séparez les numéros de carte utilisés en entrée par des tirets, par exemple 0-0-1 signifie 2 processus sur la carte 0 et 1 processus sur la carte 1",
|
175 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Étape 1 : Remplissez la configuration de l'expérience. Les données de l'expérience sont stockées dans le dossier logs, chaque expérience a son propre dossier. Vous devez entrer manuellement le chemin du nom de l'expérience, qui contient la configuration de l'expérience, les journaux et les fichiers de modèle entraînés.",
|
176 |
+
"step1:正在处理数据": "Étape 1 : Traitement des données en cours",
|
177 |
+
"step2:正在提取音高&正在提取特征": "Étape 2 : Extraction de la hauteur tonale et des caractéristiques en cours",
|
178 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Étape 2a : Parcours automatique de tous les fichiers décodables en audio dans le dossier d'entraînement et normalisation par découpage. Deux dossiers wav sont générés dans le répertoire de l'expérience. Actuellement, seule la formation individuelle est prise en charge.",
|
179 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Étape 2b : Extraction de la hauteur tonale avec le CPU (si le modèle a une hauteur tonale) et extraction des caractéristiques avec le GPU (choisissez le numéro de la carte)",
|
180 |
+
"step3: 填写训练设置, 开始训练模型和索引": "Étape 3 : Remplissez les paramètres d'entraînement et commencez l'entraînement du modèle et de l'index",
|
181 |
+
"step3a:正在训练模型": "Étape 3a : Entraînement du modèle en cours",
|
182 |
+
"一键训练": "Entraînement en un clic",
|
183 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "Également possible d'entrer en lot des fichiers audio, au choix, privilégiez la lecture du dossier",
|
184 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Numéros de carte utilisés en entrée séparés par des tirets, par exemple 0-1-2 Utilisez les cartes 0, 1 et 2",
|
185 |
+
"伴奏人声分离&去混响&去回声": "Séparation de la voix et de l'accompagnement, suppression de la réverbération et de l'écho",
|
186 |
+
"使用模型采样率": "Taux d'échantillonnage du modèle",
|
187 |
+
"使用设备采样率": "Taux d'échantillonnage de l'appareil",
|
188 |
+
"保存名": "Nom de sauvegarde",
|
189 |
+
"保存的文件名, 默认空为和源文件同名": "Nom de fichier sauvegardé, par défaut vide pour avoir le même nom que le fichier source",
|
190 |
+
"保存的模型名不带后缀": "Nom du modèle sauvegardé sans suffixe",
|
191 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protéger les consonnes claires et les sons de respiration, éviter les artefacts tels que le déchirement du son électronique, tirer à 0.5 pour désactiver, diminuer pour augmenter la protection mais cela peut réduire l'efficacité de l'indexation",
|
192 |
+
"修改": "Modifier",
|
193 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modifier les informations du modèle (uniquement pour les petits fichiers de modèle extraits sous le dossier weights)",
|
194 |
+
"停止音频转换": "Arrêter la conversion audio",
|
195 |
+
"全流程结束!": "Processus complet terminé !",
|
196 |
+
"刷新音色列表和索引路径": "Actualiser la liste des timbres et les chemins d'index",
|
197 |
+
"加载模型": "Charger le modèle",
|
198 |
+
"加载预训练底模D路径": "Charger le chemin du modèle de base pré-entraîné D",
|
199 |
+
"加载预训练底模G路径": "Charger le chemin du modèle de base pré-entraîné G",
|
200 |
+
"单次推理": "Inférence unique",
|
201 |
+
"卸载音色省显存": "Décharger le timbre pour économiser la mémoire vidéo",
|
202 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "Changer la tonalité (entier, quantité de demi-tons, monter d'une octave 12, descendre d'une octave -12)",
|
203 |
+
"后处理重采样至最终采样率,0为不进行重采样": "Re-échantillonnage en post-traitement à la fréquence d'échantillonnage finale, 0 pour ne pas effectuer de re-échantillonnage",
|
204 |
+
"否": "Non",
|
205 |
+
"启用相位声码器": "Activer le codeur de phase",
|
206 |
+
"响应阈值": "Seuil de réponse",
|
207 |
+
"响度因子": "Facteur de volume sonore",
|
208 |
+
"处理数据": "Traiter les données",
|
209 |
+
"导出Onnx模型": "Exporter le modèle Onnx",
|
210 |
+
"导出文件格式": "Format d'exportation du fichier",
|
211 |
+
"常见问题解答": "Questions fréquemment posées",
|
212 |
+
"常规设置": "Paramètres généraux",
|
213 |
+
"开始音频转换": "Démarrer la conversion audio",
|
214 |
+
"性能设置": "Paramètres de performance",
|
215 |
+
"批量推理": "Inférence en lot",
|
216 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversion en lot, entrez le dossier audio à convertir, ou téléchargez plusieurs fichiers audio, les fichiers convertis seront enregistrés dans le dossier spécifié (opt par défaut).",
|
217 |
+
"指定输出主人声文件夹": "Spécifier le dossier de sortie pour la voix principale",
|
218 |
+
"指定输出文件夹": "Spécifier le dossier de sortie",
|
219 |
+
"指定输出非主人声文件夹": "Spécifier le dossier de sortie pour la non-voix principale",
|
220 |
+
"推理时间(ms):": "Temps d'inférence (ms) :",
|
221 |
+
"推理音色": "Timbre d'inférence",
|
222 |
+
"提取": "Extraire",
|
223 |
+
"提取音高和处理数据使用的CPU进程数": "Nombre de processus CPU utilisés pour extraire la hauteur tonale et traiter les données",
|
224 |
+
"是": "Oui",
|
225 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Mettre en cache ou non tous les ensembles d'entraînement dans la mémoire vidéo. Pour les petites données de moins de 10 minutes, la mise en cache peut accélérer l'entraînement, mais pour les grandes données, la mise en cache peut épuiser la mémoire vidéo sans améliorer considérablement la vitesse.",
|
226 |
+
"查看": "Voir",
|
227 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Voir les informations du modèle (uniquement pour les petits fichiers de modèle extraits sous le dossier weights)",
|
228 |
+
"检索特征占比": "Pourcentage des caractéristiques extraites",
|
229 |
+
"模型": "Modèle",
|
230 |
+
"模型推理": "Inférence du modèle",
|
231 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extraction du modèle (saisissez le chemin du modèle volumineux sous le dossier logs), utilisé lorsque l'entraînement est à mi-chemin, que vous ne voulez pas continuer l'entraînement, que le modèle n'a pas été automatiquement extrait et sauvegardé en tant que petit fichier, ou que vous souhaitez tester le modèle intermédiaire.",
|
232 |
+
"模型是否带音高指导": "Le modèle inclut-il un guidage en hauteur tonale",
|
233 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "Le modèle inclut-il un guidage en hauteur tonale (nécessaire pour le chant, facultatif pour la parole)",
|
234 |
+
"模型是否带音高指导,1是0否": "Le modèle inclut-il un guidage en hauteur tonale, 1 pour oui, 0 pour non",
|
235 |
+
"模型版本型号": "Numéro de version du modèle",
|
236 |
+
"模型融合, 可用于测试音色融合": "Fusion de modèles, utilisée pour tester la fusion des timbres",
|
237 |
+
"模型路径": "Chemin du modèle",
|
238 |
+
"淡入淡出长度": "Longueur du fondu enchaîné",
|
239 |
+
"版本": "Version",
|
240 |
+
"特征提取": "Extraction des caractéristiques",
|
241 |
+
"特征检索库文件路径,为空则使用下拉的选择结果": "Chemin du fichier de bibliothèque de recherche de caractéristiques, laisser vide pour utiliser le résultat de la liste déroulante",
|
242 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Recommandation pour la transformation homme vers femme +12 clés, femme vers homme -12 clés, ajustez vous-même si l'étendue du son explose et provoque une distorsion de la voix.",
|
243 |
+
"目标采样率": "Taux d'échantillonnage cible",
|
244 |
+
"算法延迟(ms):": "Retard de l'algorithme (ms):",
|
245 |
+
"自动检测index路径,下拉式选择(dropdown)": "Détection automatique du chemin de l'index, choix dans la liste déroulante",
|
246 |
+
"融合": "Fusion",
|
247 |
+
"要改的模型信息": "Informations du modèle à modifier",
|
248 |
+
"要置入的模型信息": "Informations du modèle à insérer",
|
249 |
+
"训练": "Entraînement",
|
250 |
+
"训练模型": "Entraîner le modèle",
|
251 |
+
"训练特征索引": "Entraîner l'index des caractéristiques",
|
252 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entraînement terminé, vous pouvez consulter les journaux d'entraînement de la console ou le fichier train.log dans le dossier d'expérience",
|
253 |
+
"请指定说话人id": "Veuillez spécifier l'ID du locuteur",
|
254 |
+
"请选择index文件": "Veuillez choisir le fichier d'index",
|
255 |
+
"请选择pth文件": "Veuillez choisir le fichier pth",
|
256 |
+
"请选择说话人id": "Veuillez choisir l'ID du locuteur",
|
257 |
+
"转换": "Conversion",
|
258 |
+
"输入实验名": "Nom de l'expérience d'entrée",
|
259 |
+
"输入待处理音频文件夹路径": "Entrez le chemin du dossier audio à traiter",
|
260 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Entrez le chemin du dossier audio à traiter (copiez-le depuis la barre d'adresse du gestionnaire de fichiers)",
|
261 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "Entrez le chemin du fichier audio à traiter (par défaut, c'est un exemple de format correct)",
|
262 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Entrez le taux de fusion pour remplacer l'enveloppe de volume source par l'enveloppe de volume de sortie, plus proche de 1, plus l'enveloppe de sortie est utilisée",
|
263 |
+
"输入监听": "Entrée d'écoute",
|
264 |
+
"输入训练文件夹路径": "Entrez le chemin du dossier d'entraînement",
|
265 |
+
"输入设备": "Entrée de l'appareil",
|
266 |
+
"输入降噪": "Entrée de réduction du bruit",
|
267 |
+
"输出信息": "Sortie d'information",
|
268 |
+
"输出变声": "Sortie de la transformation de la voix",
|
269 |
+
"输出设备": "Sortie de l'appareil",
|
270 |
+
"输出降噪": "Sortie de réduction du bruit",
|
271 |
+
"输出音频(右下角三个点,点了可以下载)": "Sortie audio (trois points en bas à droite, cliquez pour télécharger)",
|
272 |
+
"选择.index文件": "Choisissez le fichier .index",
|
273 |
+
"选择.pth文件": "Choisissez le fichier .pth",
|
274 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Choisissez l'algorithme d'extraction de hauteur tonale, vous pouvez utiliser pm pour accélérer l'entrée de la voix, harvest est bon pour les basses mais très lent, crepe a un bon effet mais utilise le GPU",
|
275 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Choisissez l'algorithme d'extraction de hauteur tonale, vous pouvez utiliser pm pour accélérer l'entrée de la voix, harvest est bon pour les basses mais très lent, crepe a un bon effet mais utilise le GPU, rmvpe a le meilleur effet et utilise légèrement le GPU",
|
276 |
+
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Choisissez l'algorithme d'extraction de hauteur tonale : utilisez pm pour accélérer l'entrée de la voix, une voix de haute qualité mais nécessite une meilleure CPU ; utilisez dio pour accélérer, harvest a une meilleure qualité mais est lent, rmvpe a le meilleur effet et utilise légèrement la CPU/GPU",
|
277 |
+
"采样率:": "Taux d'échantillonnage:",
|
278 |
+
"采样长度": "Longueur d'échantillonnage",
|
279 |
+
"重载设备列表": "Recharger la liste des appareils",
|
280 |
+
"音调设置": "Paramètres de tonalité",
|
281 |
+
"音频设备(请使用同种类驱动)": "Appareil audio (veuillez utiliser un pilote de même type)",
|
282 |
+
"音高算法": "Algorithme de hauteur tonale",
|
283 |
+
"额外推理时长": "Durée d'inférence supplémentaire"
|
284 |
+
}
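For context, the locale files added in this commit are flat JSON dictionaries keyed by the original Chinese UI strings, with the translated string as the value. Below is a minimal sketch of how such a dictionary might be loaded and queried; the helper name load_locale and the fallback behaviour are illustrative assumptions, not the project's actual loader.

import json
import locale
import os

def load_locale(language=None, locale_dir="i18n/locale"):
    # Fall back to the system language, then to en_US if no matching file exists.
    language = language or locale.getdefaultlocale()[0] or "en_US"
    path = os.path.join(locale_dir, f"{language}.json")
    if not os.path.exists(path):
        path = os.path.join(locale_dir, "en_US.json")
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)

translations = load_locale("fr_FR")
# Unknown keys fall back to the original Chinese string so the UI still renders.
print(translations.get("开启GPT训练", "开启GPT训练"))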
i18n/locale/it_IT.json
ADDED
@@ -0,0 +1,276 @@
1 |
+
{
|
2 |
+
"很遗憾您这没有能用的显卡来支持您训练": "Purtroppo non hai una scheda grafica utilizzabile per supportare il tuo addestramento",
|
3 |
+
"UVR5已开启": "UVR5 è attivato",
|
4 |
+
"UVR5已关闭": "UVR5 è disattivato",
|
5 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Questo software è open source con licenza MIT. L'autore non ha alcun controllo sul software. L'utente che utilizza il software o diffonde i suoni derivati dal software ne è responsabile. <br>Se non accetti questi termini, non puoi utilizzare o citare alcun codice o file all'interno del pacchetto software. Vedi la cartella principale<b>LICENSE</b> per i dettagli.",
|
6 |
+
"0-前置数据集获取工具": "0-Strumento di acquisizione del dataset preliminare",
|
7 |
+
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-Strumento di separazione voce e accompagnamento UVR5 & Rimozione riverbero e ritardo",
|
8 |
+
"是否开启UVR5-WebUI": "Attivare UVR5-WebUI",
|
9 |
+
"UVR5进程输出信息": "Informazioni sull'output del processo UVR5",
|
10 |
+
"0b-语音切分工具": "0b-Strumento di segmentazione vocale",
|
11 |
+
"音频自动切分输入路径,可文件可文件夹": "Percorso di input per la segmentazione automatica dell'audio, può essere un file o una cartella",
|
12 |
+
"切分后的子音频的输出根目录": "Directory radice di output per gli audio segmentati",
|
13 |
+
"threshold:音量小于这个值视作静音的备选切割点": "threshold: Punto di taglio alternativo considerato silenzioso se il volume è inferiore a questo valore",
|
14 |
+
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: Lunghezza minima di ogni segmento. Se il primo segmento è troppo corto, verrà unito agli segmenti successivi fino a superare questo valore",
|
15 |
+
"min_interval:最短切割间隔": "min_interval: Intervallo minimo di taglio",
|
16 |
+
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: Come calcolare la curva del volume. Più piccolo è, maggiore è la precisione ma aumenta la complessità computazionale (non significa che una maggiore precisione dà risultati migliori)",
|
17 |
+
"max_sil_kept:切完后静音最多留多长": "max_sil_kept: Massima durata del silenzio dopo il taglio",
|
18 |
+
"开启语音切割": "Attivare la segmentazione vocale",
|
19 |
+
"终止语音切割": "Terminare la segmentazione vocale",
|
20 |
+
"max:归一化后最大值多少": "max: Massimo valore dopo la normalizzazione",
|
21 |
+
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix: Quanta proporzione dell'audio normalizzato deve essere miscelata",
|
22 |
+
"切割使用的进程数": "Numero di processi utilizzati per il taglio",
|
23 |
+
"语音切割进程输出信息": "Informazioni sull'output del processo di segmentazione vocale",
|
24 |
+
"0c-中文批量离线ASR工具": "0c-Strumento di ASR offline batch in cinese",
|
25 |
+
"开启离线批量ASR": "Attivare ASR offline batch",
|
26 |
+
"终止ASR进程": "Terminare il processo ASR",
|
27 |
+
"批量ASR(中文only)输入文件夹路径": "Percorso della cartella di input per ASR offline batch (solo cinese)",
|
28 |
+
"ASR进程输出信息": "Informazioni sull'output del processo ASR",
|
29 |
+
"0d-语音文本校对标注工具": "0d-Strumento di correzione e annotazione testo vocale",
|
30 |
+
"是否开启打标WebUI": "Attivare l'interfaccia utente Web di annotazione",
|
31 |
+
"打标数据标注文件路径": "Percorso del file di annotazione dei dati contrassegnati",
|
32 |
+
"打标工具进程输出信息": "Informazioni sull'output del processo di annotazione",
|
33 |
+
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
|
34 |
+
"*实验/模型名": "*Nome dell'esperimento/modello",
|
35 |
+
"显卡信息": "Informazioni sulla scheda grafica",
|
36 |
+
"预训练的SoVITS-G模型路径": "Percorso del modello preaddestrato SoVITS-G",
|
37 |
+
"预训练的SoVITS-D模型路径": "Percorso del modello preaddestrato SoVITS-D",
|
38 |
+
"预训练的GPT模型路径": "Percorso del modello preaddestrato GPT",
|
39 |
+
"1A-训练集格式化工具": "1A-Strumento di formattazione del set di addestramento",
|
40 |
+
"输出logs/实验名目录下应有23456开头的文件和文件夹": "Nella cartella logs/nome dell'esperimento dovrebbero esserci file e cartelle che iniziano con 23456",
|
41 |
+
"*文本标注文件": "*File di annotazione del testo",
|
42 |
+
"*训练集音频文件目录": "*Directory dei file audio del set di addestramento",
|
43 |
+
"训练集音频文件目录 拼接 list文件里波形对应的文件名。": "Directory dei file audio del set di addestramento, concatenare il nome del file corrispondente nella lista",
|
44 |
+
"1Aa-文本内容": "1Aa-Contenuto del testo",
|
45 |
+
"GPU卡号以-分割,每个卡号一个进程": "Numero di GPU separati da '-'; ogni numero corrisponde a un processo",
|
46 |
+
"预训练的中文BERT模型路径": "Percorso del modello BERT cinese preaddestrato",
|
47 |
+
"开启文本获取": "Attivare l'estrazione del testo",
|
48 |
+
"终止文本获取进程": "Terminare il processo di estrazione del testo",
|
49 |
+
"文本进程输出信息": "Informazioni sull'output del processo di estrazione del testo",
|
50 |
+
"1Ab-SSL自监督特征提取": "1Ab-Estrazione di caratteristiche auto-supervisionata SSL",
|
51 |
+
"预训练的SSL模型路径": "Percorso del modello SSL preaddestrato",
|
52 |
+
"开启SSL提取": "Attivare l'estrazione SSL",
|
53 |
+
"终止SSL提取进程": "Terminare il processo di estrazione SSL",
|
54 |
+
"SSL进程输出信息": "Informazioni sull'output del processo SSL",
|
55 |
+
"1Ac-语义token提取": "1Ac-Estrazione del token semantico",
|
56 |
+
"开启语义token提取": "Attivare l'estrazione del token semantico",
|
57 |
+
"终止语义token提取进程": "Terminare il processo di estrazione del token semantico",
|
58 |
+
"语义token提取进程输出信息": "Informazioni sull'output del processo di estrazione del token semantico",
|
59 |
+
"1Aabc-训练集格式化一键三连": "1Aabc-Strumento di formattazione del set di addestramento con tre passaggi",
|
60 |
+
"开启一键三连": "Attivare la formattazione con tre passaggi",
|
61 |
+
"终止一键三连": "Terminare la formattazione con tre passaggi",
|
62 |
+
"一键三连进程输出信息": "Informazioni sull'output del processo di 'One Click Three Connect'",
|
63 |
+
"1B-微调训练": "1B-Allenamento di affinamento",
|
64 |
+
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-Allenamento di SoVITS. I file del modello destinati alla condivisione sono salvati in SoVITS_weights.",
|
65 |
+
"每张显卡的batch_size": "Batch size per ogni scheda grafica",
|
66 |
+
"总训练轮数total_epoch,不建议太高": "Numero totale di epoche di addestramento, non raccomandato troppo alto",
|
67 |
+
"文本模块学习率权重": "Peso del tasso di apprendimento del modulo di testo",
|
68 |
+
"保存频率save_every_epoch": "Frequenza di salvataggio ogni epoca",
|
69 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "Salvare solo il file ckpt più recente per risparmiare spazio su disco",
|
70 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Salvare il modello finale più piccolo nella cartella weights ad ogni punto di salvataggio",
|
71 |
+
"开启SoVITS训练": "Attivare l'allenamento di SoVITS",
|
72 |
+
"终止SoVITS训练": "Terminare l'allenamento di SoVITS",
|
73 |
+
"SoVITS训练进程输出信息": "Informazioni sull'output del processo di allenamento di SoVITS",
|
74 |
+
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-Allenamento di GPT. I file del modello destinati alla condivisione sono salvati in GPT_weights.",
|
75 |
+
"总训练轮数total_epoch": "Numero totale di epoche di addestramento",
|
76 |
+
"开启GPT训练": "Attivare l'allenamento di GPT",
|
77 |
+
"终止GPT训练": "Terminare l'allenamento di GPT",
|
78 |
+
"GPT训练进程输出信息": "Informazioni sull'output del processo di allenamento di GPT",
|
79 |
+
"1C-推理": "1C-Inferenza",
|
80 |
+
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Scegli il modello salvato in SoVITS_weights e GPT_weights dopo l'addestramento. Uno di default è il modello di base, utilizzato per l'esperienza di Zero Shot TTS in 5 secondi.",
|
81 |
+
"*GPT模型列表": "*Lista dei modelli GPT",
|
82 |
+
"*SoVITS模型列表": "*Lista dei modelli SoVITS",
|
83 |
+
"GPU卡号,只能填1个整数": "Numero della scheda grafica, può essere inserito solo un numero intero",
|
84 |
+
"刷新模型路径": "Aggiorna il percorso del modello",
|
85 |
+
"是否开启TTS推理WebUI": "Attivare l'interfaccia utente Web per l'inferenza TTS",
|
86 |
+
"TTS推理WebUI进程输出信息": "Informazioni sull'output del processo dell'interfaccia utente Web per l'inferenza TTS",
|
87 |
+
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-Voce modificata",
|
88 |
+
"施工中,请静候佳音": "In costruzione, attendi pazientemente le buone notizie",
|
89 |
+
"TTS推理进程已开启": "Il processo di inferenza TTS è stato avviato",
|
90 |
+
"TTS推理进程已关闭": "Il processo di inferenza TTS è stato chiuso",
|
91 |
+
"打标工具WebUI已开启": "L'interfaccia utente Web dello strumento di annotazione è stata avviata",
|
92 |
+
"打标工具WebUI已关闭": "L'interfaccia utente Web dello strumento di annotazione è stata chiusa",
|
93 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Questo software è open source con licenza MIT. L'autore non ha alcun controllo sul software. L'utente che utilizza il software o diffonde i suoni derivati dal software ne è responsabile. Se non accetti questi termini, non puoi utilizzare o citare alcun codice o file all'interno del pacchetto software. Vedi la cartella principale LICENSE per i dettagli.",
|
94 |
+
"*请上传并填写参考信息": "*Carica e compila le informazioni di riferimento",
|
95 |
+
"*请填写需要合成的目标文本": "*Compila il testo di destinazione da sintetizzare",
|
96 |
+
"ASR任务开启:%s": "Attività ASR avviata: %s",
|
97 |
+
"GPT训练完成": "Allenamento di GPT completato",
|
98 |
+
"GPT训练开始:%s": "Inizio dell'allenamento di GPT: %s",
|
99 |
+
"SSL提取进程执行中": "Processo di estrazione SSL in corso",
|
100 |
+
"SSL提取进程结束": "Processo di estrazione SSL completato",
|
101 |
+
"SoVITS训练完成": "Allenamento di SoVITS completato",
|
102 |
+
"SoVITS训练开始:%s": "Inizio dell'allenamento di SoVITS: %s",
|
103 |
+
"一键三连中途报错": "Errore durante 'One Click Three Connect'",
|
104 |
+
"一键三连进程结束": "Processo di 'One Click Three Connect' completato",
|
105 |
+
"中文": "Cinese",
|
106 |
+
"凑50字一切": "Riempire con 50 caratteri per tutto",
|
107 |
+
"凑五句一切": "Riempire con cinque frasi per tutto",
|
108 |
+
"切分后文本": "Testo dopo il taglio",
|
109 |
+
"切割执行中": "Taglio in corso",
|
110 |
+
"切割结束": "Taglio completato",
|
111 |
+
"参考音频的文本": "Testo dell'audio di riferimento",
|
112 |
+
"参考音频的语种": "Lingua dell'audio di riferimento",
|
113 |
+
"合成语音": "Sintesi vocale",
|
114 |
+
"后续将支持混合语种编码文本输入。": "In futuro sarà supportata l'input di testi con codifica mista di lingue.",
|
115 |
+
"已有正在进行的ASR任务,需先终止才能开启下一次任务": "È già in corso un'attività ASR. Devi interromperla prima di avviare una nuova attività.",
|
116 |
+
"已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "È già in corso un'attività di allenamento di GPT. Devi interromperla prima di avviare una nuova attività.",
|
117 |
+
"已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "È già in corso un'attività di estrazione SSL. Devi interromperla prima di avviare una nuova attività.",
|
118 |
+
"已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "È già in corso un'attività di allenamento di SoVITS. Devi interromperla prima di avviare una nuova attività.",
|
119 |
+
"已有正在进行的一键三连任务,需先终止才能开启下一次任务": "È già in corso un'attività di 'One Click Three Connect'. Devi interromperla prima di avviare una nuova attività.",
|
120 |
+
"已有正在进行的切割任务,需先终止才能开启下一次任务": "È già in corso un'attività di taglio. Devi interromperla prima di avviare una nuova attività.",
|
121 |
+
"已有正在进行的文本任务,需先终止才能开启下一次任务": "È già in corso un'attività di testo. Devi interromperla prima di avviare una nuova attività.",
|
122 |
+
"已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "È già in corso un'attività di estrazione di token semantici. Devi interromperla prima di avviare una nuova attività.",
|
123 |
+
"已终止ASR进程": "Il processo ASR è stato terminato",
|
124 |
+
"已终止GPT训练": "L'allenamento di GPT è stato terminato",
|
125 |
+
"已终止SoVITS训练": "Allenamento SoVITS terminato",
|
126 |
+
"已终止所有1a进程": "Processi 1a terminati",
|
127 |
+
"已终止所有1b进程": "Processi 1b terminati",
|
128 |
+
"已终止所有一键三连进程": "Processi One Click Three Connect terminati",
|
129 |
+
"已终止所有切割进程": "Processi di taglio terminati",
|
130 |
+
"已终止所有语义token进程": "Processi di estrazione token semantici terminati",
|
131 |
+
"按中文句号。切": "Taglia secondo il punto cinese.",
|
132 |
+
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Strumento di divisione del testo. I testi troppo lunghi potrebbero non avere un buon effetto di sintesi, quindi è consigliabile dividerli prima della sintesi. La sintesi verrà separata in base ai ritorni a capo nel testo e successivamente ricomposta.",
|
133 |
+
"文本进程执行中": "Processo di testo in esecuzione",
|
134 |
+
"文本进程结束": "Processo di testo terminato",
|
135 |
+
"日文": "Giapponese",
|
136 |
+
"英文": "Inglese",
|
137 |
+
"语义token提取进程执行中": "Processo di estrazione token semantici in esecuzione",
|
138 |
+
"语义token提取进程结束": "Processo di estrazione token semantici terminato",
|
139 |
+
"请上传参考音频": "Carica l'audio di riferimento",
|
140 |
+
"输入路径不存在": "Il percorso di input non esiste",
|
141 |
+
"输入路径存在但既不是文件也不是文件夹": "Il percorso di input esiste ma non è né un file né una cartella",
|
142 |
+
"输出的语音": "Audio di output",
|
143 |
+
"进度:1a-done": "Progresso: 1a-done",
|
144 |
+
"进度:1a-done, 1b-ing": "Progresso: 1a-done, 1b-ing",
|
145 |
+
"进度:1a-ing": "Progresso: 1a-ing",
|
146 |
+
"进度:1a1b-done": "Progresso: 1a1b-done",
|
147 |
+
"进度:1a1b-done, 1cing": "Progresso: 1a1b-done, 1cing",
|
148 |
+
"进度:all-done": "Progresso: all-done",
|
149 |
+
"需要合成的切分前文本": "Testo da sintetizzare prima del taglio",
|
150 |
+
"需要合成的文本": "Testo da sintetizzare",
|
151 |
+
"需要合成的语种": "Lingua da sintetizzare",
|
152 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Se >=3, usa il filtraggio mediano sui risultati del riconoscimento dell'altezza di harvest, il valore è il raggio del filtro. L'uso di questo valore può attenuare i suoni muti.",
|
153 |
+
"A模型权重": "Peso del modello A",
|
154 |
+
"A模型路径": "Percorso del modello A",
|
155 |
+
"B模型路径": "Percorso del modello B",
|
156 |
+
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
|
157 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "File della curva F0, opzionale, una riga per un'altezza, sostituisce il F0 predefinito e le variazioni di tono",
|
158 |
+
"Index Rate": "Tasso di indice",
|
159 |
+
"Onnx导出": "Esporta in Onnx",
|
160 |
+
"Onnx输出路径": "Percorso di output Onnx",
|
161 |
+
"RVC模型路径": "Percorso del modello RVC",
|
162 |
+
"ckpt处理": "Elaborazione del ckpt",
|
163 |
+
"harvest进程数": "Numero di processi harvest",
|
164 |
+
"index文件路径不可包含中文": "Il percorso del file di indice non può contenere caratteri cinesi",
|
165 |
+
"pth文件路径不可包含中文": "Il percorso del file pth non può contenere caratteri cinesi",
|
166 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Configurazione dei numeri delle schede rmvpe: separa con - i numeri delle schede dei diversi processi utilizzati in input. Ad esempio, 0-0-1 utilizza 2 processi sulla scheda 0 e 1 processo sulla scheda 1",
|
167 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Passo 1: Compila la configurazione sperimentale. I dati sperimentali sono salvati in logs, ogni esperimento in una cartella. È necessario inserire manualmente il percorso del nome dell'esperimento, contenente configurazione sperimentale, log e file di modello addestrato.",
|
168 |
+
"step1:正在处理数据": "Passo 1: Elaborazione dei dati in corso",
|
169 |
+
"step2:正在提取音高&正在提取特征": "Passo 2: Estrazione dell'altezza e delle caratteristiche in corso",
|
170 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Passo 2a: Attraversa automaticamente tutti i file nella cartella di addestramento che possono essere decodificati in audio e li normalizza a fette. Nella cartella sperimentale vengono generate due cartelle wav; Al momento supporta solo l'addestramento singolo.",
|
171 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Passo 2b: Usa la CPU per estrarre l'altezza (se il modello la include) e la GPU per estrarre le caratteristiche (scegliendo il numero della scheda)",
|
172 |
+
"step3: 填写训练设置, 开始训练模型和索引": "Passo 3: Compila le impostazioni di addestramento, inizia ad addestrare il modello e l'indice",
|
173 |
+
"step3a:正在训练模型": "Passo 3a: Addestramento del modello in corso",
|
174 |
+
"一键训练": "Allenamento One-Click",
|
175 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "È possibile anche inserire file audio in batch, una delle due opzioni, con priorità alla lettura della cartella",
|
176 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Numeri delle schede separati da - utilizzati in input, ad esempio 0-1-2, utilizzando le schede 0, 1 e 2",
|
177 |
+
"伴奏人声分离&去混响&去回声": "Separazione tra accompagnamento e voce & Rimozione dell'eco & Rimozione dell'eco",
|
178 |
+
"使用模型采样率": "Frequenza di campionamento del modello",
|
179 |
+
"使用设备采样率": "Frequenza di campionamento del dispositivo",
|
180 |
+
"保存名": "Nome del salvataggio",
|
181 |
+
"保存的文件名, 默认空为和源文件同名": "Nome del file salvato, vuoto di default è lo stesso del file sorgente",
|
182 |
+
"保存的模型名不带后缀": "Nome del modello salvato senza estensione",
|
183 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protegge le consonanti chiare e i suoni di respirazione, evita artifact come la rottura del suono elettronico, tirare a 0.5 per disattivare, abbassare per aumentare la protezione ma potrebbe ridurre l'effetto di indicizzazione",
|
184 |
+
"修改": "Modifica",
|
185 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modifica le informazioni del modello (supporta solo i piccoli file di modello estratti dalla cartella weights)",
|
186 |
+
"停止音频转换": "Interrompi la conversione audio",
|
187 |
+
"全流程结束!": "Processo completo!",
|
188 |
+
"刷新音色列表和索引路径": "Aggiorna la lista dei toni e il percorso dell'indice",
|
189 |
+
"加载模型": "Carica il modello",
|
190 |
+
"加载预训练底模D路径": "Carica il percorso del modello di fondo preaddestrato D",
|
191 |
+
"加载预训练底模G路径": "Carica il percorso del modello di fondo preaddestrato G",
|
192 |
+
"单次推理": "Inferenza singola",
|
193 |
+
"卸载音色省显存": "Scarica il tono per risparmiare memoria video",
|
194 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "Modifica del tono (numero intero, quantità di semitoni, 12 per un'ottava in su, -12 per un'ottava in giù)",
|
195 |
+
"后处理重采样至最终采样率,0为不进行重采样": "Ricampiona in modo post-elaborazione alla frequenza di campionamento finale, 0 per non eseguire il ricampionamento",
|
196 |
+
"否": "No",
|
197 |
+
"启用相位声码器": "Abilita il codificatore di fase",
|
198 |
+
"响应阈值": "Soglia di risposta",
|
199 |
+
"响度因子": "Fattore di risposta",
|
200 |
+
"处理数据": "Elaborazione dati",
|
201 |
+
"导出Onnx模型": "Esporta il modello Onnx",
|
202 |
+
"导出文件格式": "Formato di esportazione del file",
|
203 |
+
"常见问题解答": "Domande frequenti",
|
204 |
+
"常规设置": "Impostazioni generali",
|
205 |
+
"开始音频转换": "Inizia la conversione audio",
|
206 |
+
"性能设置": "Impostazioni di performance",
|
207 |
+
"批量推理": "Inferenza batch",
|
208 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversione in batch, inserisci la cartella con i file audio da convertire o carica più file audio, i file convertiti verranno salvati nella cartella specificata (per impostazione predefinita opt).",
|
209 |
+
"指定输出主人声文件夹": "Specifica la cartella di output per la voce principale",
|
210 |
+
"指定输出文件夹": "Specifica la cartella di output",
|
211 |
+
"指定输出非主人声文件夹": "Specifica la cartella di output per la non voce principale",
|
212 |
+
"推理时间(ms):": "Tempo di inferenza (ms):",
|
213 |
+
"推理音色": "Tono di inferenza",
|
214 |
+
"提取": "Estrai",
|
215 |
+
"提取音高和处理数据使用的CPU进程数": "Numero di processi CPU utilizzati per l'estrazione dell'altezza del suono e l'elaborazione dei dati",
|
216 |
+
"是": "Sì",
|
217 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Se memorizzare nella cache tutto l'insieme di addestramento nella memoria video. Piccoli set di dati inferiori a 10 minuti possono essere memorizzati nella cache per accelerare l'addestramento, la memorizzazione nella cache di grandi set di dati può esaurire la memoria video e non accelerare di molto",
|
218 |
+
"查看": "Visualizza",
|
219 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Visualizza le informazioni del modello (supporta solo i piccoli file di modello estratti dalla cartella weights)",
|
220 |
+
"检索特征占比": "Percentuale di caratteristiche di ricerca",
|
221 |
+
"模型": "Modello",
|
222 |
+
"模型推理": "Inferenza del modello",
|
223 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Estrazione del modello (inserisci il percorso del modello di grandi dimensioni nella cartella logs), adatto per i modelli a metà addestramento che non si desidera continuare ad addestrare, i modelli non estratti automaticamente vengono salvati come modelli di piccole dimensioni o per testare la situazione del modello intermedio",
|
224 |
+
"模型是否带音高指导": "Il modello include o meno la guida all'altezza del suono",
|
225 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "Il modello include o meno la guida all'altezza del suono (necessario per il canto, opzionale per la voce)",
|
226 |
+
"模型是否带音高指导,1是0否": "Il modello include o meno la guida all'altezza del suono, 1 sì, 0 no",
|
227 |
+
"模型版本型号": "Versione e modello del modello",
|
228 |
+
"模型融合, 可用于测试音色融合": "Fusione dei modelli, utile per testare la fusione dei toni",
|
229 |
+
"模型路径": "Percorso del modello",
|
230 |
+
"淡入淡出长度": "Lunghezza del fading in/fading out",
|
231 |
+
"版本": "Versione",
|
232 |
+
"特征提取": "Estrazione delle caratteristiche",
|
233 |
+
"特征检索库文件路径,为空则使用下拉的选择结果": "Percorso del file della libreria di ricerca delle caratteristiche, se vuoto usa la selezione a discesa",
|
234 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Consigliato +12 toni per la trasformazione da uomo a donna, -12 toni per la trasformazione da donna a uomo. Se l'intervallo tonale esplode causando distorsioni nel timbro, è possibile regolarlo manualmente nell'intervallo adatto.",
|
235 |
+
"目标采样率": "Frequenza di campionamento obiettivo",
|
236 |
+
"算法延迟(ms):": "Ritardo dell'algoritmo (ms):",
|
237 |
+
"自动检测index路径,下拉式选择(dropdown)": "Rilevamento automatico del percorso dell'indice, selezione a discesa (dropdown)",
|
238 |
+
"融合": "Fusione",
|
239 |
+
"要改的模型信息": "Informazioni del modello da modificare",
|
240 |
+
"要置入的模型信息": "Informazioni del modello da inserire",
|
241 |
+
"训练": "Addestramento",
|
242 |
+
"训练模��": "Addestra il modello",
|
243 |
+
"训练特征索引": "Addestramento dell'indice delle caratteristiche",
|
244 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Fine dell'addestramento, puoi visualizzare il registro di addestramento sulla console o il file train.log nella cartella dell'esperimento",
|
245 |
+
"请指定说话人id": "Si prega di specificare l'ID del parlante",
|
246 |
+
"请选择index文件": "Seleziona il file di indice",
|
247 |
+
"请选择pth文件": "Seleziona il file pth",
|
248 |
+
"请选择说话人id": "Seleziona l'ID del parlante",
|
249 |
+
"转换": "Converti",
|
250 |
+
"输入实验名": "Inserisci il nome dell'esperimento",
|
251 |
+
"输入待处理音频文件夹路径": "Inserisci il percorso della cartella dei file audio da elaborare",
|
252 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Inserisci il percorso della cartella dei file audio da elaborare (copialo dalla barra degli indirizzi del gestore dei file)",
|
253 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "Inserisci il percorso del file audio da elaborare (esempio di formato corretto predefinito)",
|
254 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Inserisci la proporzione di fusione della sostituzione dell'involucro del volume di ingresso con l'involucro del volume di uscita, più vicino a 1 più utilizza l'involucro di uscita",
|
255 |
+
"输入监听": "Inserisci l'ascolto",
|
256 |
+
"输入训练文件夹路径": "Inserisci il percorso della cartella di addestramento",
|
257 |
+
"输入设备": "Dispositivo di input",
|
258 |
+
"输入降噪": "Inserisci la riduzione del rumore",
|
259 |
+
"输出信息": "Informazioni di output",
|
260 |
+
"输出变声": "Variazione della voce in output",
|
261 |
+
"输出设备": "Dispositivo di output",
|
262 |
+
"输出降噪": "Riduzione del rumore in output",
|
263 |
+
"输出音频(右下角三个点,点了可以下载)": "Audio in output (tre punti nell'angolo in basso a destra, fare clic per scaricare)",
|
264 |
+
"选择.index文件": "Seleziona il file .index",
|
265 |
+
"选择.pth文件": "Seleziona il file .pth",
|
266 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Seleziona l'algoritmo di estrazione dell'altezza del suono, l'input vocale può utilizzare pm per velocizzare, harvest ha bassi migliori ma è incredibilmente lento, crepe ha un buon effetto ma consuma molte risorse della GPU",
|
267 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Seleziona l'algoritmo di estrazione dell'altezza del suono, l'input vocale può utilizzare pm per velocizzare, harvest ha bassi migliori ma è incredibilmente lento, crepe ha un buon effetto ma consuma molte risorse della GPU, rmvpe ha il miglior effetto ed è leggermente esigente sulla GPU",
|
268 |
+
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Seleziona l'algoritmo di estrazione dell'altezza del suono: l'input vocale può utilizzare pm per velocizzare, la qualità del suono è elevata ma richiede molte risorse della CPU; l'input vocale può utilizzare dio per velocizzare, harvest ha una qualità del suono migliore ma è lento, rmvpe ha il miglior effetto ed è leggermente esigente sulla CPU/GPU",
|
269 |
+
"采样率:": "Frequenza di campionamento:",
|
270 |
+
"采样长度": "Lunghezza del campionamento",
|
271 |
+
"重载设备列表": "Ricarica la lista dei dispositivi",
|
272 |
+
"音调设置": "Impostazioni del tono",
|
273 |
+
"音频设备(请使用同种类驱动)": "Dispositivo audio (usa driver della stessa categoria)",
|
274 |
+
"音高算法": "Algoritmo dell'altezza del suono",
|
275 |
+
"额外推理时长": "Tempo di inferenza extra"
|
276 |
+
}
|
i18n/locale/ja_JP.json
ADDED
@@ -0,0 +1,283 @@
1 |
+
{
|
2 |
+
"很遗憾您这没有能用的显卡来支持您训练": "残念ながら、トレーニングをサポートする利用可能なグラフィックカードがありません",
|
3 |
+
"UVR5已开启": "UVR5がオンになっています",
|
4 |
+
"UVR5已关闭": "UVR5がオフになっています",
|
5 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "このソフトウェアはMITライセンスでオープンソース化されており、作者はソフトウェアに対して一切の制御権を持っていません。ソフトウェアを使用する者、ソフトウェアから導出される音声を広める者は、自己責任で行ってください。<br>この条件を認めない場合、ソフトウェアパッケージ内の任意のコードやファイルを使用または引用することはできません。詳細はルートディレクトリの<b>LICENSE</b>を参照してください。",
|
6 |
+
"0-前置数据集获取工具": "0-データセット取得ツールの事前処理",
|
7 |
+
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5ボーカルアカンパニメント分離&リバーブおよびディレイ除去ツール",
|
8 |
+
"是否开启UVR5-WebUI": "UVR5-WebUIをオンにしますか",
|
9 |
+
"UVR5进程输出信息": "UVR5プロセスの出力情報",
|
10 |
+
"0b-语音切分工具": "0b-音声分割ツール",
|
11 |
+
".list标注文件的路径": ".listアノテーションファイルのパス",
|
12 |
+
"GPT模型列表": "GPTモデルリスト",
|
13 |
+
"SoVITS模型列表": "SoVITSモデルリスト",
|
14 |
+
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。": "音声を切り取った後の音声が保存されているディレクトリ!読み取られる音声ファイルの完全なパス=このディレクトリ-連結-リストファイル内の波形に対応するファイル名(フルパスではない)。",
|
15 |
+
"音频自动切分输入路径,可文件可文件夹": "オーディオの自動分割入力パス、ファイルまたはフォルダを指定できます",
|
16 |
+
"切分后的子音频的输出根目录": "分割後のサブオーディオの出力ルートディレクトリ",
|
17 |
+
"怎么切": "どうやって切るか",
|
18 |
+
"不切": "切らない",
|
19 |
+
"凑四句一切": "4つの文で埋める",
|
20 |
+
"按英文句号.切": "英文のピリオドで切ってください",
|
21 |
+
"threshold:音量小于这个值视作静音的备选切割点": "閾値:この値未満の音量は静音と見なされ、代替のカットポイントとして扱われます",
|
22 |
+
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:各セグメントの最小長さ。最初のセグメントが短すぎる場合、連続して後続のセグメントに接続され、この値を超えるまで続きます。",
|
23 |
+
"min_interval:最短切割间隔": "min_interval:最短カット間隔",
|
24 |
+
"hop_size:怎么算音量曲线,越小精度越大計算量越高(不是精度越大效果越好)": "hop_size:音量曲線を計算する方法。値が小さいほど精度が高くなり、計算量が増加します(精度が高いほど効果が良いわけではありません)。",
|
25 |
+
"max_sil_kept:切完后静音最多留多长": "max_sil_kept:切り終えた後、最大でどれだけ静かにするか",
|
26 |
+
"开启语音切割": "音声の分割を開始",
|
27 |
+
"终止语音切割": "音声の分割を停止",
|
28 |
+
"max:归一化后最大值多少": "max:正規化後の最大値",
|
29 |
+
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix:正規化後のオーディオが入る割合",
|
30 |
+
"切割使用的进程数": "分割に使用されるプロセス数",
|
31 |
+
"语音切割进程输出信息": "音声分割プロセスの出力情報",
|
32 |
+
"0c-中文批量离线ASR工具": "0c-中国語バッチオフラインASRツール",
|
33 |
+
"开启离线批量ASR": "オフラインバッチASRを開始",
|
34 |
+
"终止ASR进程": "ASRプロセスを停止",
|
35 |
+
"批量ASR(中文only)输入文件夹路径": "バッチASR(中国語のみ)の入力フォルダパス",
|
36 |
+
"ASR进程输出信息": "ASRプロセスの出力情報",
|
37 |
+
"0d-语音文本校对标注工具": "0d-音声テキストの校正アノテーションツール",
|
38 |
+
"是否开启打标WebUI": "WebUIを使用したアノテーションを開始しますか",
|
39 |
+
"打标数据标注文件路径": "アノテーションデータのファイルパス",
|
40 |
+
"打标工具进程输出信息": "アノテーションツールプロセスの出力情報",
|
41 |
+
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
|
42 |
+
"*实验/模型名": "*実験/モデル名",
|
43 |
+
"显卡信息": "グラフィックカード��報",
|
44 |
+
"预训练的SoVITS-G模型路径": "事前にトレーニングされたSoVITS-Gモデルのパス",
|
45 |
+
"预训练的SoVITS-D模型路径": "事前にトレーニングされたSoVITS-Dモデルのパス",
|
46 |
+
"预训练的GPT模型路径": "事前にトレーニングされたGPTモデルのパス",
|
47 |
+
"1A-训练集格式化工具": "1A-トレーニングデータのフォーマットツール",
|
48 |
+
"输出logs/实验名目录下应有23456开头的文件和文件夹": "logs/実験名ディレクトリには23456で始まるファイルとフォルダが含まれている必要があります",
|
49 |
+
"*文本标注文件": "*テキスト注釈ファイル",
|
50 |
+
"*训练集音频文件目录": "*トレーニングデータのオーディオファイルディレクトリ",
|
51 |
+
"训练集音频文件目录 拼接 list文件里波形对应的文件名。": "トレーニングデータのオーディオファイルディレクトリ。リストファイル内の波形に対応するファイル名を連結します。",
|
52 |
+
"1Aa-文本内容": "1Aa-テキストの内容",
|
53 |
+
"GPU卡号以-分割,每个卡号一个进程": "GPUカード番号はハイフンで区切り、各カード番号ごとに1つのプロセスが実行されます",
|
54 |
+
"预训练的中文BERT模型路径": "事前にトレーニングされた中文BERTモデルのパス",
|
55 |
+
"开启文本获取": "テキストの取得を開始",
|
56 |
+
"终止文本获取进程": "テキスト取得プロセスを停止",
|
57 |
+
"文本进程输出信息": "テキストプロセスの出力情報",
|
58 |
+
"1Ab-SSL自监督特征提取": "1Ab-SSLセルフスーパーバイズ特徴抽出",
|
59 |
+
"预训练的SSL模型路径": "事前にトレーニングされたSSLモデルのパス",
|
60 |
+
"开启SSL提取": "SSL抽出を開始",
|
61 |
+
"终止SSL提取进程": "SSL抽出プロセスを停止",
|
62 |
+
"SSL进程输出信息": "SSLプロセスの出力情報",
|
63 |
+
"1Ac-语义token提取": "1Ac-セマンティックトークン抽出",
|
64 |
+
"开启语义token提取": "セマンティックトークン抽出を開始",
|
65 |
+
"终止语义token提取进程": "セマンティックトークン抽出プロセスを停止",
|
66 |
+
"语义token提取进程输出信息": "セマンティックトークン抽出プロセスの出力情報",
|
67 |
+
"1Aabc-训练集格式化一键三连": "1Aabc-トレーニングデータのフォーマットワンクリック三連",
|
68 |
+
"开启一键三连": "ワンクリック三連を開始",
|
69 |
+
"终止一键三连": "ワンクリック三連を停止",
|
70 |
+
"一键三连进程输出信息": "ワンクリック三連プロセスの出力情報",
|
71 |
+
"1B-微调训练": "1B-ファインチューニングトレーニング",
|
72 |
+
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITSトレーニング。共有用のモデルファイルはSoVITS_weightsディレクトリに出力されます。",
|
73 |
+
"每张显卡的batch_size": "各グラフィックカードのバッチサイズ",
|
74 |
+
"总训练轮数total_epoch,不建议太高": "総トレーニングエポック数total_epoch、高すぎないようにお勧めします",
|
75 |
+
"文本模块学习率权重": "テキストモジュールの学習率の重み",
|
76 |
+
"保存频率save_every_epoch": "保存頻度save_every_epoch",
|
77 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "最新のckptファイルのみを保存してディスクスペースを節約するかどうか",
|
78 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "各保存時間点で最終的な小さなモデルをweightsフォルダに保存するかどうか",
|
79 |
+
"开启SoVITS训练": "SoVITSトレーニングを開始",
|
80 |
+
"终止SoVITS训练": "SoVITSトレーニングを停止",
|
81 |
+
"SoVITS训练进程输出信息": "SoVITSトレーニングプロセスの出力情報",
|
82 |
+
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPTトレーニング。共有用のモデルファイルはGPT_weightsディレクトリに出力されます。",
|
83 |
+
"总训练轮数total_epoch": "総トレーニングエポック数total_epoch",
|
84 |
+
"开启GPT训练": "GPTトレーニングを開始",
|
85 |
+
"终止GPT训练": "GPTトレーニングを停止",
|
86 |
+
"GPT训练进程输出信息": "GPTトレーニングプロセスの出力情報",
|
87 |
+
"1C-推理": "1C-推論",
|
88 |
+
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "SoVITS_weightsおよびGPT_weightsに保存されたモデルを選択します。デフォルトのものはプレトレインであり、ゼロショットTTSを体験できます。",
|
89 |
+
"*GPT模型列表": "*GPTモデルリスト",
|
90 |
+
"*SoVITS模型列表": "*SoVITSモデルリスト",
|
91 |
+
"GPU卡号,只能填1个整数": "GPU番号、1つの整数しか入力できません",
|
92 |
+
"刷新模型路径": "モデルのパスを更新",
|
93 |
+
"是否开启TTS推理WebUI": "TTS推論WebUIを開く",
|
94 |
+
"TTS推理WebUI进程输出信息": "TTS推論WebUIプロセスの出力情報",
|
95 |
+
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-ボイスチェンジャー",
|
96 |
+
"施工中,请静候佳音": "施工中、お待ちください",
|
97 |
+
"TTS推理进程已开启": "TTS推論プロセスが開始されました",
|
98 |
+
"TTS推理进程已关闭": "TTS推論プロセスが終了しました",
|
99 |
+
"打标工具WebUI已开启": "校正ツールWebUIが開始されました",
|
100 |
+
"打标工具WebUI已关闭": "校正ツールWebUIが終了しました",
|
101 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "このソフトウェアはMITライセンスでオープンソース化されており、作者はソフトウェアに対して一切の制御権を持っていません。ソフトウェアを使用する者、ソフトウェアからエクスポートされた音声を伝播する者は、自己の責任を負います。この条件を受け入れない場合は、ソフトウェアパッケージ内の任意のコードやファイルを使用または引用することはできません。詳細はLICENSEを参照してください。",
|
102 |
+
"*请上传并填写参考信息": "*参照情報をアップロードして記入してください",
|
103 |
+
"*请填写需要合成的目标文本": "*合成が必要な対象のテキストを記入してください",
|
104 |
+
"ASR任务开启:%s": "ASRタスクが開始されました:%s",
|
105 |
+
"GPT训练完成": "GPTトレーニングが完了しました",
|
106 |
+
"GPT训练开始:%s": "GPTトレーニングが開始されました:%s",
|
107 |
+
"SSL提取进程执行中": "SSL抽出プロセス実行中",
|
108 |
+
"SSL提取进程结束": "SSL抽出プロセスが終了しました",
|
109 |
+
"SoVITS训练完成": "SoVITSトレーニングが完了しました",
|
110 |
+
"SoVITS训练开始:%s": "SoVITSトレーニングが開始されました:%s",
|
111 |
+
"一键三连中途报错": "ワンクリックフォーマット中にエラーが発生しました",
|
112 |
+
"一键三连进程结束": "ワンクリックフォーマットが終了しました",
|
113 |
+
"中文": "中国語",
|
114 |
+
"凑50字一切": "50文字ずつカット",
|
115 |
+
"凑五句一切": "5つの文ごとにカット",
|
116 |
+
"切分后文本": "分割後のテキスト",
|
117 |
+
"切割执行中": "オーディオの分割中",
|
118 |
+
"切割结束": "オーディオの分割が完了しました",
|
119 |
+
"参考音频的文本": "参照オーディオのテキスト",
|
120 |
+
"参考音频的语种": "参照オーディオの言語",
|
121 |
+
"合成语音": "推論を開始",
|
122 |
+
"后续将支持混合语种编码文本输入。": "後で混合言語コードテキストの入力がサポートされるようになります。",
|
123 |
+
"已有正在进行的ASR任务,需先终止才能开启下一次任务": "すでに進行中のASRタスクがあります。次のタスクを開始する前に停止してください",
|
124 |
+
"已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "すでに進行中のGPTトレーニングタスクがあります。次のタスクを開始する前に停止してください",
|
125 |
+
"已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "すでに進行中のSSL抽出タスクがあります。次のタスクを開始する前に停止してください",
|
126 |
+
"已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "すでに進行中のSoVITSトレーニングタスクがあります。次のタスクを開始する前に停止してください",
|
127 |
+
"已有正在进行的一键三连任务,需先终止才能开启下一次任务": "すでに進行中のワンクリックフォーマットタスクがあります。次のタスクを開始する前に停止してください",
|
128 |
+
"已有正在进行的切割任务,需先终止才能开启下一次任务": "すでに進行中のオーディオの分割タスクがあります。次のタスクを開始する前に停止してください",
|
129 |
+
"已有正在进行的文本任务,需先终止才能开启下一次任务": "すでに進行中のTTS校正タスクがあります。次のタスクを開始する前に停止してください",
|
130 |
+
"已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "すでに進行中の意味トークン抽出タスクがあります。次のタスクを開始する前に停止してください",
|
131 |
+
"已终止ASR进程": "ASRタスクが終了しました",
|
132 |
+
"已终止GPT训练": "GPTトレーニングが終了しました",
|
133 |
+
"已终止SoVITS训练": "SoVITSトレーニングが終了しました",
|
134 |
+
"已终止所有1a进程": "すべての1aタスクが終了しました",
|
135 |
+
"已终止所有1b进程": "すべての1bタスクが終了しました",
|
136 |
+
"已终止所有一键三连进程": "すべてのワンクリックフォーマットタスクが終了しました",
|
137 |
+
"已终止所有切割进程": "すべてのオーディオの分割タスクが終了しました",
|
138 |
+
"已终止所有语义token进程": "すべての意味トークンタスクが終了しました",
|
139 |
+
"按中文句号。切": "中国語の句点でカット",
|
140 |
+
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "テキストスライサーツール。長文を変換すると効果が不安定になる可能性があるため、長文の場合は事前に切り分けることをお勧めします。推論時には、テキストを個別に推論し、それを組み合わせて再構築します。",
|
141 |
+
"文本进程执行中": "テキスト処理中",
|
142 |
+
"文本进程结束": "テキスト処理が終了しました",
|
143 |
+
"日文": "日本語",
|
144 |
+
"英文": "英語",
|
145 |
+
"语义token提取进程执行中": "意味トークン抽出実行中",
|
146 |
+
"语义token提取进程结束": "意味トークン抽出が終了しました",
|
147 |
+
"请上传参考音频": "参照オーディオをアップロードしてください",
|
148 |
+
"输入路径不存在": "入力パスが存在しません",
|
149 |
+
"输入路径存在但既不是文件也不是文件夹": "入力ディレクトリが存在しますが、ファイルでもフォルダでもありません",
|
150 |
+
"输出的语音": "推論結果",
|
151 |
+
"进度:1a-done": "進捗:1a完了",
|
152 |
+
"进度:1a-done, 1b-ing": "進捗:1a完了、1b進行中",
|
153 |
+
"进度:1a-ing": "進捗:1a進行中",
|
154 |
+
"进度:1a1b-done": "進捗:1a1b完了",
|
155 |
+
"进度:1a1b-done, 1cing": "進捗:1a1b完了、1c進行中",
|
156 |
+
"进度:all-done": "進捗:all-done",
|
157 |
+
"需要合成的切分前文本": "推論が必要な分割前のテキスト",
|
158 |
+
"需要合成的文本": "推論テキスト",
|
159 |
+
"需要合成的语种": "推論テキストの言語",
|
160 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "3以上の場合:収穫音高の認識結果に中央値フィルタリングを適用します。値はフィルターの半径を表し、息遣いを減少させることができます。",
|
161 |
+
"A模型权重": "モデルAの重み (w):",
|
162 |
+
"A模型路径": "モデルAのパス:",
|
163 |
+
"B模型路径": "モデルBのパス:",
|
164 |
+
"E:\\语音音频+标注\\米津玄师\\src": "C:\\Users\\Desktop\\src",
|
165 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0曲線ファイル(オプション)。1行に1つの音高があります。デフォルトのF0とピッチ変調の代わりに使用します:",
|
166 |
+
"Index Rate": "インデックスレート",
|
167 |
+
"Onnx导出": "Onnxエクスポート",
|
168 |
+
"Onnx输出路径": "Onnxエクスポートパス:",
|
169 |
+
"RVC模型路径": "RVCモデルパス:",
|
170 |
+
"ckpt处理": "ckpt処理",
|
171 |
+
"harvest进程数": "harvestピッチアルゴリズムに使用するCPUプロセス数",
|
172 |
+
"index文件路径不可包含中文": "インデックスファイルパスには中文を含めないでください",
|
173 |
+
"pth文件路径不可包含中文": "pthファイルパスには中文を含めないでください",
|
174 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "異なるプロセスカードの入力に使用するGPUインデックスを'-'で区切って入力します。例:0-0-1はGPU0で2つのプロセスを実行し、GPU1で1つのプロセスを実行します",
|
175 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "ステップ1:実験構成を記入します。実験データは「logs」フォルダに保存され、各実験には別々のフォルダがあります。実験名のパスを手動で入力する必要があり、実験構成、ログ、トレーニングされたモデルファイルが含まれています。",
|
176 |
+
"step1:正在处理数据": "ステップ1:データ処理中",
|
177 |
+
"step2:正在提取音高&正在提取特征": "ステップ2:ピッチ抽出と特徴抽出中",
|
178 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "ステップ2a:トレーニングフォルダ内のデコード可能なすべてのファイルを自動的にトラバースし、スライス正規化を実行します。実験ディレクトリに2つのwavフォルダが生成されます。現時点では、単一の歌手/スピーカーのトレーニングのみがサポートされています。",
|
179 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "ステップ2b:CPUを使用してピッチを抽出します��モデルにピッチがある場合)、GPUを使用して特徴を抽出します(GPUインデックスを選択します):",
|
180 |
+
"step3: 填写训练设置, 开始训练模型和索引": "ステップ3:トレーニング設定を入力し、モデルとインデックスのトレーニングを開始します",
|
181 |
+
"step3a:正在训练模型": "ステップ3a:モデルのトレーニングが開始されました",
|
182 |
+
"一键训练": "ワンクリックトレーニング",
|
183 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "複数のオーディオファイルもインポートできます。フォルダパスが存在する場合、この入力は無視されます。",
|
184 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "GPUインデックスを'-'で区切って入力します。例:0-1-2はGPU 0、1、および2を使用します。",
|
185 |
+
"伴奏人声分离&去混响&去回声": "ボーカル/伴奏の分離と残響の除去",
|
186 |
+
"使用模型采样率": "使用するモデルのサンプルレート",
|
187 |
+
"使用设备采样率": "使用デバイスのサンプルレート",
|
188 |
+
"保存名": "保存名:",
|
189 |
+
"保存的文件名, 默认空为和源文件同名": "保存ファイル名(デフォルト:元のファイルと同じ):",
|
190 |
+
"保存的模型名不带后缀": "保存されるモデル名(拡張子なし):",
|
191 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "清濁音と呼吸音を保護し、電子音楽の撕裂などのアーティファクトを防ぎます。0.5まで引っ張ると無効になり、保護力を高めるには値を下げますが、索引の精度が低下する可能性があります。",
|
192 |
+
"修改": "変更",
|
193 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "モデル情報の変更('weights'フォルダから抽出された小さなモデルファイルのみサポート)",
|
194 |
+
"停止音频转换": "オーディオ変換を停止",
|
195 |
+
"全流程结束!": "すべてのプロセスが完了しました!",
|
196 |
+
"刷新音色列表和索引路径": "ボイスリストとインデックスパスをリフレッシュ",
|
197 |
+
"加载模型": "モデルの読み込み",
|
198 |
+
"加载预训练底模D路径": "事前にトレーニングされたベースモデルDのパスをロード:",
|
199 |
+
"加载预训练底模G路径": "事前にトレーニングされたベースモデルGのパスをロード:",
|
200 |
+
"单次推理": "単一推論",
|
201 |
+
"卸载音色省显存": "GPUメモリを節約するためにボイスをアンロード:",
|
202 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "トランスポーズ(整数、半音の数、8度上げ: 12、8度下げ: -12):",
|
203 |
+
"后处理重采样至最终采样率,0为不进行重采样": "後処理でオーディオを最終のサンプルレートに再サンプリングします。リサンプリングを行わない場合は0に設定してください:",
|
204 |
+
"否": "いいえ",
|
205 |
+
"启用相位声码器": "位相音声コーダーを有効にする",
|
206 |
+
"响应阈值": "応答閾値",
|
207 |
+
"响度因子": "音量ファクター",
|
208 |
+
"处理数据": "データ処理",
|
209 |
+
"导出Onnx模型": "Onnxモデルのエクスポート",
|
210 |
+
"导出文件格式": "エクスポートファイル形式",
|
211 |
+
"常见问题解答": "よくある質問 (FAQ)",
|
212 |
+
"常规设置": "一般的な設定",
|
213 |
+
"开始音频转换": "オーディオ変換を開始",
|
214 |
+
"性能设置": "性能設定",
|
215 |
+
"批量推理": "一括推論",
|
216 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "一括変換。変換するオーディオファイルが含まれるフォルダを入力するか、複数のオーディオファイルをアップロードします。変換されたオーディオは指定されたフォルダ (デフォルト: 'opt') に出力されます。",
|
217 |
+
"指定输出主人声文件夹": "ボーカルの出力フォルダを指定:",
|
218 |
+
"指定输出文件夹": "出力フォルダの指定:",
|
219 |
+
"指定输出非主人声文件夹": "伴奏の出力フォルダを指定:",
|
220 |
+
"推理时间(ms):": "推論時間 (ms):",
|
221 |
+
"推理音色": "推論ボイス:",
|
222 |
+
"提取": "抽出",
|
223 |
+
"提取音高和处理数据使用的CPU进程数": "ピッチ抽出およびデータ処理に使用されるCPUプロセスの数:",
|
224 |
+
"是": "はい",
|
225 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "すべてのトレーニングセットをGPUメモリにキャッシュするかどうか。小さなデータセット (10分以下) をキャッシュするとトレーニングが高速化されますが、大きなデータセットをキャッシュするとGPUメモリが消費され、あまり速度が向上しないか���しれません:",
|
226 |
+
"查看": "表示",
|
227 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "モデル情報を表示します ( 'weights' フォルダから抽出された小さなモデルファイルにのみ対応):",
|
228 |
+
"检索特征占比": "特徴の検索比率 (アクセントの強度を制御、高すぎるとアーティファクトが発生します):",
|
229 |
+
"模型": "モデル",
|
230 |
+
"模型推理": "モデル推論",
|
231 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "モデル抽出 ( 'logs' フォルダ内の大きなファイルモデルのパスを入力)。トレーニングを途中で停止して手動で小さなモデルファイルを抽出および保存したい場合、または中間モデルをテストしたい場合に使用します:",
|
232 |
+
"模型是否带音高指导": "モデルにピッチガイダンスがあるかどうか:",
|
233 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "モデルにピッチガイダンスがあるかどうか (歌唱には必須、音声にはオプション):",
|
234 |
+
"模型是否带音高指导,1是0否": "モデルにピッチガイダンスがあるかどうか (1: はい、0: いいえ):",
|
235 |
+
"模型版本型号": "モデルアーキテクチャバージョン:",
|
236 |
+
"模型融合, 可用于测试音色融合": "モデルフュージョン、音色フュージョンをテストするために使用できます",
|
237 |
+
"模型路径": "モデルへのパス:",
|
238 |
+
"淡入淡出长度": "フェードの長さ",
|
239 |
+
"版本": "バージョン",
|
240 |
+
"特徴提取": "特徴抽出",
|
241 |
+
"特徴检索库文件路径,为空则使用下拉的选择结果": "特徴インデックスファイルへのパス。空白の場合はドロップダウンから選択された結果が使用されます:",
|
242 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男性から女性への変換では+12キーが推奨され、女性から男性への変換では-12キーが推奨されます。音域が広すぎて音声が歪む場合は、適切な音域に手動で調整することもできます。",
|
243 |
+
"目标采样率": "目標サンプルレート:",
|
244 |
+
"算法延迟(ms):": "アルゴリズムの遅延(ms):",
|
245 |
+
"自动检测index路径,下拉式选择(dropdown)": "indexパスを自動検出し、ドロップダウンから選択します:",
|
246 |
+
"融合": "フュージョン",
|
247 |
+
"要改的模型信息": "変更するモデル情報:",
|
248 |
+
"要置入的模型信息": "挿入するモデル情報:",
|
249 |
+
"训练": "トレーニング",
|
250 |
+
"训练模型": "モデルのトレーニング",
|
251 |
+
"训练特征索引": "特徴索引のトレーニング",
|
252 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "トレーニングが完了しました。トレーニングログはコンソールまたは実験フォルダの 'train.log' ファイルで確認できます。",
|
253 |
+
"请指定说话人id": "話者/歌手のIDを指定してください:",
|
254 |
+
"请选择index文件": ".index ファイルを選択してください",
|
255 |
+
"请选择pth文件": ".pth ファイルを選択してください",
|
256 |
+
"请选择说话人id": "話者/歌手のIDを選択してください:",
|
257 |
+
"转换": "変換",
|
258 |
+
"输入实验名": "実験名を入力:",
|
259 |
+
"输入待处理音频文件夹路径": "処理するオーディオフォルダのパスを入力してください:",
|
260 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "処理するオーディオフォルダのパスを入力してください (ファイルマネージャのアドレスバーからコピーしてください):",
|
261 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "処理するオーディオファイルのパスを入力してください (デフォルトは正しい形式の例です):",
|
262 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "音量エンベロープのスケーリングを調整します。0に近いほど、元のボーカルの音量に似ます。相対的に低い値に設定すると、ノイズをマスキングし、音量がより自然に聞こえるようになります。1に近いほど、一貫して大きな音量になります:",
|
263 |
+
"输入监听": "入力ボイスモニター",
|
264 |
+
"输入训练文件夹路径": "トレーニングフォルダのパスを入力してください:",
|
265 |
+
"输入设备": "入力デバイス",
|
266 |
+
"输入降噪": "ノイズリダクションの入力",
|
267 |
+
"输出信息": "出力情報",
|
268 |
+
"输出变声": "変換されたボイスの出力",
|
269 |
+
"输出设备": "出力デバイス",
|
270 |
+
"输出降噪": "ノイズリダクションの出力",
|
271 |
+
"输出音频(右下角三个点,点了可以下载)": "オーディオの出力 (右下隅の三点をクリックしてダウンロード)",
|
272 |
+
"选择.index文件": ".index ファイルを選択してください",
|
273 |
+
"选择.pth文件": ".pth ファイルを選択してください",
|
274 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "音高抽出アルゴリズムを選択します。歌声を抽出する場合は 'pm' を使用して高速化できます。高品質な音声でパフォーマンスが向上するが、CPUの使用が悪化する場合は 'dio' を使用できます。 'harvest' は品質が向上しますが、遅いです。 'rmvpe' は最高の品質で、少ないGPUが必要です",
|
275 |
+
"选择音高提取算法,输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "音高抽出アルゴリズムを選択します。歌声を抽出する場合は 'pm' を使用して高速化できます。高品質な音声でパフォーマンスが向上するが、CPUの使用が悪化する場合は 'dio' を使用できます。 'harvest' は品質が向上しますが、遅いです。 'rmvpe' は最高の品質で、CPU/GPUの使用が少ないです",
|
276 |
+
"采样率:": "サンプルレート:",
|
277 |
+
"采样长度": "サンプル長",
|
278 |
+
"重载设备列表": "デバイスリストを再読み込み",
|
279 |
+
"音调设置": "ピッチ設定",
|
280 |
+
"音频设备(请使用同种类驱动)": "オーディオデバイス (同じタイプのドライバを使用してください)",
|
281 |
+
"音高算法": "音程検出アルゴリズム",
|
282 |
+
"额外推理时长": "追加推論時間"
|
283 |
+
}
|
i18n/locale/ko_KR.json
ADDED
@@ -0,0 +1,285 @@
1 |
+
{
|
2 |
+
"很遗憾您这没有能用的显卡来支持您训练": "죄송합니다. 훈련을 지원할 수 있는 그래픽 카드가 없습니다.",
|
3 |
+
"UVR5已开启": "UVR5가 활성화되었습니다",
|
4 |
+
"UVR5已关闭": "UVR5가 비활성화되었습니다",
|
5 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "본 소프트웨어는 MIT 라이선스로 오픈 소스로 제공되며, 제작자는 소프트웨어에 대해 어떠한 제어력도 가지지 않습니다. 소프트웨어 사용자 및 소프트웨어에서 내보낸 소리를 전파하는 자는 전적으로 책임져야 합니다. <br>이 조항을 인정하지 않으면 소프트웨어의 코드 및 파일을 사용하거나 인용할 수 없습니다. 루트 디렉터리의 <b>LICENSE</b>를 참조하십시오.",
|
6 |
+
"0-前置数据集获取工具": "0-전방 데이터 세트 수집 도구",
|
7 |
+
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 보컬 및 반주 분리 및 에코 및 지연 제거 도구",
|
8 |
+
"是否开启UVR5-WebUI": "UVR5-WebUI를 여시겠습니까?",
|
9 |
+
"UVR5进程输出信息": "UVR5 프로세스 출력 정보",
|
10 |
+
"0b-语音切分工具": "0b-음성 분리 도구",
|
11 |
+
".list标注文件的路径": ".list 주석 파일 경로",
|
12 |
+
"GPT模型路径": "GPT 모델 경로",
|
13 |
+
"SoVITS模型列表": "SoVITS 모델 목록",
|
14 |
+
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。": "분리된 오디오가 있는 디렉터리를 입력하십시오! 읽은 오디오 파일의 전체 경로 = 해당 디렉터리-연결-목록 파일에 해당하는 원본 이름 (전체 경로가 아님).",
|
15 |
+
"音频自动切分输入路径,可文件可文件夹": "오디오 자동 분리 입력 경로, 파일 또는 폴더 가능",
|
16 |
+
"切分后的子音频的输出根目录": "분리된 하위 오디오의 출력 기본 디렉터리",
|
17 |
+
"怎么切": "자르기 옵션",
|
18 |
+
"不切": "자르지 않음",
|
19 |
+
"凑四句一切": "네 문장의 세트를 완성하세요.",
|
20 |
+
"按英文句号.切": "영어 문장으로 분리하기",
|
21 |
+
"threshold:音量小于这个值视作静音的备选切割点": "임계 값: 이 값보다 작은 볼륨은 대체 분리 지점으로 간주됩니다.",
|
22 |
+
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "최소 길이: 각 세그먼트의 최소 길이. 첫 번째 세그먼트가 너무 짧으면 계속해서 뒷부분과 연결하여 이 값 이상이 될 때까지",
|
23 |
+
"min_interval:最短切割间隔": "최소 분리 간격",
|
24 |
+
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop 크기: 볼륨 곡선을 계산하는 방법. 작을수록 정확도가 높아지지만 계산량이 높아집니다 (정확도가 높다고 효과가 좋아지지 않음)",
|
25 |
+
"max_sil_kept:切完后静音最多留多长": "최대 유지되는 정적 길이 (분리 후)",
|
26 |
+
"开启语音切割": "음성 분리 활성화",
|
27 |
+
"终止语音切割": "음성 분리 종료",
|
28 |
+
"max:归一化后最大值多少": "최대 값 (정규화 후)",
|
29 |
+
"alpha_mix:混多少比例归一化后音频进来": "알파 믹스: 정규화된 오디오가 들어오는 비율",
|
30 |
+
"切割使用的进程数": "사용되는 프로세스 수로 자르기",
|
31 |
+
"语音切割进程输出信息": "음성 분리 프로세스 출력 정보",
|
32 |
+
"0c-中文批量离线ASR工具": "0c-중국어 대량 오프라인 ASR 도구",
|
33 |
+
"开启离线批量ASR": "오프라인 대량 ASR 활성화",
|
34 |
+
"终止ASR进程": "ASR 프로세스 종료",
|
35 |
+
"批量ASR(中文only)输入文件夹路径": "대량 ASR (중국어 전용) 입력 폴더 경로",
|
36 |
+
"ASR进程输出信息": "ASR 프로세스 출력 정보",
|
37 |
+
"0d-语音文本校对标注工具": "0d-음성 텍스트 교정 주석 도구",
|
38 |
+
"是否开启打标WebUI": "웹 기반 주석 활성화 여부",
|
39 |
+
"打标数据标注文件路径": "주석 데이터 주석 파일 경로",
|
40 |
+
"打标工具进程输出信息": "주석 도구 프로세스 출력 정보",
|
41 |
+
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
|
42 |
+
"*实验/模型名": "*실험/모델 이름",
|
43 |
+
"显卡信息": "그래픽 카드 정보",
|
44 |
+
"预训练的SoVITS-G模型路径": "사전 훈련된 SoVITS-G 모델 경로",
|
45 |
+
"预训练的SoVITS-D模型路径": "사전 훈련된 SoVITS-D 모델 경로",
|
46 |
+
"预训练的GPT模型路径": "사전 훈련된 GPT 모델 경로",
|
47 |
+
"1A-训练集格式化工具": "1A-훈련 세트 형식 지정 도구",
|
48 |
+
"输出logs/实验名目录下应有23456开头的文件和文件夹": "logs/실험 이름 디렉터리에는 23456으로 시작하는 파일과 폴더가 있어야 함",
|
49 |
+
"*文本标注文件": "*텍스트 주석 파일",
|
50 |
+
"*训练集音频文件目录": "*훈련 세트 오디오 파일 디렉터��",
|
51 |
+
"训练集音频文件目录 拼接 list文件里波形对应的文件名。": "훈련 세트 오디오 파일 디렉터리 - 목록 파일에 해당하는 원형 이름 연결",
|
52 |
+
"1Aa-文本内容": "1Aa-텍스트 내용",
|
53 |
+
"GPU卡号以-分割,每个卡号一个进程": "GPU 카드 번호는 -로 구분되며 각 카드 번호에 하나의 프로세스가 있어야 함",
|
54 |
+
"预训练的中文BERT模型路径": "사전 훈련된 중국어 BERT 모델 경로",
|
55 |
+
"开启文本获取": "텍스트 추출 활성화",
|
56 |
+
"终止文本获取进程": "텍스트 추출 프로세스 종료",
|
57 |
+
"文本进程输出信息": "텍스트 프로세스 출력 정보",
|
58 |
+
"1Ab-SSL自监督特征提取": "1Ab-SSL 자기 지도 특징 추출",
|
59 |
+
"预训练的SSL模型路径": "사전 훈련된 SSL 모델 경로",
|
60 |
+
"开启SSL提取": "SSL 추출 활성화",
|
61 |
+
"终止SSL提取进程": "SSL 추출 프로세스 종료",
|
62 |
+
"SSL进程输出信息": "SSL 프로세스 출력 정보",
|
63 |
+
"1Ac-语义token提取": "1Ac-의미 토큰 추출",
|
64 |
+
"开启语义token提取": "의미 토큰 추출 활성화",
|
65 |
+
"终止语义token提取进程": "의미 토큰 추출 프로세스 종료",
|
66 |
+
"语义token提取进程输出信息": "의미 토큰 추출 프로세스 출력 정보",
|
67 |
+
"1Aabc-训练集格式化一键三连": "1Aabc-훈련 세트 형식 지정 일괄 처리",
|
68 |
+
"开启一键三连": "일괄 처리 활성화",
|
69 |
+
"终止一键三连": "일괄 처리 종료",
|
70 |
+
"一键三连进程输出信息": "일괄 처리 프로세스 출력 정보",
|
71 |
+
"1B-微调训练": "1B-미세 조정 훈련",
|
72 |
+
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS 훈련. 공유 용 모델 파일은 SoVITS_weights 하위에 출력됩니다.",
|
73 |
+
"每张显卡的batch_size": "각 그래픽 카드의 배치 크기",
|
74 |
+
"总训练轮数total_epoch,不建议太高": "총 훈련 라운드 수 (total_epoch), 너무 높지 않게 권장됨",
|
75 |
+
"文本模块学习率权重": "텍스트 모듈 학습률 가중치",
|
76 |
+
"保存频率save_every_epoch": "저장 빈도 (각 라운드마다)",
|
77 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "디스크 공간을 절약하기 위해 최신 ckpt 파일만 저장할지 여부",
|
78 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "각 저장 시간에 최종 작은 모델을 weights 폴더에 저장할지 여부",
|
79 |
+
"开启SoVITS训练": "SoVITS 훈련 활성화",
|
80 |
+
"终止SoVITS训练": "SoVITS 훈련 종료",
|
81 |
+
"SoVITS训练进程输出信息": "SoVITS 훈련 프로세스 출력 정보",
|
82 |
+
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT 훈련. 공유 용 모델 파일은 GPT_weights 하위에 출력됩니다.",
|
83 |
+
"总训练轮数total_epoch": "총 훈련 라운드 수 (total_epoch)",
|
84 |
+
"开启GPT训练": "GPT 훈련 활성화",
|
85 |
+
"终止GPT训练": "GPT 훈련 종료",
|
86 |
+
"GPT训练进程输出信息": "GPT 훈련 프로세스 출력 정보",
|
87 |
+
"1C-推理": "1C-추론",
|
88 |
+
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "SoVITS_weights 및 GPT_weights에 저장된 훈련 완료된 모델 중 선택. 기본적으로 하나는 기본 모델이며 5초 Zero Shot TTS를 체험할 수 있습니다.",
|
89 |
+
"*GPT模型列表": "*GPT 모델 목록",
|
90 |
+
"*SoVITS模型列表": "*SoVITS 모델 목록",
|
91 |
+
"GPU卡号,只能填1个整数": "GPU 카드 번호, 1개의 정수만 입력 가능",
|
92 |
+
"刷新模型路径": "모델 경로 새로 고침",
|
93 |
+
"是否开启TTS推理WebUI": "TTS 추론 WebUI 활성화 여부",
|
94 |
+
"TTS推理WebUI进程输出信息": "TTS 추론 WebUI 프로세스 출력 정보",
|
95 |
+
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-음성 변환",
|
96 |
+
"施工中,请静候佳音": "공사 중입니다. 기다려주십시오.",
|
97 |
+
"参考音频在3~10秒范围外,请更换!": "참고 오디오가 3~10초 범위를 벗어났습니다. 다른 것으로 바꾸십시오!",
|
98 |
+
"请上传3~10秒内参考音频,超过会报错!": "3~10초 이내의 참고 오디오를 업로드하십시오. 초과하면 오류가 발생합니다!",
|
99 |
+
"TTS推理进程已开启": "TTS 추론 프로세스가 열렸습니다",
|
100 |
+
"TTS推理进程已关闭": "TTS 추론 프로세스가 닫혔습니다",
|
101 |
+
"打标工具WebUI已开启": "주석 도구 WebUI가 열렸습니다",
|
102 |
+
"打标工具WebUI已关闭": "주석 도구 WebUI가 닫혔습니다",
|
103 |
+
"*请填写需要合成的目标文本。中英混合选中文,日英混合选日文,中日混合暂不支持,非目标语言文本自动遗弃。": "*합성할 대상 텍스트를 입력하십시오. 중국어와 영어를 혼합하면 중국어를 선택하고 일본어와 영어를 혼합하면 일본어를 선택하십시오. 중국어와 일본어를 혼합하는 것은 아직 지원되지 않으며 대상 언어가 아닌 텍스트는 자동으로 버려집니다.",
|
104 |
+
"*请填写需要合成的目标文本": "*합성할 대상 텍스트를 입력하십시오",
|
105 |
+
"ASR任务开启:%s": "ASR 작업 시작: %s",
|
106 |
+
"GPT训练完成": "GPT 훈련 완료",
|
107 |
+
"GPT训练开始:%s": "GPT 훈련 시작: %s",
|
108 |
+
"SSL提取进程执行中": "SSL 추출 프로세스 실행 중",
|
109 |
+
"SSL提取进程结束": "SSL 추출 프로세스 종료",
|
110 |
+
"SoVITS训练完成": "SoVITS 훈련 완료",
|
111 |
+
"SoVITS训练开始:%s": "SoVITS 훈련 시작: %s",
|
112 |
+
"一键三连中途报错": "일괄 처리 중 오류 발생",
|
113 |
+
"一键三连进程结束": "일괄 처리 프로세스 종료",
|
114 |
+
"中文": "중국어",
|
115 |
+
"凑50字一切": "50자를 채우십시오",
|
116 |
+
"凑五句一切": "다섯 문장을 채우십시오",
|
117 |
+
"切分后文本": "분리된 텍스트",
|
118 |
+
"切割执行中": "분리 진행 중",
|
119 |
+
"切割结束": "분리 종료",
|
120 |
+
"参考音频的文本": "참고 오디오의 텍스트",
|
121 |
+
"参考音频的语种": "참고 오디오의 언어",
|
122 |
+
"合成语音": "합성 음성",
|
123 |
+
"后续将支持混合语种编码文本输入。": "향후 혼합 언어 코딩 텍스트 입력을 지원할 예정입니다.",
|
124 |
+
"已有正在进行的ASR任务,需先终止才能开启下一次任务": "이미 진행 중인 ASR 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
|
125 |
+
"已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "이미 진행 중인 GPT 훈련 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
|
126 |
+
"已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "이미 진행 중인 SSL 추출 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
|
127 |
+
"已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "이미 진행 중인 SoVITS 훈련 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
|
128 |
+
"已有正在进行的一键三连任务,需先终止才能开启下一次任务": "이미 진행 중인 일괄 처리 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
|
129 |
+
"已有正在进行的切割任务,需先终止才能开启下一次任务": "이미 진행 중인 분리 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
|
130 |
+
"已有正在进行的文本任务,需先终止才能开启下一次任务": "이미 진행 중인 텍스트 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
|
131 |
+
"已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "이미 진행 중인 의미 토큰 추출 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
|
132 |
+
"已终止ASR进程": "ASR 프로세스 종료됨",
|
133 |
+
"已终止GPT训练": "GPT 훈련 종료됨",
|
134 |
+
"已终止SoVITS训练": "SoVITS 훈련 종료됨",
|
135 |
+
"已终止所有1a进程": "모든 1a 프로세스 종료됨",
|
136 |
+
"已终止所有1b进程": "모든 1b 프로세스 종료됨",
|
137 |
+
"已终止所有一键三连进程": "모든 일괄 처리 프로세스 종료됨",
|
138 |
+
"已终止所有切割进程": "모든 분리 프로세스 종료됨",
|
139 |
+
"已终止所有语义token进程": "모든 의미 토큰 프로세스 종료됨",
|
140 |
+
"按中文句号。切": "중국어 문장으로 분리하십시오.",
|
141 |
+
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "텍스트 분리 도구. 너무 긴 텍스트는 합성 결과가 항상 좋지 않을 수 있으므로 너무 길면 먼저 분리하는 것이 좋습니다. 합성은 텍스트 줄 바꿈을 기준으로 분리되어 다시 조합됩니다.",
|
142 |
+
"文本进程执行中": "텍스트 프로세스 실행 중",
|
143 |
+
"文本进程结束": "텍스트 프로세스 종료",
|
144 |
+
"日文": "일본어",
|
145 |
+
"英文": "영어",
|
146 |
+
"语义token提取进程执行中": "의미 토큰 추출 프로세스 실행 중",
|
147 |
+
"语义token提取进程结束": "의미 토큰 추출 프로세스 종료",
|
148 |
+
"请上传参考音频": "참고 오디오를 업로드하십시오",
|
149 |
+
"输入路径不存在": "입력 경로가 존재하지 않습니다",
|
150 |
+
"输入路径存在但既不是文件也不是文件夹": "입력 경로가 파일이나 폴더가 아닙니다",
|
151 |
+
"输出的语音": "출력 음성",
|
152 |
+
"进度:1a-done": "진행: 1a-done",
|
153 |
+
"进度:1a-done, 1b-ing": "진행: 1a-done, 1b-ing",
|
154 |
+
"进度:1a-ing": "진행: 1a-ing",
|
155 |
+
"进度:1a1b-done": "진행: 1a1b-done",
|
156 |
+
"进度:1a1b-done, 1cing": "진행: 1a1b-done, 1cing",
|
157 |
+
"进度:all-done": "진행: all-done",
|
158 |
+
"需要合成的切分前文本": "합성해야 할 분할 전 텍스트",
|
159 |
+
"需要合成的文本": "합성해야 할 텍스트",
|
160 |
+
"需要合成的语种": "합성해야 할 언어",
|
161 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3이면 harvest 음고 인식 결과에 중앙값 필터를 사용하며, 값은 필터 반경이며 사용하면 소리를 약하게 할 수 있습니다",
|
162 |
+
"A模型权重": "A 모델 가중치",
|
163 |
+
"A模型路径": "A 모델 경로",
|
164 |
+
"B模型路径": "B 모델 경로",
|
165 |
+
"E:\\语音音频+标注\\米津玄师\\src": "E:\\음성 오디오 + 주석\\Miyuki Kenshi\\src",
|
166 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 곡선 파일, 선택 사항, 한 줄에 하나의 음고, 기본 F0 및 음조 대신 사용",
|
167 |
+
"Index Rate": "인덱스 비율",
|
168 |
+
"Onnx导出": "Onnx 내보내기",
|
169 |
+
"Onnx输出路径": "Onnx 출력 경로",
|
170 |
+
"RVC模型路径": "RVC 모델 경로",
|
171 |
+
"ckpt处理": "ckpt 처리",
|
172 |
+
"harvest进程数": "harvest 프로세스 수",
|
173 |
+
"index文件路径不可包含中文": "인덱스 파일 경로에는 중국어를 포함할 수 없습니다",
|
174 |
+
"pth文件路径不可包含中文": "pth 파일 경로에는 중국어를 포함할 수 없습니다",
|
175 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe 카드 번호 구성: 각 입력에 사용되는 다른 프로세스 카드를 -로 구분하여 입력하십시오. 예: 0-0-1은 카드 0에서 2개의 프로세스를 실행하고 카드 1에서 1개의 프로세스를 실행합니다",
|
176 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1: 실험 구성 입력. 실험 데이터는 logs 하위에 있으며 각 실험에 대한 폴더가 있어야합니다. 실험 이름 경로를 수동으로 입력해야하며 실험 구성, 로그, 훈련된 모델 파일이 포함되어 있습니다.",
|
177 |
+
"step1:正在处理数据": "step1: 데이터 처리 중",
|
178 |
+
"step2:正在提取音高&正在提取特征": "step2: 음고 추출 및 특징 추출 중",
|
179 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a: 자동으로 훈련 폴더에서 오디오로 디코딩할 수 있는 모든 파일을 반복하고 슬라이스 정규화를 수행하여 실험 디렉토리에 2 개의 wav 폴더를 생성합니다. 현재 단일 훈련만 지원됩니다.",
|
180 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "step2b: CPU로 음고 추출(모델이 음고를 지원하는 경우), GPU로 특징 추출(카드 번호 선택)",
|
181 |
+
"step3: 填写训练设置, 开始训练模型和索引": "step3: 훈련 설정 입력, 모델 및 인덱스 훈련 시작",
|
182 |
+
"step3a:正在训练模型": "step3a: 모델 훈련 중",
|
183 |
+
"一键训练": "일괄 훈련",
|
184 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "오디오 파일을 일괄로 입력할 수도 있습니다. 둘 중 하나를 선택하고 폴더를 읽기를 우선합니다.",
|
185 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "-로 구분하여 입력에 사용되는 카드 번호를 지정하십시오. 예 : 0-1-2는 카드 0, 1 및 2를 사용합니다",
|
186 |
+
"伴奏人声分离&去混响&去回声": "반주 및 보컬 분리 & 리버브 제거 & 에코 제거",
|
187 |
+
"使用模型采样率": "모델 샘플링 속도 사용",
|
188 |
+
"使用设备采样率": "기기 샘플링 속도 사용",
|
189 |
+
"保存名": "저장 이름",
|
190 |
+
"保存的文件名, 默认空为和源文件同名": "저장할 파일 이름, 기본적으로 공백은 원본 파일과 동일한 이름입니다",
|
191 |
+
"保存的模型名不带后缀": "저장할 모델 이름에는 확장자가 없습니다",
|
192 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "클리어 자음 및 숨소를 보호하여 전자 음향 찢김과 같은 아티팩트를 방지하려면 0.5로 설정하되, 보호 강도를 높이려면 0.5로 당기지 않고 낮추면 인덱스 효과가 감소할 수 있습니다",
|
193 |
+
"修改": "수정",
|
194 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "모델 정보 수정 (weights 폴더에서 추출된 작은 모델 파일만 지원됨)",
|
195 |
+
"停止音频转换": "오디오 변환 중지",
|
196 |
+
"全流程结束!": "전체 프로세스 완료!",
|
197 |
+
"刷新音色列表和索引路径": "음색 목록 및 인덱스 경로 새로 고침",
|
198 |
+
"加载模型": "모델 로드",
|
199 |
+
"加载预训练底模D路径": "사전 훈련된 기본 모델 D 경로 로드",
|
200 |
+
"加载预训练底模G路径": "사전 훈련된 기본 모델 G 경로 로드",
|
201 |
+
"单次推理": "단일 추론",
|
202 |
+
"卸载音色省显存": "음색 언로드 및 GPU 메모리 절약",
|
203 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "음높이 변경(정수, 반음 수, 올림 높이 12 내림 높이 -12)",
|
204 |
+
"后处理重采样至最终采样率,0为不进行重采样": "후 처리를 통한 최종 샘플링률 재샘플링, 0은 재샘플링 미실행",
|
205 |
+
"否": "아니오",
|
206 |
+
"启用相位声码器": "페이즈 보코더 사용",
|
207 |
+
"响应阈值": "응답 임계���",
|
208 |
+
"响度因子": "음량 요소",
|
209 |
+
"处理数据": "데이터 처리",
|
210 |
+
"导出Onnx模型": "Onnx 모델 내보내기",
|
211 |
+
"导出文件格式": "내보내기 파일 형식",
|
212 |
+
"常见问题解答": "자주 묻는 질문 해결",
|
213 |
+
"常规设置": "일반 설정",
|
214 |
+
"开始音频转换": "오디오 변환 시작",
|
215 |
+
"性能设置": "성능 설정",
|
216 |
+
"批量推理": "일괄 추론",
|
217 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "일괄 변환, 변환 대기 중인 오디오 폴더를 입력하거나 여러 오디오 파일을 업로드하고 지정된 폴더(opt 기본값)에 변환된 오디오를 출력합니다.",
|
218 |
+
"指定输出主人声文件夹": "지정된 주인 목소리 출력 폴더",
|
219 |
+
"指定输出文件夹": "지정된 출력 폴더",
|
220 |
+
"指定输出非主人声文件夹": "지정된 비주인 목소리 출력 폴더",
|
221 |
+
"推理时间(ms):": "추론 시간(ms):",
|
222 |
+
"推理音色": "추론 음색",
|
223 |
+
"提取": "추출",
|
224 |
+
"提取音高和处理数据使用的CPU进程数": "음높이 추출 및 데이터 처리에 사용되는 CPU 프로세스 수 추출",
|
225 |
+
"是": "예",
|
226 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "모든 훈련 세트를 GPU 메모리에 캐시할지 여부. 10분 미만의 소량 데이터는 훈련 속도를 높이기 위해 캐시할 수 있지만, 대량 데이터를 캐시하면 메모리가 터지고 속도가 크게 향상되지 않을 수 있습니다.",
|
227 |
+
"查看": "보기",
|
228 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "모델 정보보기(작은 모델 파일로 추출된 weights 폴더에서만 지원)",
|
229 |
+
"检索特征占比": "특징 비율 검색",
|
230 |
+
"模型": "모델",
|
231 |
+
"模型推理": "모델 추론",
|
232 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "모델 추출(로그 폴더에 대형 파일 모델 경로 입력), 반 훈련하고 싶지 않거나 모델이 자동으로 작은 파일 모델로 추출되지 않았거나 중간 모델을 테스트하려는 경우에 사용",
|
233 |
+
"模型是否带音高指导": "모델에 음높이 안내가 있는지 여부",
|
234 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "모델에 음높이 안내가 있는지 여부(노래에는 필수, 음성은 선택 사항)",
|
235 |
+
"模型是否带音高指导,1是0否": "모델에 음높이 안내가 있는지 여부, 1이면 있음 0이면 없음",
|
236 |
+
"模型版本型号": "모델 버전 및 모델 번호",
|
237 |
+
"模型融合, 可用于测试音色融合": "모델 통합, 음색 통합 테스트에 사용 가능",
|
238 |
+
"模型路径": "모델 경로",
|
239 |
+
"淡入淡出长度": "페이드 인/아웃 길이",
|
240 |
+
"版本": "버전",
|
241 |
+
"特征提取": "특성 추출",
|
242 |
+
"特征检索库文件路径,为空则使用下拉的选择结果": "특성 검색 라이브러리 파일 경로, 비어 있으면 드롭다운 선택 결과 사용",
|
243 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "남성을 여성으로 추천 +12키, 여성을 남성으로 추천 -12키, 음역 폭발로 음색이 왜곡되면 적절한 음역으로 직접 조절 가능",
|
244 |
+
"目标采样率": "목표 샘플링률",
|
245 |
+
"算法延迟(ms):": "알고리즘 지연 시간(ms):",
|
246 |
+
"自动检测index路径,下拉式选择(dropdown)": "자동으로 index 경로 감지, 드롭다운 선택",
|
247 |
+
"融合": "융합",
|
248 |
+
"要改的模型信息": "수정할 모델 정보",
|
249 |
+
"要置入的模型信息": "삽입할 모델 정보",
|
250 |
+
"训练": "훈련",
|
251 |
+
"训练模型": "모델 훈련",
|
252 |
+
"训练特征索引": "특성 인덱스 훈련",
|
253 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "훈련 종료, 콘솔 훈련 로그 또는 실험 폴더의 train.log를 확인할 수 있습니다",
|
254 |
+
"请指定说话人id": "화자 ID 지정",
|
255 |
+
"请选择index文件": "index 파일 선택",
|
256 |
+
"请选择pth文件": "pth 파일 선택",
|
257 |
+
"请选择说话人id": "화자 ID 선택",
|
258 |
+
"转换": "변환",
|
259 |
+
"输入实验名": "실험명 입력",
|
260 |
+
"输入待处理音频文件夹路径": "처리 대기 중인 오디오 폴더 경로 입력",
|
261 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "처리 대기 중인 오디오 폴더 경로 입력(파일 관리자 주소 표시 줄에서 복사하면 됨)",
|
262 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "처리 대기 중인 오디오 파일 경로 입력(기본적으로 올바른 형식의 예제)",
|
263 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "소스 음량 에너벌롭을 입력하여 출력 음량 에너벌롭 합성 비율을 대체하면 1에 가까울수록 출력 에너벌롭 사용",
|
264 |
+
"输入���听": "입력 모니터링",
|
265 |
+
"输入训练文件夹路径": "훈련 폴더 경로 입력",
|
266 |
+
"输入设备": "입력 장치",
|
267 |
+
"输入降噪": "노이즈 감소 입력",
|
268 |
+
"输出信息": "출력 정보",
|
269 |
+
"输出变声": "음성 출력",
|
270 |
+
"输出设备": "출력 장치",
|
271 |
+
"输出降噪": "노이즈 감소 출력",
|
272 |
+
"输出音频(右下角三个点,点了可以下载)": "출력 오디오(우하단 세 점, 클릭하면 다운로드 가능)",
|
273 |
+
"选择.index文件": "index 파일 선택",
|
274 |
+
"选择.pth文件": "pth 파일 선택",
|
275 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "음높이 추출 알고리즘 선택, 노래 입력에 pm 사용 가능, harvest는 저음이 좋지만 매우 느림, crepe 효과는 좋지만 GPU 사용",
|
276 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "음높이 추출 알고리즘 선택, 노래 입력에 pm 사용 가능, harvest는 저음이 좋지만 매우 느림, crepe 효과는 좋지만 GPU 사용, rmvpe 효과가 가장 좋으며 약간의 GPU 사용",
|
277 |
+
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "음높이 추출 알고리즘 선택: 노래 입력에 pm 사용 가능, 고품질 음성이지만 CPU가 낮음, dio 사용 가능, harvest 품질이 더 좋지만 느림, rmvpe 효과가 최고이며 CPU/GPU 약간 사용",
|
278 |
+
"采样率:": "샘플링률:",
|
279 |
+
"采样长度": "샘플링 길이",
|
280 |
+
"重载设备列表": "장치 목록 다시로드",
|
281 |
+
"音调设置": "음조 설정",
|
282 |
+
"音频设备(请使用同种类驱动)": "오디오 장치(동일한 유형의 드라이버 사용 권장)",
|
283 |
+
"音高算法": "음높이 알고리즘",
|
284 |
+
"额外推理时长": "추가 추론 시간"
|
285 |
+
}
|
i18n/locale/pt_BR.json
ADDED
@@ -0,0 +1,93 @@
1 |
+
{
|
2 |
+
"很遗憾您这没有能用的显卡来支持您训练": "Infelizmente, você não possui uma placa de vídeo funcional para suportar seu treinamento",
|
3 |
+
"UVR5已开启": "UVR5 está ativado",
|
4 |
+
"UVR5已关闭": "UVR5 está desativado",
|
5 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Este software é de código aberto sob a licença MIT. O autor não tem controle sobre o software. Aqueles que usam o software e difundem os sons exportados pelo software são totalmente responsáveis. <br>Se você não concorda com esta cláusula, não pode usar ou citar nenhum código e arquivo dentro do pacote de software. Consulte o diretório raiz <b>LICENSE</b> para mais detalhes.<br><br> Traduzido por Rafael Godoy Ebert",
|
6 |
+
"0-前置数据集获取工具": "0- Ferramenta de aquisição de conjunto de dados pré-frontal",
|
7 |
+
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0A-UVR5 separação de voz e acompanhamento instrumental & ferramenta para remover reverberação e atraso",
|
8 |
+
"是否开启UVR5-WebUI": "Se deseja ativar a UVR5-WEBUI",
|
9 |
+
"UVR5进程输出信息": "Informações de saída do processo UVR5",
|
10 |
+
"0b-语音切分工具": "0b- Ferramenta de corte de voz",
|
11 |
+
"音频自动切分输入路径,可文件可文件夹": "Caminho de entrada automático de corte de áudio, pode ser um arquivo ou uma pasta",
|
12 |
+
"切分后的子音频的输出根目录": "Diretório raiz de saída do sub-áudio após o corte",
|
13 |
+
"threshold:音量小于这个值视作静音的备选切割点": "Limiar: O volume menor que este valor é considerado como um ponto de corte mudo alternativo",
|
14 |
+
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: O comprimento mínimo de cada parágrafo, se o primeiro for muito curto, conecte-o continuamente aos próximos até ultrapassar este valor",
|
15 |
+
"min_interval:最短切割间隔": "min_interval: O intervalo de corte mínimo",
|
16 |
+
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "HOP_SIZE: Como calcular a curva de volume, quanto menor a precisão, maior a quantidade de cálculos (não significa que quanto maior a precisão, melhor o efeito)",
|
17 |
+
"max_sil_kept:切完后静音最多留多长": "max_sil_kept: Depois de cortar, por quanto tempo no máximo o silêncio é mantido",
|
18 |
+
"开启语音切割": "Ativar corte de voz",
|
19 |
+
"终止语音切割": "Encerrar corte de voz",
|
20 |
+
"max:归一化后最大值多少": "MAX: Qual é o valor máximo após a normalização?",
|
21 |
+
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix: Em que proporção o áudio normalizado é misturado de volta",
|
22 |
+
"切割使用的进程数": "Número de processos para corte",
|
23 |
+
"语音切割进程输出信息": "Informações de saída do processo de corte de voz",
|
24 |
+
"0c-中文批量离线ASR工具": "0c- Ferramenta chinesa de ASR offline em lote",
|
25 |
+
"开启离线批量ASR": "Ativar ASR offline em lote",
|
26 |
+
"终止ASR进程": "Encerrar processo ASR",
|
27 |
+
"批量ASR(中文only)输入文件夹路径": "Caminho da pasta de entrada para ASR em lote (apenas chinês)",
|
28 |
+
"ASR进程输出信息": "Informações de saída do processo ASR",
|
29 |
+
"0d-语音文本校对标注工具": "0d- Ferramenta de correção e marcação de texto de voz",
|
30 |
+
"是否开启打标WebUI": "Se deseja abrir o webui de marcação",
|
31 |
+
"打标数据标注文件路径": "Caminho do arquivo de marcação de dados de marcação",
|
32 |
+
"打标工具进程输出信息": "Informações de saída do processo da ferramenta de marcação",
|
33 |
+
"1-GPT-SoVITS-TTS": "1-GPT-SOVITS-TTS",
|
34 |
+
"*实验/模型名": "*Nome do experimento/modelo",
|
35 |
+
"显卡信息": "Informações da placa de vídeo",
|
36 |
+
"预训练的SoVITS-G模型路径": "Caminho do modelo SoVITS-G pre-train",
|
37 |
+
"预训练的SoVITS-D模型路径": "Caminho do modelo SoVITS-D pre-train",
|
38 |
+
"预训练的GPT模型路径": "Caminho do modelo GPT pre-train",
|
39 |
+
"1A-训练集格式化工具": "1A-Ferramenta de formatação de conjunto de dados de treinamento",
|
40 |
+
"输出logs/实验名目录下应有23456开头的文件和文件夹": "Logs de saída/deve haver arquivos e pastas começando com 23456 no diretório do nome do experimento",
|
41 |
+
"*文本标注文件": "*Arquivo de marcação de texto",
|
42 |
+
"*训练集音频文件目录": "*Diretório de arquivos de áudio do conjunto de treinamento",
|
43 |
+
"训练集音频文件目录 拼接 list文件里波形对应的文件名。": "Diretório de arquivos de áudio do conjunto de treinamento. Concatene o nome do arquivo correspondente à forma de onda no arquivo de lista",
|
44 |
+
"1Aa-文本内容": "1AA-Conteúdo do texto",
|
45 |
+
"GPU卡号以-分割,每个卡号一个进程": "Número da placa de vídeo dividido por-, cada número de placa é um processo",
|
46 |
+
"预训练的中文BERT模型路径": "Caminho do modelo BERT chinês pre-train",
|
47 |
+
"开启文本获取": "Ativar obtenção de texto",
|
48 |
+
"终止文本获取进程": "Encerrar processo de obtenção de texto",
|
49 |
+
"文本进程输出信息": "Informações de saída do processo de texto",
|
50 |
+
"1Ab-SSL自监督特征提取": "1AB-Extração de características auto-supervisionadas SSL",
|
51 |
+
"预训练的SSL模型路径": "Caminho do modelo SSL pre-train",
|
52 |
+
"开启SSL提取": "Ativar extração SSL",
|
53 |
+
"终止SSL提取进程": "Encerrar processo de extração SSL",
|
54 |
+
"SSL进程输出信息": "Informações de saída do processo SSL",
|
55 |
+
"1Ac-语义token提取": "1AC-Extração de token semântico",
|
56 |
+
"开启语义token提取": "Ativar extração de token semântico",
|
57 |
+
"终止语义token提取进程": "Encerrar processo de extração de token semântico",
|
58 |
+
"语义token提取进程输出信息": "Informações de saída do processo de extração de token semântico",
|
59 |
+
"1Aabc-训练集格式化一键三连": "1AABC-Formatação de conjunto de treinamento em um clique",
|
60 |
+
"开启一键三连": "Ativar um clique",
|
61 |
+
"终止一键三连": "Encerrar um clique",
|
62 |
+
"一键三连进程输出信息": "Informações de saída do processo de um clique",
|
63 |
+
"1B-微调训练": "1B-Treinamento de ajuste fino",
|
64 |
+
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1ba-Treinamento SoVITS. O arquivo de modelo para compartilhamento é gerado em SOVITS_WEIGHTS",
|
65 |
+
"每张显卡的batch_size": "Tamanho do lote de cada placa de vídeo",
|
66 |
+
"总训练轮数total_epoch,不建议太高": "Total de epoch de treinamento, não é recomendável um valor muito alto",
|
67 |
+
"文本模块学习率权重": "Weight da taxa de aprendizado do módulo de texto",
|
68 |
+
"保存频率save_every_epoch": "Frequência de salvamento save_every_epoch",
|
69 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "Se deve salvar apenas o último arquivo CKPT para economizar espaço em disco",
|
70 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Se deve salvar o modelo pequeno final na pasta Weights em cada ponto de salvamento de tempo",
|
71 |
+
"开启SoVITS训练": "Ativar treinamento SoVITS",
|
72 |
+
"终止SoVITS训练": "Encerrar treinamento SoVITS",
|
73 |
+
"SoVITS训练进程输出信息": "Informações de saída do processo de treinamento SoVITS",
|
74 |
+
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1BB-Treinamento GPT. O arquivo de modelo para compartilhamento é gerado em GPT_WEIGHTS",
|
75 |
+
"总训练轮数total_epoch": "Total de epoch de treinamento",
|
76 |
+
"开启GPT训练": "Ativar treinamento GPT",
|
77 |
+
"终止GPT训练": "Encerrar treinamento GPT",
|
78 |
+
"GPT训练进程输出信息": "Informações de saída do processo de treinamento GPT",
|
79 |
+
"1C-推理": "1C-raciocínio",
|
80 |
+
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Selecione os modelos armazenados em Sovits_weights e GPT_WEIGHTS. O padrão é o modelo inferior, experiência para 5 segundos de Zero Shot TTS",
|
81 |
+
"*GPT模型列表": "*Lista de modelos GPT",
|
82 |
+
"*SoVITS模型列表": "*Lista de modelos Sovits",
|
83 |
+
"GPU卡号,只能填1个整数": "Número da placa de vídeo, só é possível preencher com um número inteiro",
|
84 |
+
"刷新模型路径": "Atualizar caminho do modelo",
|
85 |
+
"是否开启TTS推理WebUI": "Se deseja ativar o webui de raciocínio TTS",
|
86 |
+
"TTS推理WebUI进程输出信息": "Informações de saída do processo webui de raciocínio TTS",
|
87 |
+
"2-GPT-SoVITS-变声": "2-gpt-sovits-mudança de voz",
|
88 |
+
"施工中,请静候佳音": "Em construção, por favor, aguarde por um bom som",
|
89 |
+
"TTS推理进程已开启": "O processo de inferência TTS foi iniciado",
|
90 |
+
"TTS推理进程已关闭": "O processo de inferência TTS foi desativado",
|
91 |
+
"打标工具WebUI已开启": "A ferramenta de marcação WebUI está ativada",
|
92 |
+
"打标工具WebUI已关闭": "A ferramenta de marcação WebUI foi desativado"
|
93 |
+
}
|
i18n/locale/ru_RU.json
ADDED
@@ -0,0 +1,135 @@
1 |
+
{
|
2 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Если значение больше 3: применить медианную фильтрацию к вытащенным тональностям. Значение контролирует радиус фильтра и может уменьшить излишнее дыхание.",
|
3 |
+
"A模型权重": "Весы (w) модели А:",
|
4 |
+
"A模型路径": "Путь к модели А:",
|
5 |
+
"B模型路径": "Путь к модели Б:",
|
6 |
+
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
|
7 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Файл дуги F0 (не обязательно). Одна тональность на каждую строчку. Заменяет обычный F0 и модуляцию тональности:",
|
8 |
+
"Index Rate": "Темп индекса",
|
9 |
+
"Onnx导出": "Экспорт ONNX",
|
10 |
+
"Onnx输出路径": "Путь для сохранения модели в формате ONNX:",
|
11 |
+
"RVC模型路径": "Путь к модели RVC:",
|
12 |
+
"ckpt处理": "Обработка ckpt",
|
13 |
+
"harvest进程数": "Количество процессор harvest",
|
14 |
+
"index文件路径不可包含中文": "Путь к файлу индекса",
|
15 |
+
"pth文件路径不可包含中文": "Путь к файлу pth",
|
16 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Введите номера графических процессоров, разделенные символом «-», например, 0-0-1, чтобы запустить два процесса на GPU 0 и один процесс на GPU 1:",
|
17 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Шаг 1. Конфигурирование модели. Данные обучения модели сохраняются в папку 'logs', и для каждой модели создаётся отдельная папка. Введите вручную путь к настройкам для модели, в которой находятся логи и тренировочные файлы.",
|
18 |
+
"step1:正在处理数据": "Шаг 1. Переработка данных",
|
19 |
+
"step2:正在提取音高&正在提取特征": "step2:正在提取音高&正在提取特征",
|
20 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Шаг 2А. Автоматическая обработка исходных аудиозаписей для обучения и выполнение нормализации среза. Создаст 2 папки wav в папке модели. В данный момент поддерживается обучение только на одноголосных записях.",
|
21 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Шаг 2Б. Оценка и извлечение тональности в аудиофайлах с помощью процессора (если включена поддержка изменения высоты звука), извлечение черт с помощью GPU (выберите номер GPU):",
|
22 |
+
"step3: 填写训练设置, 开始训练模型和索引": "Шаг 3. Заполнение дополнительных настроек обучения и запуск обучения модели и индекса",
|
23 |
+
"step3a:正在训练模型": "Шаг 3. Запуск обучения модели",
|
24 |
+
"一键训练": "Обучение в одно нажатие",
|
25 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "Можно также импортировать несколько аудиофайлов. Если путь к папке существует, то этот ввод игнорируется.",
|
26 |
+
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Пакетная обработка для разделения вокального сопровождения с использованием модели UVR5.<br>Пример допустимого формата пути к папке: D:\\path\\to\\input\\folder<br> Модель разделена на три категории:<br>1. Сохранить вокал: выберите этот вариант для звука без гармоний. Он сохраняет вокал лучше, чем HP5. Он включает в себя две встроенные модели: HP2 и HP3. HP3 может немного пропускать инструментал, но сохраняет вокал немного лучше, чем HP2.<br>2. Сохранить только основной вокал: выберите этот вариант для звука с гармониями. Это может ослабить основной вокал. Он включает одну встроенную модель: HP5.<br>3. Модели удаления реверберации и задержки (от FoxJoy):<br> (1) MDX-Net: лучший выбор для удаления стереореверберации, но он не может удалить монореверберацию;<br> (234) DeEcho: удаляет эффекты задержки. Агрессивный режим удаляет более тщательно, чем Нормальный режим. DeReverb дополнительно удаляет реверберацию и может удалять монореверберацию, но не очень эффективно для сильно реверберированного высокочастотного контента.<br>Примечания по удалению реверберации/задержки:<br>1. Время обработки для модели DeEcho-DeReverb примерно в два раза больше, чем для двух других моделей DeEcho.<br>2. Модель MDX-Net-Dereverb довольно медленная.<br>3. Рекомендуемая самая чистая конфигурация — сначала применить MDX-Net, а затем DeEcho-Aggressive.",
|
27 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Введите, какие(-ую) GPU(-у) хотите использовать через '-', например 0-1-2, чтобы использовать GPU с номерами 0, 1 и 2:",
|
28 |
+
"伴奏人声分离&去混响&去回声": "Разделение вокала/аккомпанемента и удаление эхо",
|
29 |
+
"使用模型采样率": "使用模型采样率",
|
30 |
+
"使用设备采样率": "使用设备采样率",
|
31 |
+
"保存名": "Имя файла для сохранения:",
|
32 |
+
"保存的文件名, 默认空为和源文件同名": "Название сохранённого файла (по умолчанию: такое же, как и у входного):",
|
33 |
+
"保存的模型名不带后缀": "Имя файла модели для сохранения (без расширения):",
|
34 |
+
"保存频率save_every_epoch": "Частота сохранения (save_every_epoch):",
|
35 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Защитить глухие согласные и звуки дыхания для предотвращения артефактов, например, разрывания в электронной музыке. Поставьте на 0.5, чтобы выключить. Уменьшите значение для повышения защиты, но учтите, что при этом может ухудшиться точность индексирования:",
|
36 |
+
"修改": "Изменить",
|
37 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Изменить информацию о модели (работает только с маленькими моделями, взятыми из папки 'weights')",
|
38 |
+
"停止音频转换": "Закончить конвертацию аудио",
|
39 |
+
"全流程结束!": "Все процессы завершены!",
|
40 |
+
"刷新音色列表和索引路径": "Обновить список голосов и индексов",
|
41 |
+
"加载模型": "Загрузить модель",
|
42 |
+
"加载预训练底模D路径": "Путь к предварительно обученной базовой модели D:",
|
43 |
+
"加载预训练底模G路径": "Путь к предварительно обученной базовой модели G:",
|
44 |
+
"单次推理": "单次推理",
|
45 |
+
"卸载音色省显存": "Выгрузить модель из памяти GPU для освобождения ресурсов",
|
46 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "Изменить высоту голоса (укажите количество полутонов; чтобы поднять голос на октаву, выберите 12, понизить на октаву — -12):",
|
47 |
+
"后处理重采样至最终采样率,0为不进行重采样": "Изменить частоту дискретизации в выходном файле на финальную. Поставьте 0, чтобы ничего не изменялось:",
|
48 |
+
"否": "Нет",
|
49 |
+
"启用相位声码器": "启用相位声码器",
|
50 |
+
"响应阈值": "Порог ответа",
|
51 |
+
"响度因子": "коэффициент громкости",
|
52 |
+
"处理数据": "Обработать данные",
|
53 |
+
"导出Onnx模型": "Экспортировать модель",
|
54 |
+
"导出文件格式": "Формат выходных файлов",
|
55 |
+
"常见问题解答": "ЧаВо (часто задаваемые вопросы)",
|
56 |
+
"常规设置": "Основные настройки",
|
57 |
+
"开始音频转换": "Начать конвертацию аудио",
|
58 |
+
"很遗憾您这没有能用的显卡来支持您训练": "К сожалению, у вас нету графического процессора, который поддерживает обучение моделей.",
|
59 |
+
"性能设置": "Настройки быстроты",
|
60 |
+
"总训练轮数total_epoch": "Полное количество эпох (total_epoch):",
|
61 |
+
"批量推理": "批量推理",
|
62 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Массовое преобразование. Введите путь к папке, в которой находятся файлы для преобразования голоса или выгрузите несколько аудиофайлов. Сконвертированные файлы будут сохранены в указанной папке (по умолчанию: 'opt').",
|
63 |
+
"指定输出主人声文件夹": "Путь к папке для сохранения вокала:",
|
64 |
+
"指定输出文件夹": "Папка для результатов:",
|
65 |
+
"指定输出非主人声文件夹": "Путь к папке для сохранения аккомпанемента:",
|
66 |
+
"推理时间(ms):": "Время переработки (мс):",
|
67 |
+
"推理音色": "Желаемый голос:",
|
68 |
+
"提取": "Создать модель",
|
69 |
+
"提取音高和处理数据使用的CPU进程数": "Число процессов ЦП, используемое для оценки высоты голоса и обработки данных:",
|
70 |
+
"是": "Да",
|
71 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "Сохранять только последний файл '.ckpt', чтобы сохранить место на диске:",
|
72 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Сохранять маленькую финальную модель в папку 'weights' на каждой точке сохранения:",
|
73 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Кэшировать все тренировочные сеты в видеопамять. Кэширование маленький датасетов (меньше 10 минут) может ускорить тренировку, но кэширование больших, наоборот, займёт много видеопамяти и не сильно ускорит тренировку:",
|
74 |
+
"显卡信息": "Информация о графических процессорах (GPUs):",
|
75 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Это программное обеспечение с открытым исходным кодом распространяется по лицензии MIT. Автор никак не контролирует это программное обеспечение. Пользователи, которые используют эту программу и распространяют аудиозаписи, полученные с помощью этой программы, несут полную ответственность за это. Если вы не согласны с этим, вы не можете использовать какие-либо коды и файлы в рамках этой программы или ссылаться на них. Подробнее в файле <b>Agreement-LICENSE.txt</b> в корневом каталоге программы.",
|
76 |
+
"查看": "Просмотреть информацию",
|
77 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Просмотреть информацию о модели (работает только с маленькими моделями, взятыми из папки 'weights')",
|
78 |
+
"检索特征占比": "Соотношение поиска черт:",
|
79 |
+
"模型": "Модели",
|
80 |
+
"模型推理": "Изменение голоса",
|
81 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Создание модели из данных, полученных в процессе обучения (введите путь к большому файлу модели в папке 'logs'). Может пригодиться, если вам нужно завершить обучение и получить маленький файл готовой модели, или если вам нужно проверить недообученную модель:",
|
82 |
+
"模型是否带音高指导": "Поддерживает ли модель изменение высоты голоса (1: да, 0: нет):",
|
83 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "Поддержка изменения высоты звука (обязательно для пения, необязательно для речи):",
|
84 |
+
"模型是否带音高指导,1是0否": "Поддерживает ли модель изменение высоты голоса (1: да, 0: нет):",
|
85 |
+
"模型版本型号": "Версия архитектуры модели:",
|
86 |
+
"模型融合, 可用于测试音色融合": "Слияние моделей, может быть использовано для проверки слияния тембра",
|
87 |
+
"模型路径": "Путь к папке:",
|
88 |
+
"每张显卡的batch_size": "Размер пачки для GPU:",
|
89 |
+
"淡入淡出长度": "Длина затухания",
|
90 |
+
"版本": "Версия архитектуры модели:",
|
91 |
+
"特征提取": "Извлечь черты",
|
92 |
+
"特征检索库文件路径,为空则使用下拉的选择结果": "Путь к файлу индекса черт. Оставьте пустым, чтобы использовать выбранный вариант из списка ниже:",
|
93 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Рекомендуется выбрать +12 для конвертирования мужского голоса в женский и -12 для конвертирования женского в мужской. Если диапазон голоса слишком велик, и голос искажается, можно выбрать значение на свой вкус.",
|
94 |
+
"目标采样率": "Частота дискретизации аудио:",
|
95 |
+
"算法延迟(ms):": "算法延迟(ms):",
|
96 |
+
"自动检测index路径,下拉式选择(dropdown)": "Автоматически найденные файлы индексов черт (выберите вариант из списка):",
|
97 |
+
"融合": "Запустить слияние",
|
98 |
+
"要改的模型信息": "Информация, которая будет изменена:",
|
99 |
+
"要置入的模型信息": "Информация о модели:",
|
100 |
+
"训练": "Обучение модели",
|
101 |
+
"训练模型": "Обучить модель",
|
102 |
+
"训练特征索引": "Обучить индекс черт",
|
103 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Обучение модели завершено. Журнал обучения можно просмотреть в консоли или в файле 'train.log' в папке с моделью.",
|
104 |
+
"请指定说话人id": "Номер говорящего/поющего:",
|
105 |
+
"请选择index文件": "Пожалуйста, выберите файл индекса",
|
106 |
+
"请选择pth文件": "Пожалуйста, выберите файл pth",
|
107 |
+
"请选择说话人id": "Номер говорящего:",
|
108 |
+
"转换": "Преобразовать",
|
109 |
+
"输入实验名": "Название модели:",
|
110 |
+
"输入待处理音频文件夹路径": "Путь к папке с аудиофайлами для обработки:",
|
111 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Путь к папке с аудиофайлами для переработки (можно скопировать путь из адресной строки файлового менеджера):",
|
112 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "Путь к аудиофайлу, который хотите обработать (ниже указан пример пути к файлу):",
|
113 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Использовать громкость входного файла для замены или перемешивания с громкостью выходного файла. Чем ближе соотношение к 1, тем больше используется звука из выходного файла:",
|
114 |
+
"输入监听": "输入监听",
|
115 |
+
"输入训练文件夹路径": "Путь к папке с аудиозаписями, на которых будет обучаться модель:",
|
116 |
+
"输入设备": "Входное устройство",
|
117 |
+
"输入降噪": "Уменьшение входного шума",
|
118 |
+
"输出信息": "Статистика",
|
119 |
+
"输出变声": "输出变声",
|
120 |
+
"输出设备": "Выходное устройство",
|
121 |
+
"输出降噪": "Уменьшение выходного шума",
|
122 |
+
"输出音频(右下角三个点,点了可以下载)": "Аудиофайл (чтобы скачать, нажмите на три точки справа в плеере)",
|
123 |
+
"选择.index文件": "Выбрать файл .index",
|
124 |
+
"选择.pth文件": "Выбрать файл .pth",
|
125 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
|
126 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Выберите алгоритм оценки высоты голоса ('pm': работает быстро, но даёт низкое качество речи; 'harvest': басы лучше, но работает очень медленно; 'crepe': лучшее качество, но сильно нагружает GPU; 'rmvpe': лучшее качество и минимальная нагрузка на GPU):",
|
127 |
+
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU",
|
128 |
+
"采样率:": "采样率:",
|
129 |
+
"采样长度": "Длина сэмпла",
|
130 |
+
"重载设备列表": "Обновить список устройств",
|
131 |
+
"音调设置": "Настройка высоты звука",
|
132 |
+
"音频设备(请使用同种类驱动)": "Аудиоустройство (пожалуйста, используйте такой же тип драйвера)",
|
133 |
+
"音高算法": "Алгоритм оценки высоты звука",
|
134 |
+
"额外推理时长": "Доп. время переработки"
|
135 |
+
}
|
i18n/locale/tr_TR.json
ADDED
@@ -0,0 +1,135 @@
1 |
+
{
|
2 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Eğer >=3 ise, elde edilen pitch sonuçlarına median filtreleme uygula. Bu değer, filtre yarıçapını temsil eder ve nefesliliği azaltabilir.",
|
3 |
+
"A模型权重": "A Modeli Ağırlığı:",
|
4 |
+
"A模型路径": "A Modeli Yolu:",
|
5 |
+
"B模型路径": "B Modeli Yolu:",
|
6 |
+
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
|
7 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 eğrisi dosyası (isteğe bağlı). Her satırda bir pitch değeri bulunur. Varsayılan F0 ve pitch modülasyonunu değiştirir:",
|
8 |
+
"Index Rate": "Index Oranı",
|
9 |
+
"Onnx导出": "Onnx Dışa Aktar",
|
10 |
+
"Onnx输出路径": "Onnx Dışa Aktarım Yolu:",
|
11 |
+
"RVC模型路径": "RVC Model Yolu:",
|
12 |
+
"ckpt处理": "ckpt İşleme",
|
13 |
+
"harvest进程数": "harvest进程数",
|
14 |
+
"index文件路径不可包含中文": ".index dosya yolu Çince karakter içeremez",
|
15 |
+
"pth文件路径不可包含中文": ".pth dosya yolu Çince karakter içeremez",
|
16 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程",
|
17 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Adım 1: Deneysel yapılandırmayı doldurun. Deneysel veriler 'logs' klasöründe saklanır ve her bir deney için ayrı bir klasör vardır. Deneysel adı yolu manuel olarak girin; bu yol, deneysel yapılandırmayı, günlükleri ve eğitilmiş model dosyalarını içerir.",
|
18 |
+
"step1:正在处理数据": "Adım 1: Veri işleme",
|
19 |
+
"step2:正在提取音高&正在提取特征": "step2:正在提取音高&正在提取特征",
|
20 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Adım 2a: Eğitim klasöründe ses dosyalarını otomatik olarak gezinerek dilimleme normalizasyonu yapın. Deney dizini içinde 2 wav klasörü oluşturur. Şu anda sadece tek kişilik eğitim desteklenmektedir.",
|
21 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Adım 2b: Ses yüksekliği (Pitch) çıkartmak için CPU kullanın (eğer model ses yüksekliği içeriyorsa), özellikleri çıkartmak için GPU kullanın (GPU indeksini seçin):",
|
22 |
+
"step3: 填写训练设置, 开始训练模型和索引": "Adım 3: Eğitim ayarlarını doldurun ve modeli ve dizini eğitmeye başlayın",
|
23 |
+
"step3a:正在训练模型": "Adım 3a: Model eğitimi başladı",
|
24 |
+
"一键训练": "Tek Tuşla Eğit",
|
25 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "Ses dosyaları ayrıca toplu olarak, iki seçimle, öncelikli okuma klasörüyle içe aktarılabilir",
|
26 |
+
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Batch işleme kullanarak vokal eşlik ayrımı için UVR5 modeli kullanılır.<br>Geçerli bir klasör yol formatı örneği: D:\\path\\to\\input\\folder (dosya yöneticisi adres çubuğundan kopyalanır).<br>Model üç kategoriye ayrılır:<br>1. Vokalleri koru: Bu seçeneği, harmoni içermeyen sesler için kullanın. HP5'ten daha iyi bir şekilde vokalleri korur. İki dahili model içerir: HP2 ve HP3. HP3, eşlik sesini hafifçe sızdırabilir, ancak vokalleri HP2'den biraz daha iyi korur.<br>2. Sadece ana vokalleri koru: Bu seçeneği, harmoni içeren sesler için kullanın. Ana vokalleri zayıflatabilir. Bir dahili model içerir: HP5.<br>3. Reverb ve gecikme modelleri (FoxJoy tarafından):<br> (1) MDX-Net: Stereo reverb'i kaldırmak için en iyi seçenek, ancak mono reverb'i kaldıramaz;<br> (234) DeEcho: Gecikme efektlerini kaldırır. Agresif mod, Normal moda göre daha kapsamlı bir şekilde kaldırma yapar. DeReverb ayrıca reverb'i kaldırır ve mono reverb'i kaldırabilir, ancak yoğun yankılı yüksek frekanslı içerikler için çok etkili değildir.<br>Reverb/gecikme notları:<br>1. DeEcho-DeReverb modelinin işleme süresi diğer iki DeEcho modeline göre yaklaşık olarak iki kat daha uzundur.<br>2. MDX-Net-Dereverb modeli oldukça yavaştır.<br>3. Tavsiye edilen en temiz yapılandırma önce MDX-Net'i uygulamak ve ardından DeEcho-Aggressive uygulamaktır.",
|
27 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "GPU indekslerini '-' ile ayırarak girin, örneğin 0-1-2, GPU 0, 1 ve 2'yi kullanmak için:",
|
28 |
+
"伴奏人声分离&去混响&去回声": "Vokal/Müzik Ayrıştırma ve Yankı Giderme",
|
29 |
+
"使用模型采样率": "使用模型采样率",
|
30 |
+
"使用设备采样率": "使用设备采样率",
|
31 |
+
"保存名": "Kaydetme Adı:",
|
32 |
+
"保存的文件名, 默认空为和源文件同名": "Kaydedilecek dosya adı (varsayılan: kaynak dosya ile aynı):",
|
33 |
+
"保存的模型名不带后缀": "Kaydedilecek model adı (uzantı olmadan):",
|
34 |
+
"保存频率save_every_epoch": "Kaydetme sıklığı (save_every_epoch):",
|
35 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Sessiz ünsüzleri ve nefes seslerini koruyarak elektronik müzikte yırtılma gibi sanal hataların oluşmasını engeller. 0.5 olarak ayarlandığında devre dışı kalır. Değerin azaltılması korumayı artırabilir, ancak indeksleme doğruluğunu azaltabilir:",
|
36 |
+
"修改": "Düzenle",
|
37 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Model bilgilerini düzenle (sadece 'weights' klasöründen çıkarılan küçük model dosyaları desteklenir)",
|
38 |
+
"停止音频转换": "Ses dönüştürmeyi durdur",
|
39 |
+
"全流程结束!": "Tüm işlemler tamamlandı!",
|
40 |
+
"刷新音色列表和索引路径": "Ses listesini ve indeks yolunu yenile",
|
41 |
+
"加载模型": "Model yükle",
|
42 |
+
"加载预训练底模D路径": "Önceden eğitilmiş temel D modelini yükleme yolu:",
|
43 |
+
"加载预训练底模G路径": "Önceden eğitilmiş temel G modelini yükleme yolu:",
|
44 |
+
"单次推理": "单次推理",
|
45 |
+
"卸载音色省显存": "GPU bellek kullanımını azaltmak için sesi kaldır",
|
46 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "Transpoze et (tamsayı, yarıton sayısıyla; bir oktav yükseltmek için: 12, bir oktav düşürmek için: -12):",
|
47 |
+
"后处理重采样至最终采样率,0为不进行重采样": "Son işleme aşamasında çıktı sesini son örnekleme hızına yeniden örnekle. 0 değeri için yeniden örnekleme yapılmaz:",
|
48 |
+
"否": "Hayır",
|
49 |
+
"启用相位声码器": "启用相位声码器",
|
50 |
+
"响应阈值": "Tepki eşiği",
|
51 |
+
"响度因子": "ses yüksekliği faktörü",
|
52 |
+
"处理数据": "Verileri işle",
|
53 |
+
"导出Onnx模型": "Onnx Modeli Dışa Aktar",
|
54 |
+
"导出文件格式": "Dışa aktarma dosya formatı",
|
55 |
+
"常见问题解答": "Sıkça Sorulan Sorular (SSS)",
|
56 |
+
"常规设置": "Genel ayarlar",
|
57 |
+
"开始音频转换": "Ses dönüştürmeyi başlat",
|
58 |
+
"很遗憾您这没有能用的显卡来支持您训练": "Maalesef, eğitiminizi desteklemek için uyumlu bir GPU bulunmamaktadır.",
|
59 |
+
"性能设置": "Performans ayarları",
|
60 |
+
"总训练轮数total_epoch": "Toplam eğitim turu (total_epoch):",
|
61 |
+
"批量推理": "批量推理",
|
62 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Toplu dönüştür. Dönüştürülecek ses dosyalarının bulunduğu klasörü girin veya birden çok ses dosyasını yükleyin. Dönüştürülen ses dosyaları belirtilen klasöre ('opt' varsayılan olarak) dönüştürülecektir",
|
63 |
+
"指定输出主人声文件夹": "Vokal için çıkış klasörünü belirtin:",
|
64 |
+
"指定输出文件夹": "Çıkış klasörünü belirt:",
|
65 |
+
"指定输出非主人声文件夹": "Müzik ve diğer sesler için çıkış klasörünü belirtin:",
|
66 |
+
"推理时间(ms):": "Çıkarsama süresi (ms):",
|
67 |
+
"推理音色": "Ses çıkartma (Inference):",
|
68 |
+
"提取": "Çıkart",
|
69 |
+
"提取音高和处理数据使用的CPU进程数": "Ses yüksekliği çıkartmak (Pitch) ve verileri işlemek için kullanılacak CPU işlemci sayısı:",
|
70 |
+
"是": "Evet",
|
71 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "Sadece en son '.ckpt' dosyasını kaydet:",
|
72 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Her kaydetme noktasında son küçük bir modeli 'weights' klasörüne kaydetmek için:",
|
73 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Tüm eğitim verilerini GPU belleğine önbelleğe alıp almayacağınızı belirtin. Küçük veri setlerini (10 dakikadan az) önbelleğe almak eğitimi hızlandırabilir, ancak büyük veri setlerini önbelleğe almak çok fazla GPU belleği tüketir ve çok fazla hız artışı sağlamaz:",
|
74 |
+
"显卡信息": "GPU Bilgisi",
|
75 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Bu yazılım, MIT lisansı altında açık kaynaklıdır. Yazarın yazılım üzerinde herhangi bir kontrolü yoktur. Yazılımı kullanan ve yazılım tarafından dışa aktarılan sesleri dağıtan kullanıcılar sorumludur. <br>Eğer bu maddeyle aynı fikirde değilseniz, yazılım paketi içindeki herhangi bir kod veya dosyayı kullanamaz veya referans göremezsiniz. Detaylar için kök dizindeki <b>Agreement-LICENSE.txt</b> dosyasına bakınız.",
|
76 |
+
"查看": "Görüntüle",
|
77 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Model bilgilerini görüntüle (sadece 'weights' klasöründen çıkarılan küçük model dosyaları desteklenir)",
|
78 |
+
"检索特征占比": "Arama özelliği oranı (vurgu gücünü kontrol eder, çok yüksek olması sanal etkilere neden olur)",
|
79 |
+
"模型": "Model",
|
80 |
+
"模型推理": "Model çıkartma (Inference)",
|
81 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Model çıkartma (büyük dosya modeli yolunu 'logs' klasöründe girin). Bu, eğitimi yarıda bırakmak istediğinizde ve manuel olarak küçük bir model dosyası çıkartmak ve kaydetmek istediğinizde veya bir ara modeli test etmek istediğinizde kullanışlıdır:",
|
82 |
+
"模型是否带音高指导": "Modelin ses yüksekliği rehberi içerip içermediği:",
|
83 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "Modelin ses yüksekliği (Pitch) rehberliği içerip içermediği (şarkı söyleme için şarttır, konuşma için isteğe bağlıdır):",
|
84 |
+
"模型是否带音高指导,1是0否": "Modelin ses yüksekliği rehberi içerip içermediği (1: evet, 0: hayır):",
|
85 |
+
"模型版本型号": "Model mimari versiyonu:",
|
86 |
+
"模型融合, 可用于测试音色融合": "Model birleştirme, ses rengi birleştirmesi için kullanılabilir",
|
87 |
+
"模型路径": "Model Yolu:",
|
88 |
+
"每张显卡的batch_size": "Her GPU için yığın boyutu (batch_size):",
|
89 |
+
"淡入淡出长度": "Geçiş (Fade) uzunluğu",
|
90 |
+
"版本": "Sürüm",
|
91 |
+
"特征提取": "Özellik çıkartma",
|
92 |
+
"特征检索库文件路径,为空则使用下拉的选择结果": "Özellik indeksi dosyasının yolunu belirtin. Seçilen sonucu kullanmak için boş bırakın veya açılır menüden seçim yapın.",
|
93 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Erkekten kadına çevirmek için +12 tuş önerilir, kadından erkeğe çevirmek için ise -12 tuş önerilir. Eğer ses aralığı çok fazla genişler ve ses bozulursa, isteğe bağlı olarak uygun aralığa kendiniz de ayarlayabilirsiniz.",
|
94 |
+
"目标采样率": "Hedef örnekleme oranı:",
|
95 |
+
"算法延迟(ms):": "算法延迟(ms):",
|
96 |
+
"自动检测index路径,下拉式选择(dropdown)": "İndeks yolunu otomatik olarak tespit et ve açılır menüden seçim yap.",
|
97 |
+
"融合": "Birleştir",
|
98 |
+
"要改的模型信息": "Düzenlenecek model bilgileri:",
|
99 |
+
"要置入的模型信息": "Eklemek için model bilgileri:",
|
100 |
+
"训练": "Eğitim",
|
101 |
+
"训练模型": "Modeli Eğit",
|
102 |
+
"训练特征索引": "Özellik Dizinini Eğit",
|
103 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Eğitim tamamlandı. Eğitim günlüklerini konsolda veya deney klasörü altındaki train.log dosyasında kontrol edebilirsiniz.",
|
104 |
+
"请指定说话人id": "Lütfen konuşmacı/sanatçı no belirtin:",
|
105 |
+
"请选择index文件": "Lütfen .index dosyası seçin",
|
106 |
+
"请选择pth文件": "Lütfen .pth dosyası seçin",
|
107 |
+
"请选择说话人id": "Konuşmacı/Şarkıcı No seçin:",
|
108 |
+
"转换": "Dönüştür",
|
109 |
+
"输入实验名": "Deneysel adı girin:",
|
110 |
+
"输入待处理音频文件夹路径": "İşlenecek ses klasörünün yolunu girin:",
|
111 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "İşlenecek ses klasörünün yolunu girin (dosya yöneticisinin adres çubuğundan kopyalayın):",
|
112 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "İşlenecek ses dosyasının yolunu girin (varsayılan doğru format örneğidir):",
|
113 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Sesin hacim zarfını ayarlayın. 0'a yakın değerler, sesin orijinal vokallerin hacmine benzer olmasını sağlar. Düşük bir değerle ses gürültüsünü maskeleyebilir ve hacmi daha doğal bir şekilde duyulabilir hale getirebilirsiniz. 1'e yaklaştıkça sürekli bir yüksek ses seviyesi elde edilir:",
|
114 |
+
"输入监听": "输入监听",
|
115 |
+
"输入训练文件夹路径": "Eğitim klasörünün yolunu girin:",
|
116 |
+
"输入设备": "Giriş cihazı",
|
117 |
+
"输入降噪": "Giriş gürültü azaltma",
|
118 |
+
"输出信息": "Çıkış bilgisi",
|
119 |
+
"输出变声": "输出变声",
|
120 |
+
"输出设备": "Çıkış cihazı",
|
121 |
+
"输出降噪": "Çıkış gürültü azaltma",
|
122 |
+
"输出音频(右下角三个点,点了可以下载)": "Ses dosyasını dışa aktar (indirmek için sağ alt köşedeki üç noktaya tıklayın)",
|
123 |
+
"选择.index文件": ".index dosyası seç",
|
124 |
+
"选择.pth文件": ".pth dosyası seç",
|
125 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
|
126 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Pitch algoritmasını seçin ('pm': daha hızlı çıkarır ancak daha düşük kaliteli konuşma; 'harvest': daha iyi konuşma sesi ancak son derece yavaş; 'crepe': daha da iyi kalite ancak GPU yoğunluğu gerektirir):",
|
127 |
+
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU",
|
128 |
+
"采样率:": "采样率:",
|
129 |
+
"采样长度": "Örnekleme uzunluğu",
|
130 |
+
"重载设备列表": "Cihaz listesini yeniden yükle",
|
131 |
+
"音调设置": "Pitch ayarları",
|
132 |
+
"音频设备(请使用同种类驱动)": "Ses cihazı (aynı tür sürücüyü kullanın)",
|
133 |
+
"音高算法": "音高算法",
|
134 |
+
"额外推理时长": "Ekstra çıkartma süresi"
|
135 |
+
}
|
i18n/locale/zh_CN.json
ADDED
@@ -0,0 +1,287 @@
1 |
+
{
|
2 |
+
"很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练",
|
3 |
+
"UVR5已开启": "UVR5已开启",
|
4 |
+
"UVR5已关闭": "UVR5已关闭",
|
5 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.",
|
6 |
+
"0-前置数据集获取工具": "0-前置数据集获取工具",
|
7 |
+
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5人声伴奏分离&去混响去延迟工具",
|
8 |
+
"是否开启UVR5-WebUI": "是否开启UVR5-WebUI",
|
9 |
+
"UVR5进程输出信息": "UVR5进程输出信息",
|
10 |
+
"0b-语音切分工具": "0b-语音切分工具",
|
11 |
+
".list标注文件的路径": ".list标注文件的路径",
|
12 |
+
"GPT模型列表": "GPT模型列表",
|
13 |
+
"SoVITS模型列表": "SoVITS模型列表",
|
14 |
+
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。": "填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。",
|
15 |
+
"音频自动切分输入路径,可文件可文件夹": "音频自动切分输入路径,可文件可文件夹",
|
16 |
+
"切分后的子音频的输出根目录": "切分后的子音频的输出根目录",
|
17 |
+
"怎么切": "怎么切",
|
18 |
+
"不切": "不切",
|
19 |
+
"凑四句一切": "凑四句一切",
|
20 |
+
"按英文句号.切": "按英文句号.切",
|
21 |
+
"threshold:音量小于这个值视作静音的备选切割点": "threshold:音量小于这个值视作静音的备选切割点",
|
22 |
+
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值",
|
23 |
+
"min_interval:最短切割间隔": "min_interval:最短切割间隔",
|
24 |
+
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)",
|
25 |
+
"max_sil_kept:切完后静音最多留多长": "max_sil_kept:切完后静音最多留多长",
|
26 |
+
"开启语音切割": "开启语音切割",
|
27 |
+
"终止语音切割": "终止语音切割",
|
28 |
+
"max:归一化后最大值多少": "max:归一化后最大值多少",
|
29 |
+
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix:混多少比例归一化后音频进来",
|
30 |
+
"切割使用的进程数": "切割使用的进程数",
|
31 |
+
"语音切割进程输出信息": "语音切割进程输出信息",
|
32 |
+
"0c-中文批量离线ASR工具": "0c-中文批量离线ASR工具",
|
33 |
+
"开启离线批量ASR": "开启离线批量ASR",
|
34 |
+
"终止ASR进程": "终止ASR进程",
|
35 |
+
"批量ASR(中文only)输入文件夹路径": "批量ASR(中文only)输入文件夹路径",
|
36 |
+
"ASR进程输出信息": "ASR进程输出信息",
|
37 |
+
"0d-语音文本校对标注工具": "0d-语音文本校对标注工具",
|
38 |
+
"是否开启打标WebUI": "是否开启打标WebUI",
|
39 |
+
"打标数据标注文件路径": "打标数据标注文件路径",
|
40 |
+
"打标工具进程输出信息": "打标工具进程输出信息",
|
41 |
+
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
|
42 |
+
"*实验/模型名": "*实验/模型名",
|
43 |
+
"显卡信息": "显卡信息",
|
44 |
+
"预训练的SoVITS-G模型路径": "预训练的SoVITS-G模型路径",
|
45 |
+
"预训练的SoVITS-D模型路径": "预训练的SoVITS-D模型路径",
|
46 |
+
"预训练的GPT模型路径": "预训练的GPT模型路径",
|
47 |
+
"1A-训练集格式化工具": "1A-训练集格式化工具",
|
48 |
+
"输出logs/实验名目录下应有23456开头的文件和文件夹": "输出logs/实验名目录下应有23456开头的文件和文件夹",
|
49 |
+
"*文本标注文件": "*文本标注文件",
|
50 |
+
"*训练集音频文件目录": "*训练集音频文件目录",
|
51 |
+
"训练集音频文件目录 拼接 list文件里波形对应的文件名。": "训练集音频文件目录 拼接 list文件里波形对应的文件名。",
|
52 |
+
"1Aa-文本内容": "1Aa-文本内容",
|
53 |
+
"GPU卡号以-分割,每个卡号一个进程": "GPU卡号以-分割,每个卡号一个进程",
|
54 |
+
"预训练的中文BERT模型路径": "预训练的中文BERT模型路径",
|
55 |
+
"开启文本获取": "开启文本获取",
|
56 |
+
"终止文本获取进程": "终止文本获取进程",
|
57 |
+
"文本进程输出信息": "文本进程输出信息",
|
58 |
+
"1Ab-SSL自监督特征提取": "1Ab-SSL自监督特征提取",
|
59 |
+
"预训练的SSL模型路径": "预训练的SSL模型路径",
|
60 |
+
"开启SSL提取": "开启SSL提取",
|
61 |
+
"终止SSL提取进程": "终止SSL提取进程",
|
62 |
+
"SSL进程输出信息": "SSL进程输出信息",
|
63 |
+
"1Ac-语义token提取": "1Ac-���义token提取",
|
64 |
+
"开启语义token提取": "开启语义token提取",
|
65 |
+
"终止语义token提取进程": "终止语义token提取进程",
|
66 |
+
"语义token提取进程输出信息": "语义token提取进程输出信息",
|
67 |
+
"1Aabc-训练集格式化一键三连": "1Aabc-训练集格式化一键三连",
|
68 |
+
"开启一键三连": "开启一键三连",
|
69 |
+
"终止一键三连": "终止一键三连",
|
70 |
+
"一键三连进程输出信息": "一键三连进程输出信息",
|
71 |
+
"1B-微调训练": "1B-微调训练",
|
72 |
+
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。",
|
73 |
+
"每张显卡的batch_size": "每张显卡的batch_size",
|
74 |
+
"总训练轮数total_epoch,不建议太高": "总训练轮数total_epoch,不建议太高",
|
75 |
+
"文本模块学习率权重": "文本模块学习率权重",
|
76 |
+
"保存频率save_every_epoch": "保存频率save_every_epoch",
|
77 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "是否仅保存最新的ckpt文件以节省硬盘空间",
|
78 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存时间点将最终小模型保存至weights文件夹",
|
79 |
+
"开启SoVITS训练": "开启SoVITS训练",
|
80 |
+
"终止SoVITS训练": "终止SoVITS训练",
|
81 |
+
"SoVITS训练进程输出信息": "SoVITS训练进程输出信息",
|
82 |
+
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。",
|
83 |
+
"总训练轮数total_epoch": "总训练轮数total_epoch",
|
84 |
+
"开启GPT训练": "开启GPT训练",
|
85 |
+
"终止GPT训练": "终止GPT训练",
|
86 |
+
"GPT训练进程输出信息": "GPT训练进程输出信息",
|
87 |
+
"1C-推理": "1C-推理",
|
88 |
+
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。",
|
89 |
+
"*GPT模型列表": "*GPT模型列表",
|
90 |
+
"*SoVITS模型列表": "*SoVITS模型列表",
|
91 |
+
"GPU卡号,只能填1个整数": "GPU卡号,只能填1个整数",
|
92 |
+
"刷新模型路径": "刷新模型路径",
|
93 |
+
"是否开启TTS推理WebUI": "是否开启TTS推理WebUI",
|
94 |
+
"TTS推理WebUI进程输出信息": "TTS推理WebUI进程输出信息",
|
95 |
+
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-变声",
|
96 |
+
"施工中,请静候佳音": "施工中,请静候佳音",
|
97 |
+
"参考音频在3~10秒范围外,请更换!": "参考音频在3~10秒范围外,请更换!",
|
98 |
+
"请上传3~10秒内参考音频,超过会报错!": "请上传3~10秒内参考音频,超过会报错!",
|
99 |
+
"TTS推理进程已开启": "TTS推理进程已开启",
|
100 |
+
"TTS推理进程已关闭": "TTS推理进程已关闭",
|
101 |
+
"打标工具WebUI已开启": "打标工具WebUI已开启",
|
102 |
+
"打标工具WebUI已关闭": "打标工具WebUI已关闭",
|
103 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.",
|
104 |
+
"*请上传并填写参考信息": "*请上传并填写参考信息",
|
105 |
+
"*请填写需要合成的目标文本。中英混合选中文,日英混合选日文,中日混合暂不支持,非目标语言文本自动遗弃。": "*请填写需要合成的目标文本。中英混合选中文,日英混合选日文,中日混合暂不支持,非目标语言文本自动遗弃。",
|
106 |
+
"ASR任务开启:%s": "ASR任务开启:%s",
|
107 |
+
"GPT训练完成": "GPT训练完成",
|
108 |
+
"GPT训练开始:%s": "GPT训练开始:%s",
|
109 |
+
"SSL提取进程执行中": "SSL提取进程执行中",
|
110 |
+
"SSL提取进程结束": "SSL提取进程结束",
|
111 |
+
"SoVITS训练完成": "SoVITS训练完成",
|
112 |
+
"SoVITS训练开始:%s": "SoVITS训练开始:%s",
|
113 |
+
"一键三连中途报错": "一键三连中途报错",
|
114 |
+
"一键三连进程结束": "一键三连进程结束",
|
115 |
+
"中文": "中文",
|
116 |
+
"凑50字一切": "凑50字一切",
|
117 |
+
"凑五句一切": "凑五句一切",
|
118 |
+
"切分后文本": "切分后文本",
|
119 |
+
"切割执行中": "切割执行中",
|
120 |
+
"切割结束": "切割结束",
|
121 |
+
"参考音频的文本": "参考音频的文本",
|
122 |
+
"参考音频的语种": "参考音频的语种",
|
123 |
+
"合成语音": "合成语音",
|
124 |
+
"后续将支持混合语种编码文本输入。": "后续将支持混合语种编码文本输入。",
|
125 |
+
"已有正在进行的ASR任务,需先终止才能开启下一次任务": "已有正在进行的ASR任务,需先终止才能开启下一次任务",
|
126 |
+
"已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "已有正在进行的GPT训练任务,需先终止才能开启下一次任务",
|
127 |
+
"已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "已有正在进行的SSL提取任务,需先终止才能开启下一次任务",
|
128 |
+
"已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务",
|
129 |
+
"已有正在进行的一键三连任务,需先终止才能开启下一次任务": "已有正在进行的一键三连任务,需先终止才能开启下一次任务",
|
130 |
+
"已有正在进行的切割任务,需先终止才能开启下一次任务": "已有正在进行的切割任务,需先终止才能开启下一次任务",
|
131 |
+
"已有正在进行的文本任务,需先终止才能开启下一次任务": "已有正在进行的文本任务,需先终止才能开启下一次任务",
|
132 |
+
"已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "已有正在进行的语义token提取任务,需先终止才能开启下一次任务",
|
133 |
+
"已终止ASR进程": "已终止ASR进程",
|
134 |
+
"已终止GPT训练": "已终止GPT训练",
|
135 |
+
"已终止SoVITS训练": "已终止SoVITS训练",
|
136 |
+
"已终止所有1a进程": "已终止所有1a进程",
|
137 |
+
"已终止所有1b进程": "已终止所有1b进程",
|
138 |
+
"已终止所有一键三连进程": "已终止所有一键三连进程",
|
139 |
+
"已终止所有切割进程": "已终止所有切割进程",
|
140 |
+
"已终止所有语义token进程": "已终止所有语义token进程",
|
141 |
+
"按中文句号。切": "按中文句号。切",
|
142 |
+
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。",
|
143 |
+
"文本进程执行中": "文本进程执行中",
|
144 |
+
"文本进程结束": "文本进程结束",
|
145 |
+
"日文": "日文",
|
146 |
+
"英文": "英文",
|
147 |
+
"语义token提取进程执行中": "语义token提取进程执行中",
|
148 |
+
"语义token提取进程结束": "语义token提取进程结束",
|
149 |
+
"请上传参考音频": "请上传参考音频",
|
150 |
+
"输入路径不存在": "输入路径不存在",
|
151 |
+
"输入路径存在但既不是文件也不是文件夹": "输入路径存在但既不是文件也不是文件夹",
|
152 |
+
"输出的语音": "输出的语音",
|
153 |
+
"进度:1a-done": "进度:1a-done",
|
154 |
+
"进度:1a-done, 1b-ing": "进度:1a-done, 1b-ing",
|
155 |
+
"进度:1a-ing": "进度:1a-ing",
|
156 |
+
"进度:1a1b-done": "进度:1a1b-done",
|
157 |
+
"进度:1a1b-done, 1cing": "进度:1a1b-done, 1cing",
|
158 |
+
"进度:all-done": "进度:all-done",
|
159 |
+
"需要合成的切分前文本": "需要合成的切分前文本",
|
160 |
+
"需要合成的文本": "需要合成的文本",
|
161 |
+
"需要合成的语种": "需要合成的语种",
|
162 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音",
|
163 |
+
"A模型权重": "A模型权重",
|
164 |
+
"A模型路径": "A模型路径",
|
165 |
+
"B模型路径": "B模型路径",
|
166 |
+
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
|
167 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调",
|
168 |
+
"Index Rate": "Index Rate",
|
169 |
+
"Onnx导出": "Onnx导出",
|
170 |
+
"Onnx输出路径": "Onnx输出路径",
|
171 |
+
"RVC模型路径": "RVC模型路径",
|
172 |
+
"ckpt处理": "ckpt处理",
|
173 |
+
"harvest进程数": "harvest进程数",
|
174 |
+
"index文件路径不可包含中文": "index文件路径不可包含中文",
|
175 |
+
"pth文件路径不可包含中文": "pth文件路径不可包含中文",
|
176 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程",
|
177 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ",
|
178 |
+
"step1:正在处理数据": "step1:正在处理数据",
|
179 |
+
"step2:正在提取音高&正在提取特征": "step2:正在提取音高&正在提取特征",
|
180 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ",
|
181 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)",
|
182 |
+
"step3: 填写训练设置, 开始训练模型和索引": "step3: 填写训练设置, 开始训练模型和索引",
|
183 |
+
"step3a:正在训练模型": "step3a:正在训练模型",
|
184 |
+
"一键训练": "一键训练",
|
185 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量输入音频文件, 二选一, 优先读文件夹",
|
186 |
+
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。",
|
187 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2",
|
188 |
+
"伴奏人声分离&去混响&去回声": "伴奏人声分离&去混响&去回声",
|
189 |
+
"使用模型采样率": "使用模型采样率",
|
190 |
+
"使用设备采样率": "使用设备采样率",
|
191 |
+
"保存名": "保存名",
|
192 |
+
"保存的文件名, 默认空为和源文件同名": "保存的文件名, 默认空为和源文件同名",
|
193 |
+
"保存的模型名不带后缀": "保存的模型名不带后缀",
|
194 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果",
|
195 |
+
"修改": "修改",
|
196 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "修改模型信息(仅支持weights文件夹下提取的小模型文件)",
|
197 |
+
"停止音频转换": "停止音频转换",
|
198 |
+
"全流程结束!": "全流程结束!",
|
199 |
+
"刷新音色列表和索引路径": "刷新音色列表和索引路径",
|
200 |
+
"加载模型": "加载模型",
|
201 |
+
"加载预训练底模D路径": "加载预训练底模D路径",
|
202 |
+
"加载预训练底模G路径": "加载预训练底模G路径",
|
203 |
+
"单次推理": "单次推理",
|
204 |
+
"卸载音色省显存": "卸载音色省显存",
|
205 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "变调(整数, 半音数量, 升八度12降八度-12)",
|
206 |
+
"后处理重采样至最终采样率,0为不进行重采样": "后处理重采样至最终采样率,0为不进行重采样",
|
207 |
+
"否": "否",
|
208 |
+
"启用相位声码器": "启用相位声码器",
|
209 |
+
"响应阈值": "响应阈值",
|
210 |
+
"响度因子": "响度因子",
|
211 |
+
"处理数据": "处理数据",
|
212 |
+
"导出Onnx模型": "导出Onnx模型",
|
213 |
+
"导出文件格式": "导出文件格式",
|
214 |
+
"常见问题解答": "常见问题解答",
|
215 |
+
"常规设置": "常规设置",
|
216 |
+
"开始音频转换": "开始音频转换",
|
217 |
+
"性能设置": "性能设置",
|
218 |
+
"批量推理": "批量推理",
|
219 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ",
|
220 |
+
"指定输出主人声文件夹": "指定输出主人声文件夹",
|
221 |
+
"指定输出文件夹": "指定输出文件夹",
|
222 |
+
"指定输出非主人声文件夹": "指定输出非主人声文件夹",
|
223 |
+
"推理时间(ms):": "推理时间(ms):",
|
224 |
+
"推理音色": "推理音色",
|
225 |
+
"提取": "提取",
|
226 |
+
"提取音高和处理数据使用的CPU进程数": "提取音高和处理数据使用的CPU进程数",
|
227 |
+
"是": "是",
|
228 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速",
|
229 |
+
"查看": "查看",
|
230 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "查看模型信息(仅支持weights文件夹下提取的小模型文件)",
|
231 |
+
"检索特征占比": "检索特征占比",
|
232 |
+
"模型": "模型",
|
233 |
+
"模型推理": "模型推理",
|
234 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况",
|
235 |
+
"模型是否带音高指导": "模型是否带音高指导",
|
236 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "模型是否带音高指导(唱歌一定要, 语音可以不要)",
|
237 |
+
"模型是否带音高指导,1是0否": "模型是否带音高指导,1是0否",
|
238 |
+
"模型版本型号": "模型版本型号",
|
239 |
+
"模型融合, 可用于测试音色融合": "模型融合, 可用于测试音色融合",
|
240 |
+
"模型路径": "模型路径",
|
241 |
+
"淡入淡出长度": "淡入淡出长度",
|
242 |
+
"版本": "版本",
|
243 |
+
"特征提取": "特征提取",
|
244 |
+
"特征检索库文件路径,为空则使用下拉的选择结果": "特征检索库文件路径,为空则使用下拉的选择结果",
|
245 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ",
|
246 |
+
"目标采样率": "目标采样率",
|
247 |
+
"算法延迟(ms):": "算法延迟(ms):",
|
248 |
+
"自动检测index路径,下拉式选择(dropdown)": "自动检测index路径,下拉式选择(dropdown)",
|
249 |
+
"融合": "融合",
|
250 |
+
"要改的模型信息": "要改的模型信息",
|
251 |
+
"要置入的模型信息": "要置入的模型信息",
|
252 |
+
"训练": "训练",
|
253 |
+
"训练模型": "训练模型",
|
254 |
+
"训练特征索引": "训练特征索引",
|
255 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log",
|
256 |
+
"请指定说话人id": "请指定说话人id",
|
257 |
+
"请选择index文件": "请选择index文件",
|
258 |
+
"请选择pth文件": "请选择pth文件",
|
259 |
+
"请选择说话人id": "请选择说话人id",
|
260 |
+
"转换": "转换",
|
261 |
+
"输入实验名": "输入实验名",
|
262 |
+
"输入待处理音频文件夹路径": "输入待处理音频文件夹路径",
|
263 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)",
|
264 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "输入待处理音频文件路径(默认是正确格式示例)",
|
265 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络",
|
266 |
+
"输入监听": "输入监听",
|
267 |
+
"输入训练文件夹路径": "输入训练文件夹路径",
|
268 |
+
"输入设备": "输入设备",
|
269 |
+
"输入降噪": "输入降噪",
|
270 |
+
"输出信息": "输出信息",
|
271 |
+
"输出变声": "输出变声",
|
272 |
+
"输出设备": "输出设备",
|
273 |
+
"输出降噪": "输出降噪",
|
274 |
+
"输出音频(右下角三个点,点了可以下载)": "输出音频(右下角三个点,点了可以下载)",
|
275 |
+
"选择.index文件": "选择.index文件",
|
276 |
+
"选择.pth文件": "选择.pth文件",
|
277 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
|
278 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU",
|
279 |
+
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU",
|
280 |
+
"采样率:": "采样率:",
|
281 |
+
"采样长度": "采样长度",
|
282 |
+
"重载设备列表": "重��设备列表",
|
283 |
+
"音调设置": "音调设置",
|
284 |
+
"音频设备(请使用同种类驱动)": "音频设备(请使用同种类驱动)",
|
285 |
+
"音高算法": "音高算法",
|
286 |
+
"额外推理时长": "额外推理时长"
|
287 |
+
}
|
i18n/locale/zh_HK.json
ADDED
@@ -0,0 +1,135 @@
1 |
+
{
|
2 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3則使用對harvest音高識別的結果使用中值濾波,數值為濾波半徑,使用可以削弱啞音",
|
3 |
+
"A模型权重": "A模型權重",
|
4 |
+
"A模型路径": "A模型路徑",
|
5 |
+
"B模型路径": "B模型路徑",
|
6 |
+
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
|
7 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0曲線檔案,可選,一行一個音高,代替預設的F0及升降調",
|
8 |
+
"Index Rate": "Index Rate",
|
9 |
+
"Onnx导出": "Onnx导出",
|
10 |
+
"Onnx输出路径": "Onnx输出路径",
|
11 |
+
"RVC模型路径": "RVC模型路径",
|
12 |
+
"ckpt处理": "ckpt處理",
|
13 |
+
"harvest进程数": "harvest進程數",
|
14 |
+
"index文件路径不可包含中文": "index文件路径不可包含中文",
|
15 |
+
"pth文件路径不可包含中文": "pth文件路径不可包含中文",
|
16 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe卡號配置:以-分隔輸入使用的不同進程卡號,例如0-0-1使用在卡0上跑2個進程並在卡1上跑1個進程",
|
17 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1:填寫實驗配置。實驗數據放在logs下,每個實驗一個資料夾,需手動輸入實驗名路徑,內含實驗配置、日誌、訓練得到的模型檔案。",
|
18 |
+
"step1:正在处理数据": "step1:正在处理数据",
|
19 |
+
"step2:正在提取音高&正在提取特征": "step2:正在提取音高&正在提取特征",
|
20 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a:自動遍歷訓練資料夾下所有可解碼成音頻的檔案並進行切片歸一化,在實驗目錄下生成2個wav資料夾;暫時只支援單人訓練。",
|
21 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "步驟2b: 使用CPU提取音高(如果模型帶音高), 使用GPU提取特徵(選擇卡號)",
|
22 |
+
"step3: 填写训练设置, 开始训练模型和索引": "步驟3: 填寫訓練設定, 開始訓練模型和索引",
|
23 |
+
"step3a:正在训练模型": "step3a:正在训练模型",
|
24 |
+
"一键训练": "一鍵訓練",
|
25 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量输入音频文件, 二选一, 优先读文件夹",
|
26 |
+
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "使用UVR5模型進行人聲伴奏分離的批次處理。<br>有效資料夾路徑格式的例子:D:\\path\\to\\input\\folder(從檔案管理員地址欄複製)。<br>模型分為三類:<br>1. 保留人聲:選擇這個選項適用於沒有和聲的音訊。它比HP5更好地保留了人聲。它包括兩個內建模型:HP2和HP3。HP3可能輕微漏出伴奏,但比HP2更好地保留了人聲;<br>2. 僅保留主人聲:選擇這個選項適用於有和聲的音訊。它可能會削弱主人聲。它包括一個內建模型:HP5。<br>3. 消除混響和延遲模型(由FoxJoy提供):<br> (1) MDX-Net:對於立體聲混響的移除是最好的選擇,但不能移除單聲道混響;<br> (234) DeEcho:移除延遲效果。Aggressive模式比Normal模式移除得更徹底。DeReverb另外移除混響,可以移除單聲道混響,但對於高頻重的板式混響移除不乾淨。<br>消除混響/延遲注意事項:<br>1. DeEcho-DeReverb模型的處理時間是其他兩個DeEcho模型的近兩倍;<br>2. MDX-Net-Dereverb模型相當慢;<br>3. 個人推薦的最乾淨配置是先使用MDX-Net,然後使用DeEcho-Aggressive。",
|
27 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "以-���隔輸入使用的卡號, 例如 0-1-2 使用卡0和卡1和卡2",
|
28 |
+
"伴奏人声分离&去混响&去回声": "伴奏人聲分離&去混響&去回聲",
|
29 |
+
"使用模型采样率": "使用模型采样率",
|
30 |
+
"使用设备采样率": "使用设备采样率",
|
31 |
+
"保存名": "儲存名",
|
32 |
+
"保存的文件名, 默认空为和源文件同名": "儲存的檔案名,預設空為與來源檔案同名",
|
33 |
+
"保存的模型名不带后缀": "儲存的模型名不帶副檔名",
|
34 |
+
"保存频率save_every_epoch": "保存頻率save_every_epoch",
|
35 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "保護清輔音和呼吸聲,防止電音撕裂等artifact,拉滿0.5不開啟,調低加大保護力度但可能降低索引效果",
|
36 |
+
"修改": "修改",
|
37 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "修改模型資訊(僅支援weights資料夾下提取的小模型檔案)",
|
38 |
+
"停止音频转换": "停止音訊轉換",
|
39 |
+
"全流程结束!": "全流程结束!",
|
40 |
+
"刷新音色列表和索引路径": "刷新音色列表和索引路徑",
|
41 |
+
"加载模型": "載入模型",
|
42 |
+
"加载预训练底模D路径": "加載預訓練底模D路徑",
|
43 |
+
"加载预训练底模G路径": "加載預訓練底模G路徑",
|
44 |
+
"单次推理": "单次推理",
|
45 |
+
"卸载音色省显存": "卸載音色節省 VRAM",
|
46 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "變調(整數、半音數量、升八度12降八度-12)",
|
47 |
+
"后处理重采样至最终采样率,0为不进行重采样": "後處理重採樣至最終採樣率,0為不進行重採樣",
|
48 |
+
"否": "否",
|
49 |
+
"启用相位声码器": "启用相位声码器",
|
50 |
+
"响应阈值": "響應閾值",
|
51 |
+
"响度因子": "響度因子",
|
52 |
+
"处理数据": "處理資料",
|
53 |
+
"导出Onnx模型": "导出Onnx模型",
|
54 |
+
"导出文件格式": "導出檔格式",
|
55 |
+
"常见问题解答": "常見問題解答",
|
56 |
+
"常规设置": "一般設定",
|
57 |
+
"开始音频转换": "開始音訊轉換",
|
58 |
+
"很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练",
|
59 |
+
"性能设置": "效能設定",
|
60 |
+
"总训练轮数total_epoch": "總訓練輪數total_epoch",
|
61 |
+
"批量推理": "批量推理",
|
62 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量轉換,輸入待轉換音頻資料夾,或上傳多個音頻檔案,在指定資料夾(默認opt)下輸出轉換的音頻。",
|
63 |
+
"指定输出主人声文件夹": "指定输出主人声文件夹",
|
64 |
+
"指定输出文件夹": "指定輸出資料夾",
|
65 |
+
"指定输出非主人声文件夹": "指定输出非主人声文件夹",
|
66 |
+
"推理时间(ms):": "推理時間(ms):",
|
67 |
+
"推理音色": "推理音色",
|
68 |
+
"提取": "提取",
|
69 |
+
"提取音高和处理数据使用的CPU进程数": "提取音高和處理數據使用的CPU進程數",
|
70 |
+
"是": "是",
|
71 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "是否僅保存最新的ckpt檔案以節省硬碟空間",
|
72 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存時間點將最終小模型保存至weights檔夾",
|
73 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "是否緩存所有訓練集至 VRAM。小於10分鐘的小數據可緩存以加速訓練,大數據緩存會爆 VRAM 也加不了多少速度",
|
74 |
+
"显卡信息": "顯示卡資訊",
|
75 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "本軟體以MIT協議開源,作者不對軟體具備任何控制力,使用軟體者、傳播軟體導出的聲音者自負全責。<br>如不認可該條款,則不能使用或引用軟體包內任何程式碼和檔案。詳見根目錄<b>使用需遵守的協議-LICENSE.txt</b>。",
|
76 |
+
"查看": "查看",
|
77 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "查看模型資訊(僅支援weights資料夾下提取的小模型檔案)",
|
78 |
+
"检索特征占比": "檢索特徵佔比",
|
79 |
+
"模型": "模型",
|
80 |
+
"模型推理": "模型推理",
|
81 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "模型提取(輸入logs資料夾下大檔案模型路徑),適用於訓一半不想訓了模型沒有自動提取儲存小檔案模型,或者想測試中間模型的情況",
|
82 |
+
"模型是否带音高指导": "模型是否帶音高指導",
|
83 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "模型是否帶音高指導(唱歌一定要,語音可���不要)",
|
84 |
+
"模型是否带音高指导,1是0否": "模型是否帶音高指導,1是0否",
|
85 |
+
"模型版本型号": "模型版本型號",
|
86 |
+
"模型融合, 可用于测试音色融合": "模型融合,可用於測試音色融合",
|
87 |
+
"模型路径": "模型路徑",
|
88 |
+
"每张显卡的batch_size": "每张显卡的batch_size",
|
89 |
+
"淡入淡出长度": "淡入淡出長度",
|
90 |
+
"版本": "版本",
|
91 |
+
"特征提取": "特徵提取",
|
92 |
+
"特征检索库文件路径,为空则使用下拉的选择结果": "特徵檢索庫檔路徑,為空則使用下拉的選擇結果",
|
93 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男性轉女性推薦+12key,女性轉男性推薦-12key,如果音域爆炸導致音色失真也可以自己調整到合適音域。",
|
94 |
+
"目标采样率": "目標取樣率",
|
95 |
+
"算法延迟(ms):": "算法延迟(ms):",
|
96 |
+
"自动检测index路径,下拉式选择(dropdown)": "自動檢測index路徑,下拉式選擇(dropdown)",
|
97 |
+
"融合": "融合",
|
98 |
+
"要改的模型信息": "要改的模型資訊",
|
99 |
+
"要置入的模型信息": "要置入的模型資訊",
|
100 |
+
"训练": "訓練",
|
101 |
+
"训练模型": "訓練模型",
|
102 |
+
"训练特征索引": "訓練特徵索引",
|
103 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log",
|
104 |
+
"请指定说话人id": "請指定說話人id",
|
105 |
+
"请选择index文件": "请选择index文件",
|
106 |
+
"请选择pth文件": "请选择pth文件",
|
107 |
+
"请选择说话人id": "請選擇說話人ID",
|
108 |
+
"转换": "轉換",
|
109 |
+
"输入实验名": "輸入實驗名稱",
|
110 |
+
"输入待处理音频文件夹路径": "輸入待處理音頻資料夾路徑",
|
111 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "輸入待處理音頻資料夾路徑(去檔案管理器地址欄拷貝即可)",
|
112 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "輸入待處理音頻檔案路徑(預設是正確格式示例)",
|
113 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "輸入源音量包絡替換輸出音量包絡融合比例,越靠近1越使用輸出包絡",
|
114 |
+
"输入监听": "输入监听",
|
115 |
+
"输入训练文件夹路径": "輸入訓練檔案夾路徑",
|
116 |
+
"输入设备": "輸入設備",
|
117 |
+
"输入降噪": "輸入降噪",
|
118 |
+
"输出信息": "輸出訊息",
|
119 |
+
"输出变声": "输出变声",
|
120 |
+
"输出设备": "輸出設備",
|
121 |
+
"输出降噪": "輸出降噪",
|
122 |
+
"输出音频(右下角三个点,点了可以下载)": "輸出音頻(右下角三個點,點了可以下載)",
|
123 |
+
"选择.index文件": "選擇 .index 檔案",
|
124 |
+
"选择.pth文件": "選擇 .pth 檔案",
|
125 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
|
126 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "選擇音高提取演算法,輸入歌聲可用pm提速,harvest低音好但巨慢無比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU",
|
127 |
+
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU",
|
128 |
+
"采样率:": "采样率:",
|
129 |
+
"采样长度": "取樣長度",
|
130 |
+
"重载设备列表": "重載設備列表",
|
131 |
+
"音调设置": "音調設定",
|
132 |
+
"音频设备(请使用同种类驱动)": "音訊設備 (請使用同種類驅動)",
|
133 |
+
"音高算法": "音高演算法",
|
134 |
+
"额外推理时长": "額外推理時長"
|
135 |
+
}
|
i18n/locale/zh_SG.json
ADDED
@@ -0,0 +1,135 @@
1 |
+
{
|
2 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3則使用對harvest音高識別的結果使用中值濾波,數值為濾波半徑,使用可以削弱啞音",
|
3 |
+
"A模型权重": "A模型權重",
|
4 |
+
"A模型路径": "A模型路徑",
|
5 |
+
"B模型路径": "B模型路徑",
|
6 |
+
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
|
7 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0曲線檔案,可選,一行一個音高,代替預設的F0及升降調",
|
8 |
+
"Index Rate": "Index Rate",
|
9 |
+
"Onnx导出": "Onnx导出",
|
10 |
+
"Onnx输出路径": "Onnx输出路径",
|
11 |
+
"RVC模型路径": "RVC模型路径",
|
12 |
+
"ckpt处理": "ckpt處理",
|
13 |
+
"harvest进程数": "harvest進程數",
|
14 |
+
"index文件路径不可包含中文": "index文件路径不可包含中文",
|
15 |
+
"pth文件路径不可包含中文": "pth文件路径不可包含中文",
|
16 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe卡號配置:以-分隔輸入使用的不同進程卡號,例如0-0-1使用在卡0上跑2個進程並在卡1上跑1個進程",
|
17 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1:填寫實驗配置。實驗數據放在logs下,每個實驗一個資料夾,需手動輸入實驗名路徑,內含實驗配置、日誌、訓練得到的模型檔案。",
|
18 |
+
"step1:正在处理数据": "step1:正在处理数据",
|
19 |
+
"step2:正在提取音高&正在提取特征": "step2:正在提取音高&正在提取特征",
|
20 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a:自動遍歷訓練資料夾下所有可解碼成音頻的檔案並進行切片歸一化,在實驗目錄下生成2個wav資料夾;暫時只支援單人訓練。",
|
21 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "步驟2b: 使用CPU提取音高(如果模型帶音高), 使用GPU提取特徵(選擇卡號)",
|
22 |
+
"step3: 填写训练设置, 开始训练模型和索引": "步驟3: 填寫訓練設定, 開始訓練模型和索引",
|
23 |
+
"step3a:正在训练模型": "step3a:正在训练模型",
|
24 |
+
"一键训练": "一鍵訓練",
|
25 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量输入音频文件, 二选一, 优先读文件夹",
|
26 |
+
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "使用UVR5模型進行人聲伴奏分離的批次處理。<br>有效資料夾路徑格式的例子:D:\\path\\to\\input\\folder(從檔案管理員地址欄複製)。<br>模型分為三類:<br>1. 保留人聲:選擇這個選項適用於沒有和聲的音訊。它比HP5更好地保留了人聲。它包括兩個內建模型:HP2和HP3。HP3可能輕微漏出伴奏,但比HP2更好地保留了人聲;<br>2. 僅保留主人聲:選擇這個選項適用於有和聲的音訊。它可能會削弱主人聲。它包括一個內建模型:HP5。<br>3. 消除混響和延遲模型(由FoxJoy提供):<br> (1) MDX-Net:對於立體聲混響的移除是最好的選擇,但不能移除單聲道混響;<br> (234) DeEcho:移除延遲效果。Aggressive模式比Normal模式移除得更徹底。DeReverb另外移除混響,可以移除單聲道混響,但對於高頻重的板式混響移除不乾淨。<br>消除混響/延遲注意事項:<br>1. DeEcho-DeReverb模型的處理時間是其他兩個DeEcho模型的近兩倍;<br>2. MDX-Net-Dereverb模型相當慢;<br>3. 個人推薦的最乾淨配置是先使用MDX-Net,然後使用DeEcho-Aggressive。",
|
27 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "以-���隔輸入使用的卡號, 例如 0-1-2 使用卡0和卡1和卡2",
|
28 |
+
"伴奏人声分离&去混响&去回声": "伴奏人聲分離&去混響&去回聲",
|
29 |
+
"使用模型采样率": "使用模型采样率",
|
30 |
+
"使用设备采样率": "使用设备采样率",
|
31 |
+
"保存名": "儲存名",
|
32 |
+
"保存的文件名, 默认空为和源文件同名": "儲存的檔案名,預設空為與來源檔案同名",
|
33 |
+
"保存的模型名不带后缀": "儲存的模型名不帶副檔名",
|
34 |
+
"保存频率save_every_epoch": "保存頻率save_every_epoch",
|
35 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "保護清輔音和呼吸聲,防止電音撕裂等artifact,拉滿0.5不開啟,調低加大保護力度但可能降低索引效果",
|
36 |
+
"修改": "修改",
|
37 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "修改模型資訊(僅支援weights資料夾下提取的小模型檔案)",
|
38 |
+
"停止音频转换": "停止音訊轉換",
|
39 |
+
"全流程结束!": "全流程结束!",
|
40 |
+
"刷新音色列表和索引路径": "刷新音色列表和索引路徑",
|
41 |
+
"加载模型": "載入模型",
|
42 |
+
"加载预训练底模D路径": "加載預訓練底模D路徑",
|
43 |
+
"加载预训练底模G路径": "加載預訓練底模G路徑",
|
44 |
+
"单次推理": "单次推理",
|
45 |
+
"卸载音色省显存": "卸載音色節省 VRAM",
|
46 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "變調(整數、半音數量、升八度12降八度-12)",
|
47 |
+
"后处理重采样至最终采样率,0为不进行重采样": "後處理重採樣至最終採樣率,0為不進行重採樣",
|
48 |
+
"否": "否",
|
49 |
+
"启用相位声码器": "启用相位声码器",
|
50 |
+
"响应阈值": "響應閾值",
|
51 |
+
"响度因子": "響度因子",
|
52 |
+
"处理数据": "處理資料",
|
53 |
+
"导出Onnx模型": "导出Onnx模型",
|
54 |
+
"导出文件格式": "導出檔格式",
|
55 |
+
"常见问题解答": "常見問題解答",
|
56 |
+
"常规设置": "一般設定",
|
57 |
+
"开始音频转换": "開始音訊轉換",
|
58 |
+
"很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练",
|
59 |
+
"性能设置": "效能設定",
|
60 |
+
"总训练轮数total_epoch": "總訓練輪數total_epoch",
|
61 |
+
"批量推理": "批量推理",
|
62 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量轉換,輸入待轉換音頻資料夾,或上傳多個音頻檔案,在指定資料夾(默認opt)下輸出轉換的音頻。",
|
63 |
+
"指定输出主人声文件夹": "指定输出主人声文件夹",
|
64 |
+
"指定输出文件夹": "指定輸出資料夾",
|
65 |
+
"指定输出非主人声文件夹": "指定输出非主人声文件夹",
|
66 |
+
"推理时间(ms):": "推理時間(ms):",
|
67 |
+
"推理音色": "推理音色",
|
68 |
+
"提取": "提取",
|
69 |
+
"提取音高和处理数据使用的CPU进程数": "提取音高和處理數據使用的CPU進程數",
|
70 |
+
"是": "是",
|
71 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "是否僅保存最新的ckpt檔案以節省硬碟空間",
|
72 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存時間點將最終小模型保存至weights檔夾",
|
73 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "是否緩存所有訓練集至 VRAM。小於10分鐘的小數據可緩存以加速訓練,大數據緩存會爆 VRAM 也加不了多少速度",
|
74 |
+
"显卡信息": "顯示卡資訊",
|
75 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "本軟體以MIT協議開源,作者不對軟體具備任何控制力,使用軟體者、傳播軟體導出的聲音者自負全責。<br>如不認可該條款,則不能使用或引用軟體包內任何程式碼和檔案。詳見根目錄<b>使用需遵守的協議-LICENSE.txt</b>。",
|
76 |
+
"查看": "查看",
|
77 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "查看模型資訊(僅支援weights資料夾下提取的小模型檔案)",
|
78 |
+
"检索特征占比": "檢索特徵佔比",
|
79 |
+
"模型": "模型",
|
80 |
+
"模型推理": "模型推理",
|
81 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "模型提取(輸入logs資料夾下大檔案模型路徑),適用於訓一半不想訓了模型沒有自動提取儲存小檔案模型,或者想測試中間模型的情況",
|
82 |
+
"模型是否带音高指导": "模型是否帶音高指導",
|
83 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "模型是否帶音高指導(唱歌一定要,語音可���不要)",
|
84 |
+
"模型是否带音高指导,1是0否": "模型是否帶音高指導,1是0否",
|
85 |
+
"模型版本型号": "模型版本型號",
|
86 |
+
"模型融合, 可用于测试音色融合": "模型融合,可用於測試音色融合",
|
87 |
+
"模型路径": "模型路徑",
|
88 |
+
"每张显卡的batch_size": "每张显卡的batch_size",
|
89 |
+
"淡入淡出长度": "淡入淡出長度",
|
90 |
+
"版本": "版本",
|
91 |
+
"特征提取": "特徵提取",
|
92 |
+
"特征检索库文件路径,为空则使用下拉的选择结果": "特徵檢索庫檔路徑,為空則使用下拉的選擇結果",
|
93 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男性轉女性推薦+12key,女性轉男性推薦-12key,如果音域爆炸導致音色失真也可以自己調整到合適音域。",
|
94 |
+
"目标采样率": "目標取樣率",
|
95 |
+
"算法延迟(ms):": "算法延迟(ms):",
|
96 |
+
"自动检测index路径,下拉式选择(dropdown)": "自動檢測index路徑,下拉式選擇(dropdown)",
|
97 |
+
"融合": "融合",
|
98 |
+
"要改的模型信息": "要改的模型資訊",
|
99 |
+
"要置入的模型信息": "要置入的模型資訊",
|
100 |
+
"训练": "訓練",
|
101 |
+
"训练模型": "訓練模型",
|
102 |
+
"训练特征索引": "訓練特徵索引",
|
103 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log",
|
104 |
+
"请指定说话人id": "請指定說話人id",
|
105 |
+
"请选择index文件": "请选择index文件",
|
106 |
+
"请选择pth文件": "请选择pth文件",
|
107 |
+
"请选择说话人id": "請選擇說話人ID",
|
108 |
+
"转换": "轉換",
|
109 |
+
"输入实验名": "輸入實驗名稱",
|
110 |
+
"输入待处理音频文件夹路径": "輸入待處理音頻資料夾路徑",
|
111 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "輸入待處理音頻資料夾路徑(去檔案管理器地址欄拷貝即可)",
|
112 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "輸入待處理音頻檔案路徑(預設是正確格式示例)",
|
113 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "輸入源音量包絡替換輸出音量包絡融合比例,越靠近1越使用輸出包絡",
|
114 |
+
"输入监听": "输入监听",
|
115 |
+
"输入训练文件夹路径": "輸入訓練檔案夾路徑",
|
116 |
+
"输入设备": "輸入設備",
|
117 |
+
"输入降噪": "輸入降噪",
|
118 |
+
"输出信息": "輸出訊息",
|
119 |
+
"输出变声": "输出变声",
|
120 |
+
"输出设备": "輸出設備",
|
121 |
+
"输出降噪": "輸出降噪",
|
122 |
+
"输出音频(右下角三个点,点了可以下载)": "輸出音頻(右下角三個點,點了可以下載)",
|
123 |
+
"选择.index文件": "選擇 .index 檔案",
|
124 |
+
"选择.pth文件": "選擇 .pth 檔案",
|
125 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
|
126 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "選擇音高提取演算法,輸入歌聲可用pm提速,harvest低音好但巨慢無比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU",
|
127 |
+
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU",
|
128 |
+
"采样率:": "采样率:",
|
129 |
+
"采样长度": "取樣長度",
|
130 |
+
"重载设备列表": "重載設備列表",
|
131 |
+
"音调设置": "音調設定",
|
132 |
+
"音频设备(请使用同种类驱动)": "音訊設備 (請使用同種類驅動)",
|
133 |
+
"音高算法": "音高演算法",
|
134 |
+
"额外推理时长": "額外推理時長"
|
135 |
+
}
|
i18n/locale/zh_TW.json
ADDED
@@ -0,0 +1,135 @@
1 |
+
{
|
2 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3則使用對harvest音高識別的結果使用中值濾波,數值為濾波半徑,使用可以削弱啞音",
|
3 |
+
"A模型权重": "A模型權重",
|
4 |
+
"A模型路径": "A模型路徑",
|
5 |
+
"B模型路径": "B模型路徑",
|
6 |
+
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
|
7 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0曲線檔案,可選,一行一個音高,代替預設的F0及升降調",
|
8 |
+
"Index Rate": "Index Rate",
|
9 |
+
"Onnx导出": "Onnx导出",
|
10 |
+
"Onnx输出路径": "Onnx输出路径",
|
11 |
+
"RVC模型路径": "RVC模型路径",
|
12 |
+
"ckpt处理": "ckpt處理",
|
13 |
+
"harvest进程数": "harvest進程數",
|
14 |
+
"index文件路径不可包含中文": "index文件路径不可包含中文",
|
15 |
+
"pth文件路径不可包含中文": "pth文件路径不可包含中文",
|
16 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe卡號配置:以-分隔輸入使用的不同進程卡號,例如0-0-1使用在卡0上跑2個進程並在卡1上跑1個進程",
|
17 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1:填寫實驗配置。實驗數據放在logs下,每個實驗一個資料夾,需手動輸入實驗名路徑,內含實驗配置、日誌、訓練得到的模型檔案。",
|
18 |
+
"step1:正在处理数据": "step1:正在处理数据",
|
19 |
+
"step2:正在提取音高&正在提取特征": "step2:正在提取音高&正在提取特征",
|
20 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a:自動遍歷訓練資料夾下所有可解碼成音頻的檔案並進行切片歸一化,在實驗目錄下生成2個wav資料夾;暫時只支援單人訓練。",
|
21 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "步驟2b: 使用CPU提取音高(如果模型帶音高), 使用GPU提取特徵(選擇卡號)",
|
22 |
+
"step3: 填写训练设置, 开始训练模型和索引": "步驟3: 填寫訓練設定, 開始訓練模型和索引",
|
23 |
+
"step3a:正在训练模型": "step3a:正在训练模型",
|
24 |
+
"一键训练": "一鍵訓練",
|
25 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量输入音频文件, 二选一, 优先读文件夹",
|
26 |
+
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "使用UVR5模型進行人聲伴奏分離的批次處理。<br>有效資料夾路徑格式的例子:D:\\path\\to\\input\\folder(從檔案管理員地址欄複製)。<br>模型分為三類:<br>1. 保留人聲:選擇這個選項適用於沒有和聲的音訊。它比HP5更好地保留了人聲。它包括兩個內建模型:HP2和HP3。HP3可能輕微漏出伴奏,但比HP2更好地保留了人聲;<br>2. 僅保留主人聲:選擇這個選項適用於有和聲的音訊。它可能會削弱主人聲。它包括一個內建模型:HP5。<br>3. 消除混響和延遲模型(由FoxJoy提供):<br> (1) MDX-Net:對於立體聲混響的移除是最好的選擇,但不能移除單聲道混響;<br> (234) DeEcho:移除延遲效果。Aggressive模式比Normal模式移除得更徹底。DeReverb另外移除混響,可以移除單聲道混響,但對於高頻重的板式混響移除不乾淨。<br>消除混響/延遲注意事項:<br>1. DeEcho-DeReverb模型的處理時間是其他兩個DeEcho模型的近兩倍;<br>2. MDX-Net-Dereverb模型相當慢;<br>3. 個人推薦的最乾淨配置是先使用MDX-Net,然後使用DeEcho-Aggressive。",
|
27 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "以-���隔輸入使用的卡號, 例如 0-1-2 使用卡0和卡1和卡2",
|
28 |
+
"伴奏人声分离&去混响&去回声": "伴奏人聲分離&去混響&去回聲",
|
29 |
+
"使用模型采样率": "使用模型采样率",
|
30 |
+
"使用设备采样率": "使用设备采样率",
|
31 |
+
"保存名": "儲存名",
|
32 |
+
"保存的文件名, 默认空为和源文件同名": "儲存的檔案名,預設空為與來源檔案同名",
|
33 |
+
"保存的模型名不带后缀": "儲存的模型名不帶副檔名",
|
34 |
+
"保存频率save_every_epoch": "保存頻率save_every_epoch",
|
35 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "保護清輔音和呼吸聲,防止電音撕裂等artifact,拉滿0.5不開啟,調低加大保護力度但可能降低索引效果",
|
36 |
+
"修改": "修改",
|
37 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "修改模型資訊(僅支援weights資料夾下提取的小模型檔案)",
|
38 |
+
"停止音频转换": "停止音訊轉換",
|
39 |
+
"全流程结束!": "全流程结束!",
|
40 |
+
"刷新音色列表和索引路径": "刷新音色列表和索引路徑",
|
41 |
+
"加载模型": "載入模型",
|
42 |
+
"加载预训练底模D路径": "加載預訓練底模D路徑",
|
43 |
+
"加载预训练底模G路径": "加載預訓練底模G路徑",
|
44 |
+
"单次推理": "单次推理",
|
45 |
+
"卸载音色省显存": "卸載音色節省 VRAM",
|
46 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "變調(整數、半音數量、升八度12降八度-12)",
|
47 |
+
"后处理重采样至最终采样率,0为不进行重采样": "後處理重採樣至最終採樣率,0為不進行重採樣",
|
48 |
+
"否": "否",
|
49 |
+
"启用相位声码器": "启用相位声码器",
|
50 |
+
"响应阈值": "響應閾值",
|
51 |
+
"响度因子": "響度因子",
|
52 |
+
"处理数据": "處理資料",
|
53 |
+
"导出Onnx模型": "导出Onnx模型",
|
54 |
+
"导出文件格式": "導出檔格式",
|
55 |
+
"常见问题解答": "常見問題解答",
|
56 |
+
"常规设置": "一般設定",
|
57 |
+
"开始音频转换": "開始音訊轉換",
|
58 |
+
"很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练",
|
59 |
+
"性能设置": "效能設定",
|
60 |
+
"总训练轮数total_epoch": "總訓練輪數total_epoch",
|
61 |
+
"批量推理": "批量推理",
|
62 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量轉換,輸入待轉換音頻資料夾,或上傳多個音頻檔案,在指定資料夾(默認opt)下輸出轉換的音頻。",
|
63 |
+
"指定输出主人声文件夹": "指定输出主人声文件夹",
|
64 |
+
"指定输出文件夹": "指定輸出資料夾",
|
65 |
+
"指定输出非主人声文件夹": "指定输出非主人声文件夹",
|
66 |
+
"推理时间(ms):": "推理時間(ms):",
|
67 |
+
"推理音色": "推理音色",
|
68 |
+
"提取": "提取",
|
69 |
+
"提取音高和处理数据使用的CPU进程数": "提取音高和處理數據使用的CPU進程數",
|
70 |
+
"是": "是",
|
71 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "是否僅保存最新的ckpt檔案以節省硬碟空間",
|
72 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存時間點將最終小模型保存至weights檔夾",
|
73 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "是否緩存所有訓練集至 VRAM。小於10分鐘的小數據可緩存以加速訓練,大數據緩存會爆 VRAM 也加不了多少速度",
|
74 |
+
"显卡信息": "顯示卡資訊",
|
75 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "本軟體以MIT協議開源,作者不對軟體具備任何控制力,使用軟體者、傳播軟體導出的聲音者自負全責。<br>如不認可該條款,則不能使用或引用軟體包內任何程式碼和檔案。詳見根目錄<b>使用需遵守的協議-LICENSE.txt</b>。",
|
76 |
+
"查看": "查看",
|
77 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "查看模型資訊(僅支援weights資料夾下提取的小模型檔案)",
|
78 |
+
"检索特征占比": "檢索特徵佔比",
|
79 |
+
"模型": "模型",
|
80 |
+
"模型推理": "模型推理",
|
81 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "模型提取(輸入logs資料夾下大檔案模型路徑),適用於訓一半不想訓了模型沒有自動提取儲存小檔案模型,或者想測試中間模型的情況",
|
82 |
+
"模型是否带音高指导": "模型是否帶音高指導",
|
83 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "模型是否帶音高指導(唱歌一定要,語音可���不要)",
|
84 |
+
"模型是否带音高指导,1是0否": "模型是否帶音高指導,1是0否",
|
85 |
+
"模型版本型号": "模型版本型號",
|
86 |
+
"模型融合, 可用于测试音色融合": "模型融合,可用於測試音色融合",
|
87 |
+
"模型路径": "模型路徑",
|
88 |
+
"每张显卡的batch_size": "每张显卡的batch_size",
|
89 |
+
"淡入淡出长度": "淡入淡出長度",
|
90 |
+
"版本": "版本",
|
91 |
+
"特征提取": "特徵提取",
|
92 |
+
"特征检索库文件路径,为空则使用下拉的选择结果": "特徵檢索庫檔路徑,為空則使用下拉的選擇結果",
|
93 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男性轉女性推薦+12key,女性轉男性推薦-12key,如果音域爆炸導致音色失真也可以自己調整到合適音域。",
|
94 |
+
"目标采样率": "目標取樣率",
|
95 |
+
"算法延迟(ms):": "算法延迟(ms):",
|
96 |
+
"自动检测index路径,下拉式选择(dropdown)": "自動檢測index路徑,下拉式選擇(dropdown)",
|
97 |
+
"融合": "融合",
|
98 |
+
"要改的模型信息": "要改的模型資訊",
|
99 |
+
"要置入的模型信息": "要置入的模型資訊",
|
100 |
+
"训练": "訓練",
|
101 |
+
"训练模型": "訓練模型",
|
102 |
+
"训练特征索引": "訓練特徵索引",
|
103 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log",
|
104 |
+
"请指定说话人id": "請指定說話人id",
|
105 |
+
"请选择index文件": "请选择index文件",
|
106 |
+
"请选择pth文件": "请选择pth文件",
|
107 |
+
"请选择说话人id": "請選擇說話人ID",
|
108 |
+
"转换": "轉換",
|
109 |
+
"输入实验名": "輸入實驗名稱",
|
110 |
+
"输入待处理音频文件夹路径": "輸入待處理音頻資料夾路徑",
|
111 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "輸入待處理音頻資料夾路徑(去檔案管理器地址欄拷貝即可)",
|
112 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "輸入待處理音頻檔案路徑(預設是正確格式示例)",
|
113 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "輸入源音量包絡替換輸出音量包絡融合比例,越靠近1越使用輸出包絡",
|
114 |
+
"输入监听": "输入监听",
|
115 |
+
"输入训练文件夹路径": "輸入訓練檔案夾路徑",
|
116 |
+
"输入设备": "輸入設備",
|
117 |
+
"输入降噪": "輸入降噪",
|
118 |
+
"输出信息": "輸出訊息",
|
119 |
+
"输出变声": "输出变声",
|
120 |
+
"输出设备": "輸出設備",
|
121 |
+
"输出降噪": "輸出降噪",
|
122 |
+
"输出音频(右下角三个点,点了可以下载)": "輸出音頻(右下角三個點,點了可以下載)",
|
123 |
+
"选择.index文件": "選擇 .index 檔案",
|
124 |
+
"选择.pth文件": "選擇 .pth 檔案",
|
125 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
|
126 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "選擇音高提取演算法,輸入歌聲可用pm提速,harvest低音好但巨慢無比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU",
|
127 |
+
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU",
|
128 |
+
"采样率:": "采样率:",
|
129 |
+
"采样长度": "取樣長度",
|
130 |
+
"重载设备列表": "重載設備列表",
|
131 |
+
"音调设置": "音調設定",
|
132 |
+
"音频设备(请使用同种类驱动)": "音訊設備 (請使用同種類驅動)",
|
133 |
+
"音高算法": "音高演算法",
|
134 |
+
"额外推理时长": "額外推理時長"
|
135 |
+
}
|
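Each of the locale files above is a flat JSON map whose keys are the Simplified-Chinese source strings used in the code and whose values are the localized strings (zh_CN maps every key to itself, and entries still untranslated in other locales simply echo the key). As a minimal sketch of how such a file can be consumed -- an illustration only, not the repository's tools/i18n/i18n.py, whose implementation is not part of this listing -- a lookup helper needs nothing more than dictionary access with a fallback to the source string:

import json
from pathlib import Path

class SimpleI18n:
    # Illustrative loader for the flat key->translation JSON files shown above.
    def __init__(self, locale: str = "zh_CN", locale_dir: str = "i18n/locale"):
        path = Path(locale_dir) / f"{locale}.json"
        # If the locale file is missing, use an empty table so lookups return the key.
        self.table = json.loads(path.read_text(encoding="utf-8")) if path.exists() else {}

    def __call__(self, key: str) -> str:
        # Untranslated or unknown keys echo the original Simplified-Chinese string.
        return self.table.get(key, key)

# Example: with the tr_TR.json shown earlier, "输出信息" resolves to "Çıkış bilgisi".
if __name__ == "__main__":
    i18n = SimpleI18n("tr_TR")
    print(i18n("输出信息"))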
old/main.py
ADDED
@@ -0,0 +1,659 @@
1 |
+
import argparse
|
2 |
+
import os
|
3 |
+
from pathlib import Path
|
4 |
+
import gradio as gr
|
5 |
+
import torch  # needed by optimize_model below (torch.load / torch.save)
|
6 |
+
|
7 |
+
from functions.core_functions1 import clear_gpu_cache, load_model, run_tts, load_params_tts, process_srt_and_generate_audio, convert_voice
|
8 |
+
# preprocess_dataset, load_params, train_model, optimize_model,
|
9 |
+
from functions.logging_utils import remove_log_file, read_logs
|
10 |
+
from functions.slice_utils import open_slice, close_slice, kill_process
|
11 |
+
from utils.formatter import format_audio_list
|
12 |
+
from utils.gpt_train import train_gpt
|
13 |
+
import traceback
|
14 |
+
import shutil
|
15 |
+
|
16 |
+
from tools.i18n.i18n import I18nAuto
|
17 |
+
from tools import my_utils
|
18 |
+
from multiprocessing import cpu_count
|
19 |
+
from subprocess import Popen
|
20 |
+
from config import python_exec, is_share, webui_port_main
|
21 |
+
|
22 |
+
if __name__ == "__main__":
|
23 |
+
# 清除旧的日志文件
|
24 |
+
remove_log_file("logs/main.log")
|
25 |
+
|
26 |
+
parser = argparse.ArgumentParser(
|
27 |
+
description="""XTTS fine-tuning demo\n\n"""
|
28 |
+
"""
|
29 |
+
Example runs:
|
30 |
+
python3 TTS/demos/xtts_ft_demo/xtts_demo.py --port
|
31 |
+
""",
|
32 |
+
formatter_class=argparse.RawTextHelpFormatter,
|
33 |
+
)
|
34 |
+
parser.add_argument(
|
35 |
+
"--port",
|
36 |
+
type=int,
|
37 |
+
help="Port to run the gradio demo. Default: 5003",
|
38 |
+
default=5003,
|
39 |
+
)
|
40 |
+
parser.add_argument(
|
41 |
+
"--out_path",
|
42 |
+
type=str,
|
43 |
+
help="Output path (where data and checkpoints will be saved) Default: output/",
|
44 |
+
default=str(Path.cwd() / "finetune_models"),
|
45 |
+
)
|
46 |
+
|
47 |
+
parser.add_argument(
|
48 |
+
"--num_epochs",
|
49 |
+
type=int,
|
50 |
+
help="Number of epochs to train. Default: 6",
|
51 |
+
default=6,
|
52 |
+
)
|
53 |
+
parser.add_argument(
|
54 |
+
"--batch_size",
|
55 |
+
type=int,
|
56 |
+
help="Batch size. Default: 2",
|
57 |
+
default=2,
|
58 |
+
)
|
59 |
+
parser.add_argument(
|
60 |
+
"--grad_acumm",
|
61 |
+
type=int,
|
62 |
+
help="Grad accumulation steps. Default: 1",
|
63 |
+
default=1,
|
64 |
+
)
|
65 |
+
parser.add_argument(
|
66 |
+
"--max_audio_length",
|
67 |
+
type=int,
|
68 |
+
help="Max permitted audio size in seconds. Default: 11",
|
69 |
+
default=11,
|
70 |
+
)
|
71 |
+
|
72 |
+
args = parser.parse_args()
|
73 |
+
i18n = I18nAuto()
|
74 |
+
n_cpu=cpu_count()
|
75 |
+
'''
|
76 |
+
ngpu = torch.cuda.device_count()
|
77 |
+
gpu_infos = []
|
78 |
+
mem = []
|
79 |
+
if_gpu_ok = False
|
80 |
+
'''
|
81 |
+
|
82 |
+
with gr.Blocks() as demo:
|
83 |
+
with gr.Tab("0 - Audio Slicing"):
|
84 |
+
gr.Markdown(value=i18n("0b-语音切分工具"))
|
85 |
+
with gr.Row():
|
86 |
+
slice_inp_path = gr.Textbox(label=i18n("音频自动切分输入路径,可文件可文件夹"), value="")
|
87 |
+
slice_opt_root = gr.Textbox(label=i18n("切分后的子音频的输出根目录"), value="output/slicer_opt")
|
88 |
+
threshold = gr.Textbox(label=i18n("threshold:音量小于这个值视作静音的备选切割点"), value="-34")
|
89 |
+
min_length = gr.Textbox(label=i18n("min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值"), value="4000")
|
90 |
+
min_interval = gr.Textbox(label=i18n("min_interval:最短切割间隔"), value="300")
|
91 |
+
hop_size = gr.Textbox(label=i18n("hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)"), value="10")
|
92 |
+
max_sil_kept = gr.Textbox(label=i18n("max_sil_kept:切完后静音最多留多长"), value="500")
|
93 |
+
with gr.Row():
|
94 |
+
open_slicer_button = gr.Button(i18n("开启语音切割"), variant="primary", visible=True)
|
95 |
+
close_slicer_button = gr.Button(i18n("终止语音切割"), variant="primary", visible=False)
|
96 |
+
_max = gr.Slider(minimum=0, maximum=1, step=0.05, label=i18n("max:归一化后最大值多少"), value=0.9, interactive=True)
|
97 |
+
alpha = gr.Slider(minimum=0, maximum=1, step=0.05, label=i18n("alpha_mix:混多少比例归一化后音频进来"), value=0.25, interactive=True)
|
98 |
+
n_process = gr.Slider(minimum=1, maximum=n_cpu, step=1, label=i18n("切割使用的进程数"), value=4, interactive=True)
|
99 |
+
slicer_info = gr.Textbox(label=i18n("语音切割进程输出信息"))
|
100 |
+
|
101 |
+
open_slicer_button.click(open_slice, [slice_inp_path, slice_opt_root, threshold, min_length, min_interval, hop_size, max_sil_kept, _max, alpha, n_process], [slicer_info, open_slicer_button, close_slicer_button])
|
102 |
+
close_slicer_button.click(close_slice, [], [slicer_info, open_slicer_button, close_slicer_button])
|
103 |
+
|
104 |
+
|
105 |
+
with gr.Tab("1 - Data processing"):
|
106 |
+
out_path = gr.Textbox(label="Output path (where data and checkpoints will be saved):", value=args.out_path)
|
107 |
+
upload_file = gr.File(file_count="multiple", label="Select here the audio files that you want to use for XTTS trainining (Supported formats: wav, mp3, and flac)")
|
108 |
+
folder_path = gr.Textbox(label="Or input the path of a folder containing audio files")
|
109 |
+
whisper_model = gr.Dropdown(label="Whisper Model", value="large-v3", choices=["large-v3", "large-v2", "large", "medium", "small"])
|
110 |
+
lang = gr.Dropdown(label="Dataset Language", value="en", choices=["en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru", "nl", "cs", "ar", "zh", "hu", "ko", "ja"])
|
111 |
+
progress_data = gr.Label(label="Progress:")
|
112 |
+
prompt_compute_btn = gr.Button(value="Step 1 - Create dataset")
|
113 |
+
|
114 |
+
|
115 |
+
def get_audio_files_from_folder(folder_path):
|
116 |
+
audio_files = []
|
117 |
+
for root, dirs, files in os.walk(folder_path):
|
118 |
+
for file in files:
|
119 |
+
if file.endswith(".wav") or file.endswith(".mp3") or file.endswith(".flac") or file.endswith(".m4a") or file.endswith(".webm"):
|
120 |
+
audio_files.append(os.path.join(root, file))
|
121 |
+
return audio_files
|
122 |
+
|
123 |
+
def preprocess_dataset(audio_path, audio_folder, language, whisper_model, out_path, train_csv, eval_csv, progress=gr.Progress(track_tqdm=True)):
|
124 |
+
clear_gpu_cache()
|
125 |
+
|
126 |
+
train_csv = ""
|
127 |
+
eval_csv = ""
|
128 |
+
|
129 |
+
out_path = os.path.join(out_path, "dataset")
|
130 |
+
os.makedirs(out_path, exist_ok=True)
|
131 |
+
|
132 |
+
# 检测输入是单个文件、多个文件还是文件夹
|
133 |
+
if audio_path is not None and audio_path != []:
|
134 |
+
# 处理单个文件或多个文件
|
135 |
+
try:
|
136 |
+
train_meta, eval_meta, audio_total_size = format_audio_list(audio_path, whisper_model=whisper_model, target_language=language, out_path=out_path, gradio_progress=progress)
|
137 |
+
except:
|
138 |
+
traceback.print_exc()
|
139 |
+
error = traceback.format_exc()
|
140 |
+
return f"The data processing was interrupted due to an error! Please check the console to verify the full error message! \n Error summary: {error}", "", ""
|
141 |
+
elif audio_folder is not None:
|
142 |
+
# 处理文件夹
|
143 |
+
audio_files = get_audio_files_from_folder(audio_folder)
|
144 |
+
try:
|
145 |
+
train_meta, eval_meta, audio_total_size = format_audio_list(audio_files, whisper_model=whisper_model, target_language=language, out_path=out_path, gradio_progress=progress)
|
146 |
+
except:
|
147 |
+
traceback.print_exc()
|
148 |
+
error = traceback.format_exc()
|
149 |
+
return f"The data processing was interrupted due to an error! Please check the console to verify the full error message! \n Error summary: {error}", "", ""
|
150 |
+
else:
|
151 |
+
return "You should provide either audio files or a folder containing audio files!", "", ""
|
152 |
+
|
153 |
+
# if audio total len is less than 2 minutes raise an error
|
154 |
+
if audio_total_size < 120:
|
155 |
+
message = "The sum of the duration of the audios that you provided should be at least 2 minutes!"
|
156 |
+
print(message)
|
157 |
+
return message, "", ""
|
158 |
+
|
159 |
+
print("Dataset Processed!")
|
160 |
+
return "Dataset Processed!", train_meta, eval_meta
|
161 |
+
#prompt_compute_btn.click(preprocess_dataset, inputs=[upload_file, upload_folder, lang, whisper_model, out_path, train_csv, eval_csv], outputs=[progress_data, train_csv, eval_csv])
|
162 |
+
|
163 |
+
'''
|
164 |
+
def preprocess_dataset(audio_path, language, whisper_model, out_path,train_csv,eval_csv, progress=gr.Progress(track_tqdm=True)):
|
165 |
+
clear_gpu_cache()
|
166 |
+
|
167 |
+
train_csv = ""
|
168 |
+
eval_csv = ""
|
169 |
+
|
170 |
+
out_path = os.path.join(out_path, "dataset")
|
171 |
+
os.makedirs(out_path, exist_ok=True)
|
172 |
+
if audio_path is None:
|
173 |
+
return "You should provide one or multiple audio files! If you provided it, probably the upload of the files is not finished yet!", "", ""
|
174 |
+
else:
|
175 |
+
try:
|
176 |
+
train_meta, eval_meta, audio_total_size = format_audio_list(audio_path, whisper_model = whisper_model, target_language=language, out_path=out_path, gradio_progress=progress)
|
177 |
+
except:
|
178 |
+
traceback.print_exc()
|
179 |
+
error = traceback.format_exc()
|
180 |
+
return f"The data processing was interrupted due an error !! Please check the console to verify the full error message! \n Error summary: {error}", "", ""
|
181 |
+
|
182 |
+
# clear_gpu_cache()
|
183 |
+
|
184 |
+
# if audio total len is less than 2 minutes raise an error
|
185 |
+
if audio_total_size < 120:
|
186 |
+
message = "The sum of the duration of the audios that you provided should be at least 2 minutes!"
|
187 |
+
print(message)
|
188 |
+
return message, "", ""
|
189 |
+
|
190 |
+
print("Dataset Processed!")
|
191 |
+
return "Dataset Processed!", train_meta, eval_meta
|
192 |
+
'''
|
193 |
+
|
194 |
+
with gr.Tab("2 - Fine-tuning XTTS Encoder"):
|
195 |
+
load_params_btn = gr.Button(value="Load Params from output folder")
|
196 |
+
version = gr.Dropdown(
|
197 |
+
label="XTTS base version",
|
198 |
+
value="v2.0.2",
|
199 |
+
choices=[
|
200 |
+
"v2.0.3",
|
201 |
+
"v2.0.2",
|
202 |
+
"v2.0.1",
|
203 |
+
"v2.0.0",
|
204 |
+
"main"
|
205 |
+
],
|
206 |
+
)
|
207 |
+
train_csv = gr.Textbox(
|
208 |
+
label="Train CSV:",
|
209 |
+
)
|
210 |
+
eval_csv = gr.Textbox(
|
211 |
+
label="Eval CSV:",
|
212 |
+
)
|
213 |
+
custom_model = gr.Textbox(
|
214 |
+
label="(Optional) Custom model.pth file , leave blank if you want to use the base file.",
|
215 |
+
value="",
|
216 |
+
)
|
217 |
+
num_epochs = gr.Slider(
|
218 |
+
label="Number of epochs:",
|
219 |
+
minimum=1,
|
220 |
+
maximum=100,
|
221 |
+
step=1,
|
222 |
+
value=args.num_epochs,
|
223 |
+
)
|
224 |
+
batch_size = gr.Slider(
|
225 |
+
label="Batch size:",
|
226 |
+
minimum=2,
|
227 |
+
maximum=512,
|
228 |
+
step=1,
|
229 |
+
value=args.batch_size,
|
230 |
+
)
|
231 |
+
grad_acumm = gr.Slider(
|
232 |
+
label="Grad accumulation steps:",
|
233 |
+
minimum=2,
|
234 |
+
maximum=128,
|
235 |
+
step=1,
|
236 |
+
value=args.grad_acumm,
|
237 |
+
)
|
238 |
+
max_audio_length = gr.Slider(
|
239 |
+
label="Max permitted audio size in seconds:",
|
240 |
+
minimum=2,
|
241 |
+
maximum=20,
|
242 |
+
step=1,
|
243 |
+
value=args.max_audio_length,
|
244 |
+
)
|
245 |
+
clear_train_data = gr.Dropdown(
|
246 |
+
label="Clear train data, you will delete selected folder, after optimizing",
|
247 |
+
value="run",
|
248 |
+
choices=[
|
249 |
+
"none",
|
250 |
+
"run",
|
251 |
+
"dataset",
|
252 |
+
"all"
|
253 |
+
])
|
254 |
+
|
255 |
+
progress_train = gr.Label(
|
256 |
+
label="Progress:"
|
257 |
+
)
|
258 |
+
|
259 |
+
# demo.load(read_logs, None, logs_tts_train, every=1)
|
260 |
+
train_btn = gr.Button(value="Step 2 - Run the training")
|
261 |
+
optimize_model_btn = gr.Button(value="Step 2.5 - Optimize the model")
|
262 |
+
|
263 |
+
def train_model(custom_model,version,language, train_csv, eval_csv, num_epochs, batch_size, grad_acumm, output_path, max_audio_length):
|
264 |
+
clear_gpu_cache()
|
265 |
+
|
266 |
+
run_dir = Path(output_path) / "run"
|
267 |
+
|
268 |
+
# # Remove train dir
|
269 |
+
if run_dir.exists():
|
270 |
+
os.remove(run_dir)
|
271 |
+
|
272 |
+
# Check if the dataset language matches the language you specified
|
273 |
+
lang_file_path = Path(output_path) / "dataset" / "lang.txt"
|
274 |
+
|
275 |
+
# Check if lang.txt already exists and contains a different language
|
276 |
+
current_language = None
|
277 |
+
if lang_file_path.exists():
|
278 |
+
with open(lang_file_path, 'r', encoding='utf-8') as existing_lang_file:
|
279 |
+
current_language = existing_lang_file.read().strip()
|
280 |
+
if current_language != language:
|
281 |
+
print("The language that was prepared for the dataset does not match the specified language. Change the language to the one specified in the dataset")
|
282 |
+
language = current_language
|
283 |
+
|
284 |
+
if not train_csv or not eval_csv:
|
285 |
+
return "You need to run the data processing step or manually set `Train CSV` and `Eval CSV` fields !", "", "", "", ""
|
286 |
+
try:
|
287 |
+
# convert seconds to waveform frames
|
288 |
+
max_audio_length = int(max_audio_length * 22050)
|
289 |
+
speaker_xtts_path,config_path, original_xtts_checkpoint, vocab_file, exp_path, speaker_wav = train_gpt(custom_model,version,language, num_epochs, batch_size, grad_acumm, train_csv, eval_csv, output_path=output_path, max_audio_length=max_audio_length)
|
290 |
+
except:
|
291 |
+
traceback.print_exc()
|
292 |
+
error = traceback.format_exc()
|
293 |
+
return f"The training was interrupted due an error !! Please check the console to check the full error message! \n Error summary: {error}", "", "", "", ""
|
294 |
+
|
295 |
+
# copy original files to avoid parameters changes issues
|
296 |
+
# os.system(f"cp {config_path} {exp_path}")
|
297 |
+
# os.system(f"cp {vocab_file} {exp_path}")
|
298 |
+
|
299 |
+
ready_dir = Path(output_path) / "ready"
|
300 |
+
|
301 |
+
ft_xtts_checkpoint = os.path.join(exp_path, "best_model.pth")
|
302 |
+
|
303 |
+
shutil.copy(ft_xtts_checkpoint, ready_dir / "unoptimize_model.pth")
|
304 |
+
# os.remove(ft_xtts_checkpoint)
|
305 |
+
|
306 |
+
ft_xtts_checkpoint = os.path.join(ready_dir, "unoptimize_model.pth")
|
307 |
+
|
308 |
+
# Reference
|
309 |
+
# Move reference audio to output folder and rename it
|
310 |
+
speaker_reference_path = Path(speaker_wav)
|
311 |
+
speaker_reference_new_path = ready_dir / "reference.wav"
|
312 |
+
shutil.copy(speaker_reference_path, speaker_reference_new_path)
|
313 |
+
|
314 |
+
print("Model training done!")
|
315 |
+
# clear_gpu_cache()
|
316 |
+
return "Model training done!", config_path, vocab_file, ft_xtts_checkpoint,speaker_xtts_path, speaker_reference_new_path
|
317 |
+
|
318 |
+
def optimize_model(out_path, clear_train_data):
|
319 |
+
# print(out_path)
|
320 |
+
out_path = Path(out_path) # Ensure that out_path is a Path object.
|
321 |
+
|
322 |
+
ready_dir = out_path / "ready"
|
323 |
+
run_dir = out_path / "run"
|
324 |
+
dataset_dir = out_path / "dataset"
|
325 |
+
|
326 |
+
# Clear specified training data directories.
|
327 |
+
if clear_train_data in {"run", "all"} and run_dir.exists():
|
328 |
+
try:
|
329 |
+
shutil.rmtree(run_dir)
|
330 |
+
except PermissionError as e:
|
331 |
+
print(f"An error occurred while deleting {run_dir}: {e}")
|
332 |
+
|
333 |
+
if clear_train_data in {"dataset", "all"} and dataset_dir.exists():
|
334 |
+
try:
|
335 |
+
shutil.rmtree(dataset_dir)
|
336 |
+
except PermissionError as e:
|
337 |
+
print(f"An error occurred while deleting {dataset_dir}: {e}")
|
338 |
+
|
339 |
+
# Get full path to model
|
340 |
+
model_path = ready_dir / "unoptimize_model.pth"
|
341 |
+
|
342 |
+
if not model_path.is_file():
|
343 |
+
return "Unoptimized model not found in ready folder", ""
|
344 |
+
|
345 |
+
# Load the checkpoint and remove unnecessary parts.
|
346 |
+
checkpoint = torch.load(model_path, map_location=torch.device("cpu"))
|
347 |
+
del checkpoint["optimizer"]
|
348 |
+
|
349 |
+
for key in list(checkpoint["model"].keys()):
|
350 |
+
if "dvae" in key:
|
351 |
+
del checkpoint["model"][key]
|
352 |
+
|
353 |
+
# Make sure out_path is a Path object or convert it to Path
|
354 |
+
os.remove(model_path)
|
355 |
+
|
356 |
+
# Save the optimized model.
|
357 |
+
optimized_model_file_name="model.pth"
|
358 |
+
optimized_model=ready_dir/optimized_model_file_name
|
359 |
+
|
360 |
+
torch.save(checkpoint, optimized_model)
|
361 |
+
ft_xtts_checkpoint=str(optimized_model)
|
362 |
+
|
363 |
+
clear_gpu_cache()
|
364 |
+
|
365 |
+
return f"Model optimized and saved at {ft_xtts_checkpoint}!", ft_xtts_checkpoint
|
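A quick sanity check on what optimize_model leaves behind is to reload the saved checkpoint and confirm that the stripped keys are gone. This is a minimal sketch, separate from the app itself; the path below is an assumption based on the default finetune_models/ready layout.

import torch

ckpt = torch.load("finetune_models/ready/model.pth", map_location="cpu")  # path is an assumption
assert "optimizer" not in ckpt                              # optimizer state was removed
assert not any("dvae" in k for k in ckpt["model"].keys())   # DVAE weights were removed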
366 |
+
|
367 |
+
def load_params(out_path):
|
368 |
+
path_output = Path(out_path)
|
369 |
+
|
370 |
+
dataset_path = path_output / "dataset"
|
371 |
+
|
372 |
+
if not dataset_path.exists():
|
373 |
+
return "The output folder does not exist!", "", ""
|
374 |
+
|
375 |
+
eval_train = dataset_path / "metadata_train.csv"
|
376 |
+
eval_csv = dataset_path / "metadata_eval.csv"
|
377 |
+
|
378 |
+
# Write the target language to lang.txt in the output directory
|
379 |
+
lang_file_path = dataset_path / "lang.txt"
|
380 |
+
|
381 |
+
# Check if lang.txt already exists and contains a different language
|
382 |
+
current_language = None
|
383 |
+
if os.path.exists(lang_file_path):
|
384 |
+
with open(lang_file_path, 'r', encoding='utf-8') as existing_lang_file:
|
385 |
+
current_language = existing_lang_file.read().strip()
|
386 |
+
|
387 |
+
clear_gpu_cache()
|
388 |
+
|
389 |
+
print(current_language)
|
390 |
+
return "The data has been updated", eval_train, eval_csv, current_language
|
391 |
+
|
392 |
+
with gr.Tab("3 - Inference"):
|
393 |
+
with gr.Row():
|
394 |
+
with gr.Column() as col1:
|
395 |
+
load_params_tts_btn = gr.Button(value="Load params for TTS from output folder")
|
396 |
+
xtts_checkpoint = gr.Textbox(
|
397 |
+
label="XTTS checkpoint path:",
|
398 |
+
value="",
|
399 |
+
)
|
400 |
+
xtts_config = gr.Textbox(
|
401 |
+
label="XTTS config path:",
|
402 |
+
value="",
|
403 |
+
)
|
404 |
+
|
405 |
+
xtts_vocab = gr.Textbox(
|
406 |
+
label="XTTS vocab path:",
|
407 |
+
value="",
|
408 |
+
)
|
409 |
+
xtts_speaker = gr.Textbox(
|
410 |
+
label="XTTS speaker path:",
|
411 |
+
value="",
|
412 |
+
)
|
413 |
+
progress_load = gr.Label(
|
414 |
+
label="Progress:"
|
415 |
+
)
|
416 |
+
load_btn = gr.Button(value="Step 3 - Load Fine-tuned XTTS model")
|
417 |
+
|
418 |
+
with gr.Column() as col2:
|
419 |
+
speaker_reference_audio = gr.Textbox(
|
420 |
+
label="Speaker reference audio:",
|
421 |
+
value="",
|
422 |
+
)
|
423 |
+
tts_language = gr.Dropdown(
|
424 |
+
label="Language",
|
425 |
+
value="en",
|
426 |
+
choices=[
|
427 |
+
"en",
|
428 |
+
"es",
|
429 |
+
"fr",
|
430 |
+
"de",
|
431 |
+
"it",
|
432 |
+
"pt",
|
433 |
+
"pl",
|
434 |
+
"tr",
|
435 |
+
"ru",
|
436 |
+
"nl",
|
437 |
+
"cs",
|
438 |
+
"ar",
|
439 |
+
"zh",
|
440 |
+
"hu",
|
441 |
+
"ko",
|
442 |
+
"ja",
|
443 |
+
]
|
444 |
+
)
|
445 |
+
tts_text = gr.Textbox(
|
446 |
+
label="Input Text.",
|
447 |
+
value="This model sounds really good and above all, it's reasonably fast.",
|
448 |
+
)
|
449 |
+
with gr.Accordion("Advanced settings", open=False) as acr:
|
450 |
+
temperature = gr.Slider(
|
451 |
+
label="temperature",
|
452 |
+
minimum=0,
|
453 |
+
maximum=1,
|
454 |
+
step=0.05,
|
455 |
+
value=0.75,
|
456 |
+
)
|
457 |
+
length_penalty = gr.Slider(
|
458 |
+
label="length_penalty",
|
459 |
+
minimum=-10.0,
|
460 |
+
maximum=10.0,
|
461 |
+
step=0.5,
|
462 |
+
value=1,
|
463 |
+
)
|
464 |
+
repetition_penalty = gr.Slider(
|
465 |
+
label="repetition penalty",
|
466 |
+
minimum=1,
|
467 |
+
maximum=10,
|
468 |
+
step=0.5,
|
469 |
+
value=5,
|
470 |
+
)
|
471 |
+
top_k = gr.Slider(
|
472 |
+
label="top_k",
|
473 |
+
minimum=1,
|
474 |
+
maximum=100,
|
475 |
+
step=1,
|
476 |
+
value=50,
|
477 |
+
)
|
478 |
+
top_p = gr.Slider(
|
479 |
+
label="top_p",
|
480 |
+
minimum=0,
|
481 |
+
maximum=1,
|
482 |
+
step=0.05,
|
483 |
+
value=0.85,
|
484 |
+
)
|
485 |
+
speed = gr.Slider(
|
486 |
+
label="speed",
|
487 |
+
minimum=0.2,
|
488 |
+
maximum=4.0,
|
489 |
+
step=0.05,
|
490 |
+
value=1.0,
|
491 |
+
)
|
492 |
+
sentence_split = gr.Checkbox(
|
493 |
+
label="Enable text splitting",
|
494 |
+
value=True,
|
495 |
+
)
|
496 |
+
use_config = gr.Checkbox(
|
497 |
+
label="Use Inference settings from config, if disabled use the settings above",
|
498 |
+
value=False,
|
499 |
+
)
|
500 |
+
tts_btn = gr.Button(value="Step 4 - Inference")
|
501 |
+
|
502 |
+
with gr.Column() as col3:
|
503 |
+
progress_gen = gr.Label(
|
504 |
+
label="Progress:"
|
505 |
+
)
|
506 |
+
tts_output_audio = gr.Audio(label="Generated Audio.")
|
507 |
+
reference_audio = gr.Audio(label="Reference audio used.")
|
508 |
+
|
509 |
+
|
510 |
+
with gr.Column() as col4:
|
511 |
+
srt_upload = gr.File(label="Upload SRT File")
|
512 |
+
generate_srt_audio_btn = gr.Button(value="Generate Audio from SRT")
|
513 |
+
srt_output_audio = gr.Audio(label="Combined Audio from SRT")
|
514 |
+
error_message = gr.Textbox(label="Error Message", visible=False) # 错误消息组件,默认不显示
|
515 |
+
|
516 |
+
generate_srt_audio_btn.click(
|
517 |
+
fn=process_srt_and_generate_audio,
|
518 |
+
inputs=[
|
519 |
+
srt_upload,
|
520 |
+
tts_language,
|
521 |
+
speaker_reference_audio,
|
522 |
+
temperature,
|
523 |
+
length_penalty,
|
524 |
+
repetition_penalty,
|
525 |
+
top_k,
|
526 |
+
top_p,
|
527 |
+
speed,
|
528 |
+
sentence_split,
|
529 |
+
use_config
|
530 |
+
],
|
531 |
+
outputs=[srt_output_audio]
|
532 |
+
)
|
533 |
+
|
534 |
+
prompt_compute_btn.click(
|
535 |
+
fn=preprocess_dataset,
|
536 |
+
inputs=[
|
537 |
+
upload_file,
|
538 |
+
lang,
|
539 |
+
whisper_model,
|
540 |
+
out_path,
|
541 |
+
train_csv,
|
542 |
+
eval_csv
|
543 |
+
],
|
544 |
+
outputs=[
|
545 |
+
progress_data,
|
546 |
+
train_csv,
|
547 |
+
eval_csv,
|
548 |
+
],
|
549 |
+
)
|
550 |
+
|
551 |
+
load_params_btn.click(
|
552 |
+
fn=load_params,
|
553 |
+
inputs=[out_path],
|
554 |
+
outputs=[
|
555 |
+
progress_train,
|
556 |
+
train_csv,
|
557 |
+
eval_csv,
|
558 |
+
lang
|
559 |
+
]
|
560 |
+
)
|
561 |
+
|
562 |
+
|
563 |
+
train_btn.click(
|
564 |
+
fn=train_model,
|
565 |
+
inputs=[
|
566 |
+
custom_model,
|
567 |
+
version,
|
568 |
+
lang,
|
569 |
+
train_csv,
|
570 |
+
eval_csv,
|
571 |
+
num_epochs,
|
572 |
+
batch_size,
|
573 |
+
grad_acumm,
|
574 |
+
out_path,
|
575 |
+
max_audio_length,
|
576 |
+
],
|
577 |
+
outputs=[progress_train, xtts_config, xtts_vocab, xtts_checkpoint,xtts_speaker, speaker_reference_audio],
|
578 |
+
)
|
579 |
+
|
580 |
+
optimize_model_btn.click(
|
581 |
+
fn=optimize_model,
|
582 |
+
inputs=[
|
583 |
+
out_path,
|
584 |
+
clear_train_data
|
585 |
+
],
|
586 |
+
outputs=[progress_train,xtts_checkpoint],
|
587 |
+
)
|
588 |
+
|
589 |
+
load_btn.click(
|
590 |
+
fn=load_model,
|
591 |
+
inputs=[
|
592 |
+
xtts_checkpoint,
|
593 |
+
xtts_config,
|
594 |
+
xtts_vocab,
|
595 |
+
xtts_speaker
|
596 |
+
],
|
597 |
+
outputs=[progress_load],
|
598 |
+
)
|
599 |
+
|
600 |
+
tts_btn.click(
|
601 |
+
fn=run_tts,
|
602 |
+
inputs=[
|
603 |
+
tts_language,
|
604 |
+
tts_text,
|
605 |
+
speaker_reference_audio,
|
606 |
+
temperature,
|
607 |
+
length_penalty,
|
608 |
+
repetition_penalty,
|
609 |
+
top_k,
|
610 |
+
top_p,
|
611 |
+
speed,
|
612 |
+
sentence_split,
|
613 |
+
use_config
|
614 |
+
],
|
615 |
+
outputs=[progress_gen, tts_output_audio, reference_audio],
|
616 |
+
)
|
617 |
+
|
618 |
+
load_params_tts_btn.click(
|
619 |
+
fn=load_params_tts,
|
620 |
+
inputs=[
|
621 |
+
out_path,
|
622 |
+
version
|
623 |
+
],
|
624 |
+
outputs=[progress_load,xtts_checkpoint,xtts_config,xtts_vocab,xtts_speaker,speaker_reference_audio],
|
625 |
+
)
|
626 |
+
|
627 |
+
with gr.Tab("4 - Voice conversion"):
|
628 |
+
with gr.Column() as col0:
|
629 |
+
gr.Markdown("## OpenVoice Conversion Tool")
|
630 |
+
voice_convert_seed = gr.File(label="Upload Reference Speaker Audio being generated")
|
631 |
+
#pitch_shift_slider = gr.Slider(minimum=-12, maximum=12, step=1, value=0, label="Pitch Shift (Semitones)")
|
632 |
+
audio_to_convert = gr.Textbox(
|
633 |
+
label="Input the to-be-convert audio location",
|
634 |
+
value="",
|
635 |
+
)
|
636 |
+
convert_button = gr.Button("Convert Voice")
|
637 |
+
converted_audio = gr.Audio(label="Converted Audio")
|
638 |
+
|
639 |
+
convert_button.click(
|
640 |
+
convert_voice,
|
641 |
+
inputs=[voice_convert_seed, audio_to_convert], #, pitch_shift_slider],
|
642 |
+
outputs=[converted_audio]
|
643 |
+
)
|
644 |
+
|
645 |
+
with gr.Tab("5 - Logs"):
|
646 |
+
# Add a button to read the logs
|
647 |
+
read_logs_btn = gr.Button("Read Logs")
|
648 |
+
log_output = gr.Textbox(label="Log Output")
|
649 |
+
read_logs_btn.click(fn=read_logs, inputs=None, outputs=log_output)
|
650 |
+
|
651 |
+
|
652 |
+
demo.launch(
|
653 |
+
#share=False,
|
654 |
+
share=True,
|
655 |
+
debug=False,
|
656 |
+
server_port=args.port,
|
657 |
+
#server_name="localhost"
|
658 |
+
server_name="0.0.0.0"
|
659 |
+
)
|
old/webui.py
ADDED
@@ -0,0 +1,188 @@
1 |
+
import os
|
2 |
+
import shutil
|
3 |
+
import sys
|
4 |
+
import warnings
|
5 |
+
import platform
|
6 |
+
import psutil
|
7 |
+
import signal
|
8 |
+
import site
|
9 |
+
import traceback
|
10 |
+
import torch
|
11 |
+
import gradio as gr
|
12 |
+
from subprocess import Popen
|
13 |
+
from config import python_exec, is_share, webui_port_main
|
14 |
+
from tools.i18n.i18n import I18nAuto
|
15 |
+
from tools import my_utils
|
16 |
+
from multiprocessing import cpu_count
|
17 |
+
|
18 |
+
now_dir = os.getcwd()
|
19 |
+
sys.path.insert(0, now_dir)
|
20 |
+
warnings.filterwarnings("ignore")
|
21 |
+
torch.manual_seed(233333)
|
22 |
+
tmp = os.path.join(now_dir, "TEMP")
|
23 |
+
os.makedirs(tmp, exist_ok=True)
|
24 |
+
os.environ["TEMP"] = tmp
|
25 |
+
if(os.path.exists(tmp)):
|
26 |
+
for name in os.listdir(tmp):
|
27 |
+
if(name=="jieba.cache"):continue
|
28 |
+
path="%s/%s"%(tmp,name)
|
29 |
+
delete=os.remove if os.path.isfile(path) else shutil.rmtree
|
30 |
+
try:
|
31 |
+
delete(path)
|
32 |
+
except Exception as e:
|
33 |
+
print(str(e))
|
34 |
+
pass
|
35 |
+
import site
|
36 |
+
site_packages_roots = []
|
37 |
+
for path in site.getsitepackages():
|
38 |
+
if "packages" in path:
|
39 |
+
site_packages_roots.append(path)
|
40 |
+
if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages" % now_dir]
|
41 |
+
#os.environ["OPENBLAS_NUM_THREADS"] = "4"
|
42 |
+
os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
|
43 |
+
os.environ["all_proxy"] = ""
|
44 |
+
for site_packages_root in site_packages_roots:
|
45 |
+
if os.path.exists(site_packages_root):
|
46 |
+
try:
|
47 |
+
with open("%s/users.pth" % (site_packages_root), "w") as f:
|
48 |
+
f.write(
|
49 |
+
"%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5"
|
50 |
+
% (now_dir, now_dir, now_dir, now_dir, now_dir)
|
51 |
+
)
|
52 |
+
break
|
53 |
+
except PermissionError:
|
54 |
+
pass
|
55 |
+
|
56 |
+
i18n = I18nAuto()
|
57 |
+
|
58 |
+
|
59 |
+
# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # fall back to CPU for steps that MPS does not support
|
60 |
+
|
61 |
+
n_cpu=cpu_count()
|
62 |
+
|
63 |
+
ngpu = torch.cuda.device_count()
|
64 |
+
gpu_infos = []
|
65 |
+
mem = []
|
66 |
+
if_gpu_ok = False
|
67 |
+
|
68 |
+
if if_gpu_ok and len(gpu_infos) > 0:
|
69 |
+
gpu_info = "\n".join(gpu_infos)
|
70 |
+
default_batch_size = min(mem) // 2
|
71 |
+
else:
|
72 |
+
gpu_info = ("%s\t%s" % ("0", "CPU"))
|
73 |
+
gpu_infos.append("%s\t%s" % ("0", "CPU"))
|
74 |
+
default_batch_size = psutil.virtual_memory().total/ 1024 / 1024 / 1024 / 2
|
75 |
+
gpus = "-".join([i[0] for i in gpu_infos])
|
76 |
+
|
77 |
+
p_label=None
|
78 |
+
p_uvr5=None
|
79 |
+
p_asr=None
|
80 |
+
p_denoise=None
|
81 |
+
p_tts_inference=None
|
82 |
+
|
83 |
+
def kill_proc_tree(pid, including_parent=True):
|
84 |
+
try:
|
85 |
+
parent = psutil.Process(pid)
|
86 |
+
except psutil.NoSuchProcess:
|
87 |
+
# Process already terminated
|
88 |
+
return
|
89 |
+
|
90 |
+
children = parent.children(recursive=True)
|
91 |
+
for child in children:
|
92 |
+
try:
|
93 |
+
os.kill(child.pid, signal.SIGTERM) # or signal.SIGKILL
|
94 |
+
except OSError:
|
95 |
+
pass
|
96 |
+
if including_parent:
|
97 |
+
try:
|
98 |
+
os.kill(parent.pid, signal.SIGTERM) # or signal.SIGKILL
|
99 |
+
except OSError:
|
100 |
+
pass
|
101 |
+
|
102 |
+
system=platform.system()
|
103 |
+
def kill_process(pid):
|
104 |
+
if(system=="Windows"):
|
105 |
+
cmd = "taskkill /t /f /pid %s" % pid
|
106 |
+
os.system(cmd)
|
107 |
+
else:
|
108 |
+
kill_proc_tree(pid)
|
109 |
+
|
110 |
+
|
111 |
+
ps_slice=[]
|
112 |
+
def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_parts):
|
113 |
+
global ps_slice
|
114 |
+
inp = my_utils.clean_path(inp)
|
115 |
+
opt_root = my_utils.clean_path(opt_root)
|
116 |
+
if(os.path.exists(inp)==False):
|
117 |
+
yield "输入路径不存在",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
|
118 |
+
return
|
119 |
+
if os.path.isfile(inp):n_parts=1
|
120 |
+
elif os.path.isdir(inp):pass
|
121 |
+
else:
|
122 |
+
yield "输入路径存在但既不是文件也不是文件夹",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
|
123 |
+
return
|
124 |
+
if (ps_slice == []):
|
125 |
+
for i_part in range(n_parts):
|
126 |
+
cmd = '"%s" tools/slice_audio.py "%s" "%s" %s %s %s %s %s %s %s %s %s''' % (python_exec,inp, opt_root, threshold, min_length, min_interval, hop_size, max_sil_kept, _max, alpha, i_part, n_parts)
|
127 |
+
print(cmd)
|
128 |
+
p = Popen(cmd, shell=True)
|
129 |
+
ps_slice.append(p)
|
130 |
+
yield "切割执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
|
131 |
+
for p in ps_slice:
|
132 |
+
p.wait()
|
133 |
+
ps_slice=[]
|
134 |
+
yield "切割结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
|
135 |
+
else:
|
136 |
+
yield "已有正在进行的切割任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
|
137 |
+
|
138 |
+
def close_slice():
|
139 |
+
global ps_slice
|
140 |
+
if (ps_slice != []):
|
141 |
+
for p_slice in ps_slice:
|
142 |
+
try:
|
143 |
+
kill_process(p_slice.pid)
|
144 |
+
except:
|
145 |
+
traceback.print_exc()
|
146 |
+
ps_slice=[]
|
147 |
+
return "已终止所有切割进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
|
148 |
+
|
149 |
+
with gr.Blocks(title="GPT-SoVITS WebUI") as app:
|
150 |
+
gr.Markdown(
|
151 |
+
value=
|
152 |
+
i18n("本软件以MIT协议开源, 作者不对��件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.")
|
153 |
+
)
|
154 |
+
gr.Markdown(
|
155 |
+
value=
|
156 |
+
i18n("中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e")
|
157 |
+
)
|
158 |
+
|
159 |
+
with gr.Tabs():
|
160 |
+
with gr.TabItem(i18n("0-前置数据集获取工具")):#提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标
|
161 |
+
gr.Markdown(value=i18n("0b-语音切分工具"))
|
162 |
+
with gr.Row():
|
163 |
+
with gr.Row():
|
164 |
+
slice_inp_path=gr.Textbox(label=i18n("音频自动切分输入路径,可文件可文件夹"),value="")
|
165 |
+
slice_opt_root=gr.Textbox(label=i18n("切分后的子音频的输出根目录"),value="output/slicer_opt")
|
166 |
+
threshold=gr.Textbox(label=i18n("threshold:音量小于这个值视作静音的备选切割点"),value="-34")
|
167 |
+
min_length=gr.Textbox(label=i18n("min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值"),value="4000")
|
168 |
+
min_interval=gr.Textbox(label=i18n("min_interval:最短切割间隔"),value="300")
|
169 |
+
hop_size=gr.Textbox(label=i18n("hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)"),value="10")
|
170 |
+
max_sil_kept=gr.Textbox(label=i18n("max_sil_kept:切完后静音最多留多长"),value="500")
|
171 |
+
with gr.Row():
|
172 |
+
open_slicer_button=gr.Button(i18n("开启语音切割"), variant="primary",visible=True)
|
173 |
+
close_slicer_button=gr.Button(i18n("终止语音切割"), variant="primary",visible=False)
|
174 |
+
_max=gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("max:归一化后最大值多少"),value=0.9,interactive=True)
|
175 |
+
alpha=gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("alpha_mix:混多少比例归一化后音频进来"),value=0.25,interactive=True)
|
176 |
+
n_process=gr.Slider(minimum=1,maximum=n_cpu,step=1,label=i18n("切割使用的进程数"),value=4,interactive=True)
|
177 |
+
slicer_info = gr.Textbox(label=i18n("语音切割进程输出信息"))
|
178 |
+
|
179 |
+
open_slicer_button.click(open_slice, [slice_inp_path, slice_opt_root, threshold, min_length, min_interval, hop_size, max_sil_kept, _max, alpha, n_process], [slicer_info, open_slicer_button, close_slicer_button])
|
180 |
+
close_slicer_button.click(close_slice, [], [slicer_info, open_slicer_button, close_slicer_button])
|
181 |
+
|
182 |
+
app.queue(concurrency_count=511, max_size=1022).launch(
|
183 |
+
server_name="0.0.0.0",
|
184 |
+
inbrowser=True,
|
185 |
+
share=is_share,
|
186 |
+
server_port=webui_port_main,
|
187 |
+
quiet=True,
|
188 |
+
)
|
old/xtts_demo.py
ADDED
@@ -0,0 +1,973 @@
1 |
+
import argparse
|
2 |
+
import os
|
3 |
+
import sys
|
4 |
+
import tempfile
|
5 |
+
import logging
|
6 |
+
from pathlib import Path
|
7 |
+
|
8 |
+
import os
|
9 |
+
import shutil
|
10 |
+
import glob
|
11 |
+
|
12 |
+
import gradio as gr
|
13 |
+
import librosa.display
|
14 |
+
import numpy as np
|
15 |
+
|
16 |
+
from datetime import datetime
|
17 |
+
from pydub import AudioSegment
|
18 |
+
import pysrt
|
19 |
+
|
20 |
+
import torch
|
21 |
+
import torchaudio
|
22 |
+
import traceback
|
23 |
+
from utils.formatter import format_audio_list, find_latest_best_model
|
24 |
+
from utils.gpt_train import train_gpt
|
25 |
+
|
26 |
+
from TTS.tts.configs.xtts_config import XttsConfig
|
27 |
+
from TTS.tts.models.xtts import Xtts
|
28 |
+
|
29 |
+
from openvoice_cli.downloader import download_checkpoint
|
30 |
+
from openvoice_cli.api import ToneColorConverter
|
31 |
+
import openvoice_cli.se_extractor as se_extractor
|
32 |
+
|
33 |
+
|
34 |
+
logging.basicConfig(level=logging.INFO)
|
35 |
+
logger = logging.getLogger(__name__)
|
36 |
+
|
37 |
+
# Clear logs
|
38 |
+
def remove_log_file(file_path):
|
39 |
+
log_file = Path(file_path)
|
40 |
+
|
41 |
+
if log_file.exists() and log_file.is_file():
|
42 |
+
log_file.unlink()
|
43 |
+
|
44 |
+
# remove_log_file(str(Path.cwd() / "log.out"))
|
45 |
+
|
46 |
+
def clear_gpu_cache():
|
47 |
+
# clear the GPU cache
|
48 |
+
if torch.cuda.is_available():
|
49 |
+
torch.cuda.empty_cache()
|
50 |
+
|
51 |
+
XTTS_MODEL = None
|
52 |
+
def load_model(xtts_checkpoint, xtts_config, xtts_vocab,xtts_speaker):
|
53 |
+
global XTTS_MODEL
|
54 |
+
clear_gpu_cache()
|
55 |
+
if not xtts_checkpoint or not xtts_config or not xtts_vocab:
|
56 |
+
return "You need to run the previous steps or manually set the `XTTS checkpoint path`, `XTTS config path`, and `XTTS vocab path` fields !!"
|
57 |
+
config = XttsConfig()
|
58 |
+
config.load_json(xtts_config)
|
59 |
+
XTTS_MODEL = Xtts.init_from_config(config)
|
60 |
+
print("Loading XTTS model! ")
|
61 |
+
XTTS_MODEL.load_checkpoint(config, checkpoint_path=xtts_checkpoint, vocab_path=xtts_vocab,speaker_file_path=xtts_speaker, use_deepspeed=False)
|
62 |
+
if torch.cuda.is_available():
|
63 |
+
XTTS_MODEL.cuda()
|
64 |
+
|
65 |
+
print("Model Loaded!")
|
66 |
+
return "Model Loaded!"
|
67 |
+
|
68 |
+
def run_tts(lang, tts_text, speaker_audio_file, output_file_path, temperature, length_penalty, repetition_penalty, top_k, top_p, speed, sentence_split, use_config):
|
69 |
+
if XTTS_MODEL is None:
|
70 |
+
raise Exception("XTTS_MODEL is not loaded. Please load the model before running TTS.")
|
71 |
+
if not tts_text.strip():
|
72 |
+
raise ValueError("Text for TTS is empty.")
|
73 |
+
if not os.path.exists(speaker_audio_file):
|
74 |
+
raise FileNotFoundError(f"Speaker audio file not found: {speaker_audio_file}")
|
75 |
+
|
76 |
+
gpt_cond_latent, speaker_embedding = XTTS_MODEL.get_conditioning_latents(audio_path=speaker_audio_file, gpt_cond_len=XTTS_MODEL.config.gpt_cond_len, max_ref_length=XTTS_MODEL.config.max_ref_len, sound_norm_refs=XTTS_MODEL.config.sound_norm_refs)
|
77 |
+
|
78 |
+
if use_config:
|
79 |
+
out = XTTS_MODEL.inference(
|
80 |
+
text=tts_text,
|
81 |
+
language=lang,
|
82 |
+
gpt_cond_latent=gpt_cond_latent,
|
83 |
+
speaker_embedding=speaker_embedding,
|
84 |
+
temperature=XTTS_MODEL.config.temperature, # Add custom parameters here
|
85 |
+
length_penalty=XTTS_MODEL.config.length_penalty,
|
86 |
+
repetition_penalty=XTTS_MODEL.config.repetition_penalty,
|
87 |
+
top_k=XTTS_MODEL.config.top_k,
|
88 |
+
top_p=XTTS_MODEL.config.top_p,
|
89 |
+
speed=speed,
|
90 |
+
enable_text_splitting = True
|
91 |
+
)
|
92 |
+
else:
|
93 |
+
out = XTTS_MODEL.inference(
|
94 |
+
text=tts_text,
|
95 |
+
language=lang,
|
96 |
+
gpt_cond_latent=gpt_cond_latent,
|
97 |
+
speaker_embedding=speaker_embedding,
|
98 |
+
temperature=temperature, # Add custom parameters here
|
99 |
+
length_penalty=length_penalty,
|
100 |
+
repetition_penalty=float(repetition_penalty),
|
101 |
+
top_k=top_k,
|
102 |
+
top_p=top_p,
|
103 |
+
speed=speed,
|
104 |
+
enable_text_splitting = sentence_split
|
105 |
+
)
|
106 |
+
|
107 |
+
with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
|
108 |
+
out["wav"] = torch.tensor(out["wav"]).unsqueeze(0)
|
109 |
+
out_path = fp.name
|
110 |
+
torchaudio.save(out_path, out["wav"], 24000)
|
111 |
+
|
112 |
+
return "Speech generated !", out_path, speaker_audio_file
|
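Note that this run_tts writes the generated wav to a NamedTemporaryFile and returns that temporary path, so the output_file_path argument is never used, while process_srt_and_generate_audio below expects each per-subtitle file to exist at the path it passes in. A minimal sketch of one way to reconcile the two, assuming the caller wants the file at output_file_path (this wrapper is hypothetical, not part of the original code):

# Hypothetical wrapper: make run_tts honor the requested output path.
import shutil

def run_tts_to_path(lang, text, speaker_wav, output_file_path, *tts_args):
    msg, tmp_path, ref = run_tts(lang, text, speaker_wav, output_file_path, *tts_args)
    shutil.copy(tmp_path, output_file_path)  # copy the temporary wav to where the caller expects it
    return msg, output_file_path, ref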
113 |
+
|
114 |
+
|
115 |
+
def load_params_tts(out_path,version):
|
116 |
+
|
117 |
+
out_path = Path(out_path)
|
118 |
+
|
119 |
+
# base_model_path = Path.cwd() / "models" / version
|
120 |
+
|
121 |
+
# if not base_model_path.exists():
|
122 |
+
# return "Base model not found !","","",""
|
123 |
+
|
124 |
+
ready_model_path = out_path / "ready"
|
125 |
+
|
126 |
+
vocab_path = ready_model_path / "vocab.json"
|
127 |
+
config_path = ready_model_path / "config.json"
|
128 |
+
speaker_path = ready_model_path / "speakers_xtts.pth"
|
129 |
+
reference_path = ready_model_path / "reference.wav"
|
130 |
+
|
131 |
+
model_path = ready_model_path / "model.pth"
|
132 |
+
|
133 |
+
if not model_path.exists():
|
134 |
+
model_path = ready_model_path / "unoptimize_model.pth"
|
135 |
+
if not model_path.exists():
|
136 |
+
return "Params for TTS not found", "", "", ""
|
137 |
+
|
138 |
+
return "Params for TTS loaded", model_path, config_path, vocab_path,speaker_path, reference_path
|
139 |
+
|
140 |
+
|
141 |
+
def process_srt_and_generate_audio(
|
142 |
+
srt_file,
|
143 |
+
lang,
|
144 |
+
speaker_reference_audio,
|
145 |
+
temperature,
|
146 |
+
length_penalty,
|
147 |
+
repetition_penalty,
|
148 |
+
top_k,
|
149 |
+
top_p,
|
150 |
+
speed,
|
151 |
+
sentence_split,
|
152 |
+
use_config ):
|
153 |
+
try:
|
154 |
+
subtitles = pysrt.open(srt_file)
|
155 |
+
audio_files = []
|
156 |
+
output_dir = create_output_dir(parent_dir='/content/drive/MyDrive/Voice Conversion Result')
|
157 |
+
|
158 |
+
for index, subtitle in enumerate(subtitles):
|
159 |
+
audio_filename = f"audio_{index+1:03d}.wav"
|
160 |
+
audio_file_path = os.path.join(output_dir, audio_filename)
|
161 |
+
|
162 |
+
subtitle_text=remove_endperiod(subtitle.text)
|
163 |
+
|
164 |
+
run_tts(lang, subtitle_text, speaker_reference_audio, audio_file_path,
|
165 |
+
temperature, length_penalty, repetition_penalty, top_k, top_p,
|
166 |
+
speed, sentence_split, use_config)
|
167 |
+
logger.info(f"Generated audio file: {audio_file_path}")
|
168 |
+
audio_files.append(audio_file_path)
|
169 |
+
|
170 |
+
output_audio_path = merge_audio_with_srt_timing(subtitles, audio_files, output_dir)
|
171 |
+
return output_audio_path
|
172 |
+
except Exception as e:
|
173 |
+
logger.error(f"Error in process_srt_and_generate_audio: {e}")
|
174 |
+
raise
|
175 |
+
|
176 |
+
|
177 |
+
def create_output_dir(parent_dir):
|
178 |
+
try:
|
179 |
+
# Build a folder name based on the current date and time
|
180 |
+
folder_name = datetime.now().strftime("audio_outputs_%Y-%m-%d_%H-%M-%S")
|
181 |
+
|
182 |
+
# Parent directory; here it is assumed to live under the Colab drive root
|
183 |
+
#parent_dir = "/content/drive/MyDrive/Voice Conversion Result"
|
184 |
+
|
185 |
+
# Full folder path
|
186 |
+
output_dir = os.path.join(parent_dir, folder_name)
|
187 |
+
|
188 |
+
# Create the folder
|
189 |
+
if not os.path.exists(output_dir):
|
190 |
+
os.makedirs(output_dir)
|
191 |
+
logger.info(f"Created output directory at: {output_dir}")
|
192 |
+
|
193 |
+
return output_dir
|
194 |
+
except Exception as e:
|
195 |
+
logger.error(f"Failed to create output directory: {e}")
|
196 |
+
raise
|
197 |
+
|
198 |
+
|
199 |
+
def srt_time_to_ms(srt_time):
|
200 |
+
return (srt_time.hours * 3600 + srt_time.minutes * 60 + srt_time.seconds) * 1000 + srt_time.milliseconds
|
201 |
+
|
202 |
+
|
203 |
+
def merge_audio_with_srt_timing(subtitles, audio_files, output_dir):
|
204 |
+
try:
|
205 |
+
combined = AudioSegment.silent(duration=0)
|
206 |
+
last_position_ms = 0
|
207 |
+
|
208 |
+
for subtitle, audio_file in zip(subtitles, audio_files):
|
209 |
+
start_time_ms = srt_time_to_ms(subtitle.start)
|
210 |
+
if last_position_ms < start_time_ms:
|
211 |
+
silence_duration = start_time_ms - last_position_ms
|
212 |
+
combined += AudioSegment.silent(duration=silence_duration)
|
213 |
+
last_position_ms = start_time_ms
|
214 |
+
|
215 |
+
audio = AudioSegment.from_file(audio_file, format="wav")
|
216 |
+
|
217 |
+
combined += audio
|
218 |
+
last_position_ms += len(audio)
|
219 |
+
|
220 |
+
output_path = os.path.join(output_dir, "combined_audio_with_timing.wav")
|
221 |
+
#combined_with_set_frame_rate = combined.set_frame_rate(24000)
|
222 |
+
#combined_with_set_frame_rate.export(output_path, format="wav")
|
223 |
+
combined.export(output_path, format="wav")
|
224 |
+
logger.info(f"Exported combined audio to: {output_path}")
|
225 |
+
|
226 |
+
return output_path
|
227 |
+
except Exception as e:
|
228 |
+
logger.error(f"Error merging audio files: {e}")
|
229 |
+
raise
|
230 |
+
|
231 |
+
|
232 |
+
def remove_endperiod(subtitle):
|
233 |
+
"""Removes the period (.) at the end of a subtitle.
|
234 |
+
"""
|
235 |
+
if subtitle.endswith('.'):
|
236 |
+
subtitle = subtitle[:-1]
|
237 |
+
return subtitle
|
238 |
+
|
239 |
+
def convert_voice(reference_audio, audio_to_convert):
|
240 |
+
|
241 |
+
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
242 |
+
# Define the input and output audio paths
|
243 |
+
#input_audio_path = audio_to_convert
|
244 |
+
base_name, ext = os.path.splitext(os.path.basename(audio_to_convert))
|
245 |
+
new_file_name = base_name + 'convertedvoice' + ext
|
246 |
+
output_path = os.path.join(os.path.dirname(audio_to_convert), new_file_name)
|
247 |
+
|
248 |
+
tune_one(input_file=audio_to_convert, ref_file=reference_audio, output_file=output_path, device=device)
|
249 |
+
|
250 |
+
return output_path
|
251 |
+
|
252 |
+
def tune_one(input_file,ref_file,output_file,device):
|
253 |
+
current_dir = os.path.dirname(os.path.realpath(__file__))
|
254 |
+
checkpoints_dir = os.path.join(current_dir, 'checkpoints')
|
255 |
+
ckpt_converter = os.path.join(checkpoints_dir, 'converter')
|
256 |
+
|
257 |
+
if not os.path.exists(ckpt_converter):
|
258 |
+
os.makedirs(ckpt_converter, exist_ok=True)
|
259 |
+
download_checkpoint(ckpt_converter)
|
260 |
+
|
261 |
+
device = device
|
262 |
+
|
263 |
+
tone_color_converter = ToneColorConverter(os.path.join(ckpt_converter, 'config.json'), device=device)
|
264 |
+
tone_color_converter.load_ckpt(os.path.join(ckpt_converter, 'checkpoint.pth'))
|
265 |
+
|
266 |
+
source_se, _ = se_extractor.get_se(input_file, tone_color_converter, vad=True)
|
267 |
+
target_se, _ = se_extractor.get_se(ref_file, tone_color_converter, vad=True)
|
268 |
+
|
269 |
+
# Ensure output directory exists and is writable
|
270 |
+
output_dir = os.path.dirname(output_file)
|
271 |
+
if output_dir:
|
272 |
+
if not os.path.exists(output_dir):
|
273 |
+
os.makedirs(output_dir, exist_ok=True)
|
274 |
+
|
275 |
+
# Run the tone color converter
|
276 |
+
tone_color_converter.convert(
|
277 |
+
audio_src_path=input_file,
|
278 |
+
src_se=source_se,
|
279 |
+
tgt_se=target_se,
|
280 |
+
output_path=output_file,
|
281 |
+
)
|
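For reference, a minimal way to call convert_voice directly; the file names below are illustrative, not from the repository.

# Illustrative usage; "reference.wav" and "narration.wav" are assumed to exist locally.
result = convert_voice("reference.wav", "narration.wav")
print(result)  # e.g. narrationconvertedvoice.wav, written next to the input file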
282 |
+
'''
|
283 |
+
def tune_batch(input_dir, ref_file, output_dir=None, device='cpu', output_format='.wav'):
|
284 |
+
current_dir = os.path.dirname(os.path.realpath(__file__))
|
285 |
+
checkpoints_dir = os.path.join(current_dir, 'checkpoints')
|
286 |
+
ckpt_converter = os.path.join(checkpoints_dir, 'converter')
|
287 |
+
|
288 |
+
if not os.path.exists(ckpt_converter):
|
289 |
+
os.makedirs(ckpt_converter, exist_ok=True)
|
290 |
+
download_checkpoint(ckpt_converter)
|
291 |
+
|
292 |
+
tone_color_converter = ToneColorConverter(os.path.join(ckpt_converter, 'config.json'), device=device)
|
293 |
+
tone_color_converter.load_ckpt(os.path.join(ckpt_converter, 'checkpoint.pth'))
|
294 |
+
|
295 |
+
target_se, _ = se_extractor.get_se(ref_file, tone_color_converter, vad=True)
|
296 |
+
|
297 |
+
# Use default output directory 'out' if not provided
|
298 |
+
if output_dir is None:
|
299 |
+
output_dir = os.path.join(current_dir, 'out')
|
300 |
+
os.makedirs(output_dir, exist_ok=True)
|
301 |
+
|
302 |
+
# Check for any audio files in the input directory (wav, mp3, flac) using glob
|
303 |
+
audio_extensions = ('*.wav', '*.mp3', '*.flac')
|
304 |
+
audio_files = []
|
305 |
+
for extension in audio_extensions:
|
306 |
+
audio_files.extend(glob.glob(os.path.join(input_dir, extension)))
|
307 |
+
|
308 |
+
for audio_file in tqdm(audio_files,"Tune file",len(audio_files)):
|
309 |
+
# Extract source SE from audio file
|
310 |
+
source_se, _ = se_extractor.get_se(audio_file, tone_color_converter, vad=True)
|
311 |
+
|
312 |
+
# Run the tone color converter
|
313 |
+
filename_without_extension = os.path.splitext(os.path.basename(audio_file))[0]
|
314 |
+
output_filename = f"{filename_without_extension}_tuned{output_format}"
|
315 |
+
output_file = os.path.join(output_dir, output_filename)
|
316 |
+
|
317 |
+
tone_color_converter.convert(
|
318 |
+
audio_src_path=audio_file,
|
319 |
+
src_se=source_se,
|
320 |
+
tgt_se=target_se,
|
321 |
+
output_path=output_file,
|
322 |
+
)
|
323 |
+
print(f"Converted {audio_file} to {output_file}")
|
324 |
+
|
325 |
+
return output_dir
|
326 |
+
|
327 |
+
def main_single(args):
|
328 |
+
tune_one(input_file=args.input, ref_file=args.ref, output_file=args.output, device=args.device)
|
329 |
+
|
330 |
+
def main_batch(args):
|
331 |
+
output_dir = tune_batch(
|
332 |
+
input_dir=args.input_dir,
|
333 |
+
ref_file=args.ref_file,
|
334 |
+
output_dir=args.output_dir,
|
335 |
+
device=args.device,
|
336 |
+
output_format=args.output_format
|
337 |
+
)
|
338 |
+
print(f"Batch processing complete. Converted files are saved in {output_dir}")
|
339 |
+
'''
|
340 |
+
|
341 |
+
# define a logger to redirect
|
342 |
+
class Logger:
|
343 |
+
def __init__(self, filename="log.out"):
|
344 |
+
self.log_file = filename
|
345 |
+
self.terminal = sys.stdout
|
346 |
+
self.log = open(self.log_file, "w")
|
347 |
+
|
348 |
+
def write(self, message):
|
349 |
+
self.terminal.write(message)
|
350 |
+
self.log.write(message)
|
351 |
+
|
352 |
+
def flush(self):
|
353 |
+
self.terminal.flush()
|
354 |
+
self.log.flush()
|
355 |
+
|
356 |
+
def isatty(self):
|
357 |
+
return False
|
358 |
+
|
359 |
+
# redirect stdout and stderr to a file
|
360 |
+
sys.stdout = Logger()
|
361 |
+
sys.stderr = sys.stdout
|
362 |
+
|
363 |
+
|
364 |
+
# logging.basicConfig(stream=sys.stdout, level=logging.INFO)
|
365 |
+
import logging
|
366 |
+
logging.basicConfig(
|
367 |
+
level=logging.INFO,
|
368 |
+
format="%(asctime)s [%(levelname)s] %(message)s",
|
369 |
+
handlers=[
|
370 |
+
logging.StreamHandler(sys.stdout)
|
371 |
+
]
|
372 |
+
)
|
373 |
+
|
374 |
+
def read_logs():
|
375 |
+
sys.stdout.flush()
|
376 |
+
with open(sys.stdout.log_file, "r") as f:
|
377 |
+
return f.read()
|
378 |
+
|
379 |
+
|
380 |
+
if __name__ == "__main__":
|
381 |
+
|
382 |
+
parser = argparse.ArgumentParser(
|
383 |
+
description="""XTTS fine-tuning demo\n\n"""
|
384 |
+
"""
|
385 |
+
Example runs:
|
386 |
+
python3 TTS/demos/xtts_ft_demo/xtts_demo.py --port
|
387 |
+
""",
|
388 |
+
formatter_class=argparse.RawTextHelpFormatter,
|
389 |
+
)
|
390 |
+
parser.add_argument(
|
391 |
+
"--port",
|
392 |
+
type=int,
|
393 |
+
help="Port to run the gradio demo. Default: 5003",
|
394 |
+
default=5003,
|
395 |
+
)
|
396 |
+
parser.add_argument(
|
397 |
+
"--out_path",
|
398 |
+
type=str,
|
399 |
+
help="Output path (where data and checkpoints will be saved) Default: output/",
|
400 |
+
default=str(Path.cwd() / "finetune_models"),
|
401 |
+
)
|
402 |
+
|
403 |
+
parser.add_argument(
|
404 |
+
"--num_epochs",
|
405 |
+
type=int,
|
406 |
+
help="Number of epochs to train. Default: 6",
|
407 |
+
default=6,
|
408 |
+
)
|
409 |
+
parser.add_argument(
|
410 |
+
"--batch_size",
|
411 |
+
type=int,
|
412 |
+
help="Batch size. Default: 2",
|
413 |
+
default=2,
|
414 |
+
)
|
415 |
+
parser.add_argument(
|
416 |
+
"--grad_acumm",
|
417 |
+
type=int,
|
418 |
+
help="Grad accumulation steps. Default: 1",
|
419 |
+
default=1,
|
420 |
+
)
|
421 |
+
parser.add_argument(
|
422 |
+
"--max_audio_length",
|
423 |
+
type=int,
|
424 |
+
help="Max permitted audio size in seconds. Default: 11",
|
425 |
+
default=11,
|
426 |
+
)
|
427 |
+
|
428 |
+
args = parser.parse_args()
|
429 |
+
|
430 |
+
with gr.Blocks() as demo:
|
431 |
+
with gr.Tab("0 - Voice conversion"):
|
432 |
+
with gr.Column() as col0:
|
433 |
+
gr.Markdown("## OpenVoice Conversion Tool")
|
434 |
+
voice_convert_seed = gr.File(label="Upload Reference Speaker Audio being generated")
|
435 |
+
#pitch_shift_slider = gr.Slider(minimum=-12, maximum=12, step=1, value=0, label="Pitch Shift (Semitones)")
|
436 |
+
audio_to_convert = gr.Textbox(
|
437 |
+
label="Input the to-be-convert audio location",
|
438 |
+
value="",
|
439 |
+
)
|
440 |
+
convert_button = gr.Button("Convert Voice")
|
441 |
+
converted_audio = gr.Audio(label="Converted Audio")
|
442 |
+
|
443 |
+
convert_button.click(
|
444 |
+
convert_voice,
|
445 |
+
inputs=[voice_convert_seed, audio_to_convert], #, pitch_shift_slider],
|
446 |
+
outputs=[converted_audio]
|
447 |
+
)
|
448 |
+
with gr.Tab("1 - Data processing"):
|
449 |
+
out_path = gr.Textbox(
|
450 |
+
label="Output path (where data and checkpoints will be saved):",
|
451 |
+
value=args.out_path,
|
452 |
+
)
|
453 |
+
# upload_file = gr.Audio(
|
454 |
+
# sources="upload",
|
455 |
+
# label="Select here the audio files that you want to use for XTTS trainining !",
|
456 |
+
# type="filepath",
|
457 |
+
# )
|
458 |
+
upload_file = gr.File(
|
459 |
+
file_count="multiple",
|
460 |
+
label="Select here the audio files that you want to use for XTTS trainining (Supported formats: wav, mp3, and flac)",
|
461 |
+
)
|
462 |
+
|
463 |
+
whisper_model = gr.Dropdown(
|
464 |
+
label="Whisper Model",
|
465 |
+
value="large-v3",
|
466 |
+
choices=[
|
467 |
+
"large-v3",
|
468 |
+
"large-v2",
|
469 |
+
"large",
|
470 |
+
"medium",
|
471 |
+
"small"
|
472 |
+
],
|
473 |
+
)
|
474 |
+
|
475 |
+
lang = gr.Dropdown(
|
476 |
+
label="Dataset Language",
|
477 |
+
value="en",
|
478 |
+
choices=[
|
479 |
+
"en",
|
480 |
+
"es",
|
481 |
+
"fr",
|
482 |
+
"de",
|
483 |
+
"it",
|
484 |
+
"pt",
|
485 |
+
"pl",
|
486 |
+
"tr",
|
487 |
+
"ru",
|
488 |
+
"nl",
|
489 |
+
"cs",
|
490 |
+
"ar",
|
491 |
+
"zh",
|
492 |
+
"hu",
|
493 |
+
"ko",
|
494 |
+
"ja"
|
495 |
+
],
|
496 |
+
)
|
497 |
+
progress_data = gr.Label(
|
498 |
+
label="Progress:"
|
499 |
+
)
|
500 |
+
# demo.load(read_logs, None, logs, every=1)
|
501 |
+
|
502 |
+
prompt_compute_btn = gr.Button(value="Step 1 - Create dataset")
|
503 |
+
|
504 |
+
def preprocess_dataset(audio_path, language, whisper_model, out_path,train_csv,eval_csv, progress=gr.Progress(track_tqdm=True)):
|
505 |
+
clear_gpu_cache()
|
506 |
+
|
507 |
+
train_csv = ""
|
508 |
+
eval_csv = ""
|
509 |
+
|
510 |
+
out_path = os.path.join(out_path, "dataset")
|
511 |
+
os.makedirs(out_path, exist_ok=True)
|
512 |
+
if audio_path is None:
|
513 |
+
return "You should provide one or multiple audio files! If you provided it, probably the upload of the files is not finished yet!", "", ""
|
514 |
+
else:
|
515 |
+
try:
|
516 |
+
train_meta, eval_meta, audio_total_size = format_audio_list(audio_path, whisper_model = whisper_model, target_language=language, out_path=out_path, gradio_progress=progress)
|
517 |
+
except:
|
518 |
+
traceback.print_exc()
|
519 |
+
error = traceback.format_exc()
|
520 |
+
return f"The data processing was interrupted due an error !! Please check the console to verify the full error message! \n Error summary: {error}", "", ""
|
521 |
+
|
522 |
+
# clear_gpu_cache()
|
523 |
+
|
524 |
+
# if audio total len is less than 2 minutes raise an error
|
525 |
+
if audio_total_size < 120:
|
526 |
+
message = "The sum of the duration of the audios that you provided should be at least 2 minutes!"
|
527 |
+
print(message)
|
528 |
+
return message, "", ""
|
529 |
+
|
530 |
+
print("Dataset Processed!")
|
531 |
+
return "Dataset Processed!", train_meta, eval_meta
|
532 |
+
|
533 |
+
with gr.Tab("2 - Fine-tuning XTTS Encoder"):
|
534 |
+
load_params_btn = gr.Button(value="Load Params from output folder")
|
535 |
+
version = gr.Dropdown(
|
536 |
+
label="XTTS base version",
|
537 |
+
value="v2.0.2",
|
538 |
+
choices=[
|
539 |
+
"v2.0.3",
|
540 |
+
"v2.0.2",
|
541 |
+
"v2.0.1",
|
542 |
+
"v2.0.0",
|
543 |
+
"main"
|
544 |
+
],
|
545 |
+
)
|
546 |
+
train_csv = gr.Textbox(
|
547 |
+
label="Train CSV:",
|
548 |
+
)
|
549 |
+
eval_csv = gr.Textbox(
|
550 |
+
label="Eval CSV:",
|
551 |
+
)
|
552 |
+
custom_model = gr.Textbox(
|
553 |
+
label="(Optional) Custom model.pth file , leave blank if you want to use the base file.",
|
554 |
+
value="",
|
555 |
+
)
|
556 |
+
num_epochs = gr.Slider(
|
557 |
+
label="Number of epochs:",
|
558 |
+
minimum=1,
|
559 |
+
maximum=100,
|
560 |
+
step=1,
|
561 |
+
value=args.num_epochs,
|
562 |
+
)
|
563 |
+
batch_size = gr.Slider(
|
564 |
+
label="Batch size:",
|
565 |
+
minimum=2,
|
566 |
+
maximum=512,
|
567 |
+
step=1,
|
568 |
+
value=args.batch_size,
|
569 |
+
)
|
570 |
+
grad_acumm = gr.Slider(
|
571 |
+
label="Grad accumulation steps:",
|
572 |
+
minimum=2,
|
573 |
+
maximum=128,
|
574 |
+
step=1,
|
575 |
+
value=args.grad_acumm,
|
576 |
+
)
|
577 |
+
max_audio_length = gr.Slider(
|
578 |
+
label="Max permitted audio size in seconds:",
|
579 |
+
minimum=2,
|
580 |
+
maximum=20,
|
581 |
+
step=1,
|
582 |
+
value=args.max_audio_length,
|
583 |
+
)
|
584 |
+
clear_train_data = gr.Dropdown(
|
585 |
+
label="Clear train data, you will delete selected folder, after optimizing",
|
586 |
+
value="run",
|
587 |
+
choices=[
|
588 |
+
"none",
|
589 |
+
"run",
|
590 |
+
"dataset",
|
591 |
+
"all"
|
592 |
+
])
|
593 |
+
|
594 |
+
progress_train = gr.Label(
|
595 |
+
label="Progress:"
|
596 |
+
)
|
597 |
+
|
598 |
+
# demo.load(read_logs, None, logs_tts_train, every=1)
|
599 |
+
train_btn = gr.Button(value="Step 2 - Run the training")
|
600 |
+
optimize_model_btn = gr.Button(value="Step 2.5 - Optimize the model")
|
601 |
+
|
602 |
+
def train_model(custom_model,version,language, train_csv, eval_csv, num_epochs, batch_size, grad_acumm, output_path, max_audio_length):
|
603 |
+
clear_gpu_cache()
|
604 |
+
|
605 |
+
run_dir = Path(output_path) / "run"
|
606 |
+
|
607 |
+
# # Remove train dir
|
608 |
+
if run_dir.exists():
|
609 |
+
shutil.rmtree(run_dir)
|
610 |
+
|
611 |
+
# Check if the dataset language matches the language you specified
|
612 |
+
lang_file_path = Path(output_path) / "dataset" / "lang.txt"
|
613 |
+
|
614 |
+
# Check if lang.txt already exists and contains a different language
|
615 |
+
current_language = None
|
616 |
+
if lang_file_path.exists():
|
617 |
+
with open(lang_file_path, 'r', encoding='utf-8') as existing_lang_file:
|
618 |
+
current_language = existing_lang_file.read().strip()
|
619 |
+
if current_language != language:
|
620 |
+
print("The language that was prepared for the dataset does not match the specified language. Change the language to the one specified in the dataset")
|
621 |
+
language = current_language
|
622 |
+
|
623 |
+
if not train_csv or not eval_csv:
|
624 |
+
return "You need to run the data processing step or manually set `Train CSV` and `Eval CSV` fields !", "", "", "", ""
|
625 |
+
try:
|
626 |
+
# convert seconds to waveform frames
|
627 |
+
max_audio_length = int(max_audio_length * 22050)
|
628 |
+
speaker_xtts_path,config_path, original_xtts_checkpoint, vocab_file, exp_path, speaker_wav = train_gpt(custom_model,version,language, num_epochs, batch_size, grad_acumm, train_csv, eval_csv, output_path=output_path, max_audio_length=max_audio_length)
|
629 |
+
except:
|
630 |
+
traceback.print_exc()
|
631 |
+
error = traceback.format_exc()
|
632 |
+
return f"The training was interrupted due an error !! Please check the console to check the full error message! \n Error summary: {error}", "", "", "", ""
|
633 |
+
|
634 |
+
# copy original files to avoid parameters changes issues
|
635 |
+
# os.system(f"cp {config_path} {exp_path}")
|
636 |
+
# os.system(f"cp {vocab_file} {exp_path}")
|
637 |
+
|
638 |
+
ready_dir = Path(output_path) / "ready"
|
639 |
+
|
640 |
+
ft_xtts_checkpoint = os.path.join(exp_path, "best_model.pth")
|
641 |
+
|
642 |
+
shutil.copy(ft_xtts_checkpoint, ready_dir / "unoptimize_model.pth")
|
643 |
+
# os.remove(ft_xtts_checkpoint)
|
644 |
+
|
645 |
+
ft_xtts_checkpoint = os.path.join(ready_dir, "unoptimize_model.pth")
|
646 |
+
|
647 |
+
# Reference
|
648 |
+
# Move reference audio to output folder and rename it
|
649 |
+
speaker_reference_path = Path(speaker_wav)
|
650 |
+
speaker_reference_new_path = ready_dir / "reference.wav"
|
651 |
+
shutil.copy(speaker_reference_path, speaker_reference_new_path)
|
652 |
+
|
653 |
+
print("Model training done!")
|
654 |
+
# clear_gpu_cache()
|
655 |
+
return "Model training done!", config_path, vocab_file, ft_xtts_checkpoint,speaker_xtts_path, speaker_reference_new_path
|
656 |
+
|
657 |
+
def optimize_model(out_path, clear_train_data):
|
658 |
+
# print(out_path)
|
659 |
+
out_path = Path(out_path) # Ensure that out_path is a Path object.
|
660 |
+
|
661 |
+
ready_dir = out_path / "ready"
|
662 |
+
run_dir = out_path / "run"
|
663 |
+
dataset_dir = out_path / "dataset"
|
664 |
+
|
665 |
+
# Clear specified training data directories.
|
666 |
+
if clear_train_data in {"run", "all"} and run_dir.exists():
|
667 |
+
try:
|
668 |
+
shutil.rmtree(run_dir)
|
669 |
+
except PermissionError as e:
|
670 |
+
print(f"An error occurred while deleting {run_dir}: {e}")
|
671 |
+
|
672 |
+
if clear_train_data in {"dataset", "all"} and dataset_dir.exists():
|
673 |
+
try:
|
674 |
+
shutil.rmtree(dataset_dir)
|
675 |
+
except PermissionError as e:
|
676 |
+
print(f"An error occurred while deleting {dataset_dir}: {e}")
|
677 |
+
|
678 |
+
# Get full path to model
|
679 |
+
model_path = ready_dir / "unoptimize_model.pth"
|
680 |
+
|
681 |
+
if not model_path.is_file():
|
682 |
+
return "Unoptimized model not found in ready folder", ""
|
683 |
+
|
684 |
+
# Load the checkpoint and remove unnecessary parts.
|
685 |
+
checkpoint = torch.load(model_path, map_location=torch.device("cpu"))
|
686 |
+
del checkpoint["optimizer"]
|
687 |
+
|
688 |
+
for key in list(checkpoint["model"].keys()):
|
689 |
+
if "dvae" in key:
|
690 |
+
del checkpoint["model"][key]
|
691 |
+
|
692 |
+
# Make sure out_path is a Path object or convert it to Path
|
693 |
+
os.remove(model_path)
|
694 |
+
|
695 |
+
# Save the optimized model.
|
696 |
+
optimized_model_file_name="model.pth"
|
697 |
+
optimized_model=ready_dir/optimized_model_file_name
|
698 |
+
|
699 |
+
torch.save(checkpoint, optimized_model)
|
700 |
+
ft_xtts_checkpoint=str(optimized_model)
|
701 |
+
|
702 |
+
clear_gpu_cache()
|
703 |
+
|
704 |
+
return f"Model optimized and saved at {ft_xtts_checkpoint}!", ft_xtts_checkpoint
|
705 |
+
|
706 |
+
def load_params(out_path):
|
707 |
+
path_output = Path(out_path)
|
708 |
+
|
709 |
+
dataset_path = path_output / "dataset"
|
710 |
+
|
711 |
+
if not dataset_path.exists():
|
712 |
+
return "The output folder does not exist!", "", ""
|
713 |
+
|
714 |
+
eval_train = dataset_path / "metadata_train.csv"
|
715 |
+
eval_csv = dataset_path / "metadata_eval.csv"
|
716 |
+
|
717 |
+
# Write the target language to lang.txt in the output directory
|
718 |
+
lang_file_path = dataset_path / "lang.txt"
|
719 |
+
|
720 |
+
# Check if lang.txt already exists and contains a different language
|
721 |
+
current_language = None
|
722 |
+
if os.path.exists(lang_file_path):
|
723 |
+
with open(lang_file_path, 'r', encoding='utf-8') as existing_lang_file:
|
724 |
+
current_language = existing_lang_file.read().strip()
|
725 |
+
|
726 |
+
clear_gpu_cache()
|
727 |
+
|
728 |
+
print(current_language)
|
729 |
+
return "The data has been updated", eval_train, eval_csv, current_language
|
730 |
+
|
731 |
+
with gr.Tab("3 - Inference"):
|
732 |
+
with gr.Row():
|
733 |
+
with gr.Column() as col1:
|
734 |
+
load_params_tts_btn = gr.Button(value="Load params for TTS from output folder")
|
735 |
+
xtts_checkpoint = gr.Textbox(
|
736 |
+
label="XTTS checkpoint path:",
|
737 |
+
value="",
|
738 |
+
)
|
739 |
+
xtts_config = gr.Textbox(
|
740 |
+
label="XTTS config path:",
|
741 |
+
value="",
|
742 |
+
)
|
743 |
+
|
744 |
+
xtts_vocab = gr.Textbox(
|
745 |
+
label="XTTS vocab path:",
|
746 |
+
value="",
|
747 |
+
)
|
748 |
+
xtts_speaker = gr.Textbox(
|
749 |
+
label="XTTS speaker path:",
|
750 |
+
value="",
|
751 |
+
)
|
752 |
+
progress_load = gr.Label(
|
753 |
+
label="Progress:"
|
754 |
+
)
|
755 |
+
load_btn = gr.Button(value="Step 3 - Load Fine-tuned XTTS model")
|
756 |
+
|
757 |
+
with gr.Column() as col2:
|
758 |
+
speaker_reference_audio = gr.Textbox(
|
759 |
+
label="Speaker reference audio:",
|
760 |
+
value="",
|
761 |
+
)
|
762 |
+
tts_language = gr.Dropdown(
|
763 |
+
label="Language",
|
764 |
+
value="en",
|
765 |
+
choices=[
|
766 |
+
"en",
|
767 |
+
"es",
|
768 |
+
"fr",
|
769 |
+
"de",
|
770 |
+
"it",
|
771 |
+
"pt",
|
772 |
+
"pl",
|
773 |
+
"tr",
|
774 |
+
"ru",
|
775 |
+
"nl",
|
776 |
+
"cs",
|
777 |
+
"ar",
|
778 |
+
"zh",
|
779 |
+
"hu",
|
780 |
+
"ko",
|
781 |
+
"ja",
|
782 |
+
]
|
783 |
+
)
|
784 |
+
tts_text = gr.Textbox(
|
785 |
+
label="Input Text.",
|
786 |
+
value="This model sounds really good and above all, it's reasonably fast.",
|
787 |
+
)
|
788 |
+
with gr.Accordion("Advanced settings", open=False) as acr:
|
789 |
+
temperature = gr.Slider(
|
790 |
+
label="temperature",
|
791 |
+
minimum=0,
|
792 |
+
maximum=1,
|
793 |
+
step=0.05,
|
794 |
+
value=0.75,
|
795 |
+
)
|
796 |
+
length_penalty = gr.Slider(
|
797 |
+
label="length_penalty",
|
798 |
+
minimum=-10.0,
|
799 |
+
maximum=10.0,
|
800 |
+
step=0.5,
|
801 |
+
value=1,
|
802 |
+
)
|
803 |
+
repetition_penalty = gr.Slider(
|
804 |
+
label="repetition penalty",
|
805 |
+
minimum=1,
|
806 |
+
maximum=10,
|
807 |
+
step=0.5,
|
808 |
+
value=5,
|
809 |
+
)
|
810 |
+
top_k = gr.Slider(
|
811 |
+
label="top_k",
|
812 |
+
minimum=1,
|
813 |
+
maximum=100,
|
814 |
+
step=1,
|
815 |
+
value=50,
|
816 |
+
)
|
817 |
+
top_p = gr.Slider(
|
818 |
+
label="top_p",
|
819 |
+
minimum=0,
|
820 |
+
maximum=1,
|
821 |
+
step=0.05,
|
822 |
+
value=0.85,
|
823 |
+
)
|
824 |
+
speed = gr.Slider(
|
825 |
+
label="speed",
|
826 |
+
minimum=0.2,
|
827 |
+
maximum=4.0,
|
828 |
+
step=0.05,
|
829 |
+
value=1.0,
|
830 |
+
)
|
831 |
+
sentence_split = gr.Checkbox(
|
832 |
+
label="Enable text splitting",
|
833 |
+
value=True,
|
834 |
+
)
|
835 |
+
use_config = gr.Checkbox(
|
836 |
+
label="Use Inference settings from config, if disabled use the settings above",
|
837 |
+
value=False,
|
838 |
+
)
|
839 |
+
tts_btn = gr.Button(value="Step 4 - Inference")
|
840 |
+
|
841 |
+
with gr.Column() as col3:
|
842 |
+
progress_gen = gr.Label(
|
843 |
+
label="Progress:"
|
844 |
+
)
|
845 |
+
tts_output_audio = gr.Audio(label="Generated Audio.")
|
846 |
+
reference_audio = gr.Audio(label="Reference audio used.")
|
847 |
+
|
848 |
+
|
849 |
+
with gr.Column() as col4:
|
850 |
+
srt_upload = gr.File(label="Upload SRT File")
|
851 |
+
generate_srt_audio_btn = gr.Button(value="Generate Audio from SRT")
|
852 |
+
srt_output_audio = gr.Audio(label="Combined Audio from SRT")
|
853 |
+
error_message = gr.Textbox(label="Error Message", visible=False) # 错误消息组件,默认不显示
|
854 |
+
|
855 |
+
generate_srt_audio_btn.click(
|
856 |
+
fn=process_srt_and_generate_audio,
|
857 |
+
inputs=[
|
858 |
+
srt_upload,
|
859 |
+
tts_language,
|
860 |
+
speaker_reference_audio,
|
861 |
+
temperature,
|
862 |
+
length_penalty,
|
863 |
+
repetition_penalty,
|
864 |
+
top_k,
|
865 |
+
top_p,
|
866 |
+
speed,
|
867 |
+
sentence_split,
|
868 |
+
use_config
|
869 |
+
],
|
870 |
+
outputs=[srt_output_audio]
|
871 |
+
)
|
872 |
+
|
873 |
+
prompt_compute_btn.click(
|
874 |
+
fn=preprocess_dataset,
|
875 |
+
inputs=[
|
876 |
+
upload_file,
|
877 |
+
lang,
|
878 |
+
whisper_model,
|
879 |
+
out_path,
|
880 |
+
train_csv,
|
881 |
+
eval_csv
|
882 |
+
],
|
883 |
+
outputs=[
|
884 |
+
progress_data,
|
885 |
+
train_csv,
|
886 |
+
eval_csv,
|
887 |
+
],
|
888 |
+
)
|
889 |
+
|
890 |
+
load_params_btn.click(
|
891 |
+
fn=load_params,
|
892 |
+
inputs=[out_path],
|
893 |
+
outputs=[
|
894 |
+
progress_train,
|
895 |
+
train_csv,
|
896 |
+
eval_csv,
|
897 |
+
lang
|
898 |
+
]
|
899 |
+
)
|
900 |
+
|
901 |
+
|
902 |
+
train_btn.click(
|
903 |
+
fn=train_model,
|
904 |
+
inputs=[
|
905 |
+
custom_model,
|
906 |
+
version,
|
907 |
+
lang,
|
908 |
+
train_csv,
|
909 |
+
eval_csv,
|
910 |
+
num_epochs,
|
911 |
+
batch_size,
|
912 |
+
grad_acumm,
|
913 |
+
out_path,
|
914 |
+
max_audio_length,
|
915 |
+
],
|
916 |
+
outputs=[progress_train, xtts_config, xtts_vocab, xtts_checkpoint,xtts_speaker, speaker_reference_audio],
|
917 |
+
)
|
918 |
+
|
919 |
+
optimize_model_btn.click(
|
920 |
+
fn=optimize_model,
|
921 |
+
inputs=[
|
922 |
+
out_path,
|
923 |
+
clear_train_data
|
924 |
+
],
|
925 |
+
outputs=[progress_train,xtts_checkpoint],
|
926 |
+
)
|
927 |
+
|
928 |
+
load_btn.click(
|
929 |
+
fn=load_model,
|
930 |
+
inputs=[
|
931 |
+
xtts_checkpoint,
|
932 |
+
xtts_config,
|
933 |
+
xtts_vocab,
|
934 |
+
xtts_speaker
|
935 |
+
],
|
936 |
+
outputs=[progress_load],
|
937 |
+
)
|
938 |
+
|
939 |
+
tts_btn.click(
|
940 |
+
fn=run_tts,
|
941 |
+
inputs=[
|
942 |
+
tts_language,
|
943 |
+
tts_text,
|
944 |
+
speaker_reference_audio,
|
945 |
+
temperature,
|
946 |
+
length_penalty,
|
947 |
+
repetition_penalty,
|
948 |
+
top_k,
|
949 |
+
top_p,
|
950 |
+
speed,
|
951 |
+
sentence_split,
|
952 |
+
use_config
|
953 |
+
],
|
954 |
+
outputs=[progress_gen, tts_output_audio, reference_audio],
|
955 |
+
)
|
956 |
+
|
957 |
+
load_params_tts_btn.click(
|
958 |
+
fn=load_params_tts,
|
959 |
+
inputs=[
|
960 |
+
out_path,
|
961 |
+
version
|
962 |
+
],
|
963 |
+
outputs=[progress_load,xtts_checkpoint,xtts_config,xtts_vocab,xtts_speaker,speaker_reference_audio],
|
964 |
+
)
|
965 |
+
|
966 |
+
demo.launch(
|
967 |
+
#share=False,
|
968 |
+
share=True,
|
969 |
+
debug=False,
|
970 |
+
server_port=args.port,
|
971 |
+
#server_name="localhost"
|
972 |
+
server_name="0.0.0.0"
|
973 |
+
)
|
openvoice_cli/__init__.py
ADDED
File without changes
|
openvoice_cli/__main__.py
ADDED
@@ -0,0 +1,125 @@
+import os
+import argparse
+from tqdm import tqdm
+from openvoice_cli.downloader import download_checkpoint
+from openvoice_cli.api import ToneColorConverter
+import openvoice_cli.se_extractor as se_extractor
+import glob
+
+def tune_one(input_file,ref_file,output_file,device):
+    current_dir = os.path.dirname(os.path.realpath(__file__))
+    checkpoints_dir = os.path.join(current_dir, 'checkpoints')
+    ckpt_converter = os.path.join(checkpoints_dir, 'converter')
+
+    if not os.path.exists(ckpt_converter):
+        os.makedirs(ckpt_converter, exist_ok=True)
+        download_checkpoint(ckpt_converter)
+
+    device = device
+
+    tone_color_converter = ToneColorConverter(os.path.join(ckpt_converter, 'config.json'), device=device)
+    tone_color_converter.load_ckpt(os.path.join(ckpt_converter, 'checkpoint.pth'))
+
+    source_se, _ = se_extractor.get_se(input_file, tone_color_converter, vad=True)
+    target_se, _ = se_extractor.get_se(ref_file, tone_color_converter, vad=True)
+
+    # Ensure output directory exists and is writable
+    output_dir = os.path.dirname(output_file)
+    if output_dir:
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir, exist_ok=True)
+
+    # Run the tone color converter
+    tone_color_converter.convert(
+        audio_src_path=input_file,
+        src_se=source_se,
+        tgt_se=target_se,
+        output_path=output_file,
+    )
+
+def tune_batch(input_dir, ref_file, output_dir=None, device='cpu', output_format='.wav'):
+    current_dir = os.path.dirname(os.path.realpath(__file__))
+    checkpoints_dir = os.path.join(current_dir, 'checkpoints')
+    ckpt_converter = os.path.join(checkpoints_dir, 'converter')
+
+    if not os.path.exists(ckpt_converter):
+        os.makedirs(ckpt_converter, exist_ok=True)
+        download_checkpoint(ckpt_converter)
+
+    tone_color_converter = ToneColorConverter(os.path.join(ckpt_converter, 'config.json'), device=device)
+    tone_color_converter.load_ckpt(os.path.join(ckpt_converter, 'checkpoint.pth'))
+
+    target_se, _ = se_extractor.get_se(ref_file, tone_color_converter, vad=True)
+
+    # Use default output directory 'out' if not provided
+    if output_dir is None:
+        output_dir = os.path.join(current_dir, 'out')
+    os.makedirs(output_dir, exist_ok=True)
+
+    # Check for any audio files in the input directory (wav, mp3, flac) using glob
+    audio_extensions = ('*.wav', '*.mp3', '*.flac')
+    audio_files = []
+    for extension in audio_extensions:
+        audio_files.extend(glob.glob(os.path.join(input_dir, extension)))
+
+    for audio_file in tqdm(audio_files,"Tune file",len(audio_files)):
+        # Extract source SE from audio file
+        source_se, _ = se_extractor.get_se(audio_file, tone_color_converter, vad=True)
+
+        # Run the tone color converter
+        filename_without_extension = os.path.splitext(os.path.basename(audio_file))[0]
+        output_filename = f"{filename_without_extension}_tuned{output_format}"
+        output_file = os.path.join(output_dir, output_filename)
+
+        tone_color_converter.convert(
+            audio_src_path=audio_file,
+            src_se=source_se,
+            tgt_se=target_se,
+            output_path=output_file,
+        )
+        print(f"Converted {audio_file} to {output_file}")
+
+    return output_dir
+
+def main_single(args):
+    tune_one(input_file=args.input, ref_file=args.ref, output_file=args.output, device=args.device)
+
+def main_batch(args):
+    output_dir = tune_batch(
+        input_dir=args.input_dir,
+        ref_file=args.ref_file,
+        output_dir=args.output_dir,
+        device=args.device,
+        output_format=args.output_format
+    )
+    print(f"Batch processing complete. Converted files are saved in {output_dir}")
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Convert the tone color of audio files using a reference audio.')
+
+    # Create subparsers for single and batch processing
+    subparsers = parser.add_subparsers(help='commands', dest='command')
+
+    # Single file conversion arguments
+    single_parser = subparsers.add_parser('single', help='Process a single file')
+    single_parser.add_argument('-i', '--input', help='Input audio file path', required=True)
+    single_parser.add_argument('-r', '--ref', help='Reference audio file path', required=True)
+    single_parser.add_argument('-o', '--output', default="out.wav", help='Output path for converted audio file')
+    single_parser.add_argument('-d', '--device', default="cpu", help='Device to use (e.g., "cuda:0" or "cpu")')
+    single_parser.set_defaults(func=main_single)
+
+    # Batch processing arguments
+    batch_parser = subparsers.add_parser('batch', help='Process a batch of files in a directory')
+    batch_parser.add_argument('-id', '--input_dir', help='Input directory containing audio files to process', required=True)
+    batch_parser.add_argument('-rf', '--ref_file', help='Reference audio file path', required=True)
+    batch_parser.add_argument('-od', '--output_dir', help='Output directory for converted audio files', default="outputs")
+    batch_parser.add_argument('-d', '--device', default="cuda", help='Device to use')
+    batch_parser.add_argument('-of', '--output_format', default=".wav", help='Output file format (e.g., ".wav")')
+    batch_parser.set_defaults(func=main_batch)
+
+    args = parser.parse_args()
+    if hasattr(args, 'func'):
+        args.func(args)
+    else:
+        parser.print_help()
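With the subcommands defined above, the module can be run directly with python -m; typical invocations look like this (file and directory names are placeholders):

python -m openvoice_cli single -i source.wav -r reference.wav -o out/converted.wav -d cuda:0
python -m openvoice_cli batch -id ./recordings -rf reference.wav -od ./outputs -d cuda -of .wav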
openvoice_cli/api.py
ADDED
@@ -0,0 +1,137 @@
+import torch
+import numpy as np
+import re
+import soundfile
+import openvoice_cli.utils as utils
+import os
+import librosa
+from openvoice_cli.mel_processing import spectrogram_torch
+from openvoice_cli.models import SynthesizerTrn
+
+
+class OpenVoiceBaseClass(object):
+    def __init__(self,
+                 config_path,
+                 device='cuda:0'):
+        if 'cuda' in device:
+            assert torch.cuda.is_available()
+
+        hps = utils.get_hparams_from_file(config_path)
+
+        model = SynthesizerTrn(
+            len(getattr(hps, 'symbols', [])),
+            hps.data.filter_length // 2 + 1,
+            n_speakers=hps.data.n_speakers,
+            **hps.model,
+        ).to(device)
+
+        model.eval()
+        self.model = model
+        self.hps = hps
+        self.device = device
+
+    def load_ckpt(self, ckpt_path):
+        checkpoint_dict = torch.load(ckpt_path, map_location=torch.device(self.device))
+        a, b = self.model.load_state_dict(checkpoint_dict['model'], strict=False)
+        print("Loaded checkpoint '{}'".format(ckpt_path))
+        print('missing/unexpected keys:', a, b)
+
+class ToneColorConverter(OpenVoiceBaseClass):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        if kwargs.get('enable_watermark', True):
+            import wavmark
+            self.watermark_model = wavmark.load_model().to(self.device)
+        else:
+            self.watermark_model = None
+
+    def extract_se(self, ref_wav_list, se_save_path=None):
+        if isinstance(ref_wav_list, str):
+            ref_wav_list = [ref_wav_list]
+
+        device = self.device
+        hps = self.hps
+        gs = []
+
+        for fname in ref_wav_list:
+            audio_ref, sr = librosa.load(fname, sr=hps.data.sampling_rate)
+            y = torch.FloatTensor(audio_ref)
+            y = y.to(device)
+            y = y.unsqueeze(0)
+            y = spectrogram_torch(y, hps.data.filter_length,
+                                  hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length,
+                                  center=False).to(device)
+            with torch.no_grad():
+                g = self.model.ref_enc(y.transpose(1, 2)).unsqueeze(-1)
+                gs.append(g.detach())
+        gs = torch.stack(gs).mean(0)
+
+        if se_save_path is not None:
+            os.makedirs(os.path.dirname(se_save_path), exist_ok=True)
+            torch.save(gs.cpu(), se_save_path)
+
+        return gs
+
+    def convert(self, audio_src_path, src_se, tgt_se, output_path=None, tau=0.3, message="default"):
+        hps = self.hps
+        # load audio
+        audio, sample_rate = librosa.load(audio_src_path, sr=hps.data.sampling_rate)
+        audio = torch.tensor(audio).float()
+
+        with torch.no_grad():
+            y = torch.FloatTensor(audio).to(self.device)
+            y = y.unsqueeze(0)
+            spec = spectrogram_torch(y, hps.data.filter_length,
+                                     hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length,
+                                     center=False).to(self.device)
+            spec_lengths = torch.LongTensor([spec.size(-1)]).to(self.device)
+            audio = self.model.voice_conversion(spec, spec_lengths, sid_src=src_se, sid_tgt=tgt_se, tau=tau)[0][
+                0, 0].data.cpu().float().numpy()
+            audio = self.add_watermark(audio, message)
+            if output_path is None:
+                return audio
+            else:
+                soundfile.write(output_path, audio, hps.data.sampling_rate)
+
+    def add_watermark(self, audio, message):
+        if self.watermark_model is None:
+            return audio
+        device = self.device
+        bits = utils.string_to_bits(message).reshape(-1)
+        n_repeat = len(bits) // 32
+
+        K = 16000
+        coeff = 2
+        for n in range(n_repeat):
+            trunck = audio[(coeff * n) * K: (coeff * n + 1) * K]
+            if len(trunck) != K:
+                print('Audio too short, fail to add watermark')
+                break
+            message_npy = bits[n * 32: (n + 1) * 32]
+
+            with torch.no_grad():
+                signal = torch.FloatTensor(trunck).to(device)[None]
+                message_tensor = torch.FloatTensor(message_npy).to(device)[None]
+                signal_wmd_tensor = self.watermark_model.encode(signal, message_tensor)
+                signal_wmd_npy = signal_wmd_tensor.detach().cpu().squeeze()
+            audio[(coeff * n) * K: (coeff * n + 1) * K] = signal_wmd_npy
+        return audio
+
+    def detect_watermark(self, audio, n_repeat):
+        bits = []
+        K = 16000
+        coeff = 2
+        for n in range(n_repeat):
+            trunck = audio[(coeff * n) * K: (coeff * n + 1) * K]
+            if len(trunck) != K:
+                print('Audio too short, fail to detect watermark')
+                return 'Fail'
+            with torch.no_grad():
+                signal = torch.FloatTensor(trunck).to(self.device).unsqueeze(0)
+                message_decoded_npy = (self.watermark_model.decode(signal) >= 0.5).int().detach().cpu().numpy().squeeze()
+            bits.append(message_decoded_npy)
+        bits = np.stack(bits).reshape(-1, 8)
+        message = utils.bits_to_string(bits)
+        return message
+
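For use outside the CLI, ToneColorConverter can be driven directly from Python. A minimal sketch, assuming the converter checkpoint has already been downloaded to checkpoints/converter; the audio paths are placeholders:

import os
from openvoice_cli.api import ToneColorConverter
import openvoice_cli.se_extractor as se_extractor

ckpt_dir = "checkpoints/converter"  # assumed location of config.json / checkpoint.pth
converter = ToneColorConverter(os.path.join(ckpt_dir, "config.json"), device="cpu")
converter.load_ckpt(os.path.join(ckpt_dir, "checkpoint.pth"))

# Tone-colour embeddings for the source recording and the target voice.
source_se, _ = se_extractor.get_se("source.wav", converter, vad=True)
target_se, _ = se_extractor.get_se("reference.wav", converter, vad=True)

# Writes the converted audio to disk; with output_path=None it returns the raw numpy audio instead.
converter.convert(
    audio_src_path="source.wav",
    src_se=source_se,
    tgt_se=target_se,
    output_path="converted.wav",
)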
openvoice_cli/attentions.py
ADDED
@@ -0,0 +1,465 @@
1 |
+
import math
|
2 |
+
import torch
|
3 |
+
from torch import nn
|
4 |
+
from torch.nn import functional as F
|
5 |
+
|
6 |
+
import openvoice_cli.commons as commons
|
7 |
+
import logging
|
8 |
+
|
9 |
+
logger = logging.getLogger(__name__)
|
10 |
+
|
11 |
+
|
12 |
+
class LayerNorm(nn.Module):
|
13 |
+
def __init__(self, channels, eps=1e-5):
|
14 |
+
super().__init__()
|
15 |
+
self.channels = channels
|
16 |
+
self.eps = eps
|
17 |
+
|
18 |
+
self.gamma = nn.Parameter(torch.ones(channels))
|
19 |
+
self.beta = nn.Parameter(torch.zeros(channels))
|
20 |
+
|
21 |
+
def forward(self, x):
|
22 |
+
x = x.transpose(1, -1)
|
23 |
+
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
|
24 |
+
return x.transpose(1, -1)
|
25 |
+
|
26 |
+
|
27 |
+
@torch.jit.script
|
28 |
+
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
|
29 |
+
n_channels_int = n_channels[0]
|
30 |
+
in_act = input_a + input_b
|
31 |
+
t_act = torch.tanh(in_act[:, :n_channels_int, :])
|
32 |
+
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
|
33 |
+
acts = t_act * s_act
|
34 |
+
return acts
|
35 |
+
|
36 |
+
|
37 |
+
class Encoder(nn.Module):
|
38 |
+
def __init__(
|
39 |
+
self,
|
40 |
+
hidden_channels,
|
41 |
+
filter_channels,
|
42 |
+
n_heads,
|
43 |
+
n_layers,
|
44 |
+
kernel_size=1,
|
45 |
+
p_dropout=0.0,
|
46 |
+
window_size=4,
|
47 |
+
isflow=True,
|
48 |
+
**kwargs
|
49 |
+
):
|
50 |
+
super().__init__()
|
51 |
+
self.hidden_channels = hidden_channels
|
52 |
+
self.filter_channels = filter_channels
|
53 |
+
self.n_heads = n_heads
|
54 |
+
self.n_layers = n_layers
|
55 |
+
self.kernel_size = kernel_size
|
56 |
+
self.p_dropout = p_dropout
|
57 |
+
self.window_size = window_size
|
58 |
+
# if isflow:
|
59 |
+
# cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1)
|
60 |
+
# self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1)
|
61 |
+
# self.cond_layer = weight_norm(cond_layer, name='weight')
|
62 |
+
# self.gin_channels = 256
|
63 |
+
self.cond_layer_idx = self.n_layers
|
64 |
+
if "gin_channels" in kwargs:
|
65 |
+
self.gin_channels = kwargs["gin_channels"]
|
66 |
+
if self.gin_channels != 0:
|
67 |
+
self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
|
68 |
+
# vits2 says 3rd block, so idx is 2 by default
|
69 |
+
self.cond_layer_idx = (
|
70 |
+
kwargs["cond_layer_idx"] if "cond_layer_idx" in kwargs else 2
|
71 |
+
)
|
72 |
+
# logging.debug(self.gin_channels, self.cond_layer_idx)
|
73 |
+
assert (
|
74 |
+
self.cond_layer_idx < self.n_layers
|
75 |
+
), "cond_layer_idx should be less than n_layers"
|
76 |
+
self.drop = nn.Dropout(p_dropout)
|
77 |
+
self.attn_layers = nn.ModuleList()
|
78 |
+
self.norm_layers_1 = nn.ModuleList()
|
79 |
+
self.ffn_layers = nn.ModuleList()
|
80 |
+
self.norm_layers_2 = nn.ModuleList()
|
81 |
+
|
82 |
+
for i in range(self.n_layers):
|
83 |
+
self.attn_layers.append(
|
84 |
+
MultiHeadAttention(
|
85 |
+
hidden_channels,
|
86 |
+
hidden_channels,
|
87 |
+
n_heads,
|
88 |
+
p_dropout=p_dropout,
|
89 |
+
window_size=window_size,
|
90 |
+
)
|
91 |
+
)
|
92 |
+
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
93 |
+
self.ffn_layers.append(
|
94 |
+
FFN(
|
95 |
+
hidden_channels,
|
96 |
+
hidden_channels,
|
97 |
+
filter_channels,
|
98 |
+
kernel_size,
|
99 |
+
p_dropout=p_dropout,
|
100 |
+
)
|
101 |
+
)
|
102 |
+
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
103 |
+
|
104 |
+
def forward(self, x, x_mask, g=None):
|
105 |
+
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
106 |
+
x = x * x_mask
|
107 |
+
for i in range(self.n_layers):
|
108 |
+
if i == self.cond_layer_idx and g is not None:
|
109 |
+
g = self.spk_emb_linear(g.transpose(1, 2))
|
110 |
+
g = g.transpose(1, 2)
|
111 |
+
x = x + g
|
112 |
+
x = x * x_mask
|
113 |
+
y = self.attn_layers[i](x, x, attn_mask)
|
114 |
+
y = self.drop(y)
|
115 |
+
x = self.norm_layers_1[i](x + y)
|
116 |
+
|
117 |
+
y = self.ffn_layers[i](x, x_mask)
|
118 |
+
y = self.drop(y)
|
119 |
+
x = self.norm_layers_2[i](x + y)
|
120 |
+
x = x * x_mask
|
121 |
+
return x
|
122 |
+
|
123 |
+
|
124 |
+
class Decoder(nn.Module):
|
125 |
+
def __init__(
|
126 |
+
self,
|
127 |
+
hidden_channels,
|
128 |
+
filter_channels,
|
129 |
+
n_heads,
|
130 |
+
n_layers,
|
131 |
+
kernel_size=1,
|
132 |
+
p_dropout=0.0,
|
133 |
+
proximal_bias=False,
|
134 |
+
proximal_init=True,
|
135 |
+
**kwargs
|
136 |
+
):
|
137 |
+
super().__init__()
|
138 |
+
self.hidden_channels = hidden_channels
|
139 |
+
self.filter_channels = filter_channels
|
140 |
+
self.n_heads = n_heads
|
141 |
+
self.n_layers = n_layers
|
142 |
+
self.kernel_size = kernel_size
|
143 |
+
self.p_dropout = p_dropout
|
144 |
+
self.proximal_bias = proximal_bias
|
145 |
+
self.proximal_init = proximal_init
|
146 |
+
|
147 |
+
self.drop = nn.Dropout(p_dropout)
|
148 |
+
self.self_attn_layers = nn.ModuleList()
|
149 |
+
self.norm_layers_0 = nn.ModuleList()
|
150 |
+
self.encdec_attn_layers = nn.ModuleList()
|
151 |
+
self.norm_layers_1 = nn.ModuleList()
|
152 |
+
self.ffn_layers = nn.ModuleList()
|
153 |
+
self.norm_layers_2 = nn.ModuleList()
|
154 |
+
for i in range(self.n_layers):
|
155 |
+
self.self_attn_layers.append(
|
156 |
+
MultiHeadAttention(
|
157 |
+
hidden_channels,
|
158 |
+
hidden_channels,
|
159 |
+
n_heads,
|
160 |
+
p_dropout=p_dropout,
|
161 |
+
proximal_bias=proximal_bias,
|
162 |
+
proximal_init=proximal_init,
|
163 |
+
)
|
164 |
+
)
|
165 |
+
self.norm_layers_0.append(LayerNorm(hidden_channels))
|
166 |
+
self.encdec_attn_layers.append(
|
167 |
+
MultiHeadAttention(
|
168 |
+
hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
|
169 |
+
)
|
170 |
+
)
|
171 |
+
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
172 |
+
self.ffn_layers.append(
|
173 |
+
FFN(
|
174 |
+
hidden_channels,
|
175 |
+
hidden_channels,
|
176 |
+
filter_channels,
|
177 |
+
kernel_size,
|
178 |
+
p_dropout=p_dropout,
|
179 |
+
causal=True,
|
180 |
+
)
|
181 |
+
)
|
182 |
+
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
183 |
+
|
184 |
+
def forward(self, x, x_mask, h, h_mask):
|
185 |
+
"""
|
186 |
+
x: decoder input
|
187 |
+
h: encoder output
|
188 |
+
"""
|
189 |
+
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
|
190 |
+
device=x.device, dtype=x.dtype
|
191 |
+
)
|
192 |
+
encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
193 |
+
x = x * x_mask
|
194 |
+
for i in range(self.n_layers):
|
195 |
+
y = self.self_attn_layers[i](x, x, self_attn_mask)
|
196 |
+
y = self.drop(y)
|
197 |
+
x = self.norm_layers_0[i](x + y)
|
198 |
+
|
199 |
+
y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
|
200 |
+
y = self.drop(y)
|
201 |
+
x = self.norm_layers_1[i](x + y)
|
202 |
+
|
203 |
+
y = self.ffn_layers[i](x, x_mask)
|
204 |
+
y = self.drop(y)
|
205 |
+
x = self.norm_layers_2[i](x + y)
|
206 |
+
x = x * x_mask
|
207 |
+
return x
|
208 |
+
|
209 |
+
|
210 |
+
class MultiHeadAttention(nn.Module):
|
211 |
+
def __init__(
|
212 |
+
self,
|
213 |
+
channels,
|
214 |
+
out_channels,
|
215 |
+
n_heads,
|
216 |
+
p_dropout=0.0,
|
217 |
+
window_size=None,
|
218 |
+
heads_share=True,
|
219 |
+
block_length=None,
|
220 |
+
proximal_bias=False,
|
221 |
+
proximal_init=False,
|
222 |
+
):
|
223 |
+
super().__init__()
|
224 |
+
assert channels % n_heads == 0
|
225 |
+
|
226 |
+
self.channels = channels
|
227 |
+
self.out_channels = out_channels
|
228 |
+
self.n_heads = n_heads
|
229 |
+
self.p_dropout = p_dropout
|
230 |
+
self.window_size = window_size
|
231 |
+
self.heads_share = heads_share
|
232 |
+
self.block_length = block_length
|
233 |
+
self.proximal_bias = proximal_bias
|
234 |
+
self.proximal_init = proximal_init
|
235 |
+
self.attn = None
|
236 |
+
|
237 |
+
self.k_channels = channels // n_heads
|
238 |
+
self.conv_q = nn.Conv1d(channels, channels, 1)
|
239 |
+
self.conv_k = nn.Conv1d(channels, channels, 1)
|
240 |
+
self.conv_v = nn.Conv1d(channels, channels, 1)
|
241 |
+
self.conv_o = nn.Conv1d(channels, out_channels, 1)
|
242 |
+
self.drop = nn.Dropout(p_dropout)
|
243 |
+
|
244 |
+
if window_size is not None:
|
245 |
+
n_heads_rel = 1 if heads_share else n_heads
|
246 |
+
rel_stddev = self.k_channels**-0.5
|
247 |
+
self.emb_rel_k = nn.Parameter(
|
248 |
+
torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
|
249 |
+
* rel_stddev
|
250 |
+
)
|
251 |
+
self.emb_rel_v = nn.Parameter(
|
252 |
+
torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
|
253 |
+
* rel_stddev
|
254 |
+
)
|
255 |
+
|
256 |
+
nn.init.xavier_uniform_(self.conv_q.weight)
|
257 |
+
nn.init.xavier_uniform_(self.conv_k.weight)
|
258 |
+
nn.init.xavier_uniform_(self.conv_v.weight)
|
259 |
+
if proximal_init:
|
260 |
+
with torch.no_grad():
|
261 |
+
self.conv_k.weight.copy_(self.conv_q.weight)
|
262 |
+
self.conv_k.bias.copy_(self.conv_q.bias)
|
263 |
+
|
264 |
+
def forward(self, x, c, attn_mask=None):
|
265 |
+
q = self.conv_q(x)
|
266 |
+
k = self.conv_k(c)
|
267 |
+
v = self.conv_v(c)
|
268 |
+
|
269 |
+
x, self.attn = self.attention(q, k, v, mask=attn_mask)
|
270 |
+
|
271 |
+
x = self.conv_o(x)
|
272 |
+
return x
|
273 |
+
|
274 |
+
def attention(self, query, key, value, mask=None):
|
275 |
+
# reshape [b, d, t] -> [b, n_h, t, d_k]
|
276 |
+
b, d, t_s, t_t = (*key.size(), query.size(2))
|
277 |
+
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
|
278 |
+
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
279 |
+
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
280 |
+
|
281 |
+
scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
|
282 |
+
if self.window_size is not None:
|
283 |
+
assert (
|
284 |
+
t_s == t_t
|
285 |
+
), "Relative attention is only available for self-attention."
|
286 |
+
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
|
287 |
+
rel_logits = self._matmul_with_relative_keys(
|
288 |
+
query / math.sqrt(self.k_channels), key_relative_embeddings
|
289 |
+
)
|
290 |
+
scores_local = self._relative_position_to_absolute_position(rel_logits)
|
291 |
+
scores = scores + scores_local
|
292 |
+
if self.proximal_bias:
|
293 |
+
assert t_s == t_t, "Proximal bias is only available for self-attention."
|
294 |
+
scores = scores + self._attention_bias_proximal(t_s).to(
|
295 |
+
device=scores.device, dtype=scores.dtype
|
296 |
+
)
|
297 |
+
if mask is not None:
|
298 |
+
scores = scores.masked_fill(mask == 0, -1e4)
|
299 |
+
if self.block_length is not None:
|
300 |
+
assert (
|
301 |
+
t_s == t_t
|
302 |
+
), "Local attention is only available for self-attention."
|
303 |
+
block_mask = (
|
304 |
+
torch.ones_like(scores)
|
305 |
+
.triu(-self.block_length)
|
306 |
+
.tril(self.block_length)
|
307 |
+
)
|
308 |
+
scores = scores.masked_fill(block_mask == 0, -1e4)
|
309 |
+
p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
|
310 |
+
p_attn = self.drop(p_attn)
|
311 |
+
output = torch.matmul(p_attn, value)
|
312 |
+
if self.window_size is not None:
|
313 |
+
relative_weights = self._absolute_position_to_relative_position(p_attn)
|
314 |
+
value_relative_embeddings = self._get_relative_embeddings(
|
315 |
+
self.emb_rel_v, t_s
|
316 |
+
)
|
317 |
+
output = output + self._matmul_with_relative_values(
|
318 |
+
relative_weights, value_relative_embeddings
|
319 |
+
)
|
320 |
+
output = (
|
321 |
+
output.transpose(2, 3).contiguous().view(b, d, t_t)
|
322 |
+
) # [b, n_h, t_t, d_k] -> [b, d, t_t]
|
323 |
+
return output, p_attn
|
324 |
+
|
325 |
+
def _matmul_with_relative_values(self, x, y):
|
326 |
+
"""
|
327 |
+
x: [b, h, l, m]
|
328 |
+
y: [h or 1, m, d]
|
329 |
+
ret: [b, h, l, d]
|
330 |
+
"""
|
331 |
+
ret = torch.matmul(x, y.unsqueeze(0))
|
332 |
+
return ret
|
333 |
+
|
334 |
+
def _matmul_with_relative_keys(self, x, y):
|
335 |
+
"""
|
336 |
+
x: [b, h, l, d]
|
337 |
+
y: [h or 1, m, d]
|
338 |
+
ret: [b, h, l, m]
|
339 |
+
"""
|
340 |
+
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
|
341 |
+
return ret
|
342 |
+
|
343 |
+
def _get_relative_embeddings(self, relative_embeddings, length):
|
344 |
+
2 * self.window_size + 1
|
345 |
+
# Pad first before slice to avoid using cond ops.
|
346 |
+
pad_length = max(length - (self.window_size + 1), 0)
|
347 |
+
slice_start_position = max((self.window_size + 1) - length, 0)
|
348 |
+
slice_end_position = slice_start_position + 2 * length - 1
|
349 |
+
if pad_length > 0:
|
350 |
+
padded_relative_embeddings = F.pad(
|
351 |
+
relative_embeddings,
|
352 |
+
commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
|
353 |
+
)
|
354 |
+
else:
|
355 |
+
padded_relative_embeddings = relative_embeddings
|
356 |
+
used_relative_embeddings = padded_relative_embeddings[
|
357 |
+
:, slice_start_position:slice_end_position
|
358 |
+
]
|
359 |
+
return used_relative_embeddings
|
360 |
+
|
361 |
+
def _relative_position_to_absolute_position(self, x):
|
362 |
+
"""
|
363 |
+
x: [b, h, l, 2*l-1]
|
364 |
+
ret: [b, h, l, l]
|
365 |
+
"""
|
366 |
+
batch, heads, length, _ = x.size()
|
367 |
+
# Concat columns of pad to shift from relative to absolute indexing.
|
368 |
+
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
|
369 |
+
|
370 |
+
# Concat extra elements so to add up to shape (len+1, 2*len-1).
|
371 |
+
x_flat = x.view([batch, heads, length * 2 * length])
|
372 |
+
x_flat = F.pad(
|
373 |
+
x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
|
374 |
+
)
|
375 |
+
|
376 |
+
# Reshape and slice out the padded elements.
|
377 |
+
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
|
378 |
+
:, :, :length, length - 1 :
|
379 |
+
]
|
380 |
+
return x_final
|
381 |
+
|
382 |
+
def _absolute_position_to_relative_position(self, x):
|
383 |
+
"""
|
384 |
+
x: [b, h, l, l]
|
385 |
+
ret: [b, h, l, 2*l-1]
|
386 |
+
"""
|
387 |
+
batch, heads, length, _ = x.size()
|
388 |
+
# pad along column
|
389 |
+
x = F.pad(
|
390 |
+
x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
|
391 |
+
)
|
392 |
+
x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
|
393 |
+
# add 0's in the beginning that will skew the elements after reshape
|
394 |
+
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
|
395 |
+
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
|
396 |
+
return x_final
|
397 |
+
|
398 |
+
def _attention_bias_proximal(self, length):
|
399 |
+
"""Bias for self-attention to encourage attention to close positions.
|
400 |
+
Args:
|
401 |
+
length: an integer scalar.
|
402 |
+
Returns:
|
403 |
+
a Tensor with shape [1, 1, length, length]
|
404 |
+
"""
|
405 |
+
r = torch.arange(length, dtype=torch.float32)
|
406 |
+
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
|
407 |
+
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
|
408 |
+
|
409 |
+
|
410 |
+
class FFN(nn.Module):
|
411 |
+
def __init__(
|
412 |
+
self,
|
413 |
+
in_channels,
|
414 |
+
out_channels,
|
415 |
+
filter_channels,
|
416 |
+
kernel_size,
|
417 |
+
p_dropout=0.0,
|
418 |
+
activation=None,
|
419 |
+
causal=False,
|
420 |
+
):
|
421 |
+
super().__init__()
|
422 |
+
self.in_channels = in_channels
|
423 |
+
self.out_channels = out_channels
|
424 |
+
self.filter_channels = filter_channels
|
425 |
+
self.kernel_size = kernel_size
|
426 |
+
self.p_dropout = p_dropout
|
427 |
+
self.activation = activation
|
428 |
+
self.causal = causal
|
429 |
+
|
430 |
+
if causal:
|
431 |
+
self.padding = self._causal_padding
|
432 |
+
else:
|
433 |
+
self.padding = self._same_padding
|
434 |
+
|
435 |
+
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
|
436 |
+
self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
|
437 |
+
self.drop = nn.Dropout(p_dropout)
|
438 |
+
|
439 |
+
def forward(self, x, x_mask):
|
440 |
+
x = self.conv_1(self.padding(x * x_mask))
|
441 |
+
if self.activation == "gelu":
|
442 |
+
x = x * torch.sigmoid(1.702 * x)
|
443 |
+
else:
|
444 |
+
x = torch.relu(x)
|
445 |
+
x = self.drop(x)
|
446 |
+
x = self.conv_2(self.padding(x * x_mask))
|
447 |
+
return x * x_mask
|
448 |
+
|
449 |
+
def _causal_padding(self, x):
|
450 |
+
if self.kernel_size == 1:
|
451 |
+
return x
|
452 |
+
pad_l = self.kernel_size - 1
|
453 |
+
pad_r = 0
|
454 |
+
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
|
455 |
+
x = F.pad(x, commons.convert_pad_shape(padding))
|
456 |
+
return x
|
457 |
+
|
458 |
+
def _same_padding(self, x):
|
459 |
+
if self.kernel_size == 1:
|
460 |
+
return x
|
461 |
+
pad_l = (self.kernel_size - 1) // 2
|
462 |
+
pad_r = self.kernel_size // 2
|
463 |
+
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
|
464 |
+
x = F.pad(x, commons.convert_pad_shape(padding))
|
465 |
+
return x
|
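As a quick shape check for the relative-position Encoder defined above, here is a hedged sketch; the hyperparameters are illustrative and not taken from any shipped config:

import torch
from openvoice_cli.attentions import Encoder

# Toy dimensions: batch of 2, 64 hidden channels, 10 time steps.
enc = Encoder(hidden_channels=64, filter_channels=256, n_heads=2, n_layers=4,
              kernel_size=3, p_dropout=0.1, gin_channels=256)
x = torch.randn(2, 64, 10)
x_mask = torch.ones(2, 1, 10)   # all positions valid
y = enc(x, x_mask)              # same shape as x: [2, 64, 10]
print(y.shape)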
openvoice_cli/commons.py
ADDED
@@ -0,0 +1,160 @@
1 |
+
import math
|
2 |
+
import torch
|
3 |
+
from torch.nn import functional as F
|
4 |
+
|
5 |
+
|
6 |
+
def init_weights(m, mean=0.0, std=0.01):
|
7 |
+
classname = m.__class__.__name__
|
8 |
+
if classname.find("Conv") != -1:
|
9 |
+
m.weight.data.normal_(mean, std)
|
10 |
+
|
11 |
+
|
12 |
+
def get_padding(kernel_size, dilation=1):
|
13 |
+
return int((kernel_size * dilation - dilation) / 2)
|
14 |
+
|
15 |
+
|
16 |
+
def convert_pad_shape(pad_shape):
|
17 |
+
layer = pad_shape[::-1]
|
18 |
+
pad_shape = [item for sublist in layer for item in sublist]
|
19 |
+
return pad_shape
|
20 |
+
|
21 |
+
|
22 |
+
def intersperse(lst, item):
|
23 |
+
result = [item] * (len(lst) * 2 + 1)
|
24 |
+
result[1::2] = lst
|
25 |
+
return result
|
26 |
+
|
27 |
+
|
28 |
+
def kl_divergence(m_p, logs_p, m_q, logs_q):
|
29 |
+
"""KL(P||Q)"""
|
30 |
+
kl = (logs_q - logs_p) - 0.5
|
31 |
+
kl += (
|
32 |
+
0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
|
33 |
+
)
|
34 |
+
return kl
|
35 |
+
|
36 |
+
|
37 |
+
def rand_gumbel(shape):
|
38 |
+
"""Sample from the Gumbel distribution, protect from overflows."""
|
39 |
+
uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
|
40 |
+
return -torch.log(-torch.log(uniform_samples))
|
41 |
+
|
42 |
+
|
43 |
+
def rand_gumbel_like(x):
|
44 |
+
g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
|
45 |
+
return g
|
46 |
+
|
47 |
+
|
48 |
+
def slice_segments(x, ids_str, segment_size=4):
|
49 |
+
ret = torch.zeros_like(x[:, :, :segment_size])
|
50 |
+
for i in range(x.size(0)):
|
51 |
+
idx_str = ids_str[i]
|
52 |
+
idx_end = idx_str + segment_size
|
53 |
+
ret[i] = x[i, :, idx_str:idx_end]
|
54 |
+
return ret
|
55 |
+
|
56 |
+
|
57 |
+
def rand_slice_segments(x, x_lengths=None, segment_size=4):
|
58 |
+
b, d, t = x.size()
|
59 |
+
if x_lengths is None:
|
60 |
+
x_lengths = t
|
61 |
+
ids_str_max = x_lengths - segment_size + 1
|
62 |
+
ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
|
63 |
+
ret = slice_segments(x, ids_str, segment_size)
|
64 |
+
return ret, ids_str
|
65 |
+
|
66 |
+
|
67 |
+
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
|
68 |
+
position = torch.arange(length, dtype=torch.float)
|
69 |
+
num_timescales = channels // 2
|
70 |
+
log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
|
71 |
+
num_timescales - 1
|
72 |
+
)
|
73 |
+
inv_timescales = min_timescale * torch.exp(
|
74 |
+
torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
|
75 |
+
)
|
76 |
+
scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
|
77 |
+
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
|
78 |
+
signal = F.pad(signal, [0, 0, 0, channels % 2])
|
79 |
+
signal = signal.view(1, channels, length)
|
80 |
+
return signal
|
81 |
+
|
82 |
+
|
83 |
+
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
|
84 |
+
b, channels, length = x.size()
|
85 |
+
signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
|
86 |
+
return x + signal.to(dtype=x.dtype, device=x.device)
|
87 |
+
|
88 |
+
|
89 |
+
def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
|
90 |
+
b, channels, length = x.size()
|
91 |
+
signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
|
92 |
+
return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
|
93 |
+
|
94 |
+
|
95 |
+
def subsequent_mask(length):
|
96 |
+
mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
|
97 |
+
return mask
|
98 |
+
|
99 |
+
|
100 |
+
@torch.jit.script
|
101 |
+
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
|
102 |
+
n_channels_int = n_channels[0]
|
103 |
+
in_act = input_a + input_b
|
104 |
+
t_act = torch.tanh(in_act[:, :n_channels_int, :])
|
105 |
+
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
|
106 |
+
acts = t_act * s_act
|
107 |
+
return acts
|
108 |
+
|
109 |
+
|
110 |
+
def convert_pad_shape(pad_shape):
|
111 |
+
layer = pad_shape[::-1]
|
112 |
+
pad_shape = [item for sublist in layer for item in sublist]
|
113 |
+
return pad_shape
|
114 |
+
|
115 |
+
|
116 |
+
def shift_1d(x):
|
117 |
+
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
|
118 |
+
return x
|
119 |
+
|
120 |
+
|
121 |
+
def sequence_mask(length, max_length=None):
|
122 |
+
if max_length is None:
|
123 |
+
max_length = length.max()
|
124 |
+
x = torch.arange(max_length, dtype=length.dtype, device=length.device)
|
125 |
+
return x.unsqueeze(0) < length.unsqueeze(1)
|
126 |
+
|
127 |
+
|
128 |
+
def generate_path(duration, mask):
|
129 |
+
"""
|
130 |
+
duration: [b, 1, t_x]
|
131 |
+
mask: [b, 1, t_y, t_x]
|
132 |
+
"""
|
133 |
+
|
134 |
+
b, _, t_y, t_x = mask.shape
|
135 |
+
cum_duration = torch.cumsum(duration, -1)
|
136 |
+
|
137 |
+
cum_duration_flat = cum_duration.view(b * t_x)
|
138 |
+
path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
|
139 |
+
path = path.view(b, t_x, t_y)
|
140 |
+
path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
|
141 |
+
path = path.unsqueeze(1).transpose(2, 3) * mask
|
142 |
+
return path
|
143 |
+
|
144 |
+
|
145 |
+
def clip_grad_value_(parameters, clip_value, norm_type=2):
|
146 |
+
if isinstance(parameters, torch.Tensor):
|
147 |
+
parameters = [parameters]
|
148 |
+
parameters = list(filter(lambda p: p.grad is not None, parameters))
|
149 |
+
norm_type = float(norm_type)
|
150 |
+
if clip_value is not None:
|
151 |
+
clip_value = float(clip_value)
|
152 |
+
|
153 |
+
total_norm = 0
|
154 |
+
for p in parameters:
|
155 |
+
param_norm = p.grad.data.norm(norm_type)
|
156 |
+
total_norm += param_norm.item() ** norm_type
|
157 |
+
if clip_value is not None:
|
158 |
+
p.grad.data.clamp_(min=-clip_value, max=clip_value)
|
159 |
+
total_norm = total_norm ** (1.0 / norm_type)
|
160 |
+
return total_norm
|
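Two of the helpers above are easiest to understand by example; a small hedged sketch with made-up tensor sizes:

import torch
from openvoice_cli.commons import sequence_mask, rand_slice_segments

lengths = torch.tensor([3, 5])
mask = sequence_mask(lengths, max_length=5)   # [2, 5] boolean mask, True where t < length
print(mask)

x = torch.randn(2, 8, 16)                     # [batch, channels, time]
segments, ids_str = rand_slice_segments(x, x_lengths=torch.tensor([16, 12]), segment_size=4)
print(segments.shape)                         # [2, 8, 4]: one random window per batch item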
openvoice_cli/downloader.py
ADDED
@@ -0,0 +1,41 @@
+from pathlib import Path
+from tqdm import tqdm
+import requests
+
+def download_file(url, destination):
+    response = requests.get(url, stream=True)
+    total_size_in_bytes = int(response.headers.get('content-length', 0))
+    block_size = 1024  # 1 Kibibyte
+
+    progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
+
+    with open(destination, 'wb') as file:
+        for data in response.iter_content(block_size):
+            progress_bar.update(len(data))
+            file.write(data)
+
+    progress_bar.close()
+
+def create_directory_if_not_exists(directory):
+    if not directory.exists():
+        directory.mkdir(parents=True)
+
+def download_checkpoint(dest_dir):
+    # Define paths
+    model_path = Path(dest_dir)
+
+    # Define files and their corresponding URLs
+    files_to_download = {
+        "checkpoint.pth": f"https://huggingface.co/myshell-ai/OpenVoice/resolve/main/checkpoints/converter/checkpoint.pth?download=true",
+        "config.json": f"https://huggingface.co/myshell-ai/OpenVoice/raw/main/checkpoints/converter/config.json",
+    }
+
+    # Check and create directories
+    create_directory_if_not_exists(model_path)
+
+    # Download files if they don't exist
+    for filename, url in files_to_download.items():
+        destination = model_path / filename
+        if not destination.exists():
+            print(f"[OpenVoice Converter] Downloading {filename}...")
+            download_file(url, destination)
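download_checkpoint is idempotent: it only fetches the converter files that are missing from the target directory. A minimal usage sketch (the destination path is arbitrary):

from openvoice_cli.downloader import download_checkpoint

# Fetches checkpoint.pth and config.json from the myshell-ai/OpenVoice repository
# into checkpoints/converter unless they are already present there.
download_checkpoint("checkpoints/converter")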
openvoice_cli/mel_processing.py
ADDED
@@ -0,0 +1,182 @@
1 |
+
import torch
|
2 |
+
import torch.utils.data
|
3 |
+
from librosa.filters import mel as librosa_mel_fn
|
4 |
+
|
5 |
+
MAX_WAV_VALUE = 32768.0
|
6 |
+
|
7 |
+
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
|
8 |
+
"""
|
9 |
+
PARAMS
|
10 |
+
------
|
11 |
+
C: compression factor
|
12 |
+
"""
|
13 |
+
return torch.log(torch.clamp(x, min=clip_val) * C)
|
14 |
+
|
15 |
+
|
16 |
+
def dynamic_range_decompression_torch(x, C=1):
|
17 |
+
"""
|
18 |
+
PARAMS
|
19 |
+
------
|
20 |
+
C: compression factor used to compress
|
21 |
+
"""
|
22 |
+
return torch.exp(x) / C
|
23 |
+
|
24 |
+
|
25 |
+
def spectral_normalize_torch(magnitudes):
|
26 |
+
output = dynamic_range_compression_torch(magnitudes)
|
27 |
+
return output
|
28 |
+
|
29 |
+
|
30 |
+
def spectral_de_normalize_torch(magnitudes):
|
31 |
+
output = dynamic_range_decompression_torch(magnitudes)
|
32 |
+
return output
|
33 |
+
|
34 |
+
|
35 |
+
mel_basis = {}
|
36 |
+
hann_window = {}
|
37 |
+
|
38 |
+
|
39 |
+
def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
|
40 |
+
if torch.min(y) < -1.1:
|
41 |
+
print("min value is ", torch.min(y))
|
42 |
+
if torch.max(y) > 1.1:
|
43 |
+
print("max value is ", torch.max(y))
|
44 |
+
|
45 |
+
global hann_window
|
46 |
+
dtype_device = str(y.dtype) + "_" + str(y.device)
|
47 |
+
wnsize_dtype_device = str(win_size) + "_" + dtype_device
|
48 |
+
if wnsize_dtype_device not in hann_window:
|
49 |
+
hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
|
50 |
+
dtype=y.dtype, device=y.device
|
51 |
+
)
|
52 |
+
|
53 |
+
y = torch.nn.functional.pad(
|
54 |
+
y.unsqueeze(1),
|
55 |
+
(int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
|
56 |
+
mode="reflect",
|
57 |
+
)
|
58 |
+
y = y.squeeze(1)
|
59 |
+
|
60 |
+
spec = torch.stft(
|
61 |
+
y,
|
62 |
+
n_fft,
|
63 |
+
hop_length=hop_size,
|
64 |
+
win_length=win_size,
|
65 |
+
window=hann_window[wnsize_dtype_device],
|
66 |
+
center=center,
|
67 |
+
pad_mode="reflect",
|
68 |
+
normalized=False,
|
69 |
+
onesided=True,
|
70 |
+
return_complex=False,
|
71 |
+
)
|
72 |
+
|
73 |
+
spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
|
74 |
+
return spec
|
75 |
+
|
76 |
+
|
77 |
+
def spectrogram_torch_conv(y, n_fft, sampling_rate, hop_size, win_size, center=False):
|
78 |
+
# if torch.min(y) < -1.:
|
79 |
+
# print('min value is ', torch.min(y))
|
80 |
+
# if torch.max(y) > 1.:
|
81 |
+
# print('max value is ', torch.max(y))
|
82 |
+
|
83 |
+
global hann_window
|
84 |
+
dtype_device = str(y.dtype) + '_' + str(y.device)
|
85 |
+
wnsize_dtype_device = str(win_size) + '_' + dtype_device
|
86 |
+
if wnsize_dtype_device not in hann_window:
|
87 |
+
hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
|
88 |
+
|
89 |
+
y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
|
90 |
+
|
91 |
+
# ******************** original ************************#
|
92 |
+
# y = y.squeeze(1)
|
93 |
+
# spec1 = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
|
94 |
+
# center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
|
95 |
+
|
96 |
+
# ******************** ConvSTFT ************************#
|
97 |
+
freq_cutoff = n_fft // 2 + 1
|
98 |
+
fourier_basis = torch.view_as_real(torch.fft.fft(torch.eye(n_fft)))
|
99 |
+
forward_basis = fourier_basis[:freq_cutoff].permute(2, 0, 1).reshape(-1, 1, fourier_basis.shape[1])
|
100 |
+
forward_basis = forward_basis * torch.as_tensor(librosa.util.pad_center(torch.hann_window(win_size), size=n_fft)).float()
|
101 |
+
|
102 |
+
import torch.nn.functional as F
|
103 |
+
|
104 |
+
# if center:
|
105 |
+
# signal = F.pad(y[:, None, None, :], (n_fft // 2, n_fft // 2, 0, 0), mode = 'reflect').squeeze(1)
|
106 |
+
assert center is False
|
107 |
+
|
108 |
+
forward_transform_squared = F.conv1d(y, forward_basis.to(y.device), stride = hop_size)
|
109 |
+
spec2 = torch.stack([forward_transform_squared[:, :freq_cutoff, :], forward_transform_squared[:, freq_cutoff:, :]], dim = -1)
|
110 |
+
|
111 |
+
|
112 |
+
# ******************** Verification ************************#
|
113 |
+
spec1 = torch.stft(y.squeeze(1), n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
|
114 |
+
center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
|
115 |
+
assert torch.allclose(spec1, spec2, atol=1e-4)
|
116 |
+
|
117 |
+
spec = torch.sqrt(spec2.pow(2).sum(-1) + 1e-6)
|
118 |
+
return spec
|
119 |
+
|
120 |
+
|
121 |
+
def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
|
122 |
+
global mel_basis
|
123 |
+
dtype_device = str(spec.dtype) + "_" + str(spec.device)
|
124 |
+
fmax_dtype_device = str(fmax) + "_" + dtype_device
|
125 |
+
if fmax_dtype_device not in mel_basis:
|
126 |
+
mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
|
127 |
+
mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(
|
128 |
+
dtype=spec.dtype, device=spec.device
|
129 |
+
)
|
130 |
+
spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
|
131 |
+
spec = spectral_normalize_torch(spec)
|
132 |
+
return spec
|
133 |
+
|
134 |
+
|
135 |
+
def mel_spectrogram_torch(
|
136 |
+
y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False
|
137 |
+
):
|
138 |
+
if torch.min(y) < -1.0:
|
139 |
+
print("min value is ", torch.min(y))
|
140 |
+
if torch.max(y) > 1.0:
|
141 |
+
print("max value is ", torch.max(y))
|
142 |
+
|
143 |
+
global mel_basis, hann_window
|
144 |
+
dtype_device = str(y.dtype) + "_" + str(y.device)
|
145 |
+
fmax_dtype_device = str(fmax) + "_" + dtype_device
|
146 |
+
wnsize_dtype_device = str(win_size) + "_" + dtype_device
|
147 |
+
if fmax_dtype_device not in mel_basis:
|
148 |
+
mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
|
149 |
+
mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(
|
150 |
+
dtype=y.dtype, device=y.device
|
151 |
+
)
|
152 |
+
if wnsize_dtype_device not in hann_window:
|
153 |
+
hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
|
154 |
+
dtype=y.dtype, device=y.device
|
155 |
+
)
|
156 |
+
|
157 |
+
y = torch.nn.functional.pad(
|
158 |
+
y.unsqueeze(1),
|
159 |
+
(int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
|
160 |
+
mode="reflect",
|
161 |
+
)
|
162 |
+
y = y.squeeze(1)
|
163 |
+
|
164 |
+
spec = torch.stft(
|
165 |
+
y,
|
166 |
+
n_fft,
|
167 |
+
hop_length=hop_size,
|
168 |
+
win_length=win_size,
|
169 |
+
window=hann_window[wnsize_dtype_device],
|
170 |
+
center=center,
|
171 |
+
pad_mode="reflect",
|
172 |
+
normalized=False,
|
173 |
+
onesided=True,
|
174 |
+
return_complex=False,
|
175 |
+
)
|
176 |
+
|
177 |
+
spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
|
178 |
+
|
179 |
+
spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
|
180 |
+
spec = spectral_normalize_torch(spec)
|
181 |
+
|
182 |
+
return spec
|
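A hedged sketch of calling spectrogram_torch on dummy audio; the STFT parameters are illustrative rather than taken from a shipped config, and on newer PyTorch versions the underlying torch.stft call may emit deprecation warnings about return_complex:

import torch
from openvoice_cli.mel_processing import spectrogram_torch

y = torch.randn(1, 22050)   # one second of dummy audio at 22.05 kHz
spec = spectrogram_torch(y, n_fft=1024, sampling_rate=22050, hop_size=256, win_size=1024, center=False)
print(spec.shape)           # [1, 513, n_frames]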
openvoice_cli/models.py
ADDED
@@ -0,0 +1,498 @@
1 |
+
import math
|
2 |
+
import torch
|
3 |
+
from torch import nn
|
4 |
+
from torch.nn import functional as F
|
5 |
+
|
6 |
+
import openvoice_cli.commons as commons
|
7 |
+
import openvoice_cli.attentions as attentions
|
8 |
+
import openvoice_cli.modules as modules
|
9 |
+
|
10 |
+
|
11 |
+
from torch.nn import Conv1d, ConvTranspose1d, Conv2d
|
12 |
+
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
|
13 |
+
|
14 |
+
from openvoice_cli.commons import init_weights, get_padding
|
15 |
+
|
16 |
+
|
17 |
+
class TextEncoder(nn.Module):
|
18 |
+
def __init__(self,
|
19 |
+
n_vocab,
|
20 |
+
out_channels,
|
21 |
+
hidden_channels,
|
22 |
+
filter_channels,
|
23 |
+
n_heads,
|
24 |
+
n_layers,
|
25 |
+
kernel_size,
|
26 |
+
p_dropout):
|
27 |
+
super().__init__()
|
28 |
+
self.n_vocab = n_vocab
|
29 |
+
self.out_channels = out_channels
|
30 |
+
self.hidden_channels = hidden_channels
|
31 |
+
self.filter_channels = filter_channels
|
32 |
+
self.n_heads = n_heads
|
33 |
+
self.n_layers = n_layers
|
34 |
+
self.kernel_size = kernel_size
|
35 |
+
self.p_dropout = p_dropout
|
36 |
+
|
37 |
+
self.emb = nn.Embedding(n_vocab, hidden_channels)
|
38 |
+
nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
|
39 |
+
|
40 |
+
self.encoder = attentions.Encoder(
|
41 |
+
hidden_channels,
|
42 |
+
filter_channels,
|
43 |
+
n_heads,
|
44 |
+
n_layers,
|
45 |
+
kernel_size,
|
46 |
+
p_dropout)
|
47 |
+
self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
48 |
+
|
49 |
+
def forward(self, x, x_lengths):
|
50 |
+
x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
|
51 |
+
x = torch.transpose(x, 1, -1) # [b, h, t]
|
52 |
+
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
|
53 |
+
|
54 |
+
x = self.encoder(x * x_mask, x_mask)
|
55 |
+
stats = self.proj(x) * x_mask
|
56 |
+
|
57 |
+
m, logs = torch.split(stats, self.out_channels, dim=1)
|
58 |
+
return x, m, logs, x_mask
|
59 |
+
|
60 |
+
|
61 |
+
class DurationPredictor(nn.Module):
|
62 |
+
def __init__(
|
63 |
+
self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0
|
64 |
+
):
|
65 |
+
super().__init__()
|
66 |
+
|
67 |
+
self.in_channels = in_channels
|
68 |
+
self.filter_channels = filter_channels
|
69 |
+
self.kernel_size = kernel_size
|
70 |
+
self.p_dropout = p_dropout
|
71 |
+
self.gin_channels = gin_channels
|
72 |
+
|
73 |
+
self.drop = nn.Dropout(p_dropout)
|
74 |
+
self.conv_1 = nn.Conv1d(
|
75 |
+
in_channels, filter_channels, kernel_size, padding=kernel_size // 2
|
76 |
+
)
|
77 |
+
self.norm_1 = modules.LayerNorm(filter_channels)
|
78 |
+
self.conv_2 = nn.Conv1d(
|
79 |
+
filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
|
80 |
+
)
|
81 |
+
self.norm_2 = modules.LayerNorm(filter_channels)
|
82 |
+
self.proj = nn.Conv1d(filter_channels, 1, 1)
|
83 |
+
|
84 |
+
if gin_channels != 0:
|
85 |
+
self.cond = nn.Conv1d(gin_channels, in_channels, 1)
|
86 |
+
|
87 |
+
def forward(self, x, x_mask, g=None):
|
88 |
+
x = torch.detach(x)
|
89 |
+
if g is not None:
|
90 |
+
g = torch.detach(g)
|
91 |
+
x = x + self.cond(g)
|
92 |
+
x = self.conv_1(x * x_mask)
|
93 |
+
x = torch.relu(x)
|
94 |
+
x = self.norm_1(x)
|
95 |
+
x = self.drop(x)
|
96 |
+
x = self.conv_2(x * x_mask)
|
97 |
+
x = torch.relu(x)
|
98 |
+
x = self.norm_2(x)
|
99 |
+
x = self.drop(x)
|
100 |
+
x = self.proj(x * x_mask)
|
101 |
+
return x * x_mask
|
102 |
+
|
103 |
+
class StochasticDurationPredictor(nn.Module):
|
104 |
+
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
|
105 |
+
super().__init__()
|
106 |
+
filter_channels = in_channels # it needs to be removed from future version.
|
107 |
+
self.in_channels = in_channels
|
108 |
+
self.filter_channels = filter_channels
|
109 |
+
self.kernel_size = kernel_size
|
110 |
+
self.p_dropout = p_dropout
|
111 |
+
self.n_flows = n_flows
|
112 |
+
self.gin_channels = gin_channels
|
113 |
+
|
114 |
+
self.log_flow = modules.Log()
|
115 |
+
self.flows = nn.ModuleList()
|
116 |
+
self.flows.append(modules.ElementwiseAffine(2))
|
117 |
+
for i in range(n_flows):
|
118 |
+
self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
|
119 |
+
self.flows.append(modules.Flip())
|
120 |
+
|
121 |
+
self.post_pre = nn.Conv1d(1, filter_channels, 1)
|
122 |
+
self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
|
123 |
+
self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
|
124 |
+
self.post_flows = nn.ModuleList()
|
125 |
+
self.post_flows.append(modules.ElementwiseAffine(2))
|
126 |
+
for i in range(4):
|
127 |
+
self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
|
128 |
+
self.post_flows.append(modules.Flip())
|
129 |
+
|
130 |
+
self.pre = nn.Conv1d(in_channels, filter_channels, 1)
|
131 |
+
self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
|
132 |
+
self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
|
133 |
+
if gin_channels != 0:
|
134 |
+
self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
|
135 |
+
|
136 |
+
def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
|
137 |
+
x = torch.detach(x)
|
138 |
+
x = self.pre(x)
|
139 |
+
if g is not None:
|
140 |
+
g = torch.detach(g)
|
141 |
+
x = x + self.cond(g)
|
142 |
+
x = self.convs(x, x_mask)
|
143 |
+
x = self.proj(x) * x_mask
|
144 |
+
|
145 |
+
if not reverse:
|
146 |
+
flows = self.flows
|
147 |
+
assert w is not None
|
148 |
+
|
149 |
+
logdet_tot_q = 0
|
150 |
+
h_w = self.post_pre(w)
|
151 |
+
h_w = self.post_convs(h_w, x_mask)
|
152 |
+
h_w = self.post_proj(h_w) * x_mask
|
153 |
+
e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
|
154 |
+
z_q = e_q
|
155 |
+
for flow in self.post_flows:
|
156 |
+
z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
|
157 |
+
logdet_tot_q += logdet_q
|
158 |
+
z_u, z1 = torch.split(z_q, [1, 1], 1)
|
159 |
+
u = torch.sigmoid(z_u) * x_mask
|
160 |
+
z0 = (w - u) * x_mask
|
161 |
+
logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
|
162 |
+
logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
|
163 |
+
|
164 |
+
logdet_tot = 0
|
165 |
+
z0, logdet = self.log_flow(z0, x_mask)
|
166 |
+
logdet_tot += logdet
|
167 |
+
z = torch.cat([z0, z1], 1)
|
168 |
+
for flow in flows:
|
169 |
+
z, logdet = flow(z, x_mask, g=x, reverse=reverse)
|
170 |
+
logdet_tot = logdet_tot + logdet
|
171 |
+
nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
|
172 |
+
return nll + logq # [b]
|
173 |
+
else:
|
174 |
+
flows = list(reversed(self.flows))
|
175 |
+
flows = flows[:-2] + [flows[-1]] # remove a useless vflow
|
176 |
+
z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
|
177 |
+
for flow in flows:
|
178 |
+
z = flow(z, x_mask, g=x, reverse=reverse)
|
179 |
+
z0, z1 = torch.split(z, [1, 1], 1)
|
180 |
+
logw = z0
|
181 |
+
return logw
|
182 |
+
|
183 |
+
class PosteriorEncoder(nn.Module):
|
184 |
+
def __init__(
|
185 |
+
self,
|
186 |
+
in_channels,
|
187 |
+
out_channels,
|
188 |
+
hidden_channels,
|
189 |
+
kernel_size,
|
190 |
+
dilation_rate,
|
191 |
+
n_layers,
|
192 |
+
gin_channels=0,
|
193 |
+
):
|
194 |
+
super().__init__()
|
195 |
+
self.in_channels = in_channels
|
196 |
+
self.out_channels = out_channels
|
197 |
+
self.hidden_channels = hidden_channels
|
198 |
+
self.kernel_size = kernel_size
|
199 |
+
self.dilation_rate = dilation_rate
|
200 |
+
self.n_layers = n_layers
|
201 |
+
self.gin_channels = gin_channels
|
202 |
+
|
203 |
+
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
|
204 |
+
self.enc = modules.WN(
|
205 |
+
hidden_channels,
|
206 |
+
kernel_size,
|
207 |
+
dilation_rate,
|
208 |
+
n_layers,
|
209 |
+
gin_channels=gin_channels,
|
210 |
+
)
|
211 |
+
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
212 |
+
|
213 |
+
def forward(self, x, x_lengths, g=None, tau=1.0):
|
214 |
+
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
|
215 |
+
x.dtype
|
216 |
+
)
|
217 |
+
x = self.pre(x) * x_mask
|
218 |
+
x = self.enc(x, x_mask, g=g)
|
219 |
+
stats = self.proj(x) * x_mask
|
220 |
+
m, logs = torch.split(stats, self.out_channels, dim=1)
|
221 |
+
z = (m + torch.randn_like(m) * tau * torch.exp(logs)) * x_mask
|
222 |
+
return z, m, logs, x_mask
|
223 |
+
|
224 |
+
|
225 |
+
class Generator(torch.nn.Module):
|
226 |
+
def __init__(
|
227 |
+
self,
|
228 |
+
initial_channel,
|
229 |
+
resblock,
|
230 |
+
resblock_kernel_sizes,
|
231 |
+
resblock_dilation_sizes,
|
232 |
+
upsample_rates,
|
233 |
+
upsample_initial_channel,
|
234 |
+
upsample_kernel_sizes,
|
235 |
+
gin_channels=0,
|
236 |
+
):
|
237 |
+
super(Generator, self).__init__()
|
238 |
+
self.num_kernels = len(resblock_kernel_sizes)
|
239 |
+
self.num_upsamples = len(upsample_rates)
|
240 |
+
self.conv_pre = Conv1d(
|
241 |
+
initial_channel, upsample_initial_channel, 7, 1, padding=3
|
242 |
+
)
|
243 |
+
resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
|
244 |
+
|
245 |
+
self.ups = nn.ModuleList()
|
246 |
+
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
247 |
+
self.ups.append(
|
248 |
+
weight_norm(
|
249 |
+
ConvTranspose1d(
|
250 |
+
upsample_initial_channel // (2**i),
|
251 |
+
upsample_initial_channel // (2 ** (i + 1)),
|
252 |
+
k,
|
253 |
+
u,
|
254 |
+
padding=(k - u) // 2,
|
255 |
+
)
|
256 |
+
)
|
257 |
+
)
|
258 |
+
|
259 |
+
self.resblocks = nn.ModuleList()
|
260 |
+
for i in range(len(self.ups)):
|
261 |
+
ch = upsample_initial_channel // (2 ** (i + 1))
|
262 |
+
for j, (k, d) in enumerate(
|
263 |
+
zip(resblock_kernel_sizes, resblock_dilation_sizes)
|
264 |
+
):
|
265 |
+
self.resblocks.append(resblock(ch, k, d))
|
266 |
+
|
267 |
+
self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
|
268 |
+
self.ups.apply(init_weights)
|
269 |
+
|
270 |
+
if gin_channels != 0:
|
271 |
+
self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
|
272 |
+
|
273 |
+
def forward(self, x, g=None):
|
274 |
+
x = self.conv_pre(x)
|
275 |
+
if g is not None:
|
276 |
+
x = x + self.cond(g)
|
277 |
+
|
278 |
+
for i in range(self.num_upsamples):
|
279 |
+
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
280 |
+
x = self.ups[i](x)
|
281 |
+
xs = None
|
282 |
+
for j in range(self.num_kernels):
|
283 |
+
if xs is None:
|
284 |
+
xs = self.resblocks[i * self.num_kernels + j](x)
|
285 |
+
else:
|
286 |
+
xs += self.resblocks[i * self.num_kernels + j](x)
|
287 |
+
x = xs / self.num_kernels
|
288 |
+
x = F.leaky_relu(x)
|
289 |
+
x = self.conv_post(x)
|
290 |
+
x = torch.tanh(x)
|
291 |
+
|
292 |
+
return x
|
293 |
+
|
294 |
+
def remove_weight_norm(self):
|
295 |
+
print("Removing weight norm...")
|
296 |
+
for layer in self.ups:
|
297 |
+
remove_weight_norm(layer)
|
298 |
+
for layer in self.resblocks:
|
299 |
+
layer.remove_weight_norm()
|
300 |
+
|
301 |
+
|
302 |
+
class ReferenceEncoder(nn.Module):
|
303 |
+
"""
|
304 |
+
inputs --- [N, Ty/r, n_mels*r] mels
|
305 |
+
outputs --- [N, ref_enc_gru_size]
|
306 |
+
"""
|
307 |
+
|
308 |
+
def __init__(self, spec_channels, gin_channels=0, layernorm=True):
|
309 |
+
super().__init__()
|
310 |
+
self.spec_channels = spec_channels
|
311 |
+
ref_enc_filters = [32, 32, 64, 64, 128, 128]
|
312 |
+
K = len(ref_enc_filters)
|
313 |
+
filters = [1] + ref_enc_filters
|
314 |
+
convs = [
|
315 |
+
weight_norm(
|
316 |
+
nn.Conv2d(
|
317 |
+
in_channels=filters[i],
|
318 |
+
out_channels=filters[i + 1],
|
319 |
+
kernel_size=(3, 3),
|
320 |
+
stride=(2, 2),
|
321 |
+
padding=(1, 1),
|
322 |
+
)
|
323 |
+
)
|
324 |
+
for i in range(K)
|
325 |
+
]
|
326 |
+
self.convs = nn.ModuleList(convs)
|
327 |
+
|
328 |
+
out_channels = self.calculate_channels(spec_channels, 3, 2, 1, K)
|
329 |
+
self.gru = nn.GRU(
|
330 |
+
input_size=ref_enc_filters[-1] * out_channels,
|
331 |
+
hidden_size=256 // 2,
|
332 |
+
batch_first=True,
|
333 |
+
)
|
334 |
+
self.proj = nn.Linear(128, gin_channels)
|
335 |
+
if layernorm:
|
336 |
+
self.layernorm = nn.LayerNorm(self.spec_channels)
|
337 |
+
else:
|
338 |
+
self.layernorm = None
|
339 |
+
|
340 |
+
def forward(self, inputs, mask=None):
|
341 |
+
N = inputs.size(0)
|
342 |
+
|
343 |
+
out = inputs.view(N, 1, -1, self.spec_channels) # [N, 1, Ty, n_freqs]
|
344 |
+
if self.layernorm is not None:
|
345 |
+
out = self.layernorm(out)
|
346 |
+
|
347 |
+
for conv in self.convs:
|
348 |
+
out = conv(out)
|
349 |
+
# out = wn(out)
|
350 |
+
out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K]
|
351 |
+
|
352 |
+
out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K]
|
353 |
+
T = out.size(1)
|
354 |
+
N = out.size(0)
|
355 |
+
out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K]
|
356 |
+
|
357 |
+
self.gru.flatten_parameters()
|
358 |
+
memory, out = self.gru(out) # out --- [1, N, 128]
|
359 |
+
|
360 |
+
return self.proj(out.squeeze(0))
|
361 |
+
|
362 |
+
def calculate_channels(self, L, kernel_size, stride, pad, n_convs):
|
363 |
+
for i in range(n_convs):
|
364 |
+
L = (L - kernel_size + 2 * pad) // stride + 1
|
365 |
+
return L
|
366 |
+
|
367 |
+
|
368 |
+
class ResidualCouplingBlock(nn.Module):
|
369 |
+
def __init__(self,
|
370 |
+
channels,
|
371 |
+
hidden_channels,
|
372 |
+
kernel_size,
|
373 |
+
dilation_rate,
|
374 |
+
n_layers,
|
375 |
+
n_flows=4,
|
376 |
+
gin_channels=0):
|
377 |
+
super().__init__()
|
378 |
+
self.channels = channels
|
379 |
+
self.hidden_channels = hidden_channels
|
380 |
+
self.kernel_size = kernel_size
|
381 |
+
self.dilation_rate = dilation_rate
|
382 |
+
self.n_layers = n_layers
|
383 |
+
self.n_flows = n_flows
|
384 |
+
self.gin_channels = gin_channels
|
385 |
+
|
386 |
+
self.flows = nn.ModuleList()
|
387 |
+
for i in range(n_flows):
|
388 |
+
self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
|
389 |
+
self.flows.append(modules.Flip())
|
390 |
+
|
391 |
+
def forward(self, x, x_mask, g=None, reverse=False):
|
392 |
+
if not reverse:
|
393 |
+
for flow in self.flows:
|
394 |
+
x, _ = flow(x, x_mask, g=g, reverse=reverse)
|
395 |
+
else:
|
396 |
+
for flow in reversed(self.flows):
|
397 |
+
x = flow(x, x_mask, g=g, reverse=reverse)
|
398 |
+
return x
|
399 |
+
|
400 |
+
class SynthesizerTrn(nn.Module):
|
401 |
+
"""
|
402 |
+
Synthesizer for Training
|
403 |
+
"""
|
404 |
+
|
405 |
+
def __init__(
|
406 |
+
self,
|
407 |
+
n_vocab,
|
408 |
+
spec_channels,
|
409 |
+
inter_channels,
|
410 |
+
hidden_channels,
|
411 |
+
filter_channels,
|
412 |
+
n_heads,
|
413 |
+
n_layers,
|
414 |
+
kernel_size,
|
415 |
+
p_dropout,
|
416 |
+
resblock,
|
417 |
+
resblock_kernel_sizes,
|
418 |
+
resblock_dilation_sizes,
|
419 |
+
upsample_rates,
|
420 |
+
upsample_initial_channel,
|
421 |
+
upsample_kernel_sizes,
|
422 |
+
n_speakers=256,
|
423 |
+
gin_channels=256,
|
424 |
+
**kwargs
|
425 |
+
):
|
426 |
+
super().__init__()
|
427 |
+
|
428 |
+
self.dec = Generator(
|
429 |
+
inter_channels,
|
430 |
+
resblock,
|
431 |
+
resblock_kernel_sizes,
|
432 |
+
resblock_dilation_sizes,
|
433 |
+
upsample_rates,
|
434 |
+
upsample_initial_channel,
|
435 |
+
upsample_kernel_sizes,
|
436 |
+
gin_channels=gin_channels,
|
437 |
+
)
|
438 |
+
self.enc_q = PosteriorEncoder(
|
439 |
+
spec_channels,
|
440 |
+
inter_channels,
|
441 |
+
hidden_channels,
|
442 |
+
5,
|
443 |
+
1,
|
444 |
+
16,
|
445 |
+
gin_channels=gin_channels,
|
446 |
+
)
|
447 |
+
|
448 |
+
self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
|
449 |
+
|
450 |
+
self.n_speakers = n_speakers
|
451 |
+
if n_speakers == 0:
|
452 |
+
self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)
|
453 |
+
else:
|
454 |
+
self.enc_p = TextEncoder(n_vocab,
|
455 |
+
inter_channels,
|
456 |
+
hidden_channels,
|
457 |
+
filter_channels,
|
458 |
+
n_heads,
|
459 |
+
n_layers,
|
460 |
+
kernel_size,
|
461 |
+
p_dropout)
|
462 |
+
self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
|
463 |
+
self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
|
464 |
+
self.emb_g = nn.Embedding(n_speakers, gin_channels)
|
465 |
+
|
466 |
+
def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., sdp_ratio=0.2, max_len=None):
|
467 |
+
x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
|
468 |
+
if self.n_speakers > 0:
|
469 |
+
g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
|
470 |
+
else:
|
471 |
+
g = None
|
472 |
+
|
473 |
+
logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * sdp_ratio \
|
474 |
+
+ self.dp(x, x_mask, g=g) * (1 - sdp_ratio)
|
475 |
+
|
476 |
+
w = torch.exp(logw) * x_mask * length_scale
|
477 |
+
w_ceil = torch.ceil(w)
|
478 |
+
y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
|
479 |
+
y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
|
480 |
+
attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
|
481 |
+
attn = commons.generate_path(w_ceil, attn_mask)
|
482 |
+
|
483 |
+
m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
|
484 |
+
logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
|
485 |
+
|
486 |
+
z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
|
487 |
+
z = self.flow(z_p, y_mask, g=g, reverse=True)
|
488 |
+
o = self.dec((z * y_mask)[:,:,:max_len], g=g)
|
489 |
+
return o, attn, y_mask, (z, z_p, m_p, logs_p)
|
490 |
+
|
491 |
+
def voice_conversion(self, y, y_lengths, sid_src, sid_tgt, tau=1.0):
|
492 |
+
g_src = sid_src
|
493 |
+
g_tgt = sid_tgt
|
494 |
+
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src, tau=tau)
|
495 |
+
z_p = self.flow(z, y_mask, g=g_src)
|
496 |
+
z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
|
497 |
+
o_hat = self.dec(z_hat * y_mask, g=g_tgt)
|
498 |
+
return o_hat, y_mask, (z, z_p, z_hat)
|
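A minimal usage sketch for the voice_conversion path defined above, once a checkpoint and its config are loaded. Every hyperparameter and tensor shape below is an illustrative assumption for demonstration, not the project's actual configuration.

import torch
from openvoice_cli.models import SynthesizerTrn

# assumed sizes for illustration only; real values come from the checkpoint's config file
spec_channels, gin_channels = 513, 256
model = SynthesizerTrn(
    n_vocab=0, spec_channels=spec_channels, inter_channels=192, hidden_channels=192,
    filter_channels=768, n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1,
    resblock="1", resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    upsample_rates=[8, 8, 2, 2], upsample_initial_channel=512,
    upsample_kernel_sizes=[16, 16, 4, 4],
    n_speakers=0, gin_channels=gin_channels,
).eval()

spec = torch.randn(1, spec_channels, 200)      # source linear spectrogram [b, n_freq, t]
lengths = torch.LongTensor([200])
g_src = torch.randn(1, gin_channels, 1)        # source tone-colour embedding
g_tgt = torch.randn(1, gin_channels, 1)        # target tone-colour embedding
with torch.no_grad():
    wav, _, _ = model.voice_conversion(spec, lengths, g_src, g_tgt, tau=0.3)
print(wav.shape)                               # [1, 1, t * prod(upsample_rates)]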
openvoice_cli/modules.py
ADDED
@@ -0,0 +1,598 @@
1 |
+
import math
|
2 |
+
import torch
|
3 |
+
from torch import nn
|
4 |
+
from torch.nn import functional as F
|
5 |
+
|
6 |
+
from torch.nn import Conv1d
|
7 |
+
from torch.nn.utils import weight_norm, remove_weight_norm
|
8 |
+
|
9 |
+
import openvoice_cli.commons as commons
|
10 |
+
from openvoice_cli.commons import init_weights, get_padding
|
11 |
+
from openvoice_cli.transforms import piecewise_rational_quadratic_transform
|
12 |
+
from openvoice_cli.attentions import Encoder
|
13 |
+
|
14 |
+
LRELU_SLOPE = 0.1
|
15 |
+
|
16 |
+
|
17 |
+
class LayerNorm(nn.Module):
|
18 |
+
def __init__(self, channels, eps=1e-5):
|
19 |
+
super().__init__()
|
20 |
+
self.channels = channels
|
21 |
+
self.eps = eps
|
22 |
+
|
23 |
+
self.gamma = nn.Parameter(torch.ones(channels))
|
24 |
+
self.beta = nn.Parameter(torch.zeros(channels))
|
25 |
+
|
26 |
+
def forward(self, x):
|
27 |
+
x = x.transpose(1, -1)
|
28 |
+
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
|
29 |
+
return x.transpose(1, -1)
|
30 |
+
|
31 |
+
|
32 |
+
class ConvReluNorm(nn.Module):
|
33 |
+
def __init__(
|
34 |
+
self,
|
35 |
+
in_channels,
|
36 |
+
hidden_channels,
|
37 |
+
out_channels,
|
38 |
+
kernel_size,
|
39 |
+
n_layers,
|
40 |
+
p_dropout,
|
41 |
+
):
|
42 |
+
super().__init__()
|
43 |
+
self.in_channels = in_channels
|
44 |
+
self.hidden_channels = hidden_channels
|
45 |
+
self.out_channels = out_channels
|
46 |
+
self.kernel_size = kernel_size
|
47 |
+
self.n_layers = n_layers
|
48 |
+
self.p_dropout = p_dropout
|
49 |
+
assert n_layers > 1, "Number of layers should be larger than 1."
|
50 |
+
|
51 |
+
self.conv_layers = nn.ModuleList()
|
52 |
+
self.norm_layers = nn.ModuleList()
|
53 |
+
self.conv_layers.append(
|
54 |
+
nn.Conv1d(
|
55 |
+
in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
|
56 |
+
)
|
57 |
+
)
|
58 |
+
self.norm_layers.append(LayerNorm(hidden_channels))
|
59 |
+
self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
|
60 |
+
for _ in range(n_layers - 1):
|
61 |
+
self.conv_layers.append(
|
62 |
+
nn.Conv1d(
|
63 |
+
hidden_channels,
|
64 |
+
hidden_channels,
|
65 |
+
kernel_size,
|
66 |
+
padding=kernel_size // 2,
|
67 |
+
)
|
68 |
+
)
|
69 |
+
self.norm_layers.append(LayerNorm(hidden_channels))
|
70 |
+
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
|
71 |
+
self.proj.weight.data.zero_()
|
72 |
+
self.proj.bias.data.zero_()
|
73 |
+
|
74 |
+
def forward(self, x, x_mask):
|
75 |
+
x_org = x
|
76 |
+
for i in range(self.n_layers):
|
77 |
+
x = self.conv_layers[i](x * x_mask)
|
78 |
+
x = self.norm_layers[i](x)
|
79 |
+
x = self.relu_drop(x)
|
80 |
+
x = x_org + self.proj(x)
|
81 |
+
return x * x_mask
|
82 |
+
|
83 |
+
|
84 |
+
class DDSConv(nn.Module):
|
85 |
+
"""
|
86 |
+
Dilated and Depth-Separable Convolution
|
87 |
+
"""
|
88 |
+
|
89 |
+
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
|
90 |
+
super().__init__()
|
91 |
+
self.channels = channels
|
92 |
+
self.kernel_size = kernel_size
|
93 |
+
self.n_layers = n_layers
|
94 |
+
self.p_dropout = p_dropout
|
95 |
+
|
96 |
+
self.drop = nn.Dropout(p_dropout)
|
97 |
+
self.convs_sep = nn.ModuleList()
|
98 |
+
self.convs_1x1 = nn.ModuleList()
|
99 |
+
self.norms_1 = nn.ModuleList()
|
100 |
+
self.norms_2 = nn.ModuleList()
|
101 |
+
for i in range(n_layers):
|
102 |
+
dilation = kernel_size**i
|
103 |
+
padding = (kernel_size * dilation - dilation) // 2
|
104 |
+
self.convs_sep.append(
|
105 |
+
nn.Conv1d(
|
106 |
+
channels,
|
107 |
+
channels,
|
108 |
+
kernel_size,
|
109 |
+
groups=channels,
|
110 |
+
dilation=dilation,
|
111 |
+
padding=padding,
|
112 |
+
)
|
113 |
+
)
|
114 |
+
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
|
115 |
+
self.norms_1.append(LayerNorm(channels))
|
116 |
+
self.norms_2.append(LayerNorm(channels))
|
117 |
+
|
118 |
+
def forward(self, x, x_mask, g=None):
|
119 |
+
if g is not None:
|
120 |
+
x = x + g
|
121 |
+
for i in range(self.n_layers):
|
122 |
+
y = self.convs_sep[i](x * x_mask)
|
123 |
+
y = self.norms_1[i](y)
|
124 |
+
y = F.gelu(y)
|
125 |
+
y = self.convs_1x1[i](y)
|
126 |
+
y = self.norms_2[i](y)
|
127 |
+
y = F.gelu(y)
|
128 |
+
y = self.drop(y)
|
129 |
+
x = x + y
|
130 |
+
return x * x_mask
|
131 |
+
|
132 |
+
|
133 |
+
class WN(torch.nn.Module):
|
134 |
+
def __init__(
|
135 |
+
self,
|
136 |
+
hidden_channels,
|
137 |
+
kernel_size,
|
138 |
+
dilation_rate,
|
139 |
+
n_layers,
|
140 |
+
gin_channels=0,
|
141 |
+
p_dropout=0,
|
142 |
+
):
|
143 |
+
super(WN, self).__init__()
|
144 |
+
assert kernel_size % 2 == 1
|
145 |
+
self.hidden_channels = hidden_channels
|
146 |
+
self.kernel_size = (kernel_size,)
|
147 |
+
self.dilation_rate = dilation_rate
|
148 |
+
self.n_layers = n_layers
|
149 |
+
self.gin_channels = gin_channels
|
150 |
+
self.p_dropout = p_dropout
|
151 |
+
|
152 |
+
self.in_layers = torch.nn.ModuleList()
|
153 |
+
self.res_skip_layers = torch.nn.ModuleList()
|
154 |
+
self.drop = nn.Dropout(p_dropout)
|
155 |
+
|
156 |
+
if gin_channels != 0:
|
157 |
+
cond_layer = torch.nn.Conv1d(
|
158 |
+
gin_channels, 2 * hidden_channels * n_layers, 1
|
159 |
+
)
|
160 |
+
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
|
161 |
+
|
162 |
+
for i in range(n_layers):
|
163 |
+
dilation = dilation_rate**i
|
164 |
+
padding = int((kernel_size * dilation - dilation) / 2)
|
165 |
+
in_layer = torch.nn.Conv1d(
|
166 |
+
hidden_channels,
|
167 |
+
2 * hidden_channels,
|
168 |
+
kernel_size,
|
169 |
+
dilation=dilation,
|
170 |
+
padding=padding,
|
171 |
+
)
|
172 |
+
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
|
173 |
+
self.in_layers.append(in_layer)
|
174 |
+
|
175 |
+
# last one is not necessary
|
176 |
+
if i < n_layers - 1:
|
177 |
+
res_skip_channels = 2 * hidden_channels
|
178 |
+
else:
|
179 |
+
res_skip_channels = hidden_channels
|
180 |
+
|
181 |
+
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
|
182 |
+
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
|
183 |
+
self.res_skip_layers.append(res_skip_layer)
|
184 |
+
|
185 |
+
def forward(self, x, x_mask, g=None, **kwargs):
|
186 |
+
output = torch.zeros_like(x)
|
187 |
+
n_channels_tensor = torch.IntTensor([self.hidden_channels])
|
188 |
+
|
189 |
+
if g is not None:
|
190 |
+
g = self.cond_layer(g)
|
191 |
+
|
192 |
+
for i in range(self.n_layers):
|
193 |
+
x_in = self.in_layers[i](x)
|
194 |
+
if g is not None:
|
195 |
+
cond_offset = i * 2 * self.hidden_channels
|
196 |
+
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
|
197 |
+
else:
|
198 |
+
g_l = torch.zeros_like(x_in)
|
199 |
+
|
200 |
+
acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
|
201 |
+
acts = self.drop(acts)
|
202 |
+
|
203 |
+
res_skip_acts = self.res_skip_layers[i](acts)
|
204 |
+
if i < self.n_layers - 1:
|
205 |
+
res_acts = res_skip_acts[:, : self.hidden_channels, :]
|
206 |
+
x = (x + res_acts) * x_mask
|
207 |
+
output = output + res_skip_acts[:, self.hidden_channels :, :]
|
208 |
+
else:
|
209 |
+
output = output + res_skip_acts
|
210 |
+
return output * x_mask
|
211 |
+
|
212 |
+
def remove_weight_norm(self):
|
213 |
+
if self.gin_channels != 0:
|
214 |
+
torch.nn.utils.remove_weight_norm(self.cond_layer)
|
215 |
+
for l in self.in_layers:
|
216 |
+
torch.nn.utils.remove_weight_norm(l)
|
217 |
+
for l in self.res_skip_layers:
|
218 |
+
torch.nn.utils.remove_weight_norm(l)
|
219 |
+
|
220 |
+
|
221 |
+
class ResBlock1(torch.nn.Module):
|
222 |
+
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
|
223 |
+
super(ResBlock1, self).__init__()
|
224 |
+
self.convs1 = nn.ModuleList(
|
225 |
+
[
|
226 |
+
weight_norm(
|
227 |
+
Conv1d(
|
228 |
+
channels,
|
229 |
+
channels,
|
230 |
+
kernel_size,
|
231 |
+
1,
|
232 |
+
dilation=dilation[0],
|
233 |
+
padding=get_padding(kernel_size, dilation[0]),
|
234 |
+
)
|
235 |
+
),
|
236 |
+
weight_norm(
|
237 |
+
Conv1d(
|
238 |
+
channels,
|
239 |
+
channels,
|
240 |
+
kernel_size,
|
241 |
+
1,
|
242 |
+
dilation=dilation[1],
|
243 |
+
padding=get_padding(kernel_size, dilation[1]),
|
244 |
+
)
|
245 |
+
),
|
246 |
+
weight_norm(
|
247 |
+
Conv1d(
|
248 |
+
channels,
|
249 |
+
channels,
|
250 |
+
kernel_size,
|
251 |
+
1,
|
252 |
+
dilation=dilation[2],
|
253 |
+
padding=get_padding(kernel_size, dilation[2]),
|
254 |
+
)
|
255 |
+
),
|
256 |
+
]
|
257 |
+
)
|
258 |
+
self.convs1.apply(init_weights)
|
259 |
+
|
260 |
+
self.convs2 = nn.ModuleList(
|
261 |
+
[
|
262 |
+
weight_norm(
|
263 |
+
Conv1d(
|
264 |
+
channels,
|
265 |
+
channels,
|
266 |
+
kernel_size,
|
267 |
+
1,
|
268 |
+
dilation=1,
|
269 |
+
padding=get_padding(kernel_size, 1),
|
270 |
+
)
|
271 |
+
),
|
272 |
+
weight_norm(
|
273 |
+
Conv1d(
|
274 |
+
channels,
|
275 |
+
channels,
|
276 |
+
kernel_size,
|
277 |
+
1,
|
278 |
+
dilation=1,
|
279 |
+
padding=get_padding(kernel_size, 1),
|
280 |
+
)
|
281 |
+
),
|
282 |
+
weight_norm(
|
283 |
+
Conv1d(
|
284 |
+
channels,
|
285 |
+
channels,
|
286 |
+
kernel_size,
|
287 |
+
1,
|
288 |
+
dilation=1,
|
289 |
+
padding=get_padding(kernel_size, 1),
|
290 |
+
)
|
291 |
+
),
|
292 |
+
]
|
293 |
+
)
|
294 |
+
self.convs2.apply(init_weights)
|
295 |
+
|
296 |
+
def forward(self, x, x_mask=None):
|
297 |
+
for c1, c2 in zip(self.convs1, self.convs2):
|
298 |
+
xt = F.leaky_relu(x, LRELU_SLOPE)
|
299 |
+
if x_mask is not None:
|
300 |
+
xt = xt * x_mask
|
301 |
+
xt = c1(xt)
|
302 |
+
xt = F.leaky_relu(xt, LRELU_SLOPE)
|
303 |
+
if x_mask is not None:
|
304 |
+
xt = xt * x_mask
|
305 |
+
xt = c2(xt)
|
306 |
+
x = xt + x
|
307 |
+
if x_mask is not None:
|
308 |
+
x = x * x_mask
|
309 |
+
return x
|
310 |
+
|
311 |
+
def remove_weight_norm(self):
|
312 |
+
for l in self.convs1:
|
313 |
+
remove_weight_norm(l)
|
314 |
+
for l in self.convs2:
|
315 |
+
remove_weight_norm(l)
|
316 |
+
|
317 |
+
|
318 |
+
class ResBlock2(torch.nn.Module):
|
319 |
+
def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
|
320 |
+
super(ResBlock2, self).__init__()
|
321 |
+
self.convs = nn.ModuleList(
|
322 |
+
[
|
323 |
+
weight_norm(
|
324 |
+
Conv1d(
|
325 |
+
channels,
|
326 |
+
channels,
|
327 |
+
kernel_size,
|
328 |
+
1,
|
329 |
+
dilation=dilation[0],
|
330 |
+
padding=get_padding(kernel_size, dilation[0]),
|
331 |
+
)
|
332 |
+
),
|
333 |
+
weight_norm(
|
334 |
+
Conv1d(
|
335 |
+
channels,
|
336 |
+
channels,
|
337 |
+
kernel_size,
|
338 |
+
1,
|
339 |
+
dilation=dilation[1],
|
340 |
+
padding=get_padding(kernel_size, dilation[1]),
|
341 |
+
)
|
342 |
+
),
|
343 |
+
]
|
344 |
+
)
|
345 |
+
self.convs.apply(init_weights)
|
346 |
+
|
347 |
+
def forward(self, x, x_mask=None):
|
348 |
+
for c in self.convs:
|
349 |
+
xt = F.leaky_relu(x, LRELU_SLOPE)
|
350 |
+
if x_mask is not None:
|
351 |
+
xt = xt * x_mask
|
352 |
+
xt = c(xt)
|
353 |
+
x = xt + x
|
354 |
+
if x_mask is not None:
|
355 |
+
x = x * x_mask
|
356 |
+
return x
|
357 |
+
|
358 |
+
def remove_weight_norm(self):
|
359 |
+
for l in self.convs:
|
360 |
+
remove_weight_norm(l)
|
361 |
+
|
362 |
+
|
363 |
+
class Log(nn.Module):
|
364 |
+
def forward(self, x, x_mask, reverse=False, **kwargs):
|
365 |
+
if not reverse:
|
366 |
+
y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
|
367 |
+
logdet = torch.sum(-y, [1, 2])
|
368 |
+
return y, logdet
|
369 |
+
else:
|
370 |
+
x = torch.exp(x) * x_mask
|
371 |
+
return x
|
372 |
+
|
373 |
+
|
374 |
+
class Flip(nn.Module):
|
375 |
+
def forward(self, x, *args, reverse=False, **kwargs):
|
376 |
+
x = torch.flip(x, [1])
|
377 |
+
if not reverse:
|
378 |
+
logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
|
379 |
+
return x, logdet
|
380 |
+
else:
|
381 |
+
return x
|
382 |
+
|
383 |
+
|
384 |
+
class ElementwiseAffine(nn.Module):
|
385 |
+
def __init__(self, channels):
|
386 |
+
super().__init__()
|
387 |
+
self.channels = channels
|
388 |
+
self.m = nn.Parameter(torch.zeros(channels, 1))
|
389 |
+
self.logs = nn.Parameter(torch.zeros(channels, 1))
|
390 |
+
|
391 |
+
def forward(self, x, x_mask, reverse=False, **kwargs):
|
392 |
+
if not reverse:
|
393 |
+
y = self.m + torch.exp(self.logs) * x
|
394 |
+
y = y * x_mask
|
395 |
+
logdet = torch.sum(self.logs * x_mask, [1, 2])
|
396 |
+
return y, logdet
|
397 |
+
else:
|
398 |
+
x = (x - self.m) * torch.exp(-self.logs) * x_mask
|
399 |
+
return x
|
400 |
+
|
401 |
+
|
402 |
+
class ResidualCouplingLayer(nn.Module):
|
403 |
+
def __init__(
|
404 |
+
self,
|
405 |
+
channels,
|
406 |
+
hidden_channels,
|
407 |
+
kernel_size,
|
408 |
+
dilation_rate,
|
409 |
+
n_layers,
|
410 |
+
p_dropout=0,
|
411 |
+
gin_channels=0,
|
412 |
+
mean_only=False,
|
413 |
+
):
|
414 |
+
assert channels % 2 == 0, "channels should be divisible by 2"
|
415 |
+
super().__init__()
|
416 |
+
self.channels = channels
|
417 |
+
self.hidden_channels = hidden_channels
|
418 |
+
self.kernel_size = kernel_size
|
419 |
+
self.dilation_rate = dilation_rate
|
420 |
+
self.n_layers = n_layers
|
421 |
+
self.half_channels = channels // 2
|
422 |
+
self.mean_only = mean_only
|
423 |
+
|
424 |
+
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
|
425 |
+
self.enc = WN(
|
426 |
+
hidden_channels,
|
427 |
+
kernel_size,
|
428 |
+
dilation_rate,
|
429 |
+
n_layers,
|
430 |
+
p_dropout=p_dropout,
|
431 |
+
gin_channels=gin_channels,
|
432 |
+
)
|
433 |
+
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
|
434 |
+
self.post.weight.data.zero_()
|
435 |
+
self.post.bias.data.zero_()
|
436 |
+
|
437 |
+
def forward(self, x, x_mask, g=None, reverse=False):
|
438 |
+
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
|
439 |
+
h = self.pre(x0) * x_mask
|
440 |
+
h = self.enc(h, x_mask, g=g)
|
441 |
+
stats = self.post(h) * x_mask
|
442 |
+
if not self.mean_only:
|
443 |
+
m, logs = torch.split(stats, [self.half_channels] * 2, 1)
|
444 |
+
else:
|
445 |
+
m = stats
|
446 |
+
logs = torch.zeros_like(m)
|
447 |
+
|
448 |
+
if not reverse:
|
449 |
+
x1 = m + x1 * torch.exp(logs) * x_mask
|
450 |
+
x = torch.cat([x0, x1], 1)
|
451 |
+
logdet = torch.sum(logs, [1, 2])
|
452 |
+
return x, logdet
|
453 |
+
else:
|
454 |
+
x1 = (x1 - m) * torch.exp(-logs) * x_mask
|
455 |
+
x = torch.cat([x0, x1], 1)
|
456 |
+
return x
|
457 |
+
|
458 |
+
|
459 |
+
class ConvFlow(nn.Module):
|
460 |
+
def __init__(
|
461 |
+
self,
|
462 |
+
in_channels,
|
463 |
+
filter_channels,
|
464 |
+
kernel_size,
|
465 |
+
n_layers,
|
466 |
+
num_bins=10,
|
467 |
+
tail_bound=5.0,
|
468 |
+
):
|
469 |
+
super().__init__()
|
470 |
+
self.in_channels = in_channels
|
471 |
+
self.filter_channels = filter_channels
|
472 |
+
self.kernel_size = kernel_size
|
473 |
+
self.n_layers = n_layers
|
474 |
+
self.num_bins = num_bins
|
475 |
+
self.tail_bound = tail_bound
|
476 |
+
self.half_channels = in_channels // 2
|
477 |
+
|
478 |
+
self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
|
479 |
+
self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
|
480 |
+
self.proj = nn.Conv1d(
|
481 |
+
filter_channels, self.half_channels * (num_bins * 3 - 1), 1
|
482 |
+
)
|
483 |
+
self.proj.weight.data.zero_()
|
484 |
+
self.proj.bias.data.zero_()
|
485 |
+
|
486 |
+
def forward(self, x, x_mask, g=None, reverse=False):
|
487 |
+
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
|
488 |
+
h = self.pre(x0)
|
489 |
+
h = self.convs(h, x_mask, g=g)
|
490 |
+
h = self.proj(h) * x_mask
|
491 |
+
|
492 |
+
b, c, t = x0.shape
|
493 |
+
h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
|
494 |
+
|
495 |
+
unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
|
496 |
+
unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
|
497 |
+
self.filter_channels
|
498 |
+
)
|
499 |
+
unnormalized_derivatives = h[..., 2 * self.num_bins :]
|
500 |
+
|
501 |
+
x1, logabsdet = piecewise_rational_quadratic_transform(
|
502 |
+
x1,
|
503 |
+
unnormalized_widths,
|
504 |
+
unnormalized_heights,
|
505 |
+
unnormalized_derivatives,
|
506 |
+
inverse=reverse,
|
507 |
+
tails="linear",
|
508 |
+
tail_bound=self.tail_bound,
|
509 |
+
)
|
510 |
+
|
511 |
+
x = torch.cat([x0, x1], 1) * x_mask
|
512 |
+
logdet = torch.sum(logabsdet * x_mask, [1, 2])
|
513 |
+
if not reverse:
|
514 |
+
return x, logdet
|
515 |
+
else:
|
516 |
+
return x
|
517 |
+
|
518 |
+
|
519 |
+
class TransformerCouplingLayer(nn.Module):
|
520 |
+
def __init__(
|
521 |
+
self,
|
522 |
+
channels,
|
523 |
+
hidden_channels,
|
524 |
+
kernel_size,
|
525 |
+
n_layers,
|
526 |
+
n_heads,
|
527 |
+
p_dropout=0,
|
528 |
+
filter_channels=0,
|
529 |
+
mean_only=False,
|
530 |
+
wn_sharing_parameter=None,
|
531 |
+
gin_channels=0,
|
532 |
+
):
|
533 |
+
assert n_layers == 3, n_layers
|
534 |
+
assert channels % 2 == 0, "channels should be divisible by 2"
|
535 |
+
super().__init__()
|
536 |
+
self.channels = channels
|
537 |
+
self.hidden_channels = hidden_channels
|
538 |
+
self.kernel_size = kernel_size
|
539 |
+
self.n_layers = n_layers
|
540 |
+
self.half_channels = channels // 2
|
541 |
+
self.mean_only = mean_only
|
542 |
+
|
543 |
+
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
|
544 |
+
self.enc = (
|
545 |
+
Encoder(
|
546 |
+
hidden_channels,
|
547 |
+
filter_channels,
|
548 |
+
n_heads,
|
549 |
+
n_layers,
|
550 |
+
kernel_size,
|
551 |
+
p_dropout,
|
552 |
+
isflow=True,
|
553 |
+
gin_channels=gin_channels,
|
554 |
+
)
|
555 |
+
if wn_sharing_parameter is None
|
556 |
+
else wn_sharing_parameter
|
557 |
+
)
|
558 |
+
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
|
559 |
+
self.post.weight.data.zero_()
|
560 |
+
self.post.bias.data.zero_()
|
561 |
+
|
562 |
+
def forward(self, x, x_mask, g=None, reverse=False):
|
563 |
+
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
|
564 |
+
h = self.pre(x0) * x_mask
|
565 |
+
h = self.enc(h, x_mask, g=g)
|
566 |
+
stats = self.post(h) * x_mask
|
567 |
+
if not self.mean_only:
|
568 |
+
m, logs = torch.split(stats, [self.half_channels] * 2, 1)
|
569 |
+
else:
|
570 |
+
m = stats
|
571 |
+
logs = torch.zeros_like(m)
|
572 |
+
|
573 |
+
if not reverse:
|
574 |
+
x1 = m + x1 * torch.exp(logs) * x_mask
|
575 |
+
x = torch.cat([x0, x1], 1)
|
576 |
+
logdet = torch.sum(logs, [1, 2])
|
577 |
+
return x, logdet
|
578 |
+
else:
|
579 |
+
x1 = (x1 - m) * torch.exp(-logs) * x_mask
|
580 |
+
x = torch.cat([x0, x1], 1)
|
581 |
+
return x
|
582 |
+
|
583 |
+
x1, logabsdet = piecewise_rational_quadratic_transform(
|
584 |
+
x1,
|
585 |
+
unnormalized_widths,
|
586 |
+
unnormalized_heights,
|
587 |
+
unnormalized_derivatives,
|
588 |
+
inverse=reverse,
|
589 |
+
tails="linear",
|
590 |
+
tail_bound=self.tail_bound,
|
591 |
+
)
|
592 |
+
|
593 |
+
x = torch.cat([x0, x1], 1) * x_mask
|
594 |
+
logdet = torch.sum(logabsdet * x_mask, [1, 2])
|
595 |
+
if not reverse:
|
596 |
+
return x, logdet
|
597 |
+
else:
|
598 |
+
return x
|
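The coupling and flow blocks above are meant to be exactly invertible. A small self-contained check of that property for ResidualCouplingLayer, with arbitrary illustrative shapes:

import torch
from openvoice_cli.modules import ResidualCouplingLayer

layer = ResidualCouplingLayer(
    channels=4, hidden_channels=8, kernel_size=5,
    dilation_rate=1, n_layers=2, mean_only=True,
)
layer.post.weight.data.normal_(0, 0.1)        # perturb the zero-initialised output proj so the flow is non-trivial

x = torch.randn(2, 4, 10)
x_mask = torch.ones(2, 1, 10)
y, logdet = layer(x, x_mask)                  # forward direction
x_rec = layer(y, x_mask, reverse=True)        # inverse direction
print(torch.allclose(x, x_rec, atol=1e-5))    # True: the transform round-trips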
openvoice_cli/se_extractor.py
ADDED
@@ -0,0 +1,151 @@
1 |
+
import os
|
2 |
+
import glob
|
3 |
+
import torch
|
4 |
+
import hashlib
|
5 |
+
import librosa
|
6 |
+
import base64
|
7 |
+
from glob import glob
|
8 |
+
import numpy as np
|
9 |
+
from pydub import AudioSegment
|
10 |
+
from faster_whisper import WhisperModel
|
11 |
+
import hashlib
|
12 |
+
import base64
|
13 |
+
import librosa
|
14 |
+
from whisper_timestamped.transcribe import get_audio_tensor, get_vad_segments
|
15 |
+
|
16 |
+
model_size = "medium"
|
17 |
+
# Run on GPU with FP16
|
18 |
+
model = None
|
19 |
+
def split_audio_whisper(audio_path, audio_name, target_dir='processed'):
|
20 |
+
print("whisper")
|
21 |
+
global model
|
22 |
+
if model is None:
|
23 |
+
model = WhisperModel(model_size, device="cuda", compute_type="float16")
|
24 |
+
audio = AudioSegment.from_file(audio_path)
|
25 |
+
max_len = len(audio)
|
26 |
+
|
27 |
+
target_folder = os.path.join(target_dir, audio_name)
|
28 |
+
|
29 |
+
segments, info = model.transcribe(audio_path, beam_size=5, word_timestamps=True)
|
30 |
+
segments = list(segments)
|
31 |
+
|
32 |
+
# create directory
|
33 |
+
os.makedirs(target_folder, exist_ok=True)
|
34 |
+
wavs_folder = os.path.join(target_folder, 'wavs')
|
35 |
+
os.makedirs(wavs_folder, exist_ok=True)
|
36 |
+
|
37 |
+
# segments
|
38 |
+
s_ind = 0
|
39 |
+
start_time = None
|
40 |
+
|
41 |
+
for k, w in enumerate(segments):
|
42 |
+
# process with the time
|
43 |
+
if k == 0:
|
44 |
+
start_time = max(0, w.start)
|
45 |
+
|
46 |
+
end_time = w.end
|
47 |
+
|
48 |
+
# calculate confidence
|
49 |
+
if len(w.words) > 0:
|
50 |
+
confidence = sum([s.probability for s in w.words]) / len(w.words)
|
51 |
+
else:
|
52 |
+
confidence = 0.
|
53 |
+
# clean text
|
54 |
+
text = w.text.replace('...', '')
|
55 |
+
|
56 |
+
# leave an extra 0.08 s at the end of each audio segment
|
57 |
+
audio_seg = audio[int( start_time * 1000) : min(max_len, int(end_time * 1000) + 80)]
|
58 |
+
|
59 |
+
# segment file name
|
60 |
+
fname = f"{audio_name}_seg{s_ind}.wav"
|
61 |
+
|
62 |
+
# filter out the segment shorter than 1.5s and longer than 20s
|
63 |
+
save = audio_seg.duration_seconds > 1.5 and \
|
64 |
+
audio_seg.duration_seconds < 20. and \
|
65 |
+
len(text) >= 2 and len(text) < 200
|
66 |
+
|
67 |
+
if save:
|
68 |
+
output_file = os.path.join(wavs_folder, fname)
|
69 |
+
audio_seg.export(output_file, format='wav')
|
70 |
+
|
71 |
+
if k < len(segments) - 1:
|
72 |
+
start_time = max(0, segments[k+1].start - 0.08)
|
73 |
+
|
74 |
+
s_ind = s_ind + 1
|
75 |
+
return wavs_folder
|
76 |
+
|
77 |
+
|
78 |
+
def split_audio_vad(audio_path, audio_name, target_dir, split_seconds=10.0):
|
79 |
+
SAMPLE_RATE = 16000
|
80 |
+
audio_vad = get_audio_tensor(audio_path)
|
81 |
+
segments = get_vad_segments(
|
82 |
+
audio_vad,
|
83 |
+
output_sample=True,
|
84 |
+
min_speech_duration=0.1,
|
85 |
+
min_silence_duration=1,
|
86 |
+
method="silero",
|
87 |
+
)
|
88 |
+
segments = [(seg["start"], seg["end"]) for seg in segments]
|
89 |
+
segments = [(float(s) / SAMPLE_RATE, float(e) / SAMPLE_RATE) for s,e in segments]
|
90 |
+
print(segments)
|
91 |
+
audio_active = AudioSegment.silent(duration=0)
|
92 |
+
audio = AudioSegment.from_file(audio_path)
|
93 |
+
|
94 |
+
for start_time, end_time in segments:
|
95 |
+
audio_active += audio[int( start_time * 1000) : int(end_time * 1000)]
|
96 |
+
|
97 |
+
audio_dur = audio_active.duration_seconds
|
98 |
+
print(f'after vad: dur = {audio_dur}')
|
99 |
+
target_folder = os.path.join(target_dir, audio_name)
|
100 |
+
wavs_folder = os.path.join(target_folder, 'wavs')
|
101 |
+
os.makedirs(wavs_folder, exist_ok=True)
|
102 |
+
start_time = 0.
|
103 |
+
count = 0
|
104 |
+
num_splits = int(np.round(audio_dur / split_seconds))
|
105 |
+
assert num_splits > 0, 'input audio is too short'
|
106 |
+
interval = audio_dur / num_splits
|
107 |
+
|
108 |
+
for i in range(num_splits):
|
109 |
+
end_time = min(start_time + interval, audio_dur)
|
110 |
+
if i == num_splits - 1:
|
111 |
+
end_time = audio_dur
|
112 |
+
output_file = f"{wavs_folder}/{audio_name}_seg{count}.wav"
|
113 |
+
audio_seg = audio_active[int(start_time * 1000): int(end_time * 1000)]
|
114 |
+
audio_seg.export(output_file, format='wav')
|
115 |
+
start_time = end_time
|
116 |
+
count += 1
|
117 |
+
return wavs_folder
|
118 |
+
|
119 |
+
def hash_numpy_array(audio_path):
|
120 |
+
array, _ = librosa.load(audio_path, sr=None, mono=True)
|
121 |
+
# Convert the array to bytes
|
122 |
+
array_bytes = array.tobytes()
|
123 |
+
# Calculate the hash of the array bytes
|
124 |
+
hash_object = hashlib.sha256(array_bytes)
|
125 |
+
hash_value = hash_object.digest()
|
126 |
+
# Convert the hash value to base64
|
127 |
+
base64_value = base64.b64encode(hash_value)
|
128 |
+
return base64_value.decode('utf-8')[:16].replace('/', '_^')
|
129 |
+
|
130 |
+
def get_se(audio_path, vc_model, target_dir='processed', vad=True):
|
131 |
+
device = vc_model.device
|
132 |
+
|
133 |
+
audio_name = f"{os.path.basename(audio_path).rsplit('.', 1)[0]}_{hash_numpy_array(audio_path)}"
|
134 |
+
se_path = os.path.join(target_dir, audio_name, 'se.pth')
|
135 |
+
|
136 |
+
if os.path.isfile(se_path):
|
137 |
+
se = torch.load(se_path).to(device)
|
138 |
+
return se, audio_name
|
139 |
+
if os.path.isdir(audio_path):
|
140 |
+
wavs_folder = audio_path
|
141 |
+
elif vad:
|
142 |
+
wavs_folder = split_audio_vad(audio_path, target_dir=target_dir, audio_name=audio_name)
|
143 |
+
else:
|
144 |
+
wavs_folder = split_audio_whisper(audio_path, target_dir=target_dir, audio_name=audio_name)
|
145 |
+
|
146 |
+
audio_segs = glob(f'{wavs_folder}/*.wav')
|
147 |
+
if len(audio_segs) == 0:
|
148 |
+
raise NotImplementedError('No audio segments found!')
|
149 |
+
|
150 |
+
return vc_model.extract_se(audio_segs, se_save_path=se_path), audio_name
|
151 |
+
|
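A hedged usage sketch for the helpers above. The file path is hypothetical, and `converter` is a placeholder for any object exposing the .device attribute and .extract_se(wav_list, se_save_path=...) method that get_se() relies on (in this repo that role is played by the converter class defined in openvoice_cli/api.py, not shown here).

from openvoice_cli.se_extractor import split_audio_vad, get_se

# VAD-based splitting alone only needs an audio file:
wavs_folder = split_audio_vad("reference.wav", audio_name="reference",
                              target_dir="processed", split_seconds=10.0)

# Full speaker-embedding extraction, cached under processed/<name>_<hash>/se.pth:
# converter = ...  # model object with .device and .extract_se(...)
se, key = get_se("reference.wav", converter, target_dir="processed", vad=True)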
openvoice_cli/transforms.py
ADDED
@@ -0,0 +1,209 @@
1 |
+
import torch
|
2 |
+
from torch.nn import functional as F
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
|
6 |
+
|
7 |
+
DEFAULT_MIN_BIN_WIDTH = 1e-3
|
8 |
+
DEFAULT_MIN_BIN_HEIGHT = 1e-3
|
9 |
+
DEFAULT_MIN_DERIVATIVE = 1e-3
|
10 |
+
|
11 |
+
|
12 |
+
def piecewise_rational_quadratic_transform(
|
13 |
+
inputs,
|
14 |
+
unnormalized_widths,
|
15 |
+
unnormalized_heights,
|
16 |
+
unnormalized_derivatives,
|
17 |
+
inverse=False,
|
18 |
+
tails=None,
|
19 |
+
tail_bound=1.0,
|
20 |
+
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
21 |
+
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
22 |
+
min_derivative=DEFAULT_MIN_DERIVATIVE,
|
23 |
+
):
|
24 |
+
if tails is None:
|
25 |
+
spline_fn = rational_quadratic_spline
|
26 |
+
spline_kwargs = {}
|
27 |
+
else:
|
28 |
+
spline_fn = unconstrained_rational_quadratic_spline
|
29 |
+
spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
|
30 |
+
|
31 |
+
outputs, logabsdet = spline_fn(
|
32 |
+
inputs=inputs,
|
33 |
+
unnormalized_widths=unnormalized_widths,
|
34 |
+
unnormalized_heights=unnormalized_heights,
|
35 |
+
unnormalized_derivatives=unnormalized_derivatives,
|
36 |
+
inverse=inverse,
|
37 |
+
min_bin_width=min_bin_width,
|
38 |
+
min_bin_height=min_bin_height,
|
39 |
+
min_derivative=min_derivative,
|
40 |
+
**spline_kwargs
|
41 |
+
)
|
42 |
+
return outputs, logabsdet
|
43 |
+
|
44 |
+
|
45 |
+
def searchsorted(bin_locations, inputs, eps=1e-6):
|
46 |
+
bin_locations[..., -1] += eps
|
47 |
+
return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
|
48 |
+
|
49 |
+
|
50 |
+
def unconstrained_rational_quadratic_spline(
|
51 |
+
inputs,
|
52 |
+
unnormalized_widths,
|
53 |
+
unnormalized_heights,
|
54 |
+
unnormalized_derivatives,
|
55 |
+
inverse=False,
|
56 |
+
tails="linear",
|
57 |
+
tail_bound=1.0,
|
58 |
+
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
59 |
+
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
60 |
+
min_derivative=DEFAULT_MIN_DERIVATIVE,
|
61 |
+
):
|
62 |
+
inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
|
63 |
+
outside_interval_mask = ~inside_interval_mask
|
64 |
+
|
65 |
+
outputs = torch.zeros_like(inputs)
|
66 |
+
logabsdet = torch.zeros_like(inputs)
|
67 |
+
|
68 |
+
if tails == "linear":
|
69 |
+
unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
|
70 |
+
constant = np.log(np.exp(1 - min_derivative) - 1)
|
71 |
+
unnormalized_derivatives[..., 0] = constant
|
72 |
+
unnormalized_derivatives[..., -1] = constant
|
73 |
+
|
74 |
+
outputs[outside_interval_mask] = inputs[outside_interval_mask]
|
75 |
+
logabsdet[outside_interval_mask] = 0
|
76 |
+
else:
|
77 |
+
raise RuntimeError("{} tails are not implemented.".format(tails))
|
78 |
+
|
79 |
+
(
|
80 |
+
outputs[inside_interval_mask],
|
81 |
+
logabsdet[inside_interval_mask],
|
82 |
+
) = rational_quadratic_spline(
|
83 |
+
inputs=inputs[inside_interval_mask],
|
84 |
+
unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
|
85 |
+
unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
|
86 |
+
unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
|
87 |
+
inverse=inverse,
|
88 |
+
left=-tail_bound,
|
89 |
+
right=tail_bound,
|
90 |
+
bottom=-tail_bound,
|
91 |
+
top=tail_bound,
|
92 |
+
min_bin_width=min_bin_width,
|
93 |
+
min_bin_height=min_bin_height,
|
94 |
+
min_derivative=min_derivative,
|
95 |
+
)
|
96 |
+
|
97 |
+
return outputs, logabsdet
|
98 |
+
|
99 |
+
|
100 |
+
def rational_quadratic_spline(
|
101 |
+
inputs,
|
102 |
+
unnormalized_widths,
|
103 |
+
unnormalized_heights,
|
104 |
+
unnormalized_derivatives,
|
105 |
+
inverse=False,
|
106 |
+
left=0.0,
|
107 |
+
right=1.0,
|
108 |
+
bottom=0.0,
|
109 |
+
top=1.0,
|
110 |
+
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
111 |
+
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
112 |
+
min_derivative=DEFAULT_MIN_DERIVATIVE,
|
113 |
+
):
|
114 |
+
if torch.min(inputs) < left or torch.max(inputs) > right:
|
115 |
+
raise ValueError("Input to a transform is not within its domain")
|
116 |
+
|
117 |
+
num_bins = unnormalized_widths.shape[-1]
|
118 |
+
|
119 |
+
if min_bin_width * num_bins > 1.0:
|
120 |
+
raise ValueError("Minimal bin width too large for the number of bins")
|
121 |
+
if min_bin_height * num_bins > 1.0:
|
122 |
+
raise ValueError("Minimal bin height too large for the number of bins")
|
123 |
+
|
124 |
+
widths = F.softmax(unnormalized_widths, dim=-1)
|
125 |
+
widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
|
126 |
+
cumwidths = torch.cumsum(widths, dim=-1)
|
127 |
+
cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
|
128 |
+
cumwidths = (right - left) * cumwidths + left
|
129 |
+
cumwidths[..., 0] = left
|
130 |
+
cumwidths[..., -1] = right
|
131 |
+
widths = cumwidths[..., 1:] - cumwidths[..., :-1]
|
132 |
+
|
133 |
+
derivatives = min_derivative + F.softplus(unnormalized_derivatives)
|
134 |
+
|
135 |
+
heights = F.softmax(unnormalized_heights, dim=-1)
|
136 |
+
heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
|
137 |
+
cumheights = torch.cumsum(heights, dim=-1)
|
138 |
+
cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
|
139 |
+
cumheights = (top - bottom) * cumheights + bottom
|
140 |
+
cumheights[..., 0] = bottom
|
141 |
+
cumheights[..., -1] = top
|
142 |
+
heights = cumheights[..., 1:] - cumheights[..., :-1]
|
143 |
+
|
144 |
+
if inverse:
|
145 |
+
bin_idx = searchsorted(cumheights, inputs)[..., None]
|
146 |
+
else:
|
147 |
+
bin_idx = searchsorted(cumwidths, inputs)[..., None]
|
148 |
+
|
149 |
+
input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
|
150 |
+
input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
|
151 |
+
|
152 |
+
input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
|
153 |
+
delta = heights / widths
|
154 |
+
input_delta = delta.gather(-1, bin_idx)[..., 0]
|
155 |
+
|
156 |
+
input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
|
157 |
+
input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
|
158 |
+
|
159 |
+
input_heights = heights.gather(-1, bin_idx)[..., 0]
|
160 |
+
|
161 |
+
if inverse:
|
162 |
+
a = (inputs - input_cumheights) * (
|
163 |
+
input_derivatives + input_derivatives_plus_one - 2 * input_delta
|
164 |
+
) + input_heights * (input_delta - input_derivatives)
|
165 |
+
b = input_heights * input_derivatives - (inputs - input_cumheights) * (
|
166 |
+
input_derivatives + input_derivatives_plus_one - 2 * input_delta
|
167 |
+
)
|
168 |
+
c = -input_delta * (inputs - input_cumheights)
|
169 |
+
|
170 |
+
discriminant = b.pow(2) - 4 * a * c
|
171 |
+
assert (discriminant >= 0).all()
|
172 |
+
|
173 |
+
root = (2 * c) / (-b - torch.sqrt(discriminant))
|
174 |
+
outputs = root * input_bin_widths + input_cumwidths
|
175 |
+
|
176 |
+
theta_one_minus_theta = root * (1 - root)
|
177 |
+
denominator = input_delta + (
|
178 |
+
(input_derivatives + input_derivatives_plus_one - 2 * input_delta)
|
179 |
+
* theta_one_minus_theta
|
180 |
+
)
|
181 |
+
derivative_numerator = input_delta.pow(2) * (
|
182 |
+
input_derivatives_plus_one * root.pow(2)
|
183 |
+
+ 2 * input_delta * theta_one_minus_theta
|
184 |
+
+ input_derivatives * (1 - root).pow(2)
|
185 |
+
)
|
186 |
+
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
|
187 |
+
|
188 |
+
return outputs, -logabsdet
|
189 |
+
else:
|
190 |
+
theta = (inputs - input_cumwidths) / input_bin_widths
|
191 |
+
theta_one_minus_theta = theta * (1 - theta)
|
192 |
+
|
193 |
+
numerator = input_heights * (
|
194 |
+
input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
|
195 |
+
)
|
196 |
+
denominator = input_delta + (
|
197 |
+
(input_derivatives + input_derivatives_plus_one - 2 * input_delta)
|
198 |
+
* theta_one_minus_theta
|
199 |
+
)
|
200 |
+
outputs = input_cumheights + numerator / denominator
|
201 |
+
|
202 |
+
derivative_numerator = input_delta.pow(2) * (
|
203 |
+
input_derivatives_plus_one * theta.pow(2)
|
204 |
+
+ 2 * input_delta * theta_one_minus_theta
|
205 |
+
+ input_derivatives * (1 - theta).pow(2)
|
206 |
+
)
|
207 |
+
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
|
208 |
+
|
209 |
+
return outputs, logabsdet
|
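A self-contained sanity check of the transform above: applying the spline forward and then inverse with the same parameters recovers the input, and the log-determinants cancel. The shapes and number of bins are arbitrary illustrative choices.

import torch
from openvoice_cli.transforms import piecewise_rational_quadratic_transform

num_bins = 10
x = torch.rand(2, 3) * 8 - 4                         # points inside the [-5, 5] tail bound
widths  = torch.randn(2, 3, num_bins)
heights = torch.randn(2, 3, num_bins)
derivs  = torch.randn(2, 3, num_bins - 1)            # num_bins - 1 values; linear tails pad the edges

y, logdet = piecewise_rational_quadratic_transform(
    x, widths, heights, derivs, inverse=False, tails="linear", tail_bound=5.0)
x_rec, inv_logdet = piecewise_rational_quadratic_transform(
    y, widths, heights, derivs, inverse=True, tails="linear", tail_bound=5.0)

print(torch.allclose(x, x_rec, atol=1e-4))             # True
print(torch.allclose(logdet, -inv_logdet, atol=1e-4))  # True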
openvoice_cli/utils.py
ADDED
@@ -0,0 +1,193 @@
1 |
+
import re
|
2 |
+
import json
|
3 |
+
import numpy as np
|
4 |
+
|
5 |
+
def get_hparams_from_file(config_path):
|
6 |
+
with open(config_path, "r", encoding="utf-8") as f:
|
7 |
+
data = f.read()
|
8 |
+
config = json.loads(data)
|
9 |
+
|
10 |
+
hparams = HParams(**config)
|
11 |
+
return hparams
|
12 |
+
|
13 |
+
class HParams:
|
14 |
+
def __init__(self, **kwargs):
|
15 |
+
for k, v in kwargs.items():
|
16 |
+
if type(v) == dict:
|
17 |
+
v = HParams(**v)
|
18 |
+
self[k] = v
|
19 |
+
|
20 |
+
def keys(self):
|
21 |
+
return self.__dict__.keys()
|
22 |
+
|
23 |
+
def items(self):
|
24 |
+
return self.__dict__.items()
|
25 |
+
|
26 |
+
def values(self):
|
27 |
+
return self.__dict__.values()
|
28 |
+
|
29 |
+
def __len__(self):
|
30 |
+
return len(self.__dict__)
|
31 |
+
|
32 |
+
def __getitem__(self, key):
|
33 |
+
return getattr(self, key)
|
34 |
+
|
35 |
+
def __setitem__(self, key, value):
|
36 |
+
return setattr(self, key, value)
|
37 |
+
|
38 |
+
def __contains__(self, key):
|
39 |
+
return key in self.__dict__
|
40 |
+
|
41 |
+
def __repr__(self):
|
42 |
+
return self.__dict__.__repr__()
|
43 |
+
|
44 |
+
|
45 |
+
def string_to_bits(string, pad_len=8):
|
46 |
+
# Convert each character to its ASCII value
|
47 |
+
ascii_values = [ord(char) for char in string]
|
48 |
+
|
49 |
+
# Convert ASCII values to binary representation
|
50 |
+
binary_values = [bin(value)[2:].zfill(8) for value in ascii_values]
|
51 |
+
|
52 |
+
# Convert binary strings to integer arrays
|
53 |
+
bit_arrays = [[int(bit) for bit in binary] for binary in binary_values]
|
54 |
+
|
55 |
+
# Convert list of arrays to NumPy array
|
56 |
+
numpy_array = np.array(bit_arrays)
|
57 |
+
numpy_array_full = np.zeros((pad_len, 8), dtype=numpy_array.dtype)
|
58 |
+
numpy_array_full[:, 2] = 1
|
59 |
+
max_len = min(pad_len, len(numpy_array))
|
60 |
+
numpy_array_full[:max_len] = numpy_array[:max_len]
|
61 |
+
return numpy_array_full
|
62 |
+
|
63 |
+
|
64 |
+
def bits_to_string(bits_array):
|
65 |
+
# Convert each row of the array to a binary string
|
66 |
+
binary_values = [''.join(str(bit) for bit in row) for row in bits_array]
|
67 |
+
|
68 |
+
# Convert binary strings to ASCII values
|
69 |
+
ascii_values = [int(binary, 2) for binary in binary_values]
|
70 |
+
|
71 |
+
# Convert ASCII values to characters
|
72 |
+
output_string = ''.join(chr(value) for value in ascii_values)
|
73 |
+
|
74 |
+
return output_string
|
75 |
+
|
76 |
+
|
77 |
+
def split_sentence(text, min_len=10, language_str='[EN]'):
|
78 |
+
if language_str in ['EN']:
|
79 |
+
sentences = split_sentences_latin(text, min_len=min_len)
|
80 |
+
else:
|
81 |
+
sentences = split_sentences_zh(text, min_len=min_len)
|
82 |
+
return sentences
|
83 |
+
|
84 |
+
def split_sentences_latin(text, min_len=10):
|
85 |
+
"""Split Long sentences into list of short ones
|
86 |
+
|
87 |
+
Args:
|
88 |
+
str: Input sentences.
|
89 |
+
|
90 |
+
Returns:
|
91 |
+
List[str]: list of output sentences.
|
92 |
+
"""
|
93 |
+
# deal with dirty sentences
|
94 |
+
text = re.sub('[。!?;]', '.', text)
|
95 |
+
text = re.sub('[,]', ',', text)
|
96 |
+
text = re.sub('[“”]', '"', text)
|
97 |
+
text = re.sub('[‘’]', "'", text)
|
98 |
+
text = re.sub(r"[\<\>\(\)\[\]\"\«\»]+", "", text)
|
99 |
+
text = re.sub('[\n\t ]+', ' ', text)
|
100 |
+
text = re.sub('([,.!?;])', r'\1 $#!', text)
|
101 |
+
# split
|
102 |
+
sentences = [s.strip() for s in text.split('$#!')]
|
103 |
+
if len(sentences[-1]) == 0: del sentences[-1]
|
104 |
+
|
105 |
+
new_sentences = []
|
106 |
+
new_sent = []
|
107 |
+
count_len = 0
|
108 |
+
for ind, sent in enumerate(sentences):
|
109 |
+
# print(sent)
|
110 |
+
new_sent.append(sent)
|
111 |
+
count_len += len(sent.split(" "))
|
112 |
+
if count_len > min_len or ind == len(sentences) - 1:
|
113 |
+
count_len = 0
|
114 |
+
new_sentences.append(' '.join(new_sent))
|
115 |
+
new_sent = []
|
116 |
+
return merge_short_sentences_latin(new_sentences)
|
117 |
+
|
118 |
+
|
119 |
+
def merge_short_sentences_latin(sens):
|
120 |
+
"""Avoid short sentences by merging them with the following sentence.
|
121 |
+
|
122 |
+
Args:
|
123 |
+
List[str]: list of input sentences.
|
124 |
+
|
125 |
+
Returns:
|
126 |
+
List[str]: list of output sentences.
|
127 |
+
"""
|
128 |
+
sens_out = []
|
129 |
+
for s in sens:
|
130 |
+
# If the previous sentence is too short, merge them with
|
131 |
+
# the current sentence.
|
132 |
+
if len(sens_out) > 0 and len(sens_out[-1].split(" ")) <= 2:
|
133 |
+
sens_out[-1] = sens_out[-1] + " " + s
|
134 |
+
else:
|
135 |
+
sens_out.append(s)
|
136 |
+
try:
|
137 |
+
if len(sens_out[-1].split(" ")) <= 2:
|
138 |
+
sens_out[-2] = sens_out[-2] + " " + sens_out[-1]
|
139 |
+
sens_out.pop(-1)
|
140 |
+
except:
|
141 |
+
pass
|
142 |
+
return sens_out
|
143 |
+
|
144 |
+
def split_sentences_zh(text, min_len=10):
|
145 |
+
text = re.sub('[。!?;]', '.', text)
|
146 |
+
text = re.sub('[,]', ',', text)
|
147 |
+
# replace newlines, tabs and runs of spaces with a single space
|
148 |
+
text = re.sub('[\n\t ]+', ' ', text)
|
149 |
+
# insert a split marker after each punctuation mark
|
150 |
+
text = re.sub('([,.!?;])', r'\1 $#!', text)
|
151 |
+
# split into sentences and strip leading/trailing whitespace
|
152 |
+
# sentences = [s.strip() for s in re.split('(。|!|?|;)', text)]
|
153 |
+
sentences = [s.strip() for s in text.split('$#!')]
|
154 |
+
if len(sentences[-1]) == 0: del sentences[-1]
|
155 |
+
|
156 |
+
new_sentences = []
|
157 |
+
new_sent = []
|
158 |
+
count_len = 0
|
159 |
+
for ind, sent in enumerate(sentences):
|
160 |
+
new_sent.append(sent)
|
161 |
+
count_len += len(sent)
|
162 |
+
if count_len > min_len or ind == len(sentences) - 1:
|
163 |
+
count_len = 0
|
164 |
+
new_sentences.append(' '.join(new_sent))
|
165 |
+
new_sent = []
|
166 |
+
return merge_short_sentences_zh(new_sentences)
|
167 |
+
|
168 |
+
|
169 |
+
def merge_short_sentences_zh(sens):
|
170 |
+
# return sens
|
171 |
+
"""Avoid short sentences by merging them with the following sentence.
|
172 |
+
|
173 |
+
Args:
|
174 |
+
List[str]: list of input sentences.
|
175 |
+
|
176 |
+
Returns:
|
177 |
+
List[str]: list of output sentences.
|
178 |
+
"""
|
179 |
+
sens_out = []
|
180 |
+
for s in sens:
|
181 |
+
# If the previous sentence is too short, merge it with
|
182 |
+
# the current sentence.
|
183 |
+
if len(sens_out) > 0 and len(sens_out[-1]) <= 2:
|
184 |
+
sens_out[-1] = sens_out[-1] + " " + s
|
185 |
+
else:
|
186 |
+
sens_out.append(s)
|
187 |
+
try:
|
188 |
+
if len(sens_out[-1]) <= 2:
|
189 |
+
sens_out[-2] = sens_out[-2] + " " + sens_out[-1]
|
190 |
+
sens_out.pop(-1)
|
191 |
+
except:
|
192 |
+
pass
|
193 |
+
return sens_out
|
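For reference, the Latin-script splitter above chunks long text into roughly min_len-word pieces and then merges trailing fragments of two words or fewer. Note that split_sentence() only takes the Latin branch when language_str is exactly 'EN'; with its default value '[EN]' it falls through to the Chinese splitter, so callers wanting the Latin behaviour should pass language_str='EN' or call split_sentences_latin directly, as in this small illustrative example:

from openvoice_cli.utils import split_sentences_latin

text = ("OpenVoice clones the tone colour of a short reference clip and applies it to new speech. "
        "Long inputs are therefore chunked before synthesis so each piece stays a manageable length.")
for chunk in split_sentences_latin(text, min_len=10):
    print(chunk)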
requirements.txt
ADDED
@@ -0,0 +1,19 @@
1 |
+
librosa==0.9.1
|
2 |
+
pydub==0.25.1
|
3 |
+
pysrt
|
4 |
+
wavmark==0.0.3
|
5 |
+
whisper-timestamped>=1.14.2
|
6 |
+
torch==2.1.1
|
7 |
+
torchaudio==2.1.1
|
8 |
+
typing-extensions>=4.6.1
|
9 |
+
numpy==1.26.2
|
10 |
+
faster_whisper==0.10.0
|
11 |
+
gradio==3.9.1
|
12 |
+
tts==0.22.0
|
13 |
+
httpx
|
14 |
+
httpcore
|
15 |
+
psutil
|
16 |
+
ffmpeg-python
|
17 |
+
h11
|
18 |
+
cutlet
|
19 |
+
fugashi[unidic-lite]
|
tools/__init__.py
ADDED
File without changes
|
tools/asr/config.py
ADDED
@@ -0,0 +1,31 @@
1 |
+
import os
|
2 |
+
|
3 |
+
def check_fw_local_models():
|
4 |
+
'''
|
5 |
+
Check at startup whether Faster Whisper models are available locally.
|
6 |
+
'''
|
7 |
+
model_size_list = [
|
8 |
+
"tiny", "tiny.en",
|
9 |
+
"base", "base.en",
|
10 |
+
"small", "small.en",
|
11 |
+
"medium", "medium.en",
|
12 |
+
"large", "large-v1",
|
13 |
+
"large-v2", "large-v3"]
|
14 |
+
for i, size in enumerate(model_size_list):
|
15 |
+
if os.path.exists(f'tools/asr/models/faster-whisper-{size}'):
|
16 |
+
model_size_list[i] = size + '-local'
|
17 |
+
return model_size_list
|
18 |
+
|
19 |
+
asr_dict = {
|
20 |
+
"达摩 ASR (中文)": {
|
21 |
+
'lang': ['zh'],
|
22 |
+
'size': ['large'],
|
23 |
+
'path': 'funasr_asr.py',
|
24 |
+
},
|
25 |
+
"Faster Whisper (多语种)": {
|
26 |
+
'lang': ['auto', 'zh', 'en', 'ja'],
|
27 |
+
'size': check_fw_local_models(),
|
28 |
+
'path': 'fasterwhisper_asr.py'
|
29 |
+
}
|
30 |
+
}
|
31 |
+
|
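asr_dict above is the lookup table a front end can use to pick an ASR backend; a small illustrative read of it:

from tools.asr.config import asr_dict

tool = asr_dict["Faster Whisper (多语种)"]
print(tool["lang"])   # languages the backend accepts, e.g. ['auto', 'zh', 'en', 'ja']
print(tool["size"])   # model sizes, suffixed with '-local' when found under tools/asr/models/
print(tool["path"])   # script to launch for this backend: 'fasterwhisper_asr.py'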
tools/asr/fasterwhisper_asr.py
ADDED
@@ -0,0 +1,110 @@
1 |
+
import argparse
|
2 |
+
import os
|
3 |
+
os.environ["HF_ENDPOINT"]="https://hf-mirror.com"
|
4 |
+
import traceback
|
5 |
+
import requests
|
6 |
+
from glob import glob
|
7 |
+
import torch
|
8 |
+
|
9 |
+
from faster_whisper import WhisperModel
|
10 |
+
from tqdm import tqdm
|
11 |
+
|
12 |
+
from tools.asr.config import check_fw_local_models
|
13 |
+
|
14 |
+
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
|
15 |
+
|
16 |
+
language_code_list = [
|
17 |
+
"af", "am", "ar", "as", "az",
|
18 |
+
"ba", "be", "bg", "bn", "bo",
|
19 |
+
"br", "bs", "ca", "cs", "cy",
|
20 |
+
"da", "de", "el", "en", "es",
|
21 |
+
"et", "eu", "fa", "fi", "fo",
|
22 |
+
"fr", "gl", "gu", "ha", "haw",
|
23 |
+
"he", "hi", "hr", "ht", "hu",
|
24 |
+
"hy", "id", "is", "it", "ja",
|
25 |
+
"jw", "ka", "kk", "km", "kn",
|
26 |
+
"ko", "la", "lb", "ln", "lo",
|
27 |
+
"lt", "lv", "mg", "mi", "mk",
|
28 |
+
"ml", "mn", "mr", "ms", "mt",
|
29 |
+
"my", "ne", "nl", "nn", "no",
|
30 |
+
"oc", "pa", "pl", "ps", "pt",
|
31 |
+
"ro", "ru", "sa", "sd", "si",
|
32 |
+
"sk", "sl", "sn", "so", "sq",
|
33 |
+
"sr", "su", "sv", "sw", "ta",
|
34 |
+
"te", "tg", "th", "tk", "tl",
|
35 |
+
"tr", "tt", "uk", "ur", "uz",
|
36 |
+
"vi", "yi", "yo", "zh", "yue",
|
37 |
+
"auto"]
|
38 |
+
|
39 |
+
def execute_asr(input_folder, output_folder, model_size, language,precision):
|
40 |
+
if '-local' in model_size:
|
41 |
+
model_size = model_size[:-6]
|
42 |
+
model_path = f'tools/asr/models/faster-whisper-{model_size}'
|
43 |
+
else:
|
44 |
+
model_path = model_size
|
45 |
+
if language == 'auto':
|
46 |
+
language = None #不设置语种由模型自动输出概率最高的语种
|
47 |
+
print("loading faster whisper model:",model_size,model_path)
|
48 |
+
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
49 |
+
try:
|
50 |
+
model = WhisperModel(model_path, device=device, compute_type=precision)
|
51 |
+
except:
|
52 |
+
return print(traceback.format_exc())
|
53 |
+
output = []
|
54 |
+
output_file_name = os.path.basename(input_folder)
|
55 |
+
output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')
|
56 |
+
|
57 |
+
if not os.path.exists(output_folder):
|
58 |
+
os.makedirs(output_folder)
|
59 |
+
|
60 |
+
for file in tqdm(glob(os.path.join(input_folder, '**/*.wav'), recursive=True)):
|
61 |
+
try:
|
62 |
+
segments, info = model.transcribe(
|
63 |
+
audio = file,
|
64 |
+
beam_size = 5,
|
65 |
+
vad_filter = True,
|
66 |
+
vad_parameters = dict(min_silence_duration_ms=700),
|
67 |
+
language = language)
|
68 |
+
text = ''
|
69 |
+
|
70 |
+
if info.language == "zh":
|
71 |
+
print("检测为中文文本,转funasr处理")
|
72 |
+
if("only_asr"not in globals()):
|
73 |
+
from tools.asr.funasr_asr import only_asr##如果用英文就不需要导入下载模型
|
74 |
+
text = only_asr(file)
|
75 |
+
|
76 |
+
if text == '':
|
77 |
+
for segment in segments:
|
78 |
+
text += segment.text
|
79 |
+
output.append(f"{file}|{output_file_name}|{info.language.upper()}|{text}")
|
80 |
+
except:
|
81 |
+
return print(traceback.format_exc())
|
82 |
+
|
83 |
+
with open(output_file_path, "w", encoding="utf-8") as f:
|
84 |
+
f.write("\n".join(output))
|
85 |
+
print(f"ASR 任务完成->标注文件路径: {output_file_path}\n")
|
86 |
+
return output_file_path
|
87 |
+
|
88 |
+
if __name__ == '__main__':
|
89 |
+
parser = argparse.ArgumentParser()
|
90 |
+
parser.add_argument("-i", "--input_folder", type=str, required=True,
|
91 |
+
help="Path to the folder containing WAV files.")
|
92 |
+
parser.add_argument("-o", "--output_folder", type=str, required=True,
|
93 |
+
help="Output folder to store transcriptions.")
|
94 |
+
parser.add_argument("-s", "--model_size", type=str, default='large-v3',
|
95 |
+
choices=check_fw_local_models(),
|
96 |
+
help="Model Size of Faster Whisper")
|
97 |
+
parser.add_argument("-l", "--language", type=str, default='ja',
|
98 |
+
choices=language_code_list,
|
99 |
+
help="Language of the audio files.")
|
100 |
+
parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16','float32'],
|
101 |
+
help="fp16 or fp32")
|
102 |
+
|
103 |
+
cmd = parser.parse_args()
|
104 |
+
output_file_path = execute_asr(
|
105 |
+
input_folder = cmd.input_folder,
|
106 |
+
output_folder = cmd.output_folder,
|
107 |
+
model_size = cmd.model_size,
|
108 |
+
language = cmd.language,
|
109 |
+
precision = cmd.precision,
|
110 |
+
)
|
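execute_asr can also be driven from Python rather than the CLI; it returns the path of the .list annotation file it writes, with one wav_path|folder_name|LANGUAGE|text line per clip. A minimal sketch, assuming a dataset/speaker1 folder of WAV files (that path is an assumption):

# Sketch only: calling execute_asr programmatically; the dataset path is an assumption.
from tools.asr.fasterwhisper_asr import execute_asr

list_path = execute_asr(
    input_folder="dataset/speaker1",
    output_folder="output/asr_opt",
    model_size="large-v3",
    language="auto",        # let the model pick the most probable language
    precision="float16",
)
print(list_path)            # e.g. /abs/path/output/asr_opt/speaker1.list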
tools/asr/funasr_asr.py
ADDED
@@ -0,0 +1,76 @@
# -*- coding:utf-8 -*-

import argparse
import os
import traceback
from tqdm import tqdm

from funasr import AutoModel

path_asr  = 'tools/asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'
path_vad  = 'tools/asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch'
path_punc = 'tools/asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch'
path_asr  = path_asr  if os.path.exists(path_asr)  else "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
path_vad  = path_vad  if os.path.exists(path_vad)  else "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
path_punc = path_punc if os.path.exists(path_punc) else "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"

model = AutoModel(
    model               = path_asr,
    model_revision      = "v2.0.4",
    vad_model           = path_vad,
    vad_model_revision  = "v2.0.4",
    punc_model          = path_punc,
    punc_model_revision = "v2.0.4",
)

def only_asr(input_file):
    try:
        text = model.generate(input=input_file)[0]["text"]
    except Exception:
        text = ''
        print(traceback.format_exc())
    return text

def execute_asr(input_folder, output_folder, model_size, language):
    input_file_names = os.listdir(input_folder)
    input_file_names.sort()

    output = []
    output_file_name = os.path.basename(input_folder)

    for name in tqdm(input_file_names):
        try:
            text = model.generate(input="%s/%s"%(input_folder, name))[0]["text"]
            output.append(f"{input_folder}/{name}|{output_file_name}|{language.upper()}|{text}")
        except Exception:
            print(traceback.format_exc())

    output_folder = output_folder or "output/asr_opt"
    os.makedirs(output_folder, exist_ok=True)
    output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')

    with open(output_file_path, "w", encoding="utf-8") as f:
        f.write("\n".join(output))
        print(f"ASR task finished -> annotation file written to: {output_file_path}\n")
    return output_file_path

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_folder", type=str, required=True,
                        help="Path to the folder containing WAV files.")
    parser.add_argument("-o", "--output_folder", type=str, required=True,
                        help="Output folder to store transcriptions.")
    parser.add_argument("-s", "--model_size", type=str, default='large',
                        help="Model Size of FunASR is Large")
    parser.add_argument("-l", "--language", type=str, default='zh', choices=['zh'],
                        help="Language of the audio files.")
    parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16', 'float32'],
                        help="fp16 or fp32")  # not wired up yet

    cmd = parser.parse_args()
    execute_asr(
        input_folder  = cmd.input_folder,
        output_folder = cmd.output_folder,
        model_size    = cmd.model_size,
        language      = cmd.language,
    )
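only_asr is the single-file helper that fasterwhisper_asr.py imports when it detects Chinese speech; on failure it prints the traceback and returns an empty string. A minimal sketch (the WAV path is an assumption):

# Sketch only: transcribing one Chinese clip with the FunASR helper; the path is an assumption.
from tools.asr.funasr_asr import only_asr

text = only_asr("dataset/speaker1/sample_zh.wav")
print(text if text else "transcription failed, see traceback above")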
tools/asr/models/.gitignore
ADDED
@@ -0,0 +1,2 @@
*
!.gitignore
tools/cmd-denoise.py
ADDED
@@ -0,0 +1,29 @@
import os, argparse

from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from tqdm import tqdm

path_denoise = 'tools/denoise-model/speech_frcrn_ans_cirm_16k'
path_denoise = path_denoise if os.path.exists(path_denoise) else "damo/speech_frcrn_ans_cirm_16k"
ans = pipeline(Tasks.acoustic_noise_suppression, model=path_denoise)
def execute_denoise(input_folder, output_folder):
    os.makedirs(output_folder, exist_ok=True)
    # print(input_folder)
    # print(list(os.listdir(input_folder).sort()))
    for name in tqdm(os.listdir(input_folder)):
        ans("%s/%s"%(input_folder, name), output_path='%s/%s'%(output_folder, name))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_folder", type=str, required=True,
                        help="Path to the folder containing WAV files.")
    parser.add_argument("-o", "--output_folder", type=str, required=True,
                        help="Output folder to store transcriptions.")
    parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16', 'float32'],
                        help="fp16 or fp32")  # not wired up yet
    cmd = parser.parse_args()
    execute_denoise(
        input_folder  = cmd.input_folder,
        output_folder = cmd.output_folder,
    )
tools/denoise-model/.gitignore
ADDED
@@ -0,0 +1,2 @@
*
!.gitignore
tools/i18n/i18n.py
ADDED
@@ -0,0 +1,36 @@
import json
import locale
import os


def load_language_list(language):
    # Directory containing this file
    current_dir = os.path.dirname(os.path.abspath(__file__))
    # Absolute path to the locale file
    language_file_path = os.path.join(current_dir, "locale", f"{language}.json")

    with open(language_file_path, "r", encoding="utf-8") as f:
        language_list = json.load(f)
    return language_list


class I18nAuto:
    def __init__(self, language=None):
        if language in ["Auto", None]:
            language = locale.getdefaultlocale()[0]  # getlocale can't identify the system's language ((None, None))

        # Directory containing this file
        current_dir = os.path.dirname(os.path.abspath(__file__))
        # Absolute path to the locale file
        language_file_path = os.path.join(current_dir, "locale", f"{language}.json")

        if not os.path.exists(language_file_path):
            language = "en_US"
        self.language = language
        self.language_map = load_language_list(language)

    def __call__(self, key):
        return self.language_map.get(key, key)

    def __repr__(self):
        return "Use Language: " + self.language
tools/i18n/locale/en_US.json
ADDED
@@ -0,0 +1,135 @@
1 |
+
{
|
2 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.",
|
3 |
+
"A模型权重": "Weight (w) for Model A:",
|
4 |
+
"A模型路径": "Path to Model A:",
|
5 |
+
"B模型路径": "Path to Model B:",
|
6 |
+
"E:\\语音音频+标注\\米津玄师\\src": "C:\\Users\\Desktop\\src",
|
7 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:",
|
8 |
+
"Index Rate": "Index Rate",
|
9 |
+
"Onnx导出": "Export Onnx",
|
10 |
+
"Onnx输出路径": "Onnx Export Path:",
|
11 |
+
"RVC模型路径": "RVC Model Path:",
|
12 |
+
"ckpt处理": "ckpt Processing",
|
13 |
+
"harvest进程数": "Number of CPU processes used for harvest pitch algorithm",
|
14 |
+
"index文件路径不可包含中文": "index文件路径不可包含中文",
|
15 |
+
"pth文件路径不可包含中文": "pth文件路径不可包含中文",
|
16 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Enter the GPU index(es) separated by '-', e.g., 0-0-1 to use 2 processes in GPU0 and 1 process in GPU1",
|
17 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Step 1: Fill in the experimental configuration. Experimental data is stored in the 'logs' folder, with each experiment having a separate folder. Manually enter the experiment name path, which contains the experimental configuration, logs, and trained model files.",
|
18 |
+
"step1:正在处理数据": "Step 1: Processing data",
|
19 |
+
"step2:正在提取音高&正在提取特征": "step2:Pitch extraction & feature extraction",
|
20 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Step 2a: Automatically traverse all files in the training folder that can be decoded into audio and perform slice normalization. Generates 2 wav folders in the experiment directory. Currently, only single-singer/speaker training is supported.",
|
21 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Step 2b: Use CPU to extract pitch (if the model has pitch), use GPU to extract features (select GPU index):",
|
22 |
+
"step3: 填写训练设置, 开始训练模型和索引": "Step 3: Fill in the training settings and start training the model and index",
|
23 |
+
"step3a:正在训练模型": "Step 3a: Model training started",
|
24 |
+
"一键训练": "One-click training",
|
25 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "Multiple audio files can also be imported. If a folder path exists, this input is ignored.",
|
26 |
+
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Batch processing for vocal accompaniment separation using the UVR5 model.<br>Example of a valid folder path format: D:\\path\\to\\input\\folder (copy it from the file manager address bar).<br>The model is divided into three categories:<br>1. Preserve vocals: Choose this option for audio without harmonies. It preserves vocals better than HP5. It includes two built-in models: HP2 and HP3. HP3 may slightly leak accompaniment but preserves vocals slightly better than HP2.<br>2. Preserve main vocals only: Choose this option for audio with harmonies. It may weaken the main vocals. It includes one built-in model: HP5.<br>3. De-reverb and de-delay models (by FoxJoy):<br> (1) MDX-Net: The best choice for stereo reverb removal but cannot remove mono reverb;<br> (234) DeEcho: Removes delay effects. Aggressive mode removes more thoroughly than Normal mode. DeReverb additionally removes reverb and can remove mono reverb, but not very effectively for heavily reverberated high-frequency content.<br>De-reverb/de-delay notes:<br>1. The processing time for the DeEcho-DeReverb model is approximately twice as long as the other two DeEcho models.<br>2. The MDX-Net-Dereverb model is quite slow.<br>3. The recommended cleanest configuration is to apply MDX-Net first and then DeEcho-Aggressive.",
|
27 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2:",
|
28 |
+
"伴奏人声分离&去混响&去回声": "Vocals/Accompaniment Separation & Reverberation Removal",
|
29 |
+
"使用模型采样率": "使用模型采样率",
|
30 |
+
"使用设备采样率": "使用设备采样率",
|
31 |
+
"保存名": "Save name:",
|
32 |
+
"保存的文件名, 默认空为和源文件同名": "Save file name (default: same as the source file):",
|
33 |
+
"保存的模型名不带后缀": "Saved model name (without extension):",
|
34 |
+
"保存频率save_every_epoch": "Save frequency (save_every_epoch):",
|
35 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:",
|
36 |
+
"修改": "Modify",
|
37 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modify model information (only supported for small model files extracted from the 'weights' folder)",
|
38 |
+
"停止音频转换": "Stop audio conversion",
|
39 |
+
"全流程结束!": "All processes have been completed!",
|
40 |
+
"刷新音色列表和索引路径": "Refresh voice list and index path",
|
41 |
+
"加载模型": "Load model",
|
42 |
+
"加载预训练底模D路径": "Load pre-trained base model D path:",
|
43 |
+
"加载预训练底模G路径": "Load pre-trained base model G path:",
|
44 |
+
"单次推理": "Single Inference",
|
45 |
+
"卸载音色省显存": "Unload voice to save GPU memory:",
|
46 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):",
|
47 |
+
"后处理重采样至最终采样率,0为不进行重采样": "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:",
|
48 |
+
"否": "No",
|
49 |
+
"启用相位声码器": "启用相位声码器",
|
50 |
+
"响应阈值": "Response threshold",
|
51 |
+
"响度因子": "loudness factor",
|
52 |
+
"处理数据": "Process data",
|
53 |
+
"导出Onnx模型": "Export Onnx Model",
|
54 |
+
"导出文件格式": "Export file format",
|
55 |
+
"常见问题解答": "FAQ (Frequently Asked Questions)",
|
56 |
+
"常规设置": "General settings",
|
57 |
+
"开始音频转换": "Start audio conversion",
|
58 |
+
"很遗憾您这没有能用的显卡来支持您训练": "Unfortunately, there is no compatible GPU available to support your training.",
|
59 |
+
"性能设置": "Performance settings",
|
60 |
+
"总训练轮数total_epoch": "Total training epochs (total_epoch):",
|
61 |
+
"批量推理": "Batch Inference",
|
62 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').",
|
63 |
+
"指定输出主人声文件夹": "Specify the output folder for vocals:",
|
64 |
+
"指定输出文件夹": "Specify output folder:",
|
65 |
+
"指定输出非主人声文件夹": "Specify the output folder for accompaniment:",
|
66 |
+
"推理时间(ms):": "Inference time (ms):",
|
67 |
+
"推理音色": "Inferencing voice:",
|
68 |
+
"提取": "Extract",
|
69 |
+
"提取音高和处理数据使用的CPU进程数": "Number of CPU processes used for pitch extraction and data processing:",
|
70 |
+
"是": "Yes",
|
71 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "Save only the latest '.ckpt' file to save disk space:",
|
72 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Save a small final model to the 'weights' folder at each save point:",
|
73 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement:",
|
74 |
+
"显卡信息": "GPU Information",
|
75 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "This software is open source under the MIT license. The author does not have any control over the software. Users who use the software and distribute the sounds exported by the software are solely responsible. <br>If you do not agree with this clause, you cannot use or reference any codes and files within the software package. See the root directory <b>Agreement-LICENSE.txt</b> for details.",
|
76 |
+
"查看": "View",
|
77 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "View model information (only supported for small model files extracted from the 'weights' folder)",
|
78 |
+
"检索特征占比": "Search feature ratio (controls accent strength, too high has artifacting):",
|
79 |
+
"模型": "Model",
|
80 |
+
"模型推理": "Model Inference",
|
81 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:",
|
82 |
+
"模型是否带音高指导": "Whether the model has pitch guidance:",
|
83 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "Whether the model has pitch guidance (required for singing, optional for speech):",
|
84 |
+
"模型是否带音高指导,1是0否": "Whether the model has pitch guidance (1: yes, 0: no):",
|
85 |
+
"模型版本型号": "Model architecture version:",
|
86 |
+
"模型融合, 可用于测试音色融合": "Model fusion, can be used to test timbre fusion",
|
87 |
+
"模型路径": "Path to Model:",
|
88 |
+
"每张显卡的batch_size": "Batch size per GPU:",
|
89 |
+
"淡入淡出长度": "Fade length",
|
90 |
+
"版本": "Version",
|
91 |
+
"特征提取": "Feature extraction",
|
92 |
+
"特征检索库文件路径,为空则使用下拉的选择结果": "Path to the feature index file. Leave blank to use the selected result from the dropdown:",
|
93 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.",
|
94 |
+
"目标采样率": "Target sample rate:",
|
95 |
+
"算法延迟(ms):": "Algorithmic delays(ms):",
|
96 |
+
"自动检测index路径,下拉式选择(dropdown)": "Auto-detect index path and select from the dropdown:",
|
97 |
+
"融合": "Fusion",
|
98 |
+
"要改的模型信息": "Model information to be modified:",
|
99 |
+
"要置入的模型信息": "Model information to be placed:",
|
100 |
+
"训练": "Train",
|
101 |
+
"训练模型": "Train model",
|
102 |
+
"训练特征索引": "Train feature index",
|
103 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Training complete. You can check the training logs in the console or the 'train.log' file under the experiment folder.",
|
104 |
+
"请指定说话人id": "Please specify the speaker/singer ID:",
|
105 |
+
"请选择index文件": "Please choose the .index file",
|
106 |
+
"请选择pth文件": "Please choose the .pth file",
|
107 |
+
"请选择说话人id": "Select Speaker/Singer ID:",
|
108 |
+
"转换": "Convert",
|
109 |
+
"输入实验名": "Enter the experiment name:",
|
110 |
+
"输入待处理音频文件夹路径": "Enter the path of the audio folder to be processed:",
|
111 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):",
|
112 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "Enter the path of the audio file to be processed (default is the correct format example):",
|
113 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Adjust the volume envelope scaling. Closer to 0, the more it mimicks the volume of the original vocals. Can help mask noise and make volume sound more natural when set relatively low. Closer to 1 will be more of a consistently loud volume:",
|
114 |
+
"输入监听": "Input voice monitor",
|
115 |
+
"输入训练文件夹路径": "Enter the path of the training folder:",
|
116 |
+
"输入设备": "Input device",
|
117 |
+
"输入降噪": "Input noise reduction",
|
118 |
+
"输出信息": "Output information",
|
119 |
+
"输出变声": "Output converted voice",
|
120 |
+
"输出设备": "Output device",
|
121 |
+
"输出降噪": "Output noise reduction",
|
122 |
+
"输出音频(右下角三个点,点了可以下载)": "Export audio (click on the three dots in the lower right corner to download)",
|
123 |
+
"选择.index文件": "Select the .index file",
|
124 |
+
"选择.pth文件": "Select the .pth file",
|
125 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
|
126 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Select the pitch extraction algorithm ('pm': faster extraction but lower-quality speech; 'harvest': better bass but extremely slow; 'crepe': better quality but GPU intensive), 'rmvpe': best quality, and little GPU requirement",
|
127 |
+
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Select the pitch extraction algorithm: when extracting singing, you can use 'pm' to speed up. For high-quality speech with fast performance, but worse CPU usage, you can use 'dio'. 'harvest' results in better quality but is slower. 'rmvpe' has the best results and consumes less CPU/GPU",
|
128 |
+
"采样率:": "采样率:",
|
129 |
+
"采样长度": "Sample length",
|
130 |
+
"重载设备列表": "Reload device list",
|
131 |
+
"音调设置": "Pitch settings",
|
132 |
+
"音频设备(请使用同种类驱动)": "Audio device (please use the same type of driver)",
|
133 |
+
"音高算法": "pitch detection algorithm",
|
134 |
+
"额外推理时长": "Extra inference time"
|
135 |
+
}
|
tools/i18n/locale/es_ES.json
ADDED
@@ -0,0 +1,135 @@
1 |
+
{
|
2 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Si es >=3, entonces use el resultado del reconocimiento de tono de 'harvest' con filtro de mediana, el valor es el radio del filtro, su uso puede debilitar el sonido sordo",
|
3 |
+
"A模型权重": "Un peso modelo para el modelo A.",
|
4 |
+
"A模型路径": "Modelo A ruta.",
|
5 |
+
"B模型路径": "Modelo B ruta.",
|
6 |
+
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
|
7 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Archivo de curva F0, opcional, un tono por línea, en lugar de F0 predeterminado y cambio de tono",
|
8 |
+
"Index Rate": "Tasa de índice",
|
9 |
+
"Onnx导出": "Exportar Onnx",
|
10 |
+
"Onnx输出路径": "Ruta de salida Onnx",
|
11 |
+
"RVC模型路径": "Ruta del modelo RVC",
|
12 |
+
"ckpt处理": "Procesamiento de recibos",
|
13 |
+
"harvest进程数": "Número de procesos",
|
14 |
+
"index文件路径不可包含中文": "La ruta del archivo .index no debe contener caracteres chinos.",
|
15 |
+
"pth文件路径不可包含中文": "La ruta del archivo .pth no debe contener caracteres chinos.",
|
16 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Separe los números de identificación de la GPU con '-' al ingresarlos. Por ejemplo, '0-1-2' significa usar GPU 0, GPU 1 y GPU 2.",
|
17 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Paso 1: Complete la configuración del experimento. Los datos del experimento se almacenan en el directorio 'logs', con cada experimento en una carpeta separada. La ruta del nombre del experimento debe ingresarse manualmente y debe contener la configuración del experimento, los registros y los archivos del modelo entrenado.",
|
18 |
+
"step1:正在处理数据": "Paso 1: Procesando datos",
|
19 |
+
"step2:正在提取音高&正在提取特征": "Paso 2: Extracción del tono y extracción de características",
|
20 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Paso 2a: Recorra automáticamente la carpeta de capacitación y corte y normalice todos los archivos de audio que se pueden decodificar en audio. Se generarán dos carpetas 'wav' en el directorio del experimento. Actualmente, solo se admite la capacitación de una sola persona.",
|
21 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Paso 2b: Use la CPU para extraer el tono (si el modelo tiene guía de tono) y la GPU para extraer características (seleccione el número de tarjeta).",
|
22 |
+
"step3: 填写训练设置, 开始训练模型和索引": "Paso 3: Complete la configuración de entrenamiento y comience a entrenar el modelo y el índice.",
|
23 |
+
"step3a:正在训练模型": "Paso 3a: Entrenando el modelo",
|
24 |
+
"一键训练": "Entrenamiento con un clic",
|
25 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "También se pueden importar varios archivos de audio. Si existe una ruta de carpeta, esta entrada se ignora.",
|
26 |
+
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Procesamiento por lotes para la separación de acompañamiento vocal utilizando el modelo UVR5.<br>Ejemplo de formato de ruta de carpeta válido: D:\\ruta\\a\\la\\carpeta\\de\\entrada (copiar desde la barra de direcciones del administrador de archivos).<br>El modelo se divide en tres categorías:<br>1. Preservar voces: Elija esta opción para audio sin armonías. Preserva las voces mejor que HP5. Incluye dos modelos incorporados: HP2 y HP3. HP3 puede filtrar ligeramente el acompañamiento pero conserva las voces un poco mejor que HP2.<br>2. Preservar solo voces principales: Elija esta opción para audio con armonías. Puede debilitar las voces principales. Incluye un modelo incorporado: HP5.<br>3. Modelos de des-reverberación y des-retardo (por FoxJoy):<br> (1) MDX-Net: La mejor opción para la eliminación de reverberación estéreo pero no puede eliminar la reverberación mono;<br> (234) DeEcho: Elimina efectos de retardo. El modo Agresivo elimina más a fondo que el modo Normal. DeReverb adicionalmente elimina la reverberación y puede eliminar la reverberación mono, pero no muy efectivamente para contenido de alta frecuencia fuertemente reverberado.<br>Notas de des-reverberación/des-retardo:<br>1. El tiempo de procesamiento para el modelo DeEcho-DeReverb es aproximadamente el doble que los otros dos modelos DeEcho.<br>2. El modelo MDX-Net-Dereverb es bastante lento.<br>3. La configuración más limpia recomendada es aplicar primero MDX-Net y luego DeEcho-Agresivo.",
|
27 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Separe los números de identificación de la GPU con '-' al ingresarlos. Por ejemplo, '0-1-2' significa usar GPU 0, GPU 1 y GPU 2.",
|
28 |
+
"伴奏人声分离&去混响&去回声": "Separación de voz acompañante & eliminación de reverberación & eco",
|
29 |
+
"使用模型采样率": "使用模型采样率",
|
30 |
+
"使用设备采样率": "使用设备采样率",
|
31 |
+
"保存名": "Guardar nombre",
|
32 |
+
"保存的文件名, 默认空为和源文件同名": "Nombre del archivo que se guardará, el valor predeterminado es el mismo que el nombre del archivo de origen",
|
33 |
+
"保存的模型名不带后缀": "Nombre del modelo guardado sin extensión.",
|
34 |
+
"保存频率save_every_epoch": "Frecuencia de guardado (save_every_epoch)",
|
35 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Proteger las consonantes claras y la respiración, prevenir artefactos como la distorsión de sonido electrónico, 0.5 no está activado, reducir aumentará la protección pero puede reducir el efecto del índice",
|
36 |
+
"修改": "Modificar",
|
37 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modificar la información del modelo (solo admite archivos de modelos pequeños extraídos en la carpeta weights)",
|
38 |
+
"停止音频转换": "Detener la conversión de audio",
|
39 |
+
"全流程结束!": "¡Todo el proceso ha terminado!",
|
40 |
+
"刷新音色列表和索引路径": "Actualizar la lista de modelos e índice de rutas",
|
41 |
+
"加载模型": "Cargar modelo",
|
42 |
+
"加载预训练底模D路径": "Cargue la ruta del modelo D base pre-entrenada.",
|
43 |
+
"加载预训练底模G路径": "Cargue la ruta del modelo G base pre-entrenada.",
|
44 |
+
"单次推理": "单次推理",
|
45 |
+
"卸载音色省显存": "Descargue la voz para ahorrar memoria GPU",
|
46 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "Cambio de tono (entero, número de semitonos, subir una octava +12 o bajar una octava -12)",
|
47 |
+
"后处理重采样至最终采样率,0为不进行重采样": "Remuestreo posterior al proceso a la tasa de muestreo final, 0 significa no remuestrear",
|
48 |
+
"否": "No",
|
49 |
+
"启用相位声码器": "启用相位声码器",
|
50 |
+
"响应阈值": "Umbral de respuesta",
|
51 |
+
"响度因子": "factor de sonoridad",
|
52 |
+
"处理数据": "Procesar datos",
|
53 |
+
"导出Onnx模型": "Exportar modelo Onnx",
|
54 |
+
"导出文件格式": "Formato de archivo de exportación",
|
55 |
+
"常见问题解答": "Preguntas frecuentes",
|
56 |
+
"常规设置": "Configuración general",
|
57 |
+
"开始音频转换": "Iniciar conversión de audio",
|
58 |
+
"很遗憾您这没有能用的显卡来支持您训练": "Lamentablemente, no tiene una tarjeta gráfica adecuada para soportar su entrenamiento",
|
59 |
+
"性能设置": "Configuración de rendimiento",
|
60 |
+
"总训练轮数total_epoch": "Total de épocas de entrenamiento (total_epoch)",
|
61 |
+
"批量推理": "批量推理",
|
62 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversión por lotes, ingrese la carpeta que contiene los archivos de audio para convertir o cargue varios archivos de audio. El audio convertido se emitirá en la carpeta especificada (opción predeterminada).",
|
63 |
+
"指定输出主人声文件夹": "Especifique la carpeta de salida para la voz principal",
|
64 |
+
"指定输出文件夹": "Especificar carpeta de salida",
|
65 |
+
"指定输出非主人声文件夹": "Especifique la carpeta de salida para las voces no principales",
|
66 |
+
"推理时间(ms):": "Inferir tiempo (ms):",
|
67 |
+
"推理音色": "inferencia de voz",
|
68 |
+
"提取": "Extraer",
|
69 |
+
"提取音高和处理数据使用的CPU进程数": "Número de procesos de CPU utilizados para extraer el tono y procesar los datos",
|
70 |
+
"是": "Sí",
|
71 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "Guardar solo el archivo ckpt más reciente para ahorrar espacio en disco",
|
72 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Guardar pequeño modelo final en la carpeta 'weights' en cada punto de guardado",
|
73 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Si almacenar en caché todos los conjuntos de entrenamiento en la memoria de la GPU. Los conjuntos de datos pequeños (menos de 10 minutos) se pueden almacenar en caché para acelerar el entrenamiento, pero el almacenamiento en caché de conjuntos de datos grandes puede causar errores de memoria en la GPU y no aumenta la velocidad de manera significativa.",
|
74 |
+
"显卡信息": "información de la GPU",
|
75 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Este software es de código abierto bajo la licencia MIT, el autor no tiene ningún control sobre el software, y aquellos que usan el software y difunden los sonidos exportados por el software son los únicos responsables.<br>Si no está de acuerdo con esta cláusula , no puede utilizar ni citar ningún código ni archivo del paquete de software Consulte el directorio raíz <b>Agreement-LICENSE.txt</b> para obtener más información.",
|
76 |
+
"查看": "Ver",
|
77 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Ver información del modelo (solo aplicable a archivos de modelos pequeños extraídos de la carpeta 'pesos')",
|
78 |
+
"检索特征占比": "Proporción de función de búsqueda",
|
79 |
+
"模型": "Modelo",
|
80 |
+
"模型推理": "inferencia del modelo",
|
81 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extracción de modelo (ingrese la ruta de un archivo de modelo grande en la carpeta 'logs'), aplicable cuando desea extraer un archivo de modelo pequeño después de entrenar a mitad de camino y no se guardó automáticamente, o cuando desea probar un modelo intermedio",
|
82 |
+
"模型是否带音高指导": "Si el modelo tiene guía de tono.",
|
83 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "Si el modelo tiene guía de tono (necesaria para cantar, pero no para hablar)",
|
84 |
+
"模型是否带音高指导,1是0否": "Si el modelo tiene guía de tono, 1 para sí, 0 para no",
|
85 |
+
"模型版本型号": "Versión y modelo del modelo",
|
86 |
+
"模型融合, 可用于测试音色融合": "Fusión de modelos, se puede utilizar para fusionar diferentes voces",
|
87 |
+
"模型路径": "Ruta del modelo",
|
88 |
+
"每张显卡的batch_size": "Tamaño del lote (batch_size) por tarjeta gráfica",
|
89 |
+
"淡入淡出长度": "Duración del fundido de entrada/salida",
|
90 |
+
"版本": "Versión",
|
91 |
+
"特征提取": "Extracción de características",
|
92 |
+
"特征检索库文件路径,为空则使用下拉的选择结果": "Ruta del archivo de la biblioteca de características, si está vacío, se utilizará el resultado de la selección desplegable",
|
93 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Tecla +12 recomendada para conversión de voz de hombre a mujer, tecla -12 para conversión de voz de mujer a hombre. Si el rango de tono es demasiado amplio y causa distorsión, ajústelo usted mismo a un rango adecuado.",
|
94 |
+
"目标采样率": "Tasa de muestreo objetivo",
|
95 |
+
"算法延迟(ms):": "算法延迟(ms):",
|
96 |
+
"自动检测index路径,下拉式选择(dropdown)": "Detección automática de la ruta del índice, selección desplegable (dropdown)",
|
97 |
+
"融合": "Fusión",
|
98 |
+
"要改的模型信息": "Información del modelo a modificar",
|
99 |
+
"要置入的模型信息": "Información del modelo a colocar.",
|
100 |
+
"训练": "Entrenamiento",
|
101 |
+
"训练模型": "Entrenar Modelo",
|
102 |
+
"训练特征索引": "Índice de características",
|
103 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entrenamiento finalizado, puede ver el registro de entrenamiento en la consola o en el archivo train.log en la carpeta del experimento",
|
104 |
+
"请指定说话人id": "ID del modelo",
|
105 |
+
"请选择index文件": "Seleccione el archivo .index",
|
106 |
+
"请选择pth文件": "Seleccione el archivo .pth",
|
107 |
+
"请选择说话人id": "Seleccione una identificación de altavoz",
|
108 |
+
"转换": "Conversión",
|
109 |
+
"输入实验名": "Ingrese el nombre del modelo",
|
110 |
+
"输入待处理音频文件夹路径": "Ingrese la ruta a la carpeta de audio que se procesará",
|
111 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Ingrese la ruta a la carpeta de audio que se procesará (simplemente cópiela desde la barra de direcciones del administrador de archivos)",
|
112 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "Ingrese la ruta del archivo del audio que se procesará (el formato predeterminado es el ejemplo correcto)",
|
113 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Proporción de fusión para reemplazar el sobre de volumen de entrada con el sobre de volumen de salida, cuanto más cerca de 1, más se utiliza el sobre de salida",
|
114 |
+
"输入监听": "输入监听",
|
115 |
+
"输入训练文件夹路径": "Introduzca la ruta de la carpeta de entrenamiento",
|
116 |
+
"输入设备": "Dispositivo de entrada",
|
117 |
+
"输入降噪": "Reducción de ruido de entrada",
|
118 |
+
"输出信息": "Información de salida",
|
119 |
+
"输出变声": "输出变声",
|
120 |
+
"输出设备": "Dispositivo de salida",
|
121 |
+
"输出降噪": "Reducción de ruido de salida",
|
122 |
+
"输出音频(右下角三个点,点了可以下载)": "Salida de audio (haga clic en los tres puntos en la esquina inferior derecha para descargar)",
|
123 |
+
"选择.index文件": "Seleccione el archivo .index",
|
124 |
+
"选择.pth文件": "Seleccione el archivo .pth",
|
125 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Seleccione el algoritmo de extracción de tono, las voces de entrada se pueden acelerar con pm, harvest tiene buenos graves pero es muy lento, crepe es bueno pero se come las GPUs",
|
126 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Seleccione el algoritmo de extracción de tono, use 'pm' para acelerar la entrada de canto, 'harvest' es bueno para los graves pero extremadamente lento, 'crepe' tiene buenos resultados pero consume GPU",
|
127 |
+
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Seleccione el algoritmo de extracción de tono: la canción de entrada se puede acelerar con pm, la voz de alta calidad pero CPU pobre se puede acelerar con dio, harvest es mejor pero más lento, rmvpe es el mejor y se come ligeramente la CPU/GPU",
|
128 |
+
"采样率:": "采样率:",
|
129 |
+
"采样长度": "Longitud de muestreo",
|
130 |
+
"重载设备列表": "Actualizar lista de dispositivos",
|
131 |
+
"音调设置": "Ajuste de tono",
|
132 |
+
"音频设备(请使用同种类驱动)": "Dispositivo de audio (utilice el mismo tipo de controlador)",
|
133 |
+
"音高算法": "Algoritmo de tono",
|
134 |
+
"额外推理时长": "Tiempo de inferencia adicional"
|
135 |
+
}
|
tools/i18n/locale/fr_FR.json
ADDED
@@ -0,0 +1,135 @@
1 |
+
{
|
2 |
+
">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Si >=3 : appliquer un filtrage médian aux résultats de la reconnaissance de la hauteur de récolte. La valeur représente le rayon du filtre et peut réduire la respiration.",
|
3 |
+
"A模型权重": "Poids (w) pour le modèle A :",
|
4 |
+
"A模型路径": "Chemin d'accès au modèle A :",
|
5 |
+
"B模型路径": "Chemin d'accès au modèle B :",
|
6 |
+
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
|
7 |
+
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Fichier de courbe F0 (facultatif). Une hauteur par ligne. Remplace la fréquence fondamentale par défaut et la modulation de la hauteur :",
|
8 |
+
"Index Rate": "Taux d'indexation",
|
9 |
+
"Onnx导出": "Exporter en ONNX",
|
10 |
+
"Onnx输出路径": "Chemin d'exportation ONNX :",
|
11 |
+
"RVC模型路径": "Chemin du modèle RVC :",
|
12 |
+
"ckpt处理": "Traitement des fichiers .ckpt",
|
13 |
+
"harvest进程数": "Nombre de processus CPU utilisés pour l'algorithme de reconnaissance de la hauteur (pitch) dans le cadre de la récolte (harvest).",
|
14 |
+
"index文件路径不可包含中文": "Le chemin du fichier d'index ne doit pas contenir de caractères chinois.",
|
15 |
+
"pth文件路径不可包含中文": "Le chemin du fichier .pth ne doit pas contenir de caractères chinois.",
|
16 |
+
"rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Configuration des numéros de carte RMVPE : séparez les index GPU par des tirets \"-\", par exemple, 0-0-1 pour utiliser 2 processus sur GPU0 et 1 processus sur GPU1.",
|
17 |
+
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Étape 1 : Remplissez la configuration expérimentale. Les données expérimentales sont stockées dans le dossier 'logs', avec chaque expérience ayant un dossier distinct. Entrez manuellement le chemin du nom de l'expérience, qui contient la configuration expérimentale, les journaux et les fichiers de modèle entraînés.",
|
18 |
+
"step1:正在处理数据": "Étape 1 : Traitement des données en cours.",
|
19 |
+
"step2:正在提取音高&正在提取特征": "Étape 2 : Extraction de la hauteur et extraction des caractéristiques en cours.",
|
20 |
+
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Étape 2a : Parcours automatique de tous les fichiers du dossier d'entraînement qui peuvent être décodés en fichiers audio et réalisation d'une normalisation par tranches. Génère 2 dossiers wav dans le répertoire de l'expérience. Actuellement, seule la formation avec un seul chanteur/locuteur est prise en charge.",
|
21 |
+
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Étape 2b : Utilisez le CPU pour extraire la hauteur (si le modèle le permet), utilisez le GPU pour extraire les caractéristiques (sélectionnez l'index du GPU) :",
|
22 |
+
"step3: 填写训练设置, 开始训练模型和索引": "Étape 3 : Remplissez les paramètres d'entraînement et démarrez l'entraînement du modèle ainsi que l'indexation.",
|
23 |
+
"step3a:正在训练模型": "Étape 3a : L'entraînement du modèle a commencé.",
|
24 |
+
"一键训练": "Entraînement en un clic",
|
25 |
+
"也可批量输入音频文件, 二选一, 优先读文件夹": "Il est également possible d'importer plusieurs fichiers audio. Si un chemin de dossier existe, cette entrée est ignorée.",
|
26 |
+
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Traitement en lot pour la séparation de la voix et de l'accompagnement vocal à l'aide du modèle UVR5.<br>Exemple d'un format de chemin de dossier valide : D:\\chemin\\vers\\dossier\\d'entrée (copiez-le depuis la barre d'adresse du gestionnaire de fichiers).<br>Le modèle est divisé en trois catégories :<br>1. Préserver la voix : Choisissez cette option pour l'audio sans harmonies. Elle préserve la voix mieux que HP5. Il comprend deux modèles intégrés : HP2 et HP3. HP3 peut légèrement laisser passer l'accompagnement mais préserve légèrement mieux la voix que HP2.<br>2. Préserver uniquement la voix principale : Choisissez cette option pour l'audio avec harmonies. Cela peut affaiblir la voix principale. Il comprend un modèle intégré : HP5.<br>3. Modèles de suppression de la réverbération et du délai (par FoxJoy) :<br> (1) MDX-Net : Le meilleur choix pour la suppression de la réverbération stéréo, mais ne peut pas supprimer la réverbération mono.<br> (234) DeEcho : Supprime les effets de délai. Le mode Aggressive supprime plus efficacement que le mode Normal. DeReverb supprime également la réverbération et peut supprimer la réverbération mono, mais pas très efficacement pour les contenus à haute fréquence fortement réverbérés.<br>Notes sur la suppression de la réverbération et du délai :<br>1. Le temps de traitement pour le modèle DeEcho-DeReverb est environ deux fois plus long que pour les autres deux modèles DeEcho.<br>2. Le modèle MDX-Net-Dereverb est assez lent.<br>3. La configuration la plus propre recommandée est d'appliquer d'abord MDX-Net, puis DeEcho-Aggressive.",
|
27 |
+
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Entrez le(s) index GPU séparé(s) par '-', par exemple, 0-1-2 pour utiliser les GPU 0, 1 et 2 :",
|
28 |
+
"伴奏人声分离&去混响&去回声": "Séparation des voix/accompagnement et suppression de la réverbération",
|
29 |
+
"使用模型采样率": "使用模型采样率",
|
30 |
+
"使用设备采样率": "使用设备采样率",
|
31 |
+
"保存名": "Nom de sauvegarde :",
|
32 |
+
"保存的文件名, 默认空为和源文件同名": "Nom du fichier de sauvegarde (par défaut : identique au nom du fichier source) :",
|
33 |
+
"保存的模型名不带后缀": "Nom du modèle enregistré (sans extension) :",
|
34 |
+
"保存频率save_every_epoch": "Fréquence de sauvegarde (save_every_epoch) :",
|
35 |
+
"保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protéger les consonnes sourdes et les bruits de respiration pour éviter les artefacts tels que le déchirement dans la musique électronique. Réglez à 0,5 pour désactiver. Diminuez la valeur pour renforcer la protection, mais cela peut réduire la précision de l'indexation :",
|
36 |
+
"修改": "Modifier",
|
37 |
+
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modifier les informations du modèle (uniquement pris en charge pour les petits fichiers de modèle extraits du dossier 'weights')",
|
38 |
+
"停止音频转换": "Arrêter la conversion audio",
|
39 |
+
"全流程结束!": "Toutes les étapes ont été terminées !",
|
40 |
+
"刷新音色列表和索引路径": "Actualiser la liste des voix et le vers l'index.",
|
41 |
+
"加载模型": "Charger le modèle.",
|
42 |
+
"加载预训练底模D路径": "Charger le chemin du modèle de base pré-entraîné D :",
|
43 |
+
"加载预训练底模G路径": "Charger le chemin du modèle de base pré-entraîné G :",
|
44 |
+
"单次推理": "单次推理",
|
45 |
+
"卸载音色省显存": "Décharger la voix pour économiser la mémoire GPU.",
|
46 |
+
"变调(整数, 半音数量, 升八度12降八度-12)": "Transposer (entier, nombre de demi-tons, monter d'une octave : 12, descendre d'une octave : -12) :",
|
47 |
+
"后处理重采样至最终采样率,0为不进行重采样": "Rééchantillonner l'audio de sortie en post-traitement à la fréquence d'échantillonnage finale. Réglez sur 0 pour ne pas effectuer de rééchantillonnage :",
|
48 |
+
"否": "Non",
|
49 |
+
"启用相位声码器": "启用相位声码器",
|
50 |
+
"响应阈值": "Seuil de réponse",
|
51 |
+
"响度因子": "Facteur de volume sonore",
|
52 |
+
"处理数据": "Traitement des données",
|
53 |
+
"导出Onnx模型": "Exporter le modèle au format ONNX.",
|
54 |
+
"导出文件格式": "Format de fichier d'exportation",
|
55 |
+
"常见问题解答": "FAQ (Foire Aux Questions)",
|
56 |
+
"常规设置": "Paramètres généraux",
|
57 |
+
"开始音频转换": "Démarrer la conversion audio.",
|
58 |
+
"很遗憾您这没有能用的显卡来支持您训练": "Malheureusement, il n'y a pas de GPU compatible disponible pour prendre en charge votre entrainement.",
|
59 |
+
"性能设置": "Paramètres de performance",
|
60 |
+
"总训练轮数total_epoch": "Nombre total d'époques d'entraînement (total_epoch) :",
|
61 |
+
"批量推理": "批量推理",
|
62 |
+
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversion en lot. Entrez le dossier contenant les fichiers audio à convertir ou téléchargez plusieurs fichiers audio. Les fichiers audio convertis seront enregistrés dans le dossier spécifié (par défaut : 'opt').",
|
63 |
+
"指定输出主人声文件夹": "Spécifiez le dossier de sortie pour les fichiers de voix :",
|
64 |
+
"指定输出文件夹": "Spécifiez le dossier de sortie :",
|
65 |
+
"指定输出非主人声文件夹": "Spécifiez le dossier de sortie pour l'accompagnement :",
|
66 |
+
"推理时间(ms):": "Temps d'inférence (ms) :",
|
67 |
+
"推理音色": "Voix pour l'inférence",
|
68 |
+
"提取": "Extraire",
|
69 |
+
"提取音高和处理数据使用的CPU进程数": "Nombre de processus CPU utilisés pour l'extraction de la hauteur et le traitement des données :",
|
70 |
+
"是": "Oui",
|
71 |
+
"是否仅保存最新的ckpt文件以节省硬盘空间": "Enregistrer uniquement le dernier fichier '.ckpt' pour économiser de l'espace disque :",
|
72 |
+
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Enregistrer un petit modèle final dans le dossier 'weights' à chaque point de sauvegarde :",
|
73 |
+
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Mettre en cache tous les ensembles d'entrainement dans la mémoire GPU. Mettre en cache de petits ensembles de données (moins de 10 minutes) peut accélérer l'entrainement, mais mettre en cache de grands ensembles de données consommera beaucoup de mémoire GPU et peut ne pas apporter beaucoup d'amélioration de vitesse :",
|
74 |
+
"显卡信息": "Informations sur la carte graphique (GPU)",
|
75 |
+
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Ce logiciel est open source sous la licence MIT. L'auteur n'a aucun contrôle sur le logiciel. Les utilisateurs qui utilisent le logiciel et distribuent les sons exportés par le logiciel en sont entièrement responsables. <br>Si vous n'acceptez pas cette clause, vous ne pouvez pas utiliser ou faire référence à aucun code ni fichier contenu dans le package logiciel. Consultez le fichier <b>Agreement-LICENSE.txt</b> dans le répertoire racine pour plus de détails.",
|
76 |
+
"查看": "Voir",
|
77 |
+
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Afficher les informations sur le modèle (uniquement pour les petits fichiers de modèle extraits du dossier \"weights\")",
|
78 |
+
"检索特征占比": "Rapport de recherche de caractéristiques (contrôle l'intensité de l'accent, un rapport trop élevé provoque des artefacts) :",
|
79 |
+
"模型": "Modèle",
|
80 |
+
"模型推理": "Inférence du modèle",
|
81 |
+
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extraction du modèle (saisissez le chemin d'accès au modèle du grand fichier dans le dossier \"logs\"). Cette fonction est utile si vous souhaitez arrêter l'entrainement à mi-chemin et extraire et enregistrer manuellement un petit fichier de modèle, ou si vous souhaitez tester un modèle intermédiaire :",
|
82 |
+
"模型是否带音高指导": "Indique si le modèle dispose d'un guidage en hauteur :",
|
83 |
+
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "Indique si le modèle dispose d'un système de guidage de la hauteur (obligatoire pour le chant, facultatif pour la parole) :",
|
84 |
+
"模型是否带音高指导,1是0否": "Le modèle dispose-t-il d'un guide de hauteur (1 : oui, 0 : non) ?",
|
85 |
+
"模型版本型号": "Version de l'architecture du modèle :",
|
86 |
+
"模型融合, 可用于测试音色融合": "Fusion de modèles, peut être utilisée pour tester la fusion de timbres",
|
87 |
+
"模型路径": "Le chemin vers le modèle :",
|
88 |
+
"每张显卡的batch_size": "Taille du batch par GPU :",
|
89 |
+
"淡入淡出长度": "Longueur de la transition",
|
90 |
+
"版本": "Version",
|
91 |
+
"特征提取": "Extraction des caractéristiques",
|
92 |
+
"特征检索库文件路径,为空则使用下拉的选择结果": "Chemin d'accès au fichier d'index des caractéristiques. Laisser vide pour utiliser le résultat sélectionné dans la liste déroulante :",
|
93 |
+
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Il est recommandé d'utiliser la clé +12 pour la conversion homme-femme et la clé -12 pour la conversion femme-homme. Si la plage sonore est trop large et que la voix est déformée, vous pouvez également l'ajuster vous-même à la plage appropriée.",
|
94 |
+
"目标采样率": "Taux d'échantillonnage cible :",
|
95 |
+
"算法延迟(ms):": "Délais algorithmiques (ms):",
|
96 |
+
"自动检测index路径,下拉式选择(dropdown)": "Détecter automatiquement le chemin d'accès à l'index et le sélectionner dans la liste déroulante :",
|
97 |
+
"融合": "Fusion",
|
98 |
+
"要改的模型信息": "Informations sur le modèle à modifier :",
|
99 |
+
"要置入的模型信息": "Informations sur le modèle à placer :",
|
100 |
+
"训练": "Entraîner",
|
101 |
+
"训练模型": "Entraîner le modèle",
|
102 |
+
"训练特征索引": "Entraîner l'index des caractéristiques",
|
103 |
+
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entraînement terminé. Vous pouvez consulter les rapports d'entraînement dans la console ou dans le fichier 'train.log' situé dans le dossier de l'expérience.",
|
104 |
+
"请指定说话人id": "Veuillez spécifier l'ID de l'orateur ou du chanteur :",
|
105 |
+
"请选择index文件": "Veuillez sélectionner le fichier d'index",
|
106 |
+
"请选择pth文件": "Veuillez sélectionner le fichier pth",
|
107 |
+
"请选择说话人id": "Sélectionner l'ID de l'orateur ou du chanteur :",
|
108 |
+
"转换": "Convertir",
|
109 |
+
"输入实验名": "Saisissez le nom de l'expérience :",
|
110 |
+
"输入待处理音频文件夹路径": "Entrez le chemin du dossier audio à traiter :",
|
111 |
+
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Entrez le chemin du dossier audio à traiter (copiez-le depuis la barre d'adresse du gestionnaire de fichiers) :",
|
112 |
+
"输入待处理音频文件路径(默认是正确格式示例)": "Entrez le chemin d'accès du fichier audio à traiter (par défaut, l'exemple de format correct) :",
|
113 |
+
"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Ajustez l'échelle de l'enveloppe de volume. Plus il est proche de 0, plus il imite le volume des voix originales. Cela peut aider à masquer les bruits et à rendre le volume plus naturel lorsqu'il est réglé relativement bas. Plus le volume est proche de 1, plus le volume sera fort et constant :",
|
114 |
+
"输入监听": "Moniteur vocal d'entrée",
|
115 |
+
"输入训练文件夹路径": "Indiquez le chemin d'accès au dossier d'entraînement :",
|
116 |
+
"输入设备": "Dispositif d'entrée",
|
117 |
+
"输入降噪": "Réduction du bruit d'entrée",
|
118 |
+
"输出信息": "Informations sur la sortie",
|
119 |
+
"输出变声": "Sortie voix convertie",
|
120 |
+
"输出设备": "Dispositif de sortie",
|
121 |
+
"输出降噪": "Réduction du bruit de sortie",
|
122 |
+
"输出音频(右下角三个点,点了可以下载)": "Exporter l'audio (cliquer sur les trois points dans le coin inférieur droit pour télécharger)",
|
123 |
+
"选择.index文件": "Sélectionner le fichier .index",
|
124 |
+
"选择.pth文件": "Sélectionner le fichier .pth",
|
125 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Sélection de l'algorithme d'extraction de la hauteur, les voix d'entrée peuvent être accélérées avec pm, harvest a de bonnes basses mais est très lent, crepe est bon mais consomme beaucoup de ressources GPU.",
|
126 |
+
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Sélectionnez l'algorithme d'extraction de la hauteur de ton (\"pm\" : extraction plus rapide mais parole de moindre qualité ; \"harvest\" : meilleure basse mais extrêmement lente ; \"crepe\" : meilleure qualité mais utilisation intensive du GPU), \"rmvpe\" : meilleure qualité et peu d'utilisation du GPU.",
|
127 |
+
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Sélection de l'algorithme d'extraction de la hauteur : la chanson d'entrée peut être traitée plus rapidement par pm, avec une voix de haute qualité mais un CPU médiocre, par dio, harvest est meilleur mais plus lent, rmvpe est le meilleur, mais consomme légèrement le CPU/GPU.",
|
128 |
+
"采样率:": "采样率:",
|
129 |
+
"采样长度": "Longueur de l'échantillon",
|
130 |
+
"重载设备列表": "Recharger la liste des dispositifs",
|
131 |
+
"音调设置": "Réglages de la hauteur",
|
132 |
+
"音频设备(请使用同种类驱动)": "Périphérique audio (veuillez utiliser le même type de pilote)",
|
133 |
+
"音高算法": "algorithme de détection de la hauteur",
|
134 |
+
"额外推理时长": "Temps d'inférence supplémentaire"
|
135 |
+
}
|