Update app.py
app.py CHANGED
@@ -57,17 +57,17 @@ commonvoice_speakers = [s for s in speakers_list if len(s) > 20] #
 
 DEFAULT_SPEAKER_ID = os.environ.get("DEFAULT_SPEAKER_ID", default="pau")
 DEFAULT_CHECKPOINT = os.environ.get("DEFAULT_CHECKPOINT", default=model_files[0])
-model_file = model_files[0] # change this!!
+# model_file = model_files[0] # change this!!
 
-model_path = os.path.join(os.getcwd(), model_file)
-config_path = "config.json"
+# model_path = os.path.join(os.getcwd(), model_file)
+# config_path = os.path.join(os.getcwd(), "config.json")
 
 vocoder_path = None
 vocoder_config_path = None
 
-synthesizer = Synthesizer(
-    model_path, config_path, speakers_path, None, vocoder_path, vocoder_config_path,
-)
+# synthesizer = Synthesizer(
+#     model_path, config_path, speakers_path, None, vocoder_path, vocoder_config_path,
+# )
 
 
 def get_phonetic_transcription(text: str):
@@ -85,7 +85,16 @@ def get_phonetic_transcription(text: str):
         return None
 
 
-def tts_inference(text: str, speaker_idx: str = None):
+def tts_inference(text: str, speaker_idx: str = None, model_file: str=None):
+
+    model_path = os.path.join(os.getcwd(), model_file)
+    speakers_file_path = "speakers.pth"
+    config_path = "config.json"
+    vocoder_path = None
+    vocoder_config_path = None
+
+    synthesizer = Synthesizer(model_path, config_path, speakers_path, None,
+                              vocoder_path, vocoder_config_path)
     # synthesize
     if synthesizer is None:
         raise NameError("model not found")
@@ -113,11 +122,11 @@ description = """
 """
 
 
-def submit_input(input_, speaker_id):
+def submit_input(input_, speaker_id, model_chkpt):
     output_audio = None
     output_phonetic = None
     if input_ is not None and len(input_) < MAX_INPUT_TEXT_LEN:
-        output_audio = tts_inference(input_, speaker_id)
+        output_audio = tts_inference(input_, speaker_id, model_chkpt)
         output_phonetic = get_phonetic_transcription(input_)
     else:
         gr.Warning(f"Your text exceeds the {MAX_INPUT_TEXT_LEN}-character limit.")
@@ -196,7 +205,7 @@ with gr.Blocks(**AinaGradioTheme().get_kwargs()) as app:
     input_.change(fn=change_interactive, inputs=[input_], outputs=button)
 
     # clear_btn.click(fn=clean, inputs=[], outputs=[input_, output_audio, output_phonetic], queue=False)
-    submit_btn.click(fn=submit_input, inputs=[input_, speaker_id], outputs=[output_audio, output_phonetic])
+    submit_btn.click(fn=submit_input, inputs=[input_, speaker_id, model_chkpt], outputs=[output_audio, output_phonetic])
 
     app.queue(concurrency_count=1, api_open=False)
     app.launch(show_api=False, server_name="0.0.0.0", server_port=7860)
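
In effect, the commit defers building the Coqui TTS Synthesizer from import time to each tts_inference call, so the checkpoint selected in the UI (model_chkpt) decides which model weights are loaded. The sketch below shows that per-request pattern in a self-contained form; the "config.json" / "speakers.pth" layout in the working directory, the temporary-file output, and the hypothetical output.wav name are assumptions for illustration and not the Space's exact code, while the Synthesizer constructor and tts/save_wav calls are standard Coqui TTS API.

import os
import tempfile

from TTS.utils.synthesizer import Synthesizer


def tts_inference(text: str, speaker_idx: str = None, model_file: str = None):
    # Resolve the selected checkpoint relative to the working directory,
    # mirroring the diff; config and speakers file are shared across checkpoints.
    model_path = os.path.join(os.getcwd(), model_file)
    synthesizer = Synthesizer(
        model_path,       # tts_checkpoint
        "config.json",    # tts_config_path
        "speakers.pth",   # tts_speakers_file
        None,             # tts_languages_file
        None,             # vocoder_checkpoint
        None,             # vocoder_config
    )

    # Synthesize with the requested speaker and write a wav file that a
    # Gradio Audio output component can play back.
    wav = synthesizer.tts(text, speaker_name=speaker_idx)
    out_path = os.path.join(tempfile.mkdtemp(), "output.wav")
    synthesizer.save_wav(wav, out_path)
    return out_path

Rebuilding the synthesizer on every request keeps the checkpoint switchable from the UI, but it reloads the weights each time; caching one Synthesizer per checkpoint (for example in a dict keyed by model_file) would avoid that cost if latency becomes an issue.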