Blane187 committed on
Commit
334c0d1
·
verified ·
1 Parent(s): b1f75cd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +90 -32
app.py CHANGED
@@ -1,5 +1,5 @@
1
  import gradio as gr
2
- from rvc_infer import download_online_model, infer_audio
3
  import os
4
  import re
5
  import random
@@ -13,13 +13,67 @@ print("downloading RVC models")
13
  os.system("python dowoad_param.py")
14
 
15
 
 
16
 
 
17
 
18
 
19
- def download_model(url, dir_name):
20
- output_models = download_online_model(url, dir_name)
21
- return output_models
22
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
  def download_audio(url):
25
  ydl_opts = {
@@ -46,37 +100,39 @@ CSS = """
46
 
47
  with gr.Blocks(theme="Hev832/Applio", fill_width=True, css=CSS) as demo:
48
  gr.Markdown("# RVC INFER DEMOS ")
 
49
  with gr.Tab("Inferenece"):
50
  gr.Markdown("in progress")
51
- model_name = gr.Textbox(label="Model Name #", lines=1, value="")
52
- input_audio = gr.Audio(label="Input Audio #", type="filepath")
 
53
  with gr.Accordion("Settings", open=False):
54
- f0_change = gr.Slider(label="f0 change #", minimum=-12, maximum=12, step=1, value=0)
55
- f0_method = gr.Dropdown(label="f0 method #", choices=["rmvpe+", "rmvpe", "fcpe", " hybrid[rmvpe+fcpe]"], value="rmvpe+")
56
- min_pitch = gr.Textbox(label="min pitch #", lines=1, value="-12")
57
- max_pitch = gr.Textbox(label="max pitch #", lines=1, value="12")
58
- crepe_hop_length = gr.Slider(label="crepe_hop_length #", minimum=0, maximum=256, step=1, value=128)
59
- index_rate = gr.Slider(label="index_rate #", minimum=0, maximum=1.0, step=0.01, value=0.75)
60
- filter_radius = gr.Slider(label="filter_radius #", minimum=0, maximum=10.0, step=0.01, value=3)
61
- rms_mix_rate = gr.Slider(label="rms_mix_rate #", minimum=0, maximum=1.0, step=0.01, value=0.25)
62
- protect = gr.Slider(label="protect #", minimum=0, maximum=1.0, step=0.01, value=0.33)
63
  with gr.Accordion("Advanced Settings", open=False):
64
- split_infer = gr.Checkbox(label="split_infer #", value=False)
65
- min_silence = gr.Slider(label="min_silence #", minimum=0, maximum=1000, step=1, value=500)
66
- silence_threshold = gr.Slider(label="silence_threshold #", minimum=-1000, maximum=1000, step=1, value=-50)
67
- seek_step = gr.Slider(label="seek_step #", minimum=0, maximum=100, step=1, value=0)
68
- keep_silence = gr.Slider(label="keep_silence #", minimum=-1000, maximum=1000, step=1, value=100)
69
- do_formant = gr.Checkbox(label="do_formant #", value=False)
70
- quefrency = gr.Slider(label="quefrency #", minimum=0, maximum=100, step=1, value=0)
71
- timbre = gr.Slider(label="timbre #", minimum=0, maximum=100, step=1, value=1)
72
- f0_autotune = gr.Checkbox(label="f0_autotune #", value=False)
73
- audio_format = gr.Dropdown(label="audio_format #", choices=["wav"], value="wav", visible=False)
74
- resample_sr = gr.Slider(label="resample_sr #", minimum=0, maximum=100, step=1, value=0)
75
- hubert_model_path = gr.Textbox(label="hubert_model_pathe #", lines=1, value="hubert_base.pt", visible=False)
76
- rmvpe_model_path = gr.Textbox(label="rmvpe_model_path #", lines=1, value="rmvpe.pt", visible=False)
77
- fcpe_model_path = gr.Textbox(label="fcpe_model_path #", lines=1, value="fcpe.pt", visible=False)
78
- submit_inference = gr.Button('Inference #', variant='primary')
79
- result_audio = gr.Audio("Output Audio #")
80
 
81
  with gr.Tab("Download Model"):
82
  gr.Markdown("## Download Model for infernece")
@@ -84,7 +140,7 @@ with gr.Blocks(theme="Hev832/Applio", fill_width=True, css=CSS) as demo:
84
  dir_name_input = gr.Textbox(label="Directory Name", placeholder="Enter the directory name")
85
  output = gr.Textbox(label="Output Models")
86
  download_button = gr.Button("Download Model")
87
- download_button.click(download_model, inputs=[url_input, dir_name_input], outputs=output)
88
 
89
  with gr.Tab(" Credits"):
90
  gr.Markdown(
@@ -93,6 +149,8 @@ with gr.Blocks(theme="Hev832/Applio", fill_width=True, css=CSS) as demo:
93
  """)
94
 
95
 
 
 
96
  gr.on(
97
  triggers=[submit_inference.click],
98
  fn=infer_audio,
 
1
  import gradio as gr
2
+ from rvc_infer import infer_audio
3
  import os
4
  import re
5
  import random
 
13
  os.system("python dowoad_param.py")
14
 
15
 
16
+ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
17
 
18
+ rvc_models_dir = os.path.join(BASE_DIR, 'models')
19
 
20
 
 
 
 
21
 
22
def update_models_list():
    """Rescan the RVC models folder and push the fresh list into the dropdown."""
    return gr.update(choices=get_current_models(rvc_models_dir))
25
+
26
+
27
+
28
def extract_zip(extraction_folder, zip_name):
    """Extract a downloaded voice-model zip into `extraction_folder`.

    Moves the model weights (a `.pth` larger than 40 MB) and the optional
    feature index (an `.index` larger than 100 KB) to the top of
    `extraction_folder`, deletes the zip, and removes any leftover nested
    directories.

    Raises:
        gr.Error: if the archive contains no usable `.pth` file; the
            partially-extracted folder is removed first so a retry with
            the same model name is possible.
    """
    os.makedirs(extraction_folder)
    with zipfile.ZipFile(zip_name, 'r') as zip_ref:
        zip_ref.extractall(extraction_folder)
    os.remove(zip_name)

    index_filepath, model_filepath = None, None
    for root, dirs, files in os.walk(extraction_folder):
        for name in files:
            filepath = os.path.join(root, name)
            # Size thresholds skip tiny placeholder files (e.g. LFS pointers).
            if name.endswith('.index') and os.stat(filepath).st_size > 1024 * 100:
                index_filepath = filepath
            if name.endswith('.pth') and os.stat(filepath).st_size > 1024 * 1024 * 40:
                model_filepath = filepath

    if not model_filepath:
        # Clean up, otherwise a retry with the same dir_name hits the
        # "directory already exists" guard in download_online_model().
        shutil.rmtree(extraction_folder, ignore_errors=True)
        raise gr.Error(f'No .pth model file was found in the extracted zip. Please check {extraction_folder}.')

    # Move model and index file to the top of the extraction folder.
    # Skip the rename when the file is already there: renaming a path onto
    # an existing path raises FileExistsError on Windows.
    top = os.path.abspath(extraction_folder)
    if os.path.abspath(os.path.dirname(model_filepath)) != top:
        os.rename(model_filepath, os.path.join(extraction_folder, os.path.basename(model_filepath)))
    if index_filepath and os.path.abspath(os.path.dirname(index_filepath)) != top:
        os.rename(index_filepath, os.path.join(extraction_folder, os.path.basename(index_filepath)))

    # Remove any unnecessary nested folders left over from extraction.
    for entry in os.listdir(extraction_folder):
        entry_path = os.path.join(extraction_folder, entry)
        if os.path.isdir(entry_path):
            shutil.rmtree(entry_path)
55
+
56
+
57
+
58
def download_online_model(url, dir_name, progress=gr.Progress()):
    """Download a voice-model zip from `url` and install it under
    `rvc_models_dir/<dir_name>`.

    Supports direct download links and pixeldrain share links.

    Returns:
        A user-facing success message string.

    Raises:
        gr.Error: if the target directory already exists or the
            download/extraction fails.
    """
    zip_name = url.split('/')[-1]
    extraction_folder = os.path.join(rvc_models_dir, dir_name)
    try:
        progress(0, desc=f'[~] Downloading voice model with name {dir_name}...')
        if os.path.exists(extraction_folder):
            raise gr.Error(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')

        # pixeldrain share pages are not direct downloads; rewrite to the raw-file API.
        if 'pixeldrain.com' in url:
            url = f'https://pixeldrain.com/api/file/{zip_name}'

        urllib.request.urlretrieve(url, zip_name)

        progress(0.5, desc='[~] Extracting zip...')
        extract_zip(extraction_folder, zip_name)
        return f'[+] {dir_name} Model successfully downloaded!'
    except gr.Error:
        # Already a user-facing error; re-raise without wrapping it again.
        raise
    except Exception as e:
        # Best-effort cleanup of a half-finished download before surfacing the error.
        if os.path.isfile(zip_name):
            os.remove(zip_name)
        raise gr.Error(str(e))
77
 
78
  def download_audio(url):
79
  ydl_opts = {
 
100
 
101
  with gr.Blocks(theme="Hev832/Applio", fill_width=True, css=CSS) as demo:
102
  gr.Markdown("# RVC INFER DEMOS ")
103
+ gr.Markdown(f"# recommended using colab version with more feature!<br> [![Open In Collab](https://img.shields.io/badge/google_colab-F9AB00?style=flat-square&logo=googlecolab&logoColor=white)](https://colab.research.google.com/drive/1bM1LB2__WNFxX8pyZmUPQZYq7dg58YWG?usp=sharing) ")
104
  with gr.Tab("Inferenece"):
105
  gr.Markdown("in progress")
106
+ model_name = gr.Dropdown(voice_models, label='Voice Models', info='Models folder "rvc_infer --> models". After new models are added into this folder, click the refresh button')
107
+ ref_btn = gr.Button('Refresh Models', variant='primary')
108
+ input_audio = gr.Audio(label="Input Audio", type="filepath")
109
  with gr.Accordion("Settings", open=False):
110
+ f0_change = gr.Slider(label="f0 change", minimum=-12, maximum=12, step=1, value=0)
111
+ f0_method = gr.Dropdown(label="f0 method", choices=["rmvpe+", "rmvpe", "fcpe", " hybrid[rmvpe+fcpe]"], value="rmvpe+")
112
+ min_pitch = gr.Textbox(label="min pitch", lines=1, value="-12")
113
+ max_pitch = gr.Textbox(label="max pitch", lines=1, value="12")
114
+ crepe_hop_length = gr.Slider(label="crepe_hop_length", minimum=0, maximum=256, step=1, value=128)
115
+ index_rate = gr.Slider(label="index_rate", minimum=0, maximum=1.0, step=0.01, value=0.75)
116
+ filter_radius = gr.Slider(label="filter_radius", minimum=0, maximum=10.0, step=0.01, value=3)
117
+ rms_mix_rate = gr.Slider(label="rms_mix_rate", minimum=0, maximum=1.0, step=0.01, value=0.25)
118
+ protect = gr.Slider(label="protect", minimum=0, maximum=1.0, step=0.01, value=0.33)
119
  with gr.Accordion("Advanced Settings", open=False):
120
+ split_infer = gr.Checkbox(label="split_infer", value=False)
121
+ min_silence = gr.Slider(label="min_silence", minimum=0, maximum=1000, step=1, value=500)
122
+ silence_threshold = gr.Slider(label="silence_threshold", minimum=-1000, maximum=1000, step=1, value=-50)
123
+ seek_step = gr.Slider(label="seek_step", minimum=0, maximum=100, step=1, value=0)
124
+ keep_silence = gr.Slider(label="keep_silence", minimum=-1000, maximum=1000, step=1, value=100)
125
+ do_formant = gr.Checkbox(label="do_formant", value=False)
126
+ quefrency = gr.Slider(label="quefrency", minimum=0, maximum=100, step=1, value=0)
127
+ timbre = gr.Slider(label="timbre", minimum=0, maximum=100, step=1, value=1)
128
+ f0_autotune = gr.Checkbox(label="f0_autotune", value=False)
129
+ audio_format = gr.Dropdown(label="audio_format", choices=["wav"], value="wav", visible=False)
130
+ resample_sr = gr.Slider(label="resample_sr", minimum=0, maximum=100, step=1, value=0)
131
+ hubert_model_path = gr.Textbox(label="hubert_model_path", lines=1, value="hubert_base.pt", visible=False)
132
+ rmvpe_model_path = gr.Textbox(label="rmvpe_model_path", lines=1, value="rmvpe.pt", visible=False)
133
+ fcpe_model_path = gr.Textbox(label="fcpe_model_path", lines=1, value="fcpe.pt", visible=False)
134
+ submit_inference = gr.Button('Inference', variant='primary')
135
+ result_audio = gr.Audio("Output Audio")
136
 
137
  with gr.Tab("Download Model"):
138
  gr.Markdown("## Download Model for infernece")
 
140
  dir_name_input = gr.Textbox(label="Directory Name", placeholder="Enter the directory name")
141
  output = gr.Textbox(label="Output Models")
142
  download_button = gr.Button("Download Model")
143
+ download_button.click(download_online_model, inputs=[url_input, dir_name_input], outputs=output)
144
 
145
  with gr.Tab(" Credits"):
146
  gr.Markdown(
 
149
  """)
150
 
151
 
152
+
153
+ ref_btn.click(update_models_list, None, outputs=rvc_model)
154
  gr.on(
155
  triggers=[submit_inference.click],
156
  fn=infer_audio,