import gradio as gr
from rvc_infer import download_online_model, infer_audio
import os
import re
import random
from scipy.io.wavfile import read, write
import numpy as np
import yt_dlp
import subprocess

def download_param():
    # Fetch the pretrained pitch extractors (fcpe, rmvpe) and the HuBERT content encoder used for inference.
    os.system("wget https://huggingface.co/datasets/ylzz1997/rmvpe_pretrain_model/resolve/main/fcpe.pt -O fcpe.pt")
    os.system("wget https://huggingface.co/Kit-Lemonfoot/RVC_DidntAsk/resolve/main/hubert_base.pt -O hubert_base.pt")
    os.system("wget https://huggingface.co/Kit-Lemonfoot/RVC_DidntAsk/resolve/main/rmvpe.pt -O rmvpe.pt")

print("downloading RVC models")
download_param()
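
# A sketch of an alternative download path, assuming the huggingface_hub package
# is installed (it is not used by the code above):
#
#     from huggingface_hub import hf_hub_download
#     hf_hub_download(repo_id="Kit-Lemonfoot/RVC_DidntAsk", filename="rmvpe.pt", local_dir=".")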




def download_model(url, dir_name):
    # Thin wrapper around rvc_infer.download_online_model so it can be used as a Gradio callback.
    output_models = download_online_model(url, dir_name)
    return output_models


def download_audio(url):
    # Download the best available audio stream and convert it to WAV with ffmpeg.
    ydl_opts = {
        'format': 'bestaudio/best',
        'outtmpl': 'ytdl/%(title)s.%(ext)s',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'wav',
            'preferredquality': '192',
        }],
    }

    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        info_dict = ydl.extract_info(url, download=True)
        # The post-processor replaces the original extension with .wav.
        file_path = ydl.prepare_filename(info_dict).rsplit('.', 1)[0] + '.wav'
        sample_rate, audio_data = read(file_path)
        audio_array = np.asarray(audio_data, dtype=np.int16)

    return sample_rate, audio_array


CSS = """
"""

with gr.Blocks(theme="Hev832/Applio", fill_width=True, css=CSS) as demo:
    with gr.Tab("Inferenece"):
        gr.Markdown("in progress")
        model_name = gr.Textbox(label="Model Name #", lines=1, value="")
        input_audio = gr.Audio(label="Input Audio #", type="filepath")
        with gr.Accordion("Settings", open=False):
            f0_change = gr.Slider(label="f0 change #", minimum=0, maximum=10, step=1, value=0)
            f0_method = gr.Dropdown(label="f0 method #", choices=["rmvpe+"], value="rmvpe+")
            min_pitch = gr.Textbox(label="min pitch #", lines=1, value="50")
            max_pitch = gr.Textbox(label="max pitch #", lines=1, value="1100")
            crepe_hop_length = gr.Slider(label="crepe_hop_length #", minimum=0, maximum=256, step=1, value=128)
            index_rate = gr.Slider(label="index_rate #", minimum=0, maximum=1.0, step=0.01, value=0.75)
            filter_radius = gr.Slider(label="filter_radius #", minimum=0, maximum=10.0, step=0.01, value=3)
            rms_mix_rate = gr.Slider(label="rms_mix_rate #", minimum=0, maximum=1.0, step=0.01, value=0.25)
            protect = gr.Slider(label="protect #", minimum=0, maximum=1.0, step=0.01, value=0.33)
        with gr.Accordion("Advanced Settings", open=False):
            split_infer = gr.Checkbox(label="split_infer #", value=False)
            min_silence = gr.Slider(label="min_silence #", minimum=0, maximum=1000, step=1, value=500)
            silence_threshold = gr.Slider(label="silence_threshold #", minimum=-1000, maximum=1000, step=1, value=-50)
            seek_step = gr.Slider(label="seek_step #", minimum=0, maximum=100, step=1, value=0)
            keep_silence = gr.Slider(label="keep_silence #", minimum=-1000, maximum=1000, step=1, value=100)
            do_formant = gr.Checkbox(label="do_formant #", value=False)
            quefrency = gr.Slider(label="quefrency #", minimum=0, maximum=100, step=1, value=0)
            timbre = gr.Slider(label="timbre #", minimum=0, maximum=100, step=1, value=1)
            f0_autotune = gr.Checkbox(label="f0_autotune #", value=False)
            audio_format = gr.Dropdown(label="audio_format #", choices=["wav"], value="wav")
            resample_sr = gr.Slider(label="resample_sr #", minimum=0, maximum=100, step=1, value=0)
            hubert_model_path = gr.Textbox(label="hubert_model_path #", lines=1, value="hubert_base.pt")
            rmvpe_model_path = gr.Textbox(label="rmvpe_model_path #", lines=1, value="rmvpe.pt")
            fcpe_model_path = gr.Textbox(label="fcpe_model_path #", lines=1, value="fcpe.pt")
            submit_inference = gr.Button('Inference #', variant='primary')
        result_audio = gr.Audio(label="Output Audio #", type="filepath")

    with gr.Tab("Download Model"):
        gr.Markdown("## Download Model for infernece")
        url_input = gr.Textbox(label="Model URL", placeholder="Enter the URL of the model")
        dir_name_input = gr.Textbox(label="Directory Name", placeholder="Enter the directory name")
        output = gr.Textbox(label="Output Models")
        download_button = gr.Button("Download Model")
        download_button.click(download_model, inputs=[url_input, dir_name_input], outputs=output)

        with gr.Tab(" Credits"):
            gr.Markdown(
                """
                this project made by [Blane187](https://huggingface.co/Blane187) with Improvements by [John6666](https://huggingfce.co/John6666)
                """)


    gr.on(
        triggers=[submit_inference.click],
        fn=infer_audio,
        inputs=[model_name, input_audio, f0_change, f0_method, min_pitch, max_pitch, crepe_hop_length, index_rate,
                filter_radius, rms_mix_rate, protect, split_infer, min_silence, silence_threshold, seek_step,
                keep_silence, do_formant, quefrency, timbre, f0_autotune, audio_format, resample_sr,
                hubert_model_path, rmvpe_model_path, fcpe_model_path],
        outputs=[result_audio],
        queue=True,
        show_api=True,
        show_progress="full",
    )

demo.queue()
demo.launch(debug=True, show_api=False)