use gpu when possible
Files changed:
- app.py  +2 -1
- gaussian_diffusion.py  +1 -1
- requirements.txt  +4 -4
app.py
CHANGED
@@ -1,9 +1,10 @@
+import spaces
 import gradio as gr
 import torch
 import numpy as np
 from synthesize import synthesize
 
-
+@spaces.GPU
 def text_to_speech(text, speaker_id, cfg_scale, num_sampling_steps):
     audio, sample_rate = synthesize(
         text=text,
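For context, import spaces plus the @spaces.GPU decorator is the Hugging Face ZeroGPU pattern: the Space stays on CPU while idle and a GPU is attached only for the duration of each decorated call. A minimal sketch of how the decorated handler can plug into a Gradio app follows; the return shape and the gr.Interface widgets are illustrative assumptions, and the synthesize kwargs after text=text simply mirror the handler's parameters because the diff is truncated there.

# Sketch only: everything past the call to synthesize (return shape, widgets)
# is assumed, not copied from the Space.
import spaces
import gradio as gr

from synthesize import synthesize


@spaces.GPU  # a GPU is allocated only while this function runs
def text_to_speech(text, speaker_id, cfg_scale, num_sampling_steps):
    audio, sample_rate = synthesize(
        text=text,
        speaker_id=speaker_id,                  # assumed: remaining kwargs
        cfg_scale=cfg_scale,                    # mirror the handler's params
        num_sampling_steps=num_sampling_steps,
    )
    return sample_rate, audio  # gr.Audio accepts a (sample_rate, waveform) tuple


demo = gr.Interface(
    fn=text_to_speech,
    inputs=[
        gr.Textbox(label="Text"),
        gr.Number(label="Speaker ID", precision=0),
        gr.Slider(1.0, 10.0, value=4.0, label="CFG scale"),
        gr.Slider(10, 500, value=100, step=10, label="Sampling steps"),
    ],
    outputs=gr.Audio(label="Generated speech"),
)

if __name__ == "__main__":
    demo.launch()

On ZeroGPU hardware, CUDA work attempted outside a decorated function has no device to run on, which is why the decorator wraps the whole synthesis path.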
gaussian_diffusion.py
CHANGED
@@ -202,7 +202,7 @@ class GaussianDiffusion:
         )
 
         # convert all numpy arrays to torch tensors
-        DEVICE = th.device("cpu")
+        DEVICE = th.device("cuda") if th.cuda.is_available() else th.device("cpu")
         self.betas = th.from_numpy(self.betas).to(DEVICE)
         self.alphas_cumprod = th.from_numpy(self.alphas_cumprod).to(DEVICE)
         self.alphas_cumprod_prev = th.from_numpy(self.alphas_cumprod_prev).to(DEVICE)
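The gaussian_diffusion.py change keeps the model usable without a GPU: the diffusion schedule tensors land on CUDA when a device is visible and stay on CPU otherwise. A standalone sketch of that fallback pattern, using a toy noise schedule rather than the model's real betas:

# Illustrative fallback only; the schedule below is a stand-in.
import numpy as np
import torch as th

DEVICE = th.device("cuda") if th.cuda.is_available() else th.device("cpu")

betas = np.linspace(1e-4, 0.02, 1000, dtype=np.float64)  # toy noise schedule
alphas_cumprod = np.cumprod(1.0 - betas)

# th.from_numpy shares memory with the numpy array; .to(DEVICE) copies the
# data to the GPU when one is available and is a cheap no-op on CPU.
betas_t = th.from_numpy(betas).to(DEVICE)
alphas_cumprod_t = th.from_numpy(alphas_cumprod).to(DEVICE)

print(DEVICE, betas_t.device, alphas_cumprod_t.device)

Doing the .to(DEVICE) moves once at construction time, as the diff does, avoids re-copying the schedule on every sampling step.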
requirements.txt
CHANGED
@@ -1,7 +1,7 @@
 # This file was autogenerated by uv via the following command:
-# uv pip compile pyproject.toml -o requirements.txt --python-platform x86_64-unknown-linux-gnu --emit-index-url --extra-index-url https://download.pytorch.org/whl/
+# uv pip compile pyproject.toml -o requirements.txt --python-platform x86_64-unknown-linux-gnu --emit-index-url --extra-index-url https://download.pytorch.org/whl/cu118
 --index-url https://pypi.org/simple
---extra-index-url https://download.pytorch.org/whl/
+--extra-index-url https://download.pytorch.org/whl/cu118
 
 aiofiles==23.2.1
     # via gradio
@@ -174,13 +174,13 @@ sympy==1.13.1
     # via torch
 tomlkit==0.13.2
     # via gradio
-torch==2.5.1
+torch==2.5.1
     # via
     #   diffusion-speech-360h (pyproject.toml)
     #   encodec
     #   torchaudio
     #   vocos
-torchaudio==2.5.1
+torchaudio==2.5.1
     # via
     #   encodec
     #   vocos
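Finally, the requirements change points uv's extra index at the CUDA 11.8 wheel channel so torch and torchaudio resolve to CUDA-enabled builds. A small, illustrative check that the environment actually picked up such a build:

# Illustrative sanity check; the exact version strings are examples.
import torch

print("torch:", torch.__version__)             # e.g. 2.5.1+cu118 for a CUDA wheel
print("built with CUDA:", torch.version.cuda)  # None on a CPU-only build
print("GPU visible:", torch.cuda.is_available())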