gpt-omni committed
Commit 0530d2a · verified · 1 Parent(s): 198add7

Update app.py

Files changed (1):
1. app.py (+4 -1)
app.py CHANGED
@@ -13,7 +13,6 @@ from inference import OmniInference
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 omni_client = OmniInference('./checkpoint', device)
-omni_client.warm_up()
 
 
 OUT_CHUNK = 4096
@@ -21,6 +20,10 @@ OUT_RATE = 24000
 OUT_CHANNELS = 1
 
 
+@spaces.GPU
+def warmup():
+    omni_client.warm_up()
+
 @spaces.GPU
 def process_audio(audio):
     filepath = audio
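
For orientation, below is a minimal sketch of the relevant section of app.py as it stands after this commit. The apparent motivation is that on ZeroGPU Spaces the GPU is only attached while a @spaces.GPU-decorated function is running, so calling omni_client.warm_up() at import time (the removed line) would execute without a GPU; wrapping it in a decorated warmup() defers the call until a GPU has actually been allocated. The imports and the trailing comment in process_audio are filled in here for illustration and are not part of the diff.

# Sketch of app.py after this commit (imports assumed from the diff context).
import torch
import spaces
from inference import OmniInference

device = "cuda" if torch.cuda.is_available() else "cpu"
omni_client = OmniInference('./checkpoint', device)


OUT_CHUNK = 4096
OUT_RATE = 24000
OUT_CHANNELS = 1


@spaces.GPU
def warmup():
    # Warm-up now runs inside a @spaces.GPU context rather than at import time,
    # so it only executes once a GPU has been allocated to the Space.
    omni_client.warm_up()


@spaces.GPU
def process_audio(audio):
    filepath = audio
    # ... rest of process_audio is unchanged by this commit and not shown here ...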