sanchit-gandhi committed · fee1424
1 Parent(s): 3bd5d2b
Update app.py
app.py CHANGED
@@ -13,7 +13,7 @@ from transformers.pipelines.audio_utils import ffmpeg_read
 
 title = "Whisper JAX: The Fastest Whisper API ⚡️"
 
-description = "Whisper JAX is an optimised implementation of the [Whisper model](https://huggingface.co/openai/whisper-large-v2) by OpenAI. It runs on JAX with a TPU v4-8 in the backend. Compared to PyTorch on an A100 GPU, it is over **
+description = "Whisper JAX is an optimised implementation of the [Whisper model](https://huggingface.co/openai/whisper-large-v2) by OpenAI. It runs on JAX with a TPU v4-8 in the backend. Compared to PyTorch on an A100 GPU, it is over **100x** faster, making it the fastest Whisper API available."
 
 API_URL = os.getenv("API_URL")
 API_URL_FROM_FEATURES = os.getenv("API_URL_FROM_FEATURES")
@@ -155,9 +155,7 @@ if __name__ == "__main__":
 demo = gr.Blocks()
 
 with demo:
-    gr.TabbedInterface(
-        [audio_chunked, youtube], ["Transcribe Audio", "Transcribe YouTube"]
-    )
+    gr.TabbedInterface([audio_chunked, youtube], ["Transcribe Audio", "Transcribe YouTube"])
 
 demo.queue()
 demo.launch()
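For context on the second hunk: the Space builds two `gr.Interface` objects (`audio_chunked` for uploaded or recorded audio, `youtube` for YouTube URLs) and groups them into one tabbed Gradio app inside a `gr.Blocks` context. A minimal, self-contained sketch of that pattern is below; the `transcribe_audio` and `transcribe_youtube` placeholder functions are illustrative stand-ins for the Space's real calls to the Whisper JAX TPU backend (via `API_URL` / `API_URL_FROM_FEATURES`), not the actual implementation.

```python
import gradio as gr

# Illustrative stand-ins: the real Space sends the input to a Whisper JAX
# TPU backend (via API_URL / API_URL_FROM_FEATURES) instead of echoing it.
def transcribe_audio(audio_path):
    return f"(transcription of {audio_path})"

def transcribe_youtube(youtube_url):
    return f"(transcription of {youtube_url})"

# One gr.Interface per input type, mirroring the Space's audio_chunked
# and youtube interfaces.
audio_chunked = gr.Interface(
    fn=transcribe_audio,
    inputs=gr.Audio(type="filepath"),
    outputs="text",
)
youtube = gr.Interface(
    fn=transcribe_youtube,
    inputs=gr.Textbox(label="YouTube URL"),
    outputs="text",
)

demo = gr.Blocks()

with demo:
    # Same call as in the diff, now on a single line; behaviour is unchanged.
    gr.TabbedInterface([audio_chunked, youtube], ["Transcribe Audio", "Transcribe YouTube"])

demo.queue()
demo.launch()
```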