SudeendraMG committed · commit b212243
1 Parent(s): d42b84e

Update app.py
app.py CHANGED
@@ -13,10 +13,11 @@ transcriber_gujarati = pipeline("automatic-speech-recognition", model="ai4bharat
 transcriber_telugu = pipeline("automatic-speech-recognition", model="krishnateja/wav2vec2-telugu_150")
 # transcriber_sinhala = pipeline("automatic-speech-recognition", model="ai4bharat/indicwav2vec_v1_sinhala")
 # transcriber_tamil = pipeline("automatic-speech-recognition", model="ai4bharat/indicwav2vec_v1_tamil")
+transcriber_tamil = pipeline("automatic-speech-recognition", model="Amrrs/wav2vec2-large-xlsr-53-tamil")
 # transcriber_nepali = pipeline("automatic-speech-recognition", model="ai4bharat/indicwav2vec_v1_nepali")
 # transcriber_marathi = pipeline("automatic-speech-recognition", model="ai4bharat/indicwav2vec_v1_marathi")
 
-languages = ["hindi","bengali","odia","gujarati","telugu"]
+languages = ["hindi","bengali","odia","gujarati","telugu","tamil"]
 
 def resample_to_16k(audio, orig_sr):
     y_resampled = librosa.resample(y=audio, orig_sr=orig_sr, target_sr=16000)
@@ -38,7 +39,7 @@ def transcribe(audio,lang="hindi"):
 
 demo = gr.Interface(
     transcribe,
-    inputs=["microphone",gr.Radio(["hindi","bengali","odia","gujarati","telugu"],value="hindi")],
+    inputs=["microphone",gr.Radio(["hindi","bengali","odia","gujarati","telugu","tamil"],value="hindi")],
     # inputs=["microphone",gr.Radio(["hindi","bengali","odia","gujarati","telugu","sinhala","tamil","nepali","marathi"],value="hindi")],
     outputs=["text","text"],
     examples=[["./Samples/Hindi_1.mp3","hindi"],["./Samples/Hindi_2.mp3","hindi"],["./Samples/Hindi_3.mp3","hindi"],["./Samples/Hindi_4.mp3","hindi"],["./Samples/Hindi_5.mp3","hindi"],["./Samples/Tamil_2.mp3","hindi"],["./Samples/climate ex short.wav","hindi"],["./Samples/Gujarati_1.wav","gujarati"],["./Samples/Gujarati_2.wav","gujarati"],["./Samples/Bengali_1.wav","bengali"],["./Samples/Bengali_2.wav","bengali"]])
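
Below is a minimal sketch of how the updated app.py presumably fits together after this commit. Only the Telugu and Tamil model ids appear in the diff; the dict-based dispatch inside transcribe(), the (sample_rate, waveform) handling of the microphone input, and the normalization step are illustrative assumptions, not the author's exact code.

import librosa
import numpy as np
import gradio as gr
from transformers import pipeline

# Model ids taken from the diff; the other languages would get their own pipelines.
transcriber_telugu = pipeline("automatic-speech-recognition", model="krishnateja/wav2vec2-telugu_150")
transcriber_tamil = pipeline("automatic-speech-recognition", model="Amrrs/wav2vec2-large-xlsr-53-tamil")

languages = ["hindi", "bengali", "odia", "gujarati", "telugu", "tamil"]

# Hypothetical mapping from the radio choice to its pipeline (not shown in the diff).
transcribers = {
    "telugu": transcriber_telugu,
    "tamil": transcriber_tamil,
    # the remaining entries of `languages` would map to their pipelines
}

def resample_to_16k(audio, orig_sr):
    # wav2vec2-style checkpoints expect 16 kHz mono input
    return librosa.resample(y=audio, orig_sr=orig_sr, target_sr=16000)

def transcribe(audio, lang="hindi"):
    sr, y = audio                        # Gradio microphone input: (sample_rate, ndarray)
    y = y.astype(np.float32)
    y /= max(np.abs(y).max(), 1e-9)      # normalize to [-1, 1]; assumed step
    y_16k = resample_to_16k(y, sr)
    result = transcribers[lang](y_16k)
    return lang, result["text"]          # two values, matching outputs=["text","text"]

demo = gr.Interface(
    transcribe,
    inputs=["microphone", gr.Radio(languages, value="hindi")],
    outputs=["text", "text"],
)

if __name__ == "__main__":
    demo.launch()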