import gradio as gr
import numpy as np
from transformers import pipeline
import torch
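# Run the model on the first GPU when one is available, otherwise on CPU.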
device = "cuda:0" if torch.cuda.is_available() else "cpu"
transcriber = pipeline(
    "automatic-speech-recognition",
    model="mahimairaja/whisper-base-tamil",
    chunk_length_s=15,
    device=device,
)
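# Force the decoder prompt to Tamil transcription so the model neither
# auto-detects the language nor switches to translation.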
transcriber.model.config.forced_decoder_ids = transcriber.tokenizer.get_decoder_prompt_ids(language="ta", task="transcribe")
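# Gradio hands the recorded or uploaded clip to this function as a file path
# (type="filepath"); the pipeline returns a dict whose "text" field is the transcript.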
def transcribe(audio):
    return transcriber(audio)["text"]
TITLE = "ASR for ALL - Democratizing Tamil"
demo = gr.Blocks()
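# Tab 1: transcribe audio captured live from the browser microphone.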
mic_transcribe = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources="microphone", type="filepath"),
    outputs="text",
    title=TITLE,
)
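# Tab 2: transcribe an uploaded audio file, with bundled Tamil clips as examples.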
file_transcribe = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources="upload", type="filepath"),
    outputs="text",
    examples=[
        "assets/tamil-audio-01.mp3",
        "assets/tamil-audio-02.mp3",
        "assets/tamil-audio-03.mp3",
        "assets/tamil-audio-04.mp3",
    ],
    title=TITLE,
)
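# Present both interfaces as tabs inside the Blocks container.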
with demo:
    gr.TabbedInterface(
        [mic_transcribe, file_transcribe],
        ["Real Time Transcription", "Audio File"],
    )

demo.launch(share=True)
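# Quick local check (a sketch, not part of the Space itself): before launching the UI,
# the handler can be called directly on one of the bundled example clips, e.g.
#
#     print(transcribe("assets/tamil-audio-01.mp3"))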