import torch
# from PIL import Image
import gradio as gr
import pytube as pt
from transformers import pipeline
MODEL_NAME = "openai/whisper-large-v3"
device = 0 if torch.cuda.is_available() else "cpu"
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)
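
# NOTE (assumption): Whisper's task tokens (<|transcribe|>, <|translate|>) sit near the end of the
# tokenizer's special-token list, so the -5/-6 offsets below are a heuristic tied to this checkpoint's
# vocabulary rather than a documented API; re-check the indices if MODEL_NAME changes.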
all_special_ids = pipe.tokenizer.all_special_ids
transcribe_token_id = all_special_ids[-5]
translate_token_id = all_special_ids[-6]

def transcribe(microphone, file_upload, task):
    warn_output = ""
    if (microphone is not None) and (file_upload is not None):
        warn_output = (
            "警告:您已经上传了一个音频文件,并且使用了麦克风录制。"
            "将使用麦克风录制的音频,上传的文件将被丢弃。\n"
        )
    elif (microphone is None) and (file_upload is None):
        return "错误:您必须使用麦克风录制或上传一个音频文件"

    file = microphone if microphone is not None else file_upload

    # Force the decoder to transcribe or translate by setting the task token.
    pipe.model.config.forced_decoder_ids = [[2, transcribe_token_id if task == "transcribe" else translate_token_id]]

    # Run inference with timestamps so the result can be converted to SRT.
    text = pipe(file, return_timestamps=True)
    text = convert_to_srt(text)
    return warn_output + text

def _return_yt_html_embed(yt_url):
    # Note: this only extracts the video id from URLs of the form "...watch?v=<id>".
    video_id = yt_url.split("?v=")[-1]
    HTML_str = (
        f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
        " </center>"
    )
    return HTML_str

def yt_transcribe(yt_url, task):
    yt = pt.YouTube(yt_url)
    html_embed_str = _return_yt_html_embed(yt_url)

    # Download the audio-only stream of the video.
    stream = yt.streams.filter(only_audio=True)[0]
    stream.download(filename="audio.mp3")

    pipe.model.config.forced_decoder_ids = [[2, transcribe_token_id if task == "transcribe" else translate_token_id]]
    text = pipe("audio.mp3", return_timestamps=True)
    text = convert_to_srt(text)
    return html_embed_str, text

# Convert pipeline output to SRT: a numbered sequence of subtitles, each with a time range and text.
def convert_to_srt(input):
    output = ""
    index = 1
    for chunk in input["chunks"]:
        start, end = chunk["timestamp"]
        text = chunk["text"]
        # The final chunk may have no end timestamp.
        if end is None:
            end = "None"
        # Convert seconds to hours:minutes:seconds,milliseconds format.
        start = format_time(start)
        end = format_time(end)
        output += f"{index}\n{start} --> {end}\n{text}\n\n"
        index += 1
    return output
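
# Illustrative example (assumed sample values, not produced at import time): with
# return_timestamps=True the pipeline returns
#   {"text": "...", "chunks": [{"timestamp": (0.0, 2.5), "text": " 你好"}, ...]}
# which convert_to_srt turns into numbered SRT blocks such as:
#   1
#   00:00:00,000 --> 00:00:02,500
#    你好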

# Helper function to format a time in seconds as HH:MM:SS,mmm.
def format_time(seconds):
    if seconds == "None":
        return seconds
    hours = int(seconds // 3600)
    minutes = int((seconds % 3600) // 60)
    # Take the millisecond part from the fractional seconds before truncating them.
    milliseconds = int((seconds % 1) * 1000)
    seconds = int(seconds % 60)
    return f"{hours:02}:{minutes:02}:{seconds:02},{milliseconds:03}"
demo = gr.Blocks()

mf_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.inputs.Audio(source="microphone", type="filepath", optional=True),
        gr.inputs.Audio(source="upload", type="filepath", optional=True),
        gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
    ],
    outputs="text",
    layout="horizontal",
    theme="huggingface",
    title="Audio-to-Text-SRT 自动生成字幕",
    description=(
        "直接在网页录音或上传音频文件,或加入 YouTube 链接,轻松转换为文字和字幕格式!本演示采用"
        f" 模型 [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) 和 🤗 Transformers 转换任意长度的"
        "音视频文件!使用 GPU 转换效率会大幅提高,大约每小时 $0.6,约相当于人民币 5 元。如果您有较长内容,需要更快的转换速度,请私信作者微信 1259388,并备注“语音转文字”。"
    ),
    allow_flagging="never",
)

yt_transcribe = gr.Interface(
    fn=yt_transcribe,
    inputs=[
        gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
        gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
    ],
    outputs=["html", "text"],
    layout="horizontal",
    theme="huggingface",
    title="Audio-to-Text-SRT 自动生成字幕",
    description=(
        "直接在网页录音或上传音频文件,或加入 YouTube 链接,轻松转换为文字和字幕格式!本演示采用"
        f" 模型 [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) 和 🤗 Transformers 转换任意长度的"
        "音视频文件!使用 GPU 转换效率会大幅提高,大约每小时 $0.6,约相当于人民币 5 元。如果您有较长内容,需要更快的转换速度,请私信作者微信 1259388,并备注“语音转文字”。"
    ),
    allow_flagging="never",
)
# # Load the images
# image1 = Image("wechatqrcode.jpg")
# image2 = Image("paypalqrcode.png")
# # Define a function that returns the images and captions
# def display_images():
# return image1, "WeChat Pay", image2, "PayPal"

with demo:
    gr.TabbedInterface([mf_transcribe, yt_transcribe], ["转译音频成文字", "YouTube转字幕"])
# Create a gradio interface with no inputs and four outputs
# gr.Interface(display_images, [], [gr.outputs.Image(), gr.outputs.Textbox(), gr.outputs.Image(), gr.outputs.Textbox()], layout="horizontal").launch()
demo.launch(enable_queue=True)