new4u committed
Commit 3b293ec
1 Parent(s): 5290d3e

Update app.py

Files changed (1)
  1. app.py +83 -84

app.py CHANGED
@@ -1,17 +1,10 @@
 import torch
-
+# from PIL import Image
 import gradio as gr
-import yt_dlp as youtube_dl
+import pytube as pt
 from transformers import pipeline
-from transformers.pipelines.audio_utils import ffmpeg_read
-
-import tempfile
-import os
 
 MODEL_NAME = "openai/whisper-large-v3"
-BATCH_SIZE = 8
-FILE_LIMIT_MB = 1000
-YT_LENGTH_LIMIT_S = 3600  # limit to 1 hour YouTube files
 
 device = 0 if torch.cuda.is_available() else "cpu"
 
@@ -23,12 +16,32 @@ pipe = pipeline(
 )
 
 
-def transcribe(inputs, task):
-    if inputs is None:
-        raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
+all_special_ids = pipe.tokenizer.all_special_ids
+transcribe_token_id = all_special_ids[-5]
+translate_token_id = all_special_ids[-6]
+
+
+def transcribe(microphone, file_upload, task):
+    warn_output = ""
+    if (microphone is not None) and (file_upload is not None):
+        warn_output = (
+            "Warning: you have uploaded an audio file and also recorded with the microphone. "
+            "The recorded file will be used and the uploaded audio will be discarded.\n"
+        )
 
-    text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
-    return text
+    elif (microphone is None) and (file_upload is None):
+        return "Error: you must either record with the microphone or upload an audio file"
+
+    file = microphone if microphone is not None else file_upload
+
+    pipe.model.config.forced_decoder_ids = [[2, transcribe_token_id if task == "transcribe" else translate_token_id]]
+
+    # Transcribe with timestamps so the output contains "chunks"
+    text = pipe(file, return_timestamps=True)
+    # Convert the timestamped chunks to SRT subtitle text
+    text = convert_to_srt(text)
+
+    return warn_output + text
 
 
 def _return_yt_html_embed(yt_url):
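The new `transcribe()` selects the task by writing `forced_decoder_ids` from the hard-coded positions `[-5]` and `[-6]` of `pipe.tokenizer.all_special_ids`, relying on the usual ordering of Whisper's special tokens. A quick, illustrative way to confirm what those offsets point at for this checkpoint (not part of the commit) is:

```python
# Illustrative check, not part of app.py: inspect the tokens behind the
# hard-coded offsets used for forced_decoder_ids.
from transformers import WhisperTokenizer

tok = WhisperTokenizer.from_pretrained("openai/whisper-large-v3")
print(tok.convert_ids_to_tokens(tok.all_special_ids[-6:]))
# For Whisper tokenizers this list normally contains <|translate|> and <|transcribe|>.
```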
@@ -39,54 +52,47 @@ def _return_yt_html_embed(yt_url):
     )
     return HTML_str
 
-def download_yt_audio(yt_url, filename):
-    info_loader = youtube_dl.YoutubeDL()
-
-    try:
-        info = info_loader.extract_info(yt_url, download=False)
-    except youtube_dl.utils.DownloadError as err:
-        raise gr.Error(str(err))
-
-    file_length = info["duration_string"]
-    file_h_m_s = file_length.split(":")
-    file_h_m_s = [int(sub_length) for sub_length in file_h_m_s]
-
-    if len(file_h_m_s) == 1:
-        file_h_m_s.insert(0, 0)
-    if len(file_h_m_s) == 2:
-        file_h_m_s.insert(0, 0)
-    file_length_s = file_h_m_s[0] * 3600 + file_h_m_s[1] * 60 + file_h_m_s[2]
-
-    if file_length_s > YT_LENGTH_LIMIT_S:
-        yt_length_limit_hms = time.strftime("%HH:%MM:%SS", time.gmtime(YT_LENGTH_LIMIT_S))
-        file_length_hms = time.strftime("%HH:%MM:%SS", time.gmtime(file_length_s))
-        raise gr.Error(f"Maximum YouTube length is {yt_length_limit_hms}, got {file_length_hms} YouTube video.")
-
-    ydl_opts = {"outtmpl": filename, "format": "worstvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best"}
-
-    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
-        try:
-            ydl.download([yt_url])
-        except youtube_dl.utils.ExtractorError as err:
-            raise gr.Error(str(err))
 
-
-def yt_transcribe(yt_url, task, max_filesize=75.0):
+def yt_transcribe(yt_url, task):
+    yt = pt.YouTube(yt_url)
     html_embed_str = _return_yt_html_embed(yt_url)
+    stream = yt.streams.filter(only_audio=True)[0]
+    stream.download(filename="audio.mp3")
 
-    with tempfile.TemporaryDirectory() as tmpdirname:
-        filepath = os.path.join(tmpdirname, "video.mp4")
-        download_yt_audio(yt_url, filepath)
-        with open(filepath, "rb") as f:
-            inputs = f.read()
-
-    inputs = ffmpeg_read(inputs, pipe.feature_extractor.sampling_rate)
-    inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}
+    pipe.model.config.forced_decoder_ids = [[2, transcribe_token_id if task == "transcribe" else translate_token_id]]
 
-    text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
+    # Transcribe the downloaded audio with timestamps so the output contains "chunks"
+    text = pipe("audio.mp3", return_timestamps=True)
+    # Convert the timestamped chunks to SRT subtitle text
+    text = convert_to_srt(text)
 
     return html_embed_str, text
 
+# Convert pipeline output with timestamped "chunks" into SRT-formatted subtitle text
+def convert_to_srt(input):
+    output = ""
+    index = 1
+    for chunk in input["chunks"]:
+        start, end = chunk["timestamp"]
+        text = chunk["text"]
+        if end is None:
+            end = "None"
+        # Convert seconds to hours:minutes:seconds,milliseconds format
+        start = format_time(start)
+        end = format_time(end)
+        output += f"{index}\n{start} --> {end}\n{text}\n\n"
+        index += 1
+    return output
+
+# Helper function to format a time in seconds as HH:MM:SS,mmm
+def format_time(seconds):
+    if seconds == "None":
+        return seconds
+    hours = int(seconds // 3600)
+    minutes = int((seconds % 3600) // 60)
+    milliseconds = int((seconds % 1) * 1000)
+    seconds = int(seconds % 60)
+    return f"{hours:02}:{minutes:02}:{seconds:02},{milliseconds:03}"
 
 demo = gr.Blocks()
 
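`convert_to_srt` assumes the dictionary shape that the 🤗 Transformers ASR pipeline returns when `return_timestamps=True`: a `"chunks"` list of entries with a `"text"` string and a `(start, end)` `"timestamp"` tuple. A small illustration with hand-written chunk values (not real model output) shows the SRT text it produces; it assumes the `convert_to_srt`/`format_time` helpers above are in scope:

```python
# Illustrative only: hand-written input in the same shape as
# pipe(..., return_timestamps=True) output.
fake_result = {
    "text": " Hello world. This is a test.",
    "chunks": [
        {"timestamp": (0.0, 2.5), "text": " Hello world."},
        {"timestamp": (2.5, 5.0), "text": " This is a test."},
    ],
}
print(convert_to_srt(fake_result))
# Should print something like:
# 1
# 00:00:00,000 --> 00:00:02,500
#  Hello world.
#
# 2
# 00:00:02,500 --> 00:00:05,000
#  This is a test.
```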
@@ -94,34 +100,17 @@ mf_transcribe = gr.Interface(
     fn=transcribe,
     inputs=[
         gr.inputs.Audio(source="microphone", type="filepath", optional=True),
+        gr.inputs.Audio(source="upload", type="filepath", optional=True),
         gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
     ],
     outputs="text",
     layout="horizontal",
     theme="huggingface",
-    title="Whisper Large V3: Transcribe Audio",
+    title="Audio-to-Text-SRT: Automatic Subtitle Generation",
     description=(
-        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
-        f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
-        " of arbitrary length."
-    ),
-    allow_flagging="never",
-)
-
-file_transcribe = gr.Interface(
-    fn=transcribe,
-    inputs=[
-        gr.inputs.Audio(source="upload", type="filepath", optional=True, label="Audio file"),
-        gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
-    ],
-    outputs="text",
-    layout="horizontal",
-    theme="huggingface",
-    title="Whisper Large V3: Transcribe Audio",
-    description=(
-        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
-        f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
-        " of arbitrary length."
+        "Record in the browser or upload an audio file, or paste in a YouTube link, and convert it to text and subtitles with one click! This demo uses the"
+        f" model [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio and video files of"
+        " arbitrary length. Transcription is much faster on a GPU, at roughly $0.6 per hour of audio (about 5 RMB). If you have longer content and need faster turnaround, message the author on WeChat (1259388) with the note 'speech-to-text'."
     ),
     allow_flagging="never",
 )
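Both interfaces are written against the legacy `gr.inputs.*` component API and the `layout=`/`theme=` arguments of `gr.Interface`, which only exist in older Gradio releases; the Space's pinned Gradio version is not visible in this diff. If the Space were moved to Gradio 4.x, the microphone/upload interface would need to look roughly like the following untested sketch, which is not part of this commit:

```python
# Hypothetical Gradio 4.x equivalent of mf_transcribe (not in this commit).
import gradio as gr

mf_transcribe_v4 = gr.Interface(
    fn=transcribe,  # the transcribe(microphone, file_upload, task) function above
    inputs=[
        gr.Audio(sources=["microphone"], type="filepath"),
        gr.Audio(sources=["upload"], type="filepath"),
        gr.Radio(["transcribe", "translate"], value="transcribe", label="Task"),
    ],
    outputs="text",
    title="Audio-to-Text-SRT: Automatic Subtitle Generation",
    allow_flagging="never",
)
```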
@@ -130,22 +119,32 @@ yt_transcribe = gr.Interface(
     fn=yt_transcribe,
     inputs=[
         gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
         gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe")
     ],
     outputs=["html", "text"],
     layout="horizontal",
     theme="huggingface",
-    title="Whisper Large V3: Transcribe YouTube",
+    title="Audio-to-Text-SRT: Automatic Subtitle Generation",
     description=(
-        "Transcribe long-form YouTube videos with the click of a button! Demo uses the checkpoint"
-        f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe video files of"
-        " arbitrary length."
+        "Record in the browser or upload an audio file, or paste in a YouTube link, and convert it to text and subtitles with one click! This demo uses the"
+        f" model [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio and video files of"
+        " arbitrary length. Transcription is much faster on a GPU, at roughly $0.6 per hour of audio (about 5 RMB). If you have longer content and need faster turnaround, message the author on WeChat (1259388) with the note 'speech-to-text'."
     ),
     allow_flagging="never",
 )
 
+# # Load the images
+# image1 = Image("wechatqrcode.jpg")
+# image2 = Image("paypalqrcode.png")
+
+# # Define a function that returns the images and captions
+# def display_images():
+#     return image1, "WeChat Pay", image2, "PayPal"
+
 with demo:
-    gr.TabbedInterface([mf_transcribe, file_transcribe, yt_transcribe], ["Microphone", "Audio file", "YouTube"])
+    gr.TabbedInterface([mf_transcribe, yt_transcribe], ["Transcribe Audio to Text", "YouTube to Subtitles"])
+
+# Create a gradio interface with no inputs and four outputs
+# gr.Interface(display_images, [], [gr.outputs.Image(), gr.outputs.Textbox(), gr.outputs.Image(), gr.outputs.Textbox()], layout="horizontal").launch()
 
 demo.launch(enable_queue=True)
-
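Outside of Gradio, the core of the new YouTube path can be exercised from a plain Python session along these lines. This is an illustrative sketch with a placeholder URL, not part of the commit, and it assumes the `convert_to_srt`/`format_time` helpers from the diff above are defined in scope:

```python
# Illustrative end-to-end sketch (placeholder URL, not part of app.py).
import pytube as pt
from transformers import pipeline

pipe = pipeline(task="automatic-speech-recognition", model="openai/whisper-large-v3")

yt = pt.YouTube("https://www.youtube.com/watch?v=<VIDEO_ID>")  # placeholder: any short video
yt.streams.filter(only_audio=True)[0].download(filename="audio.mp3")

# Despite the .mp3 name, the downloaded stream is an MP4/WebM container;
# ffmpeg inside the pipeline decodes it by content, not by extension.
result = pipe("audio.mp3", return_timestamps=True)
print(convert_to_srt(result))
```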