thealphamerc committed • Commit 2d6bfef • 1 parent: fdad218

Display srt format
Browse files:
- .vscode/settings.json +3 -0
- app.py +118 -12
- requirements.txt +2 -1
.vscode/settings.json (ADDED)

@@ -0,0 +1,3 @@
+{
+    "cSpell.words": ["pysrt"]
+}
app.py (CHANGED)
@@ -1,10 +1,13 @@
 import os
+import logging
 os.system("pip install git+https://github.com/openai/whisper.git")
-from pytube import YouTube
 import gradio as gr
 from subprocess import call
 import whisper
-import
+from datetime import timedelta
+from pytube import YouTube
+import pandas as pd
+import pysrt
 # from transformers.pipelines.audio_utils import ffmpeg_read
 
 
@@ -40,10 +43,17 @@ def inference(text):
     return 'tts_output.wav'
 
 
-
+baseModel = whisper.load_model("base")
+smallModel = whisper.load_model("small")
+
+
+df_init = pd.DataFrame(columns=['start', 'end', 'text'])
+transcription_df = gr.DataFrame(value=df_init, label="Transcription dataframe", row_count=(
+    0, "dynamic"), max_rows=30, wrap=True, overflow_row_behaviour='paginate')
+
 
 inputs = gr.components.Audio(type="filepath", label="Add audio file")
-outputs = gr.components.Textbox()
+outputs = [gr.components.Textbox(), transcription_df]
 title = "Transcribe multi-lingual audio clips"
 description = "An example of using TTS to generate speech from text."
 article = ""
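Review note on the hunk above: both Whisper checkpoints are loaded at import time, so the Space holds `base` and `small` in memory even though each request uses only one. A minimal lazy-loading sketch is below; `get_model` is a hypothetical helper and an assumed refactor, not part of this commit.

# Hypothetical sketch (not part of this commit): load each
# Whisper checkpoint once, on first use, instead of at import time.
_models = {}

def get_model(name):
    if name not in _models:
        _models[name] = whisper.load_model(name)
    return _models[name]

# usage: result = get_model("small").transcribe(audio=inputs)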
@@ -68,12 +78,60 @@ def transcribe(inputs):
     # inputs = f.read()
 
     # load audio and pad/trim it to fit 30 seconds
-    result =
-
+    result = smallModel.transcribe(audio=inputs, language='english',
+                                   verbose=False)
     # ---------------------------------------------------
+    segments = result['segments']
+    for segment in segments:
+        startTime = str(0)+str(timedelta(seconds=int(segment['start'])))+',000'
+        endTime = str(0)+str(timedelta(seconds=int(segment['end'])))+',000'
+        text = segment['text']
+        segmentId = segment['id']+1
+        segment = f"{segmentId}\n{startTime} --> {endTime}\n{text[1:] if text[0] is ' ' else text}\n\n"
+
+        srtFilename = os.path.join("output/SrtFiles", inputs.split(
+            '/')[-1].split('.')[0]+'.srt')
+        with open(srtFilename, 'a', encoding='utf-8') as srtFile:
+            srtFile.write(segment)
 
-
-
+        rawFilename = os.path.join("output/SrtFiles", inputs.split(
+            '/')[-1].split('.')[0]+'.srt')
+        with open(rawFilename, 'a', encoding='utf-8') as srtFile:
+            srtFile.write(segment)
+    try:
+
+        srt_path = srtFilename
+        df = pd.DataFrame(columns=['start', 'end', 'text'])
+        subs = pysrt.open(srt_path)
+
+        objects = []
+        for sub in subs:
+            start_hours = str(str(sub.start.hours) + "00")[0:2] if len(
+                str(sub.start.hours)) == 2 else str("0" + str(sub.start.hours) + "00")[0:2]
+            end_hours = str(str(sub.end.hours) + "00")[0:2] if len(
+                str(sub.end.hours)) == 2 else str("0" + str(sub.end.hours) + "00")[0:2]
+
+            start_minutes = str(str(sub.start.minutes) + "00")[0:2] if len(
+                str(sub.start.minutes)) == 2 else str("0" + str(sub.start.minutes) + "00")[0:2]
+            end_minutes = str(str(sub.end.minutes) + "00")[0:2] if len(
+                str(sub.end.minutes)) == 2 else str("0" + str(sub.end.minutes) + "00")[0:2]
+
+            start_seconds = str(str(sub.start.seconds) + "00")[0:2] if len(
+                str(sub.start.seconds)) == 2 else str("0" + str(sub.start.seconds) + "00")[0:2]
+            end_seconds = str(str(sub.end.seconds) + "00")[0:2] if len(
+                str(sub.end.seconds)) == 2 else str("0" + str(sub.end.seconds) + "00")[0:2]
+
+            start = start_hours + ":" + start_minutes + ":" + start_seconds + ",000"
+            end = end_hours + ":" + end_minutes + ":" + end_seconds + ",000"
+            text = sub.text
+            objects.append([start, end, text])
+
+        df = pd.DataFrame(objects, columns=['start', 'end', 'text'])
+    except Exception as e:
+        print('Error: ', e)
+        df = pd.DataFrame(columns=['start', 'end', 'text'])
+
+    return [result["text"], df]
 
 
 # Transcribe youtube video
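Review notes on the hunk above: `str(0)+str(timedelta(...))+',000'` zero-prefixes the hour and hard-codes `,000` milliseconds, so sub-second timing is lost; `text[0] is ' '` is an identity comparison where `==` (or simply `text.strip()`) is intended; and `srtFilename` and `rawFilename` resolve to the same path opened in append mode, so each segment is written twice and re-runs keep appending to the old file. A minimal sketch of an explicit SRT timestamp helper, assuming the segment times are floats in seconds (`srt_timestamp` is hypothetical, not part of this commit):

# Hypothetical sketch (not part of this commit):
def srt_timestamp(seconds):
    # SRT timestamps are HH:MM:SS,mmm with zero-padded fields
    millis = int(round(seconds * 1000))
    hours, rest = divmod(millis, 3600000)
    minutes, rest = divmod(rest, 60000)
    secs, millis = divmod(rest, 1000)
    return f"{hours:02d}:{minutes:02d}:{secs:02d},{millis:03d}"

# usage: startTime = srt_timestamp(segment['start'])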
@@ -85,13 +143,60 @@ def youtube_transcript(url):
         source = yt.streams.filter(progressive=True, file_extension='mp4').order_by(
             'resolution').desc().first().download('output/youtube')
 
-        transcript =
+        transcript = baseModel.transcribe(source)
         return transcript["text"]
     except Exception as e:
         print('Error: ', e)
         return 'Error: ' + str(e)
 
 
+def displaySrtFile(srtFilename):
+    with open(srtFilename, 'r', encoding='utf-8') as srtFile:
+        srtContent = srtFile.read()
+
+    try:
+
+        df = pd.DataFrame(columns=['start', 'end', 'text'])
+        srt_path = srtFilename
+        subs = pysrt.open(srt_path)
+
+        objects = []
+        for sub in subs:
+
+            start_hours = str(str(sub.start.hours) + "00")[0:2] if len(
+                str(sub.start.hours)) == 2 else str("0" + str(sub.start.hours) + "00")[0:2]
+            end_hours = str(str(sub.end.hours) + "00")[0:2] if len(
+                str(sub.end.hours)) == 2 else str("0" + str(sub.end.hours) + "00")[0:2]
+
+            start_minutes = str(str(sub.start.minutes) + "00")[0:2] if len(
+                str(sub.start.minutes)) == 2 else str("0" + str(sub.start.minutes) + "00")[0:2]
+            end_minutes = str(str(sub.end.minutes) + "00")[0:2] if len(
+                str(sub.end.minutes)) == 2 else str("0" + str(sub.end.minutes) + "00")[0:2]
+
+            start_seconds = str(str(sub.start.seconds) + "00")[0:2] if len(
+                str(sub.start.seconds)) == 2 else str("0" + str(sub.start.seconds) + "00")[0:2]
+            end_seconds = str(str(sub.end.seconds) + "00")[0:2] if len(
+                str(sub.end.seconds)) == 2 else str("0" + str(sub.end.seconds) + "00")[0:2]
+
+            start_millis = str(str(sub.start.milliseconds) + "000")[0:3]
+            end_millis = str(str(sub.end.milliseconds) + "000")[0:3]
+            objects.append([sub.text, f'{start_hours}:{start_minutes}:{start_seconds}.{start_millis}',
+                            f'{end_hours}:{end_minutes}:{end_seconds}.{end_millis}'])
+
+        for object in objects:
+            srt_to_df = {
+                'start': [object[1]],
+                'end': [object[2]],
+                'text': [object[0]]
+            }
+
+            df = pd.concat([df, pd.DataFrame(srt_to_df)])
+    except Exception as e:
+        print("Error creating srt df")
+
+    return srtContent
+
+
 audio_chunked = gr.Interface(
     fn=transcribe,
     inputs=inputs,
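The padding expressions in `displaySrtFile` all reduce to left-padding an integer with zeros; pysrt's `SubRipTime` exposes `hours`, `minutes`, `seconds`, and `milliseconds` as plain ints, so f-string padding says the same thing in one line. Note one behavioral wrinkle: the millisecond expression `str(str(ms) + "000")[0:3]` right-pads, so 5 ms renders as `500` rather than `005`; the sketch below left-pads, which is presumably the intent. `fmt_subriptime` is a hypothetical helper, not part of this commit.

# Hypothetical sketch (not part of this commit):
def fmt_subriptime(t):
    # zero-pad each SubRipTime field explicitly
    return f"{t.hours:02d}:{t.minutes:02d}:{t.seconds:02d}.{t.milliseconds:03d}"

# usage: objects.append([sub.text, fmt_subriptime(sub.start), fmt_subriptime(sub.end)])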
@@ -132,7 +237,7 @@ youtube_chunked = gr.Interface(
     description=description,
     article=article,
     examples=[
-        [
+        ["https://www.youtube.com/watch?v=nlMuHtV82q8&ab_channel=NothingforSale24",],
         ["https://www.youtube.com/watch?v=JzPfMbG1vrE&ab_channel=ExplainerVideosByLauren",],
         ["https://www.youtube.com/watch?v=S68vvV0kod8&ab_channel=Pearl-CohnTelevision"]
 
@@ -140,10 +245,11 @@ youtube_chunked = gr.Interface(
 
 )
 
+
 demo = gr.Blocks()
 with demo:
-    gr.TabbedInterface([
-    "
+    gr.TabbedInterface([audio_chunked, youtube_chunked, microphone_chunked], [
+                       "Audio File", "Youtube", "Microphone"])
 demo.queue(concurrency_count=1, max_size=5)
 demo.launch(show_api=False)
 
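Two observations on the new Blocks section: `microphone_chunked` is referenced but does not appear in this diff (presumably defined in an unchanged region of app.py), and the new `displaySrtFile` helper is never called in the hunks shown. A sketch of how it could be surfaced as its own tab follows; the `srt_viewer` name and component labels are assumptions, not part of this commit.

# Hypothetical wiring sketch (not part of this commit):
srt_viewer = gr.Interface(
    fn=displaySrtFile,
    inputs=gr.components.Textbox(label="Path to generated .srt file"),
    outputs=gr.components.Textbox(label="SRT content"),
)
# e.g. gr.TabbedInterface([audio_chunked, youtube_chunked, microphone_chunked, srt_viewer],
#                         ["Audio File", "Youtube", "Microphone", "SRT"])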
requirements.txt (CHANGED)

@@ -1,3 +1,4 @@
 whisper
 gradio===3.27.0
-pytube
+pytube
+pysrt
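Note that the `whisper` package on PyPI is not OpenAI's Whisper (that one ships as `openai-whisper`), which is presumably why app.py pip-installs it from GitHub at runtime via `os.system`. Assuming the Space resolves requirements at build time, a pinned VCS requirement here could replace that runtime install; this is a suggested alternative, not part of this commit.

# Hypothetical requirements.txt line (not part of this commit):
git+https://github.com/openai/whisper.git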