import os

import gradio as gr
from moviepy.editor import VideoFileClip
from transformers import pipeline

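# Loading openai/whisper-large pulls down a multi-gigabyte checkpoint on first run;
# a smaller checkpoint such as "openai/whisper-small" can be swapped in for faster, lighter inference.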
whisper_model = pipeline("automatic-speech-recognition", model="openai/whisper-large")


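# Step 1: extract the audio track from the uploaded video into a WAV file the ASR pipeline can read.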
def convert_video_to_wav(video_path):
    video_clip = VideoFileClip(video_path)
    audio = video_clip.audio
    wav_file = "temp_audio.wav"
    # Write the audio out as 16-bit PCM WAV.
    audio.write_audiofile(wav_file, codec='pcm_s16le')
    video_clip.close()
    return wav_file


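# SRT timestamps use the form HH:MM:SS,mmm; convert a float number of seconds into that form.
def to_srt_time(seconds):
    millis = int(round(seconds * 1000))
    hours, rem = divmod(millis, 3_600_000)
    minutes, rem = divmod(rem, 60_000)
    secs, ms = divmod(rem, 1000)
    return f"{hours:02d}:{minutes:02d}:{secs:02d},{ms:03d}"

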
def convert_audio_to_srt(wav_file):
    # Process long audio in 30-second chunks and ask the pipeline for segment timestamps,
    # so each subtitle entry is aligned with the audio rather than placed on a fixed grid.
    transcription = whisper_model(wav_file, chunk_length_s=30, return_timestamps=True)

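    # With return_timestamps=True the result includes a "chunks" list; each entry
    # carries its text plus a (start, end) timestamp tuple in seconds.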
    srt_file = "transcription.srt"
    with open(srt_file, "w", encoding="utf-8") as f:
        for i, chunk in enumerate(transcription["chunks"]):
            start, end = chunk["timestamp"]
            if end is None:  # the final chunk can come back without an end time
                end = start + 2.0
            f.write(f"{i + 1}\n")
            f.write(f"{to_srt_time(start)} --> {to_srt_time(end)}\n")
            f.write(f"{chunk['text'].strip()}\n\n")

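    # Remove the temporary WAV file once the transcript has been written.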
    os.remove(wav_file)
    return srt_file


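# Gradio callback: receives the uploaded video and returns the path of the generated SRT file.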
def process_video(video):
    # gr.File may hand over a tempfile-like object (older Gradio) or a plain path string.
    video_path = video.name if hasattr(video, "name") else video

    wav_file = convert_video_to_wav(video_path)
    srt_file = convert_audio_to_srt(wav_file)

    return srt_file


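# Wire the callback into a minimal Gradio UI: one file upload in, one downloadable SRT out.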
interface = gr.Interface(
    fn=process_video,
    inputs=gr.File(label="Upload video file", file_types=['.mp4', '.avi', '.mkv']),
    outputs=gr.File(label="Download SRT File"),
    title="Video to SRT Subtitle Generator",
    description="Upload a video file (e.g., .mp4) and the app will generate a subtitle file (SRT format) using the Whisper model."
)

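# launch() serves the app locally; pass share=True to expose a temporary public URL.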
interface.launch()