jhj0517 committed
Commit 5f3fe7d
1 Parent(s): 7e8138f

implement txt file format in `whisper_inference.py`
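
The commit renames `generate_and_write_subtitle` to `generate_and_write_file` and swaps the subtitle-only `subformat` argument for a more general `file_format`, so plain-text output can be selected alongside SRT and WebVTT. As a rough, hypothetical usage sketch (not part of the commit; the segment data, file name, and an existing `outputs` folder are assumptions):

from modules.whisper_Inference import WhisperInference

# Hypothetical segments; in the app these come from Whisper's transcribe() result.
segments = [
    {"start": 0.0, "end": 2.4, "text": "Hello there."},
    {"start": 2.4, "end": 5.1, "text": "This is a test."},
]

# Writes outputs/example.txt (or .srt / .vtt for the other formats) and
# returns the written content, which the UI shows in a gr.Textbox.
content = WhisperInference.generate_and_write_file(
    file_name="example",
    transcribed_segments=segments,
    add_timestamp=False,
    file_format="txt",
)
print(content)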

Files changed (1)
  1. modules/whisper_Inference.py +33 -28
modules/whisper_Inference.py CHANGED
@@ -8,7 +8,7 @@ from datetime import datetime
 import torch
 
 from .base_interface import BaseInterface
-from modules.subtitle_manager import get_srt, get_vtt, write_file, safe_filename
+from modules.subtitle_manager import get_srt, get_vtt, get_txt, write_file, safe_filename
 from modules.youtube_manager import get_ytdata, get_ytaudio
 
 DEFAULT_MODEL_SIZE = "large-v2"
@@ -30,7 +30,7 @@ class WhisperInference(BaseInterface):
                         fileobjs: list,
                         model_size: str,
                         lang: str,
-                        subformat: str,
+                        file_format: str,
                         istranslate: bool,
                         add_timestamp: bool,
                         beam_size: int,
@@ -49,8 +49,8 @@ class WhisperInference(BaseInterface):
             Whisper model size from gr.Dropdown()
         lang: str
             Source language of the file to transcribe from gr.Dropdown()
-        subformat: str
-            Subtitle format to write from gr.Dropdown(). Supported format: [SRT, WebVTT]
+        file_format: str
+            File format to write from gr.Dropdown(). Supported format: [SRT, WebVTT, txt]
         istranslate: bool
             Boolean value from gr.Checkbox() that determines whether to translate to English.
             It's Whisper's feature to translate speech from another language directly into English end-to-end.
@@ -93,11 +93,11 @@ class WhisperInference(BaseInterface):
 
                 file_name, file_ext = os.path.splitext(os.path.basename(fileobj.orig_name))
                 file_name = safe_filename(file_name)
-                subtitle = self.generate_and_write_subtitle(
+                subtitle = self.generate_and_write_file(
                     file_name=file_name,
                     transcribed_segments=result,
                     add_timestamp=add_timestamp,
-                    subformat=subformat
+                    file_format=file_format
                 )
 
                 files_info[file_name] = {"subtitle": subtitle, "elapsed_time": elapsed_time}
@@ -122,7 +122,7 @@ class WhisperInference(BaseInterface):
                            youtubelink: str,
                            model_size: str,
                            lang: str,
-                           subformat: str,
+                           file_format: str,
                            istranslate: bool,
                            add_timestamp: bool,
                            beam_size: int,
@@ -141,8 +141,8 @@ class WhisperInference(BaseInterface):
             Whisper model size from gr.Dropdown()
         lang: str
             Source language of the file to transcribe from gr.Dropdown()
-        subformat: str
-            Subtitle format to write from gr.Dropdown(). Supported format: [SRT, WebVTT]
+        file_format: str
+            File format to write from gr.Dropdown(). Supported format: [SRT, WebVTT, txt]
         istranslate: bool
             Boolean value from gr.Checkbox() that determines whether to translate to English.
             It's Whisper's feature to translate speech from another language directly into English end-to-end.
@@ -181,11 +181,11 @@ class WhisperInference(BaseInterface):
         progress(1, desc="Completed!")
 
         file_name = safe_filename(yt.title)
-        subtitle = self.generate_and_write_subtitle(
+        subtitle = self.generate_and_write_file(
             file_name=file_name,
             transcribed_segments=result,
             add_timestamp=add_timestamp,
-            subformat=subformat
+            file_format=file_format
         )
 
         return f"Done in {self.format_time(elapsed_time)}! Subtitle file is in the outputs folder.\n\n{subtitle}"
@@ -209,7 +209,7 @@ class WhisperInference(BaseInterface):
                        micaudio: str,
                        model_size: str,
                        lang: str,
-                       subformat: str,
+                       file_format: str,
                        istranslate: bool,
                        beam_size: int,
                        log_prob_threshold: float,
@@ -227,8 +227,8 @@ class WhisperInference(BaseInterface):
             Whisper model size from gr.Dropdown()
         lang: str
             Source language of the file to transcribe from gr.Dropdown()
-        subformat: str
-            Subtitle format to write from gr.Dropdown(). Supported format: [SRT, WebVTT]
+        file_format: str
+            File format to write from gr.Dropdown(). Supported format: [SRT, WebVTT, txt]
         istranslate: bool
             Boolean value from gr.Checkbox() that determines whether to translate to English.
             It's Whisper's feature to translate speech from another language directly into English end-to-end.
@@ -261,11 +261,11 @@ class WhisperInference(BaseInterface):
                                           progress=progress)
         progress(1, desc="Completed!")
 
-        subtitle = self.generate_and_write_subtitle(
+        subtitle = self.generate_and_write_file(
             file_name="Mic",
             transcribed_segments=result,
             add_timestamp=True,
-            subformat=subformat
+            file_format=file_format
         )
 
         return f"Done in {self.format_time(elapsed_time)}! Subtitle file is in the outputs folder.\n\n{subtitle}"
@@ -361,11 +361,11 @@ class WhisperInference(BaseInterface):
         )
 
     @staticmethod
-    def generate_and_write_subtitle(file_name: str,
-                                    transcribed_segments: list,
-                                    add_timestamp: bool,
-                                    subformat: str,
-                                    ) -> str:
+    def generate_and_write_file(file_name: str,
+                                transcribed_segments: list,
+                                add_timestamp: bool,
+                                file_format: str,
+                                ) -> str:
         """
         This method writes subtitle file and returns str to gr.Textbox
         """
@@ -375,13 +375,18 @@ class WhisperInference(BaseInterface):
         else:
             output_path = os.path.join("outputs", f"{file_name}")
 
-        if subformat == "SRT":
-            subtitle = get_srt(transcribed_segments)
-            write_file(subtitle, f"{output_path}.srt")
-        elif subformat == "WebVTT":
-            subtitle = get_vtt(transcribed_segments)
-            write_file(subtitle, f"{output_path}.vtt")
-        return subtitle
+        if file_format == "SRT":
+            content = get_srt(transcribed_segments)
+            write_file(content, f"{output_path}.srt")
+
+        elif file_format == "WebVTT":
+            content = get_vtt(transcribed_segments)
+            write_file(content, f"{output_path}.vtt")
+
+        elif file_format == "txt":
+            content = get_txt(transcribed_segments)
+            write_file(content, f"{output_path}.txt")
+        return content
 
     @staticmethod
     def format_time(elapsed_time: float) -> str:
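
The new txt branch depends on `get_txt`, which this commit imports from `modules.subtitle_manager` but whose implementation is not part of this diff. A minimal sketch of what such a helper might look like, assuming Whisper-style segment dicts that carry a "text" key (the actual implementation may differ):

def get_txt(segments: list) -> str:
    """Join segment texts into plain lines, dropping the timestamps."""
    lines = [segment["text"].strip() for segment in segments]
    return "\n".join(lines) + "\n"


# Example: two segments become two plain lines of transcript text.
print(get_txt([
    {"start": 0.0, "end": 2.4, "text": " Hello there."},
    {"start": 2.4, "end": 5.1, "text": " This is a test."},
]))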