Spaces:
Running
Running
jhj0517
committed on
Commit
•
778a475
1
Parent(s):
ffec0fa
better display of available compute types
Browse files
modules/faster_whisper_inference.py
CHANGED
@@ -7,6 +7,7 @@ from typing import BinaryIO, Union, Tuple
|
|
7 |
from datetime import datetime, timedelta
|
8 |
|
9 |
import faster_whisper
|
|
|
10 |
import whisper
|
11 |
import torch
|
12 |
import gradio as gr
|
@@ -25,7 +26,7 @@ class FasterWhisperInference(BaseInterface):
|
|
25 |
self.available_langs = sorted(list(whisper.tokenizer.LANGUAGES.values()))
|
26 |
self.translatable_models = ["large", "large-v1", "large-v2"]
|
27 |
self.device = "cuda" if torch.cuda.is_available() else "cpu"
|
28 |
-
self.available_compute_types =
|
29 |
self.current_compute_type = "float16" if self.device == "cuda" else "float32"
|
30 |
self.default_beam_size = 1
|
31 |
|
|
|
7 |
from datetime import datetime, timedelta
|
8 |
|
9 |
import faster_whisper
|
10 |
+
import ctranslate2
|
11 |
import whisper
|
12 |
import torch
|
13 |
import gradio as gr
|
|
|
26 |
self.available_langs = sorted(list(whisper.tokenizer.LANGUAGES.values()))
|
27 |
self.translatable_models = ["large", "large-v1", "large-v2"]
|
28 |
self.device = "cuda" if torch.cuda.is_available() else "cpu"
|
29 |
+
self.available_compute_types = ctranslate2.get_supported_compute_types("cuda") if self.device == "cuda" else ctranslate2.get_supported_compute_types("cpu")
|
30 |
self.current_compute_type = "float16" if self.device == "cuda" else "float32"
|
31 |
self.default_beam_size = 1
|
32 |
|