suisuyy committed • a1dd53c
Parent(s): fbe6007

add status

Files changed:
- README.md +293 -0
- __pycache__/app.cpython-310.pyc +0 -0
- app.py +79 -7
- requirements.txt +4 -1
README.md
CHANGED
@@ -10,3 +10,296 @@ pinned: false
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+
+# All requirements for development
+```
+
+absl-py==2.0.0
+aiofiles==22.1.0
+aiohttp==3.9.1
+aiosignal==1.3.1
+aiosqlite==0.19.0
+altair==5.3.0
+annotated-types==0.6.0
+anyascii==0.3.2
+anyio==4.2.0
+argon2-cffi==23.1.0
+argon2-cffi-bindings==21.2.0
+arrow==1.3.0
+asttokens==2.4.1
+async-timeout==4.0.3
+attrs==23.2.0
+audioread==3.0.1
+Babel==2.14.0
+backoff==2.2.1
+bangla==0.0.2
+beautifulsoup4==4.12.2
+bleach==6.1.0
+blessed==1.20.0
+blinker==1.7.0
+blis==0.7.11
+bnnumerizer==0.0.2
+bnunicodenormalizer==0.1.6
+boto3==1.34.17
+botocore==1.34.17
+Brotli==1.1.0
+cachetools==5.3.2
+catalogue==2.0.10
+certifi==2023.11.17
+cffi==1.16.0
+charset-normalizer==3.3.2
+click==8.1.7
+cloudpathlib==0.16.0
+colorama==0.4.6
+comm==0.2.1
+confection==0.1.4
+contourpy==1.2.0
+coqpit==0.0.17
+cutlet==0.3.0
+cycler==0.12.1
+cymem==2.0.8
+Cython==3.0.8
+dateparser==1.1.8
+debugpy==1.8.0
+decorator==4.4.2
+deepspeed==0.12.6
+defusedxml==0.7.1
+docopt==0.6.2
+einops==0.7.0
+emoji==2.8.0
+encodec==0.1.1
+entrypoints==0.4
+exceptiongroup==1.2.0
+executing==2.0.1
+fastapi==0.109.0
+fastjsonschema==2.19.1
+ffmpy==0.3.2
+filelock==3.13.1
+Flask==3.0.0
+fonttools==4.47.2
+fqdn==1.5.1
+frozenlist==1.4.1
+fsspec==2023.12.2
+fugashi==1.3.0
+g2pkk==0.1.2
+google-auth==2.26.2
+google-auth-oauthlib==1.2.0
+gpustat==1.1.1
+gradio==4.8.0
+gradio_client==0.10.0
+grpcio==1.60.0
+gruut==2.2.3
+gruut-ipa==0.13.0
+gruut-lang-de==2.0.0
+gruut-lang-en==2.0.0
+gruut-lang-es==2.0.0
+gruut-lang-fr==2.0.2
+h11==0.14.0
+hangul-romanize==0.1.0
+hjson==3.1.0
+httpcore==1.0.2
+httpx==0.26.0
+huggingface-hub==0.20.2
+idna==3.6
+imageio==2.34.0
+imageio-ffmpeg==0.4.9
+importlib_resources==6.4.0
+inflect==7.0.0
+ipykernel==6.26.0
+ipython==8.17.2
+ipython-genutils==0.2.0
+ipywidgets==8.1.1
+isoduration==20.11.0
+itsdangerous==2.1.2
+jaconv==0.3.4
+jamo==0.4.1
+jedi==0.19.1
+jieba==0.42.1
+Jinja2==3.1.3
+jmespath==1.0.1
+joblib==1.3.2
+json5==0.9.14
+jsonlines==1.2.0
+jsonpointer==2.4
+jsonschema==4.20.0
+jsonschema-specifications==2023.12.1
+jupyter-events==0.9.0
+jupyter-ydoc==0.2.5
+jupyter_client==7.4.9
+jupyter_core==5.7.1
+jupyter_server==2.12.4
+jupyter_server_fileid==0.9.1
+jupyter_server_terminals==0.5.1
+jupyter_server_ydoc==0.6.1
+jupyterlab==3.6.1
+jupyterlab-widgets==3.0.9
+jupyterlab_pygments==0.3.0
+jupyterlab_server==2.25.2
+kiwisolver==1.4.5
+langcodes==3.3.0
+lazy_loader==0.3
+librosa==0.10.0
+lightning==2.1.2
+lightning-cloud==0.5.57
+lightning-utilities==0.10.0
+lightning_sdk==0.0.13a0
+linkify-it-py==2.0.3
+llvmlite==0.41.1
+loguru==0.7.2
+Markdown==3.5.2
+markdown-it-py==2.2.0
+MarkupSafe==2.1.3
+matplotlib==3.8.2
+matplotlib-inline==0.1.6
+mdit-py-plugins==0.3.3
+mdurl==0.1.2
+mistune==3.0.2
+mojimoji==0.0.13
+moviepy==1.0.3
+mpmath==1.3.0
+msgpack==1.0.7
+multidict==6.0.4
+murmurhash==1.0.10
+mutagen==1.47.0
+nbclassic==1.0.0
+nbclient==0.9.0
+nbconvert==7.14.1
+nbformat==5.9.2
+nest-asyncio==1.5.8
+networkx==2.8.8
+ninja==1.11.1.1
+nltk==3.8.1
+notebook==6.5.6
+notebook_shim==0.2.3
+num2words==0.5.13
+numba==0.58.1
+numpy==1.22.0
+nvidia-ml-py==12.535.133
+oauthlib==3.2.2
+orjson==3.10.0
+overrides==7.4.0
+packaging==23.2
+pandas==1.5.3
+pandocfilters==1.5.0
+parso==0.8.3
+pexpect==4.9.0
+pillow==10.2.0
+platformdirs==4.1.0
+pooch==1.8.0
+preshed==3.0.9
+proglog==0.1.10
+prometheus-client==0.19.0
+prompt-toolkit==3.0.43
+protobuf==4.23.4
+psutil==5.9.7
+ptyprocess==0.7.0
+pure-eval==0.2.2
+py-cpuinfo==9.0.0
+pyasn1==0.5.1
+pyasn1-modules==0.3.0
+PyAudio==0.2.14
+pycparser==2.21
+pycryptodomex==3.20.0
+pydantic==2.5.3
+pydantic_core==2.14.6
+pydub==0.25.1
+Pygments==2.17.2
+PyJWT==2.8.0
+pynndescent==0.5.11
+pynvml==11.5.0
+pyparsing==3.1.1
+pypinyin==0.50.0
+pysbd==0.3.4
+python-crfsuite==0.9.10
+python-dateutil==2.8.2
+python-dotenv==1.0.0
+python-json-logger==2.0.7
+python-multipart==0.0.6
+pytorch-lightning==2.1.2
+pyttsx3==2.90
+pytz==2023.3.post1
+PyYAML==6.0.1
+pyzmq==24.0.1
+referencing==0.32.1
+regex==2023.12.25
+requests==2.31.0
+requests-oauthlib==1.3.1
+rfc3339-validator==0.1.4
+rfc3986-validator==0.1.1
+rich==13.7.0
+rpds-py==0.17.1
+rsa==4.9
+s3transfer==0.10.0
+safetensors==0.4.1
+scikit-learn==1.3.2
+scipy==1.11.4
+semantic-version==2.10.0
+Send2Trash==1.8.2
+shellingham==1.5.4
+six==1.16.0
+smart-open==6.4.0
+sniffio==1.3.0
+soundfile==0.12.1
+soupsieve==2.5
+soxr==0.3.7
+spaces==0.25.0
+spacy==3.7.2
+spacy-legacy==3.0.12
+spacy-loggers==1.0.5
+srsly==2.4.8
+stack-data==0.6.3
+stanza==1.6.1
+starlette==0.35.1
+stream2sentence==0.2.2
+SudachiDict-core==20230927
+SudachiPy==0.6.8
+sympy==1.12
+tensorboard==2.15.1
+tensorboard-data-server==0.7.2
+terminado==0.18.0
+thinc==8.2.2
+threadpoolctl==3.2.0
+tinycss2==1.2.1
+tokenizers==0.15.0
+tomli==2.0.1
+tomlkit==0.12.0
+toolz==0.12.1
+torch==2.1.1+cu118
+torchaudio==2.1.1+cu118
+torchmetrics==1.2.0
+torchvision==0.16.1+cu121
+tornado==6.4
+tqdm==4.66.1
+trainer==0.0.36
+traitlets==5.14.1
+transformers @ git+https://github.com/huggingface/transformers@81642d2b51de9d5e5aee1768abdc744d90f7f52d
+triton==2.1.0
+TTS==0.21.3
+typer==0.9.0
+types-python-dateutil==2.8.19.20240106
+typing_extensions==4.9.0
+tzdata==2023.4
+tzlocal==5.2
+uc-micro-py==1.0.3
+umap-learn==0.5.5
+Unidecode==1.3.8
+unidic-lite==1.0.8
+uri-template==1.3.0
+urllib3==2.0.7
+uvicorn==0.25.0
+wasabi==1.1.2
+wcwidth==0.2.13
+weasel==0.3.4
+webcolors==1.13
+webencodings==0.5.1
+websocket-client==1.7.0
+websockets==11.0.3
+Werkzeug==3.0.1
+widgetsnbextension==4.0.9
+xtts-api-server==0.8.3
+y-py==0.6.2
+yarl==1.9.4
+ypy-websocket==0.8.4
+yt-dlp==2024.3.10
+```
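The list above reads like raw `pip freeze` output pasted into the README. For reference, a minimal sketch of how such a section could be regenerated, assuming only the standard library and a working `pip` on PATH; the file name `requirements-dev.txt` is a hypothetical choice, not something this commit uses:

```python
# Hypothetical helper: dump the current environment's pinned packages,
# the way the README section above appears to have been produced.
import subprocess

def dump_frozen_requirements(path: str = "requirements-dev.txt") -> str:
    # Equivalent to running: pip freeze > requirements-dev.txt
    frozen = subprocess.run(
        ["pip", "freeze"], capture_output=True, text=True, check=True
    ).stdout
    with open(path, "w") as f:
        f.write(frozen)
    return frozen

if __name__ == "__main__":
    print(dump_frozen_requirements())
```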
__pycache__/app.cpython-310.pyc
CHANGED
Binary files a/__pycache__/app.cpython-310.pyc and b/__pycache__/app.cpython-310.pyc differ
app.py
CHANGED
@@ -1,16 +1,18 @@
 import torch
 import time
-
+import moviepy.editor as mp
+import psutil
 import gradio as gr
 import spaces
 from transformers import pipeline
 from transformers.pipelines.audio_utils import ffmpeg_read
 
 DEFAULT_MODEL_NAME = "distil-whisper/distil-large-v3"
-
 BATCH_SIZE = 8
 
 device = 0 if torch.cuda.is_available() else "cpu"
+if device == "cpu":
+    DEFAULT_MODEL_NAME = "openai/whisper-tiny"
 
 def load_pipeline(model_name):
     return pipeline(
@@ -32,6 +34,11 @@ def transcribe(inputs, task, model_name):
     pipe = load_pipeline(model_name)
 
     start_time = time.time()  # Record the start time
+
+    # Load the audio file and calculate its duration
+    audio = mp.AudioFileClip(inputs)
+    audio_duration = audio.duration
+
     text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
     end_time = time.time()  # Record the end time
 
@@ -40,12 +47,65 @@ def transcribe(inputs, task, model_name):
     # Create the transcription time output with additional information
     transcription_time_output = (
         f"Transcription Time: {transcription_time:.2f} seconds\n"
+        f"Audio Duration: {audio_duration:.2f} seconds\n"
         f"Model Used: {model_name}\n"
         f"Device Used: {'GPU' if torch.cuda.is_available() else 'CPU'}"
     )
 
     return text, transcription_time_output
 
+from gpustat import GPUStatCollection
+
+def update_gpu_status():
+    if not torch.cuda.is_available():
+        return "No NVIDIA device"
+    try:
+        gpu_stats = GPUStatCollection.new_query()
+        for gpu in gpu_stats:
+            # Assuming you want to monitor the first GPU, index 0
+            gpu_id = gpu.index
+            gpu_name = gpu.name
+            gpu_utilization = gpu.utilization
+            memory_used = gpu.memory_used
+            memory_total = gpu.memory_total
+            memory_utilization = (memory_used / memory_total) * 100
+            gpu_status = (f"GPU {gpu_id}: {gpu_name}, Utilization: {gpu_utilization}%, Memory Used: {memory_used}MB, Memory Total: {memory_total}MB, Memory Utilization: {memory_utilization:.2f}%")
+            return gpu_status
+
+    except Exception as e:
+        # Return the error as a string so the status Textbox still updates
+        return f"Error getting GPU stats: {e}"
+
+# def update_gpu_status():
+#     if torch.cuda.is_available():
+#         gpu_info = torch.cuda.get_device_name(0)
+#         gpu_memory = torch.cuda.mem_get_info(0)
+#         total_memory = gpu_memory[1] / (1024 * 1024)
+#         used_memory = (gpu_memory[1] - gpu_memory[0]) / (1024 * 1024)
+
+#         gpu_status = f"GPU: {gpu_info}\nTotal Memory: {total_memory:.2f} MB\nUsed Memory: {used_memory:.2f} MB"
+#     else:
+#         gpu_status = "No GPU available"
+#     return gpu_status
+
+def update_cpu_status():
+    import datetime
+    # Get the current time
+    current_time = datetime.datetime.now().time()
+    # Convert the time to a string
+    time_str = current_time.strftime("%H:%M:%S")
+
+    cpu_percent = psutil.cpu_percent()
+    cpu_status = f"CPU Usage: {cpu_percent}% {time_str}"
+    return cpu_status
+
+def update_status():
+    gpu_status = update_gpu_status()
+    cpu_status = update_cpu_status()
+    return gpu_status, cpu_status
+
+def refresh_status():
+    return update_status()
+
 demo = gr.Blocks()
 
 mf_transcribe = gr.Interface(
@@ -57,7 +117,7 @@ mf_transcribe = gr.Interface(
         label="Model Name",
         value=DEFAULT_MODEL_NAME,
         placeholder="Enter the model name",
-        info="Some available models: distil-whisper/distil-large-v3 distil-whisper/distil-medium.en
+        info="Some available models: distil-whisper/distil-large-v3 distil-whisper/distil-medium.en Systran/faster-distil-whisper-large-v3 Systran/faster-whisper-large-v3 Systran/faster-whisper-medium openai/whisper-tiny, openai/whisper-base, openai/whisper-medium, openai/whisper-large-v3",
         ),
     ],
     outputs=[gr.TextArea(label="Transcription"), gr.TextArea(label="Transcription Info")],
@@ -79,7 +139,7 @@ file_transcribe = gr.Interface(
         label="Model Name",
         value=DEFAULT_MODEL_NAME,
         placeholder="Enter the model name",
-        info="Some available models: openai/whisper-tiny, openai/whisper-base, openai/whisper-medium, openai/whisper-large-v2"
+        info="Some available models: openai/whisper-tiny, openai/whisper-base, openai/whisper-medium, openai/whisper-large-v2",
        ),
    ],
    outputs=[gr.TextArea(label="Transcription"), gr.TextArea(label="Transcription Info")],
@@ -91,8 +151,20 @@ file_transcribe = gr.Interface(
     ),
     allow_flagging="never",
 )
-
 with demo:
     gr.TabbedInterface([mf_transcribe, file_transcribe], ["Microphone", "Audio file"])
-
-
+
+    with gr.Row():
+        refresh_button = gr.Button("Refresh Status")  # Create a refresh button
+
+    gpu_status_output = gr.Textbox(label="GPU Status", interactive=False)
+    cpu_status_output = gr.Textbox(label="CPU Status", interactive=False)
+
+    # Link the refresh button to the refresh_status function
+    refresh_button.click(refresh_status, None, [gpu_status_output, cpu_status_output])
+
+    # Load the initial status using update_status function
+    demo.load(update_status, inputs=None, outputs=[gpu_status_output, cpu_status_output], every=2, queue=False)
+
+# Launch the Gradio app
+demo.launch(share=True)
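The status wiring above combines a manual "Refresh Status" button with polling via `every=2` on `demo.load`. A minimal self-contained sketch of that polling pattern, assuming gradio 4.x (where event listeners accept `every=`) and psutil; names like `status_demo` and `cpu_status` are illustrative, not from the commit:

```python
# Minimal sketch of the periodic-status pattern used in app.py above,
# assuming gradio 4.x (event listeners accept `every=`) and psutil.
import datetime

import gradio as gr
import psutil

def cpu_status() -> str:
    # Same format as update_cpu_status in app.py: usage plus a timestamp.
    now = datetime.datetime.now().strftime("%H:%M:%S")
    return f"CPU Usage: {psutil.cpu_percent()}% {now}"

with gr.Blocks() as status_demo:
    box = gr.Textbox(label="CPU Status", interactive=False)
    # Populate on page load, then re-run every 2 seconds.
    status_demo.load(cpu_status, inputs=None, outputs=box, every=2)

if __name__ == "__main__":
    status_demo.launch()
```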
requirements.txt
CHANGED
@@ -1,4 +1,7 @@
 git+https://github.com/huggingface/transformers
 torch
 yt-dlp
-gradio==4.8.0
+gradio==4.8.0
+gpustat
+moviepy
+freeze
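Of the new dependencies, `gpustat` does the work behind the GPU readout. A standalone sketch of querying it, mirroring `update_gpu_status` in app.py but reporting only the first GPU; `first_gpu_summary` is a hypothetical name for illustration:

```python
# Standalone sketch of the gpustat query used in app.py; reports only
# the first GPU and surfaces failures (e.g., no NVML) as a string.
from gpustat import GPUStatCollection

def first_gpu_summary() -> str:
    try:
        stats = GPUStatCollection.new_query()
    except Exception as e:  # no NVIDIA driver / NVML not available
        return f"Error getting GPU stats: {e}"
    for gpu in stats:
        mem_pct = gpu.memory_used / gpu.memory_total * 100
        return (
            f"GPU {gpu.index}: {gpu.name}, Utilization: {gpu.utilization}%, "
            f"Memory: {gpu.memory_used}/{gpu.memory_total}MB ({mem_pct:.2f}%)"
        )
    return "No GPU found"

if __name__ == "__main__":
    print(first_gpu_summary())
```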
|