import gradio as gr
import librosa
import torch

from transformers import SpeechT5Processor, SpeechT5ForSpeechToText


checkpoint = "microsoft/speecht5_asr"
processor = SpeechT5Processor.from_pretrained(checkpoint)
model = SpeechT5ForSpeechToText.from_pretrained(checkpoint)
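
# Inference only: disable dropout. (Optional sketch, an assumption rather
# than part of the original Space: model.to("cuda") here would move
# generation to GPU when one is available; this demo runs on CPU.)
model.eval()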


def process_audio(sampling_rate, waveform):
    # convert from int16 to floating point
    waveform = waveform / 32768.0

    # convert to mono if stereo
    if len(waveform.shape) > 1:
        waveform = librosa.to_mono(waveform.T)

    # resample to 16 kHz if necessary
    if sampling_rate != 16000:
        waveform = librosa.resample(waveform, orig_sr=sampling_rate, target_sr=16000)

    # limit to 30 seconds
    waveform = waveform[:16000*30]

    # make PyTorch tensor
    waveform = torch.tensor(waveform)
    return waveform
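
# Sketch of what process_audio yields (the numpy input below is hypothetical,
# not used by the app): a 2-second stereo int16 clip at 44.1 kHz becomes a
# mono float tensor with 2 * 16000 = 32000 samples.
#
#   import numpy as np
#   stereo = np.zeros((2 * 44100, 2), dtype=np.int16)
#   process_audio(44100, stereo).shape  # -> torch.Size([32000])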


def predict(audio, mic_audio=None):
    # audio = tuple (sample_rate, frames) or (sample_rate, (frames, channels))
    if mic_audio is not None:
        sampling_rate, waveform = mic_audio
    elif audio is not None:
        sampling_rate, waveform = audio
    else:
        return "(please provide audio)"

    waveform = process_audio(sampling_rate, waveform)
    inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
    predicted_ids = model.generate(**inputs, max_length=400)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
    return transcription[0]
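
# Usage sketch outside Gradio (assumes the bundled example file exists on
# disk and that soundfile is installed; it is not imported above):
#
#   import soundfile as sf
#   data, rate = sf.read("examples/Let me know_fazrin.wav", dtype="int16")
#   print(predict((rate, data), None))  # lowercase text, no punctuation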


title = " 😍🥰Prolove  🧑‍🎤 👨‍🎤 "

description = """
The <b>SpeechT5</b> model is pre-trained on a mix of text and speech inputs, with targets that are likewise a mix of text and speech.
Pre-training on both modalities at once lets the model learn unified representations, which improves its modeling of both speech and text.
SpeechT5 can be fine-tuned for different speech tasks. This space demonstrates the <b>speech-to-text</b>
or automatic speech recognition (ASR) checkpoint for the English language.
See also the <a href="https://huggingface.co/spaces/Matthijs/speecht5-tts-demo">text-to-speech (TTS) demo</a>
and the <a href="https://huggingface.co/spaces/Matthijs/speecht5-vc-demo">voice conversion demo</a>.
<b>How to use:</b> Upload an audio file or record using the microphone. The audio is converted to mono and resampled to 16 kHz before
being passed into the model. The output is the text transcription of the audio. SpeechT5 uses a simple character-based tokenizer, with no
additional language model on top, so the output won't have punctuation or capitalization and may contain the occasional spelling error.
"""

article = """
<div style='margin:20px auto;'>
<p>References: <a href="https://arxiv.org/abs/2110.07205">SpeechT5 paper</a> |
<a href="https://github.com/microsoft/SpeechT5/">original GitHub</a> |
<a href="https://huggingface.co/ajyy/SpeechT5">original weights</a></p>
<pre>
@article{Ao2021SpeechT5,
  title         = {SpeechT5: Unified-Modal Encoder-Decoder Pre-training for Spoken Language Processing},
  author        = {Junyi Ao and Rui Wang and Long Zhou and Chengyi Wang and Shuo Ren and Yu Wu and Shujie Liu and Tom Ko and Qing Li and Yu Zhang and Zhihua Wei and Yao Qian and Jinyu Li and Furu Wei},
  eprint        = {2110.07205},
  archivePrefix = {arXiv},
  primaryClass  = {eess.AS},
  year          = {2021}
}
</pre>
<p>Example sound credits:</p>
<ul>
<li>"Hmm, I don't know" from <a href="https://freesound.org/people/InspectorJ/sounds/519189/">InspectorJ</a> (CC BY 4.0 license)
<li>"Henry V" excerpt from <a href="https://freesound.org/people/acclivity/sounds/24096/">acclivity</a> (CC BY-NC 4.0 license)
<li>"You can see it in the eyes" from <a href="https://freesound.org/people/JoyOhJoy/sounds/165348/">JoyOhJoy</a> (CC0 license)
<li>"We yearn for time" from <a href="https://freesound.org/people/Sample_Me/sounds/610529/">Sample_Me</a> (CC0 license)
</ul>
</div>
"""

examples = [
    ["examples/I wanna tell you something_alvi.wav", None],
    ["examples/Let me know_fazrin.wav", None],
    ["examples/Lets do it_arka.wav", None],
    ["examples/Listen to me_shifa.wav", None],
]

gr.Interface(
    fn=predict,
    inputs=[
        gr.Audio(label="Upload Speech", source="upload", type="numpy"),
        gr.Audio(label="Record Speech", source="microphone", type="numpy"),
    ],
    outputs=[
        gr.Text(label="Transcription"),
    ],
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch()