import gradio as gr
from transformers import pipeline
from gradio_client import Client, file
import json

# Remote Gradio Spaces: one identifies the spoken language, the other exposes a
# transcription endpoint (created here but not used elsewhere in this script).
language_classifier = Client("adrien-alloreview/speechbrain-lang-id-voxlingua107-ecapa")
transcriber = Client("tensorlake/audio-extractors")

# Local pipeline for speech emotion recognition on the uploaded audio.
emotion_detector = pipeline(
    "audio-classification",
    model="HowMannyMore/wav2vec2-lg-xlsr-ur-speech-emotion-recognition",
)

# Russian toxicity classifier; truncation keeps long transcripts within the
# model's 512-token limit.
model_name_rus = "IlyaGusev/rubertconv_toxic_clf"
toxic_detector = pipeline(
    "text-classification",
    model=model_name_rus,
    tokenizer=model_name_rus,
    framework="pt",
    max_length=512,
    truncation=True,
)


def detect_language(file_path):
    try:
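        # The Space returns a label like "<code>: <Language>"; keep only the language name.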
        result = language_classifier.predict(param_0=file(file_path), api_name="/predict")
        language_result = result["label"].split(": ")[1]
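        # Treat Russian, Belarusian, and Ukrainian as "russian"; everything else defaults to "kazakh".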
        if language_result.lower() in ["russian", "belarussian", "ukrainian"]:
            selected_language = "russian"
        else:
            selected_language = "kazakh"
        return selected_language
    except Exception as e:
        print(f"Language detection failed: {e}")
        return None


def detect_emotion(audio):
    try:
        res = emotion_detector(audio)
        # Results are sorted by score, so the first entry is the top emotion.
        return res[0]["label"]
    except Exception as e:
        print(f"Emotion detection failed: {e}")
        return None


def detect_toxic_local(text_whisper):
    try:
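        # The classifier labels text as "toxic" or "neutral"; map that to a
        # boolean, or None for anything unexpected.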
        res = toxic_detector([text_whisper])[0]["label"]
        if res == "toxic":
            return True
        elif res == "neutral":
            return False
        else:
            return None
    except Exception as e:
        print(f"Toxicity detection failed: {e}")
        return None


def assessment(file_path, result_text):
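    # Bundle both checks into one JSON payload; on failure each check falls
    # back to a safe default ("unknown" emotion, non-toxic).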
    result_emotion = detect_emotion(file_path) or "unknown"
    result_toxic = detect_toxic_local(result_text) or False
    return json.dumps({"emotion": result_emotion, "toxic": result_toxic})


demo = gr.Blocks()
with demo:
    with gr.Tabs():
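        # Tab 1: detect the spoken language of an uploaded audio file.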
        with gr.TabItem('Language Detection'):
            language_detection_interface = gr.Interface(
                fn=detect_language,
                inputs=gr.Audio(sources=["upload"], type="filepath"),
                outputs='text',
            )
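        # Tab 2: report emotion and toxicity for an uploaded audio file and its transcript.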
        with gr.TabItem('Toxic & Emotion Detection'):
            toxic_and_emotion_detection_interface = gr.Interface(
                fn=assessment,
                inputs=[gr.Audio(sources=["upload"], type="filepath"), gr.Textbox(label="Result Text")],
                outputs='json',
            )
demo.launch()
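
# Minimal sketch (not executed by this app) of calling the deployed demo from
# another process with gradio_client. The Space id "user/this-space" and the
# file "sample.wav" are placeholders, and the exact endpoint names should be
# checked with client.view_api() rather than assumed.
#
#     from gradio_client import Client, file
#
#     client = Client("user/this-space")
#     client.view_api()  # lists the endpoints exposed by the two gr.Interface tabs
#     language = client.predict(file("sample.wav"), api_name="/predict")
#     print(language)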