# AllModel / app.py — Gradio Space entry point
# Author: Dzhuk — "Update app.py 1.2" (commit 2469c9d, verified)
import gradio as gr
from transformers import pipeline
from gradio_client import Client, file
import json
# Remote Gradio Spaces used as backing services:
# - language_classifier: SpeechBrain VoxLingua107 ECAPA spoken-language ID
# - transcriber: audio-transcription Space (not called in the code visible
#   here — presumably used elsewhere or kept for a later step; TODO confirm)
language_classifier = Client("adrien-alloreview/speechbrain-lang-id-voxlingua107-ecapa")
transcriber = Client("tensorlake/audio-extractors")
# Local HF pipeline: speech-emotion recognition directly on audio files.
emotion_detector = pipeline(
    "audio-classification",
    model="HowMannyMore/wav2vec2-lg-xlsr-ur-speech-emotion-recognition",
)
# Local HF pipeline: Russian text-toxicity classifier; long transcripts are
# truncated to the model's 512-token limit instead of raising.
model_name_rus = "IlyaGusev/rubertconv_toxic_clf"
toxic_detector = pipeline(
    "text-classification",
    model=model_name_rus,
    tokenizer=model_name_rus,
    framework="pt",  # force PyTorch backend
    max_length=512,
    truncation=True,
)
def detect_language(file_path):
    """Identify the spoken language of an audio file and map it to a pipeline choice.

    Calls the remote VoxLingua107 language-ID Space, then collapses the
    predicted language to one of the two languages the downstream models
    handle: "russian" for the East Slavic group, "kazakh" for everything else.

    Args:
        file_path: Path to a local audio file to upload to the Space.

    Returns:
        "russian" or "kazakh", or None when the remote call fails.
    """
    try:
        result = language_classifier.predict(param_0=file(file_path), api_name="/predict")
        # Label looks like "ru: Russian" — keep the human-readable part.
        # maxsplit=1 so a stray ": " inside the name can't truncate it.
        language_result = result["label"].split(": ", 1)[1]
        # Accept both the single-s and double-s spellings of "Belarus(s)ian":
        # the original check only matched "belarussian", so the model's
        # standard "Belarusian" label would have fallen through to "kazakh".
        if language_result.lower() in {"russian", "belarusian", "belarussian", "ukrainian"}:
            selected_language = "russian"
        else:
            selected_language = "kazakh"
        return selected_language
    except Exception as e:
        # Best-effort: log and signal failure with None rather than crash the UI.
        print(f"Language detection failed: {e}")
        return None
def detect_emotion(audio):
    """Return the top-scoring emotion label for an audio file, or None on failure.

    Args:
        audio: Path to a local audio file (the audio-classification pipeline
            accepts a filepath).

    Returns:
        The highest-scoring emotion label, or None when classification fails.
    """
    try:
        # Pipeline results are sorted by score, so index 0 is the best guess.
        res = emotion_detector(audio)
        return res[0]["label"]
    except Exception as e:
        # Previously the error string itself was returned; being truthy, it
        # defeated the caller's `or "unknown"` fallback and surfaced the raw
        # error text as an "emotion". Log and return None instead, matching
        # the error handling of the sibling detectors.
        print(f"Emotion detection failed: {e}")
        return None
def detect_toxic_local(text_whisper):
    """Classify *text_whisper* with the Russian toxicity model.

    Returns:
        True for a "toxic" verdict, False for "neutral", and None either for
        an unexpected label or when classification raises.
    """
    verdicts = {"toxic": True, "neutral": False}
    try:
        label = toxic_detector([text_whisper])[0]["label"]
    except Exception as e:
        print(f"Toxicity detection failed: {e}")
        return None
    # Unknown labels fall through to None via dict lookup.
    return verdicts.get(label)
def assessment(file_path, result_text):
    """Run emotion and toxicity detection and return the combined result as JSON text.

    Args:
        file_path: Path to an audio file, fed to the emotion detector.
        result_text: Transcript text, fed to the toxicity detector.

    Returns:
        A JSON string with keys "emotion" (label or "unknown") and
        "toxic" (boolean).
    """
    emotion = detect_emotion(file_path)
    toxic = detect_toxic_local(result_text)
    payload = {
        "emotion": emotion if emotion else "unknown",
        "toxic": toxic if toxic else False,
    }
    return json.dumps(payload)
# Two-tab Gradio UI: one tab for language detection on an uploaded audio
# file, one for the combined emotion + toxicity assessment.
demo = gr.Blocks()
with demo:
    with gr.Tabs():
        with gr.TabItem('Language Detection'):
            language_detection_interface = gr.Interface(
                fn=detect_language,
                inputs=gr.Audio(sources=["upload"], type="filepath"),
                outputs='text',
            )
        with gr.TabItem('Toxic & Emotion Detection'):
            toxic_and_emotion_detection_interface = gr.Interface(
                fn=assessment,
                # The audio filepath feeds emotion detection; the textbox
                # carries an externally produced transcript for toxicity
                # scoring (the in-file transcriber client is not wired here).
                inputs=[gr.Audio(sources=["upload"], type="filepath"), gr.Textbox(label="Result Text")],
                outputs='json',
            )
demo.launch()