coursera-qa-bot / app_flask.py
import os.path
import traceback

import whisper
from flask import Flask, render_template, request
from flask_executor import Executor
from flask_socketio import SocketIO, emit
from gevent import monkey

from main import run
from utils import get_search_index
# Patch the standard library for cooperative gevent I/O; ssl is excluded to
# avoid recursion errors when the ssl module is already loaded.
monkey.patch_all(ssl=False)

app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
# Executor reads its config when it is initialized, so set the worker cap first.
app.config['EXECUTOR_MAX_WORKERS'] = 5

socketio = SocketIO(app, cors_allowed_origins="*", logger=True)
executor = Executor(app)

# Load the Whisper speech-to-text model once at startup; 'small.en' is the
# English-only small checkpoint.
model = whisper.load_model('small.en')
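
# whisper.transcribe(model, path) returns a dict; its 'text' key holds the
# full transcript, which is what the /audio route below relies on.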


@app.route('/')
def index():
    # Make sure the search index exists before serving the chat page.
    get_search_index()
    return render_template('index.html')


@socketio.on('message')
def handle_message(data):
    question = data['question']
    print("question: " + question)

    # Serve one question at a time: reject new work while a future is pending.
    if executor.futures:
        emit('response', {'response': 'Server is busy, please try again later'})
        return

    try:
        # Run the question through the QA pipeline on a worker thread.
        future = executor.submit(run, question)
        response = future.result()
        emit('response', {'response': response})
    except Exception:
        traceback.print_exc()
        emit('response', {'response': 'Something went wrong. Please try again later.'})
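

# A minimal client-side sketch of the exchange above (not part of this app),
# assuming the python-socketio client package: the client emits a 'message'
# event carrying a 'question' key and listens for a single 'response' event.
#
#   import socketio
#
#   client = socketio.Client()
#   client.on('response', lambda data: print(data['response']))
#   client.connect('http://localhost:5001')
#   client.emit('message', {'question': 'What topics does week 1 cover?'})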


@app.route('/audio', methods=['POST'])
def handle_audio():
    # Log the uploaded files and names for debugging.
    print(request.files)
    audio_data = request.files['audio']
    audio_data.save('audio.webm')
    print("audio data received: " + str(audio_data))

    if not os.path.isfile('audio.webm'):
        return {'error': 'audio upload could not be saved'}, 500

    print("audio file exists")
    # Transcribe the audio data using OpenAI Whisper.
    transcript = whisper.transcribe(model, 'audio.webm')
    data = {'question': transcript['text']}
    # Pass the transcript to the websocket handler; emit() there expects a
    # Socket.IO context, so the transcript is also returned to the HTTP caller.
    handle_message(data)
    return {'transcript': transcript['text']}
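

# A hedged usage sketch for the route above, using the requests library
# (URL and file name are illustrative): the multipart form field must be
# named 'audio' to match request.files['audio'].
#
#   import requests
#
#   with open('question.webm', 'rb') as f:
#       requests.post('http://localhost:5001/audio', files={'audio': f})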


if __name__ == '__main__':
    socketio.run(app, port=5001)
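
# Note: socketio.run() starts a development server. For production with the
# gevent patching above, the Flask-SocketIO docs describe running under
# gunicorn with the gevent-websocket worker (sketch; keep a single worker):
#
#   gunicorn -k geventwebsocket.gunicorn.workers.GeventWebSocketWorker -w 1 app_flask:app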