import gradio as gr
import tensorflow as tf
import numpy as np
import pickle

# Paths to the trained model, tokenizer, and label encoder
model_path = "doctor_ai_model.h5"
tokenizer_path = "tokenizer.pkl"
label_encoder_path = "label_encoder.pkl"

# Load the trained model
try:
    model = tf.keras.models.load_model(model_path)
except Exception as e:
    print(f"Error loading model: {e}")
    raise  # re-raise: without a model the app cannot serve predictions

# Load the tokenizer and label encoder
with open(tokenizer_path, 'rb') as handle:
    tokenizer = pickle.load(handle)
with open(label_encoder_path, 'rb') as handle:
    label_encoder = pickle.load(handle)
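
# Optional sanity check, a minimal sketch: it assumes tokenizer is a Keras
# Tokenizer and label_encoder is a scikit-learn LabelEncoder, which is what
# the pickles above suggest. Uncomment to inspect the loaded artifacts:
# print(f"Vocabulary size: {len(tokenizer.word_index)}")
# print(f"Known labels: {list(label_encoder.classes_)}")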

# Define the prediction function
def predict_answer(question):
    try:
        # Tokenize the input question
        seq = tokenizer.texts_to_sequences([question])
        # Pad to the fixed input length; maxlen must match the length used in training
        padded_seq = tf.keras.preprocessing.sequence.pad_sequences(seq, maxlen=27)
        # Make prediction
        prediction = model.predict(padded_seq)
        # Map the highest-probability class index back to its text label
        predicted_label = label_encoder.inverse_transform(np.argmax(prediction, axis=1))
        return predicted_label[0]
    except Exception as e:
        return f"Error during prediction: {e}"

# Define the Gradio interface
iface = gr.Interface(
    fn=predict_answer,
    # gr.inputs.Textbox was removed in newer Gradio releases; use gr.Textbox
    inputs=gr.Textbox(lines=2, placeholder="Enter your question..."),
    outputs="text",
    title="Doctor AI Chatbot",
    description="This AI chatbot provides answers based on your medical-related questions. Works completely offline."
)

# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()
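    # launch() serves on localhost by default; Gradio's server_name parameter
    # (e.g. iface.launch(server_name="0.0.0.0")) makes the app reachable from
    # other machines on the same network, still without internet access.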