import numpy as np
import gradio as gr
import tensorflow as tf
from keras.models import load_model
from keras.preprocessing.text import Tokenizer
import pickle

# Load the model
model = load_model('doctor_ai_model.h5')

# Load the tokenizer
with open('tokenizer.pkl', 'rb') as f:
    tokenizer = pickle.load(f)

# Load the label encoder
with open('label_encoder.pkl', 'rb') as f:
    label_encoder = pickle.load(f)

def chatbot(input_text):
    # Tokenize and pad the input; the padded length should match what the model was trained on
    sequences = tokenizer.texts_to_sequences([input_text])
    input_tensor = tf.keras.preprocessing.sequence.pad_sequences(sequences)

    # Make a prediction
    response = model.predict(input_tensor)
    print("Model output probabilities:", response)

    # Get the index of the highest-probability class
    predicted_label = np.argmax(response, axis=1)

    # Handle indices outside the known label range
    if predicted_label[0] < len(label_encoder.classes_):
        decoded_label = label_encoder.inverse_transform(predicted_label)[0]
    else:
        decoded_label = "Unknown label"
    return decoded_label

# Create a Gradio interface
iface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="Doctor AI Chatbot",
    description="Enter a medical-related question to get answers based on trained categories.",
)

# Launch the interface
iface.launch()
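
# Optional sanity check: before (or instead of) serving the UI, the prediction
# function can be called directly in a Python shell to confirm the model,
# tokenizer, and label encoder load and respond. The question below is only an
# illustrative placeholder, not a category the model is known to cover.
#   >>> chatbot("I have a headache and a fever")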