import gradio as gr
import tensorflow as tf
import numpy as np
import pickle

# Paths to the trained model, tokenizer, and label encoder
model_path = "doctor_ai_model.h5"
tokenizer_path = "tokenizer.pkl"
label_encoder_path = "label_encoder.pkl"
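# These artifact files are expected to sit in the same directory as this script;
# they are presumably produced by the training run that built the model.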

# Load the trained model
try:
    model = tf.keras.models.load_model(model_path)
except Exception as e:
    # Fail fast: without a model, predict_answer would hit a NameError later
    raise SystemExit(f"Error loading model: {e}")

# Load the tokenizer and label encoder
with open(tokenizer_path, 'rb') as handle:
    tokenizer = pickle.load(handle)

with open(label_encoder_path, 'rb') as handle:
    label_encoder = pickle.load(handle)

# Define the prediction function
def predict_answer(question):
    try:
        # Tokenize the input question
        seq = tokenizer.texts_to_sequences([question])
        padded_seq = tf.keras.preprocessing.sequence.pad_sequences(seq, maxlen=27)  # maxlen must match the sequence length used during training
        
        # Make prediction
        prediction = model.predict(padded_seq)
        
        # Convert prediction to label
        predicted_label = label_encoder.inverse_transform(np.argmax(prediction, axis=1))
        
        return predicted_label[0]
    except Exception as e:
        return f"Error during prediction: {e}"

# Define the Gradio interface
iface = gr.Interface(
    fn=predict_answer,
    inputs=gr.Textbox(lines=2, placeholder="Enter your question..."),
    outputs="text",
    title="Doctor AI Chatbot",
    description="This AI chatbot answers medical questions using a locally loaded model. Works completely offline."
)

# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()