szili2011 committed on
Commit a324d35
1 Parent(s): 0440734

Update app.py

Files changed (1)
  1. app.py +32 -46
app.py CHANGED
@@ -1,52 +1,38 @@
- import gradio as gr
- import tensorflow as tf
- import numpy as np
  import pickle
-
- # Paths to model and tokenizer
- model_path = "doctor_ai_model.h5"
- tokenizer_path = "tokenizer.pkl"
- label_encoder_path = "label_encoder.pkl"
-
- # Load the trained model
- try:
-     model = tf.keras.models.load_model(model_path)
- except Exception as e:
-     print(f"Error loading model: {e}")
-
- # Load the tokenizer and label encoder
- with open(tokenizer_path, 'rb') as handle:
-     tokenizer = pickle.load(handle)
-
- with open(label_encoder_path, 'rb') as handle:
-     label_encoder = pickle.load(handle)
-
- # Define the prediction function
- def predict_answer(question):
-     try:
-         # Tokenize the input question
-         seq = tokenizer.texts_to_sequences([question])
-         padded_seq = tf.keras.preprocessing.sequence.pad_sequences(seq, maxlen=27)  # Adjust maxlen to match your model
-
-         # Make prediction
-         prediction = model.predict(padded_seq)
-
-         # Convert prediction to label
-         predicted_label = label_encoder.inverse_transform(np.argmax(prediction, axis=1))
-
-         return predicted_label[0]
-     except Exception as e:
-         return f"Error during prediction: {e}"
-
- # Define the Gradio interface
- iface = gr.Interface(
-     fn=predict_answer,
-     inputs=gr.inputs.Textbox(lines=2, placeholder="Enter your question..."),
-     outputs="text",
-     title="Doctor AI Chatbot",
-     description="This AI chatbot provides answers based on your medical-related questions. Works completely offline."
- )
-
- # Launch the Gradio app
- if __name__ == "__main__":
-     iface.launch()
+ import tensorflow as tf
+ from tensorflow import keras
+ from fastapi import FastAPI
+ from pydantic import BaseModel
+ import uvicorn
+
+ # Load the model
+ model_path = 'doctor_ai_model.h5'
+ model = keras.models.load_model(model_path)
+
+ # Create FastAPI app
+ app = FastAPI()
+
+ # Define the request model
+ class InputData(BaseModel):
+     input_data: list
+
+ # Define the prediction endpoint
+ @app.post('/predict')
+ async def predict(data: InputData):
+     # Prepare input data (a single flat sequence of feature values)
+     input_array = tf.convert_to_tensor(data.input_data)
+
+     # Check if input shape matches the model's input shape
+     expected_shape = (None, 27)
+     if input_array.shape[-1] != expected_shape[1]:
+         return {'error': f'Input data must have shape: {expected_shape}'}
+
+     # Make a prediction
+     prediction = model.predict(tf.expand_dims(input_array, axis=0))  # Expand dims to form a batch of one
+     predicted_class = tf.argmax(prediction, axis=1).numpy().tolist()
+
+     return {'predicted_class': predicted_class}
+
+ # Start the FastAPI server (this will run offline)
+ if __name__ == '__main__':
+     uvicorn.run(app, host='127.0.0.1', port=8000)  # Use localhost for offline mode
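Below is a minimal client-side sketch for exercising the new /predict endpoint once the server is running. It assumes the input is a flat list of 27 encoded values, per the expected_shape check above; the zero-filled payload is a placeholder, and the requests library is a client-side dependency, not part of app.py itself.

import requests

# Dummy payload: a flat sequence of 27 values matching expected_shape
# (assumption: real inputs would be tokenized/padded question features)
payload = {'input_data': [0] * 27}

response = requests.post('http://127.0.0.1:8000/predict', json=payload)
print(response.json())  # {'predicted_class': [...]}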