khadija3818 committed on
Commit 173e585
1 Parent(s): e4a399b

Update app.py

Files changed (1)
  1. app.py +13 -19
app.py CHANGED
@@ -1,26 +1,20 @@
- from flask import Flask, request, render_template
- from transformers import AutoModelForSequenceClassification, AutoTokenizer
- import torch
+ from transformers import pipeline
+ from flask import Flask, request, jsonify

  app = Flask(__name__)

- # Load the pre-trained model and tokenizer
- model_name = "https://huggingface.co/atreyuNew/medical_question_answering_chat_Llama2" # Change this to your desired model
- model = AutoModelForSequenceClassification.from_pretrained(model_name)
- tokenizer = AutoTokenizer.from_pretrained(model_name)
+ # Load the model from Hugging Face Spaces
+ qa_pipeline = pipeline(model="atreyuNew/medical_question_answering_chat_Llama2")

- @app.route('/')
- def home():
-     return render_template('index.html')
+ @app.route("/ask", methods=["POST"])
+ def ask_question():
+     data = request.get_json()
+     question = data.get("question")

- @app.route('/predict', methods=['POST'])
- def predict():
-     text = request.form['text']
-     inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
-     outputs = model(**inputs)
-     probabilities = torch.softmax(outputs.logits, dim=1).tolist()[0]
+     # Use the model to answer the question
+     answer = qa_pipeline(question)

-     return render_template('index.html', text=text, probabilities=probabilities)
+     return jsonify({"answer": answer[0]["answer"]})

- if __name__ == '__main__':
-     app.run()
+ if __name__ == "__main__":
+     app.run(host="0.0.0.0", port=8080)
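A minimal client sketch for exercising the new /ask route added in this commit. It assumes the app is reachable at the host and port passed to app.run(); the base URL and the sample question below are illustrative assumptions, not part of the commit.

import requests

# Hypothetical client for the /ask endpoint; adjust the base URL to
# wherever the app is actually deployed.
resp = requests.post(
    "http://localhost:8080/ask",
    json={"question": "What are the common symptoms of iron deficiency?"},
    timeout=120,
)
resp.raise_for_status()
# The endpoint returns JSON of the form {"answer": ...}
print(resp.json()["answer"])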