# app.py — Streamlit customer-service chatbot.
# Provenance: Hugging Face Space by SantiagoPG, revision 53bacd1, 765 bytes
# (header reconstructed from the Space's web "raw" view metadata).
import streamlit as st
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import torch
# Load the fine-tuned seq2seq model used for inference.
model1 = AutoModelForSeq2SeqLM.from_pretrained('SantiagoPG/chatbot_customer_service')
# NOTE(review): the tokenizer is loaded from a DIFFERENT repo than the model —
# presumably the SantiagoPG checkpoint was fine-tuned from Kaludi's base, so the
# vocabularies match; verify this, otherwise generated text will be garbled.
tokenizer = AutoTokenizer.from_pretrained("Kaludi/Customer-Support-Assistant-V2")
def get_chatbot_response(message):
    """Generate a chatbot reply for *message* with the module-level model.

    The user text is tokenized to a PyTorch tensor batch, run through
    ``model1.generate`` (library-default decoding settings), and the first
    generated sequence is decoded back to plain text.
    """
    token_ids = tokenizer.encode(message, return_tensors='pt')
    generated_ids = model1.generate(token_ids)
    # Drop padding/EOS markers so the caller gets clean display text.
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
# Streamlit page: one text box in, one read-only text area out.
st.title("Customer Service Chatbot")
user_input = st.text_input("Type your question here:")
# Streamlit reruns the whole script on each keystroke submit; only call the
# model once the input box is non-empty.
if user_input:
    st.text_area(
        "Response",
        value=get_chatbot_response(user_input),
        height=100,
        max_chars=None,
        key=None,
    )