import streamlit as st
from langchain import HuggingFaceHub, PromptTemplate, LLMChain
from my_keys import hf_token

# Initialize the HuggingFace model with the API token imported from my_keys.py
model = HuggingFaceHub(
    huggingfacehub_api_token=hf_token,
    repo_id='meta-llama/Meta-Llama-3-8B-Instruct',
    model_kwargs={'temperature': 0.6, 'max_new_tokens': 600},
)

# Define the prompt template
template = """You are a helpful AI chatbot. Answer the user's questions in a polite manner.

Question: {question}"""
prompt = PromptTemplate(template=template, input_variables=['question'])

# Initialize the LLMChain
chain = LLMChain(llm=model, prompt=prompt, verbose=True)

# Initialize session state for messages if not already initialized
if "messages" not in st.session_state:
    st.session_state.messages = []

# Function to generate a response from the model
def get_response(question):
    return chain.run({"question": question})

# Streamlit app layout
st.title("Chat with AI")

# Display existing chat messages
for message in st.session_state.messages:
    st.write(f"{message['role']}: {message['content']}")

# Collect user input inside a form so the field clears automatically on submit
with st.form("chat_form", clear_on_submit=True):
    user_input = st.text_input("Your message:", "")
    submitted = st.form_submit_button("Send")

# When the user submits a question
if submitted and user_input:
    # Add the user's message to session state
    st.session_state.messages.append({"role": "User", "content": user_input})
    # Get the AI response
    response = get_response(user_input)
    # Add the AI's response to session state
    st.session_state.messages.append({"role": "AI", "content": response})
    # Rerun the script so the new messages render in the chat history above
    st.rerun()
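# Usage note (a minimal sketch; the filename "app.py" and the token value
# shown here are assumptions, not part of the original script):
# 1. Create my_keys.py next to this script, holding your Hugging Face token:
#        hf_token = "hf_..."  # your actual Hugging Face API token
# 2. Install the dependencies and launch the app:
#        pip install streamlit langchain huggingface_hub
#        streamlit run app.py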