import os

import streamlit as st
from openai import OpenAI
from dotenv import load_dotenv

# Load environment variables (e.g. HUGGINGFACEHUB_API_TOKEN) from a local .env file
load_dotenv()


# Initialize the OpenAI client, but point it at the Hugging Face Inference API
# (an OpenAI-compatible text-generation-inference endpoint) instead of api.openai.com
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),  # e.g. "hf_xxx" -- replace with your token
)
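# A minimal .env file for this app would contain a single line (the value below is a
# placeholder, not a real token):
#
#   HUGGINGFACEHUB_API_TOKEN=hf_xxx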




# Supported models, mapped to their Hugging Face repo IDs
model_links = {
    "Mistral": "mistralai/Mistral-7B-Instruct-v0.2",
    "Gemma-7B": "google/gemma-7b-it",
    "Gemma-2B": "google/gemma-2b-it",
    "Gemma-Zephyr": "HuggingFaceH4/zephyr-7b-gemma-v0.1",
    # "Llama-2": "meta-llama/Llama-2-7b-chat-hf"
}
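# To support another model, add its display name and Hugging Face repo ID above and give
# it a matching entry (description + logo) in model_info below so the sidebar has
# something to show for it.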

# Info about each model, displayed in the sidebar
model_info = {
    "Mistral": {
        'description': """The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
            \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team and has over **7 billion parameters.** \n""",
        'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
    "Gemma-7B": {
        'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
            \nIt was created by [**Google's AI team**](https://blog.google/technology/developers/gemma-open-models/) and has over **7 billion parameters.** \n""",
        'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
    "Gemma-2B": {
        'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
            \nIt was created by [**Google's AI team**](https://blog.google/technology/developers/gemma-open-models/) and has over **2 billion parameters.** \n""",
        'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
    "Gemma-Zephyr": {
        'description': """The Gemma-Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
            \nFrom Hugging Face: Zephyr is a series of language models that are trained to act as helpful assistants. \
            Zephyr 7B Gemma is the third model in the series, and is a fine-tuned version of google/gemma-7b \
            that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO).\n""",
        'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png'},
}


# Define the available models
models = list(model_links.keys())

# Create the sidebar with the dropdown for model selection
selected_model = st.sidebar.selectbox("Select Model", models)

# Show the selected model's description and logo in the sidebar
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.image(model_info[selected_model]['logo'])


# Repo ID of the model we want to use
repo_id = model_links[selected_model]


st.subheader(f'AI - {selected_model}')
# st.title(f'ChatBot Using {selected_model}')

# Remember the repo ID for the selected model across reruns
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []


# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])



# Accept user input
if prompt := st.chat_input(f"Hi, I'm {selected_model}. Ask me a question!"):

    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})


    # Stream the assistant's response in a chat message container
    with st.chat_message("assistant"):
        stream = client.chat.completions.create(
            model=repo_id,
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            temperature=0.5,
            stream=True,
            max_tokens=3000,
        )

        response = st.write_stream(stream)
    st.session_state.messages.append({"role": "assistant", "content": response})
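

# To run this app locally (assuming the file is saved as app.py next to the .env file
# holding the Hugging Face token):
#
#   streamlit run app.py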