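"""Streamlit chat demo over a Vectara corpus.

Reads its configuration (customer_id, corpus_ids, api_key, title,
description, examples, source_data_desc) from environment variables,
sends user questions to Vectara through the VectaraQuery helper from
query.py, and keeps the conversation in st.session_state.
"""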
import ast
import os

import streamlit as st
from omegaconf import OmegaConf
from PIL import Image

from query import VectaraQuery


def generate_response(question):
    # Ask Vectara for an answer. The query client lives in st.session_state,
    # so this works from module-level functions such as process_prompt too.
    return st.session_state.vq.submit_query(question)


def launch_bot():

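    # Build the app config once per session from environment variables and
    # cache it, along with the Vectara query client, in st.session_state so
    # it survives Streamlit reruns.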
    if 'cfg' not in st.session_state:
        corpus_ids = str(os.environ['corpus_ids']).split(',')
        # 'examples' is expected to hold a Python literal (a list of question
        # strings); ast.literal_eval parses it without executing arbitrary code.
        questions = list(ast.literal_eval(os.environ['examples']))
        cfg = OmegaConf.create({
            'customer_id': str(os.environ['customer_id']),
            'corpus_ids': corpus_ids,
            'api_key': str(os.environ['api_key']),
            'title': os.environ['title'],
            'description': os.environ['description'],
            'examples': questions,
            'source_data_desc': os.environ['source_data_desc']
        })
        st.session_state.cfg = cfg
        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_ids)

    cfg = st.session_state.cfg
    st.set_page_config(page_title=cfg.title, layout="wide")

    # left side content
    with st.sidebar:
        image = Image.open('Vectara-logo.png')
        st.markdown(f"## Welcome to {cfg.title}\n\n"
                    f"This demo uses Retrieval Augmented Generation (RAG) to answer questions about {cfg.source_data_desc}\n\n")

        st.markdown("---")
        st.markdown(
            "## How does this work?\n"
            "This app was built with [Vectara](https://vectara.com).\n"
            "Vectara's [Indexing API](https://docs.vectara.com/docs/api-reference/indexing-apis/indexing) was used to ingest the data into a Vectara corpus (or index).\n\n"
            "This app uses Vectara's [Chat API](https://docs.vectara.com/docs/console-ui/vectara-chat-overview) to query the corpus and answer your questions.\n\n"
        )
        st.markdown("---")
        st.image(image, width=250)

    st.markdown(f"<center> <h2> Vectara chat demo: {cfg.title} </h2> </center>", unsafe_allow_html=True)
    st.markdown(f"<center> <h4> {cfg.description} </h4> </center>", unsafe_allow_html=True)

    # Initialize the chat history and the first-round flag together, so that
    # st.session_state.first_round is defined on every rerun.
    if "messages" not in st.session_state:
        st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
        st.session_state.first_round = True
    
    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])

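    # On the first round, surface clickable example questions; afterwards,
    # switch to a free-form chat input box.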
    if st.session_state.first_round:
        st.markdown("**Example questions:**")
        for example in cfg.examples:
            if st.button(example):
                st.session_state.first_round = False
                process_prompt(example)
    else:
        if prompt := st.chat_input("Ask me anything..."):
            process_prompt(prompt)  # Process the user-provided prompt


def process_prompt(prompt):
    # Record and display the user's message, then generate and display
    # the assistant's response.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_response(prompt)
            st.write(response)
    st.session_state.messages.append({"role": "assistant", "content": response})


if __name__ == "__main__":
    launch_bot()
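
# A minimal sketch of how to run this demo locally, assuming this file is
# saved as app.py (the file name is an assumption, not confirmed here):
#
#   export customer_id=... api_key=... corpus_ids=1,2 \
#          title="My demo" description="..." \
#          examples='["What is RAG?"]' source_data_desc="my docs"
#   streamlit run app.py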