abnerguzman committed • 6faab44
Parent(s): f8fe9c9
Update app.py
app.py CHANGED
@@ -1,57 +1,46 @@
-import
-
-
-
+from langchain_voyageai import VoyageAIEmbeddings
+embed_model = VoyageAIEmbeddings(
+    voyage_api_key=os.getenv('VOYAGE_API_KEY'), model="voyage-large-2-instruct", # input_type="document",
+)
 
 from pinecone import Pinecone, ServerlessSpec
-pc = Pinecone(api_key=os.getenv('PINECONE_API_KEY'))
 
+pc = Pinecone(api_key=os.getenv('PINECONE_API_KEY2'))
+pc_256 = pc.Index('subset1-voyage-large-2-instruct-cs256')
 
 from llama_index.vector_stores.pinecone import PineconeVectorStore
 from llama_index.core import VectorStoreIndex
-from llama_index.core.response.pprint_utils import pprint_source_node
-from llama_index.llms.octoai import OctoAI
 
-
-
-
+vector_store = PineconeVectorStore(pinecone_index=pc_256)
+vindex = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
+
+from llama_index.llms.fireworks import Fireworks
+
+fireworks_model = 'accounts/fireworks/models/llama-v3-70b-instruct'
+llm = Fireworks(
+    api_key=os.getenv('FIREWORKS_API_KEY'),
+    model=fireworks_model,
     max_tokens=512,
     temperature=0.1,
 )
 
-
-from llama_index.core.memory import ChatMemoryBuffer
-
 import gradio as gr
-from
-
-def get_credit_dist(history):
-    _out = StringIO()
-    print("Disabled momentarily...", file=_out)
-
-    return _out.getvalue()
+from llama_index.core.memory import ChatMemoryBuffer
 
+def get_chat_engine():
+    memory = ChatMemoryBuffer.from_defaults(token_limit=5000)
+    return vindex.as_chat_engine(
+        chat_mode="context",
+        llm=llm,
+        memory=memory,
+        system_prompt="You are a chatbot, able to have normal interactions, as well as talk about news events.",
+    )
 
 with gr.Blocks() as demo:
-    chatbot = gr.Chatbot(height=
+    chatbot = gr.Chatbot(height="80vh")
     msg = gr.Textbox()
     clear = gr.Button("Clear")
 
-    credit_box = gr.Textbox(label="Credit distribution", lines=20, autoscroll=False)
-    credit_btn = gr.Button("Credit response")
-
-    def get_chat_engine():
-        vector_store = PineconeVectorStore(pinecone_index=pc.Index('prorata-postman-ds-256'))
-        vindex = VectorStoreIndex.from_vector_store(vector_store)
-
-        memory = ChatMemoryBuffer.from_defaults(token_limit=5000)
-        return vindex.as_chat_engine(
-            chat_mode="context",
-            llm=octoai,
-            memory=memory,
-            system_prompt="You are a chatbot, able to have normal interactions, as well as talk about news events provided in the context of the conversation.",
-        )
-
     chat_engine_var = gr.State(get_chat_engine)
 
     def user(user_message, history):
@@ -67,8 +56,6 @@ with gr.Blocks() as demo:
     msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(bot, [chatbot, chat_engine_var], chatbot)
     clear.click(lambda x: x.reset(), chat_engine_var, chatbot, queue=False)
 
-    credit_btn.click(get_credit_dist, chatbot, credit_box)
-
 if __name__ == "__main__":
     demo.queue()
-    demo.launch()
+    demo.launch()
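In short, this commit swaps the OctoAI LLM for Fireworks, points retrieval at a Voyage-embedded Pinecone index, and drops the credit-distribution UI. The new retrieval path can be smoke-tested outside Gradio with the sketch below. It assumes the same environment variables (VOYAGE_API_KEY, PINECONE_API_KEY2, FIREWORKS_API_KEY) and index name as the committed code; it adds an explicit `import os`, which the new hunk references but does not show, and the query text is illustrative.

# Standalone smoke test for the new retrieval path (a sketch mirroring the
# committed wiring, not part of the commit itself).
import os

from langchain_voyageai import VoyageAIEmbeddings
from pinecone import Pinecone
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.llms.fireworks import Fireworks

# Query-time embeddings must match the model the index was built with.
embed_model = VoyageAIEmbeddings(
    voyage_api_key=os.getenv('VOYAGE_API_KEY'),
    model="voyage-large-2-instruct",
)

# Same Pinecone index the commit points the app at.
pc = Pinecone(api_key=os.getenv('PINECONE_API_KEY2'))
vector_store = PineconeVectorStore(pinecone_index=pc.Index('subset1-voyage-large-2-instruct-cs256'))
vindex = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)

llm = Fireworks(
    api_key=os.getenv('FIREWORKS_API_KEY'),
    model='accounts/fireworks/models/llama-v3-70b-instruct',
    max_tokens=512,
    temperature=0.1,
)

# One-off query with no chat memory, just to confirm retrieval + generation
# work end to end; the question is a placeholder.
query_engine = vindex.as_query_engine(llm=llm, similarity_top_k=3)
print(query_engine.query("What are the latest developments in the news?"))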