vermen committed on
Commit
15ed53f
1 Parent(s): 061ed1d

Update app.py

Files changed (1)
app.py  +69 -38
app.py CHANGED
@@ -1,19 +1,67 @@
- import gradio as gr
- from huggingface_hub import InferenceClient

- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


  def respond(
      message,
      history: list[tuple[str, str]],
      system_message,
-     max_tokens,
-     temperature,
-     top_p,
  ):
      messages = [{"role": "system", "content": system_message}]

@@ -22,43 +70,26 @@ def respond(
              messages.append({"role": "user", "content": val[0]})
          if val[1]:
              messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response


  """
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
  """
  demo = gr.ChatInterface(
      respond,
      additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
      ],
  )


  if __name__ == "__main__":
-     demo.launch()

+ from llama_index.llms.llama_cpp import LlamaCPP
+ from llama_index.core import VectorStoreIndex, StorageContext
+ from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
+ import pymongo
+ from pymongo.mongo_client import MongoClient
+ from pymongo.operations import SearchIndexModel
+ from llama_index.core import VectorStoreIndex, StorageContext
+ import os
+ ###### load LLM
+ model_url = "https://huggingface.co/georgesung/llama3_8b_chat_uncensored/resolve/main/llama3_8b_chat_uncensored_q4_0.gguf"
+
+ llm = LlamaCPP(
+     # You can pass in the URL to a GGML model to download it automatically
+     model_url=model_url,
+     # optionally, you can set the path to a pre-downloaded model instead of model_url
+     model_path=None,
+     temperature=0.01,
+     max_new_tokens=1024,
+     # llama2 has a context window of 4096 tokens, but we set it lower to allow for some wiggle room
+     context_window=3900,
+     # kwargs to pass to __call__()
+     generate_kwargs={},
+     # kwargs to pass to __init__()
+     # set to at least 1 to use GPU
+     model_kwargs={"n_gpu_layers": 1},
+     verbose=True,
+ )
+ # load embedding model
+ # sentence transformers
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+
+ from llama_index.core.node_parser import SentenceSplitter
+ from llama_index.core import Settings
+
+ embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en")
+ Settings.llm = llm
+ Settings.embed_model = embed_model
+ Settings.node_parser = SentenceSplitter(chunk_size=1024)
+ Settings.num_output = 256
+ Settings.context_window = 3900
+
+ # Load vector database
+
+ MONGO_URI = "mongodb+srv://groverorgrf:[email protected]/?retryWrites=true&w=majority&appName=Cluster0"
+ os.environ["MONGODB_URI"] = MONGO_URI
+ DB_NAME = "neuroRAG"
+ COLLECTION_NAME = "neuro_books"
+ # Connect to your Atlas deployment
+ mongo_client = MongoClient(MONGO_URI)
+ collection = mongo_client[DB_NAME][COLLECTION_NAME]
+ #
+
+ vector_store = MongoDBAtlasVectorSearch(mongo_client, db_name=DB_NAME, collection_name=COLLECTION_NAME, vector_index_name="default")

+ # Recover index
+ index = VectorStoreIndex.from_vector_store(vector_store)

+
+ ########### FOR CHAT
  def respond(
      message,
      history: list[tuple[str, str]],
      system_message,
+     top_k,
  ):
      messages = [{"role": "system", "content": system_message}]

              messages.append({"role": "user", "content": val[0]})
          if val[1]:
              messages.append({"role": "assistant", "content": val[1]})
+     #
+     # build the query engine
+     query_engine = index.as_query_engine(similarity_top_k=top_k)
+     #
+     query_str = message
+     response = query_engine.query(query_str)
+     #
+     return response
+     #

  """
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
  """
  demo = gr.ChatInterface(
      respond,
      additional_inputs=[
+         gr.Textbox(value="Qual é sua pergunta?", label="System message"),
+         gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k"),
      ],
  )


  if __name__ == "__main__":
+     demo.launch()
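
Note on the new app.py: this commit removes `import gradio as gr` from the old version, but the added code still builds the UI with `gr.ChatInterface`; the rewritten `respond` also returns the object produced by `query_engine.query()` rather than a plain string, and the `messages` list assembled from `system_message` and the chat history is never passed to the query engine. A minimal, hypothetical follow-up sketch (not part of this commit) that re-adds the import and hands Gradio plain text:

import gradio as gr  # re-add the import that this commit removes


def respond(message, history, system_message, top_k):
    # build a query engine over the Atlas-backed index and answer from the user message
    query_engine = index.as_query_engine(similarity_top_k=top_k)
    response = query_engine.query(message)
    # str() on a llama_index Response yields the generated answer text,
    # which is what gr.ChatInterface expects the callback to return
    return str(response)

Even with this sketch, system_message and history would still be unused; wiring them into retrieval (for example via a chat engine instead of a query engine) would be a separate change.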