herMaster committed on
Commit ddb7d13 • 1 Parent(s): 02dfb55

Delete app.py

Files changed (1)
  1. app.py +0 -159
app.py DELETED
@@ -1,159 +0,0 @@
import gradio as gr
from qdrant_client import models, QdrantClient
from sentence_transformers import SentenceTransformer
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
# from langchain.llms import LlamaCpp
from langchain.vectorstores import Qdrant
# from langchain.llms import CTransformers
from ctransformers import AutoModelForCausalLM
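
# Overview: this script builds a small retrieval-augmented QA demo. It
# embeds chunks of a PDF into a local Qdrant collection, retrieves the
# closest chunks for each question, and answers with a local Llama 2 model.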


# loading the embedding model

encoder = SentenceTransformer('jinaai/jina-embedding-b-en-v1')

print("embedding model loaded.............................")
print("####################################################")

# loading the LLM

callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])

print("loading the LLM......................................")

# llm = LlamaCpp(
#     model_path="TheBloke/Llama-2-7B-Chat-GGUF/llama-2-7b-chat.Q8_0.gguf",
#     n_ctx=2048,
#     f16_kv=True,  # MUST be True, otherwise you will run into problems after a couple of calls
#     callback_manager=callback_manager,
#     verbose=True,
# )

llm = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Llama-2-7B-Chat-GGUF",
    model_file="llama-2-7b-chat.Q3_K_L.gguf",
    model_type="llama",
    temperature=0.2,
    repetition_penalty=1.5,
)
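
# Note: Q3_K_L is one of several quantization levels published for this GGUF
# model; heavier files (such as the Q8_0 referenced in the commented-out
# LlamaCpp path above) trade a larger memory footprint for output quality.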


print("LLM loaded........................................")
print("################################################################")


def get_chunks(text):
    text_splitter = RecursiveCharacterTextSplitter(
        # separator = "\n",
        chunk_size=250,
        chunk_overlap=50,
        length_function=len,
    )
    chunks = text_splitter.split_text(text)
    return chunks
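
# Rough illustration: with chunk_size=250 and chunk_overlap=50 the splitter
# advances about 200 characters per chunk, so a 1,000-character page yields
# roughly five overlapping chunks (exact boundaries depend on separators).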


pdf_path = './100 Weird Facts About the Human Body.pdf'

reader = PdfReader(pdf_path)
text = ""
for page in reader.pages:
    text += page.extract_text()

chunks = get_chunks(text)
print(chunks)
print("Chunks are ready.....................................")
print("######################################################")

client = QdrantClient(path="./db")
print("db created................................................")
print("#####################################################################")

client.recreate_collection(
    collection_name="my_facts",
    vectors_config=models.VectorParams(
        size=encoder.get_sentence_embedding_dimension(),  # vector size is defined by the embedding model
        distance=models.Distance.COSINE,
    ),
)

print("Collection created........................................")
print("#########################################################")


dic = dict(enumerate(chunks))

client.upload_records(
    collection_name="my_facts",
    records=[
        models.Record(
            id=idx,
            vector=encoder.encode(dic[idx]).tolist(),
            payload={dic[idx][:5]: dic[idx]},  # payload keyed by the chunk's first five characters
        )
        for idx in dic
    ],
)

print("Records uploaded........................................")
print("###########################################################")


def chat(question):
    hits = client.search(
        collection_name="my_facts",
        query_vector=encoder.encode(question).tolist(),
        limit=3,
    )
    context = []
    for hit in hits:
        context.append(list(hit.payload.values())[0])

    # join however many chunks came back into a single context string
    context = " ".join(context)

    system_prompt = """You are a helpful assistant; you will use the provided context to answer user questions.
    Read the given context before answering questions and think step by step. If you cannot answer a user question based on
    the provided context, inform the user. Do not use any other information to answer the user. Provide a detailed answer to the question."""

    # Llama 2 chat prompt format
    B_INST, E_INST = "[INST]", "[/INST]"
    B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
    SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS

    instruction = f"""
    Context: {context}
    User: {question}"""

    prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST

    result = llm(prompt_template)
    return result
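
# Example query (borrowed from the commented-out Gradio examples below):
# chat("what is the speed of human nerve impulses?")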


screen = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(lines=10, placeholder="Enter your question here 👉"),
    outputs=gr.Textbox(lines=10, placeholder="Your answer will be here soon 🚀"),
    title="Q&A with PDF 👩🏻‍💻📓✍🏻💡",
    description="This app facilitates a conversation with the PDF available at https://www.delo.si/assets/media/other/20110728/100%20Weird%20Facts%20About%20the%20Human%20Body.pdf 💡",
    theme="soft",
    # examples=["Hello", "what is the speed of human nerve impulses?"],
)

screen.launch()