Spaces:
Running
Running
💎 [Feature] Enable gpt-3.5-turbo by adding proof_of_work step
Browse files — networks/openai_streamer.py (+35 −8)
networks/openai_streamer.py
CHANGED
@@ -12,6 +12,7 @@ from constants.headers import OPENAI_GET_HEADERS, OPENAI_POST_DATA
|
|
12 |
from constants.models import TOKEN_LIMIT_MAP, TOKEN_RESERVED
|
13 |
|
14 |
from messagers.message_outputer import OpenaiStreamOutputer
|
|
|
15 |
|
16 |
|
17 |
class OpenaiRequester:
|
@@ -31,9 +32,10 @@ class OpenaiRequester:
|
|
31 |
}
|
32 |
self.requests_headers.update(extra_headers)
|
33 |
|
34 | -    def log_request(self, url, method="GET"):
35 | -        logger.note(f"> {method}:", end=" ")
36 | -        logger.mesg(f"{url}", end=" ")
|
|
37 |
|
38 |
def log_response(
|
39 |
self, res: requests.Response, stream=False, iter_lines=False, verbose=False
|
@@ -104,7 +106,10 @@ class OpenaiRequester:
|
|
104 |
timeout=10,
|
105 |
impersonate="chrome120",
|
106 |
)
|
107 |
-        self.chat_requirements_token = res.json()["token"]
|
|
|
|
|
|
|
108 |
self.log_response(res)
|
109 |
|
110 |
def transform_messages(self, messages: list[dict]):
|
@@ -124,10 +129,14 @@ class OpenaiRequester:
|
|
124 |
]
|
125 |
return new_messages
|
126 |
|
127 |
-    def chat_completions(self, messages: list[dict], verbose=False):
|
|
|
|
|
|
|
128 |
extra_headers = {
|
129 |
"Accept": "text/event-stream",
|
130 |
"Openai-Sentinel-Chat-Requirements-Token": self.chat_requirements_token,
|
|
|
131 |
}
|
132 |
requests_headers = copy.deepcopy(self.requests_headers)
|
133 |
requests_headers.update(extra_headers)
|
@@ -150,7 +159,7 @@ class OpenaiRequester:
|
|
150 |
impersonate="chrome120",
|
151 |
stream=True,
|
152 |
)
|
153 |
-        self.log_response(res, stream=True, iter_lines=True, verbose=verbose)
|
154 |
return res
|
155 |
|
156 |
|
@@ -179,13 +188,15 @@ class OpenaiStreamer:
|
|
179 |
)
|
180 |
return True
|
181 |
|
182 |
-    def chat_response(self, messages: list[dict], verbose=False):
|
183 |
self.check_token_limit(messages)
|
184 |
logger.enter_quiet(not verbose)
|
185 |
requester = OpenaiRequester()
|
186 |
requester.auth()
|
187 |
logger.exit_quiet(not verbose)
|
188 |
-        return requester.chat_completions(messages, verbose=verbose)
|
|
|
|
|
189 |
|
190 |
def chat_return_generator(self, stream_response: requests.Response, verbose=False):
|
191 |
content_offset = 0
|
@@ -253,3 +264,19 @@ class OpenaiStreamer:
|
|
253 |
logger.warn(e)
|
254 |
final_output["choices"][0]["message"]["content"] = final_content.strip()
|
255 |
return final_output
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
from constants.models import TOKEN_LIMIT_MAP, TOKEN_RESERVED
|
13 |
|
14 |
from messagers.message_outputer import OpenaiStreamOutputer
|
15 |
+
from networks.proof_worker import ProofWorker
|
16 |
|
17 |
|
18 |
class OpenaiRequester:
|
|
|
32 |
}
|
33 |
self.requests_headers.update(extra_headers)
|
34 |
|
35 |
+
def log_request(self, url, method="GET", verbose=False):
|
36 |
+
if verbose:
|
37 |
+
logger.note(f"> {method}:", end=" ")
|
38 |
+
logger.mesg(f"{url}", end=" ")
|
39 |
|
40 |
def log_response(
|
41 |
self, res: requests.Response, stream=False, iter_lines=False, verbose=False
|
|
|
106 |
timeout=10,
|
107 |
impersonate="chrome120",
|
108 |
)
|
109 |
+
data = res.json()
|
110 |
+
self.chat_requirements_token = data["token"]
|
111 |
+
self.chat_requirements_seed = data["proofofwork"]["seed"]
|
112 |
+
self.chat_requirements_difficulty = data["proofofwork"]["difficulty"]
|
113 |
self.log_response(res)
|
114 |
|
115 |
def transform_messages(self, messages: list[dict]):
|
|
|
129 |
]
|
130 |
return new_messages
|
131 |
|
132 |
+
def chat_completions(self, messages: list[dict], iter_lines=False, verbose=False):
|
133 |
+
proof_token = ProofWorker().calc_proof_token(
|
134 |
+
self.chat_requirements_seed, self.chat_requirements_difficulty
|
135 |
+
)
|
136 |
extra_headers = {
|
137 |
"Accept": "text/event-stream",
|
138 |
"Openai-Sentinel-Chat-Requirements-Token": self.chat_requirements_token,
|
139 |
+
"Openai-Sentinel-Proof-Token": proof_token,
|
140 |
}
|
141 |
requests_headers = copy.deepcopy(self.requests_headers)
|
142 |
requests_headers.update(extra_headers)
|
|
|
159 |
impersonate="chrome120",
|
160 |
stream=True,
|
161 |
)
|
162 |
+
self.log_response(res, stream=True, iter_lines=iter_lines, verbose=verbose)
|
163 |
return res
|
164 |
|
165 |
|
|
|
188 |
)
|
189 |
return True
|
190 |
|
191 |
+
def chat_response(self, messages: list[dict], iter_lines=False, verbose=False):
|
192 |
self.check_token_limit(messages)
|
193 |
logger.enter_quiet(not verbose)
|
194 |
requester = OpenaiRequester()
|
195 |
requester.auth()
|
196 |
logger.exit_quiet(not verbose)
|
197 |
+
return requester.chat_completions(
|
198 |
+
messages=messages, iter_lines=iter_lines, verbose=verbose
|
199 |
+
)
|
200 |
|
201 |
def chat_return_generator(self, stream_response: requests.Response, verbose=False):
|
202 |
content_offset = 0
|
|
|
264 |
logger.warn(e)
|
265 |
final_output["choices"][0]["message"]["content"] = final_content.strip()
|
266 |
return final_output
|
267 |
+
|
268 |
+
|
269 |
+
if __name__ == "__main__":
|
270 |
+
streamer = OpenaiStreamer()
|
271 |
+
messages = [
|
272 |
+
{
|
273 |
+
"role": "system",
|
274 |
+
"content": "You are an LLM developed by Hansimov-CORP.\nYour name is Hansimov-Copilot.",
|
275 |
+
},
|
276 |
+
{"role": "user", "content": "Hello, what is your role?"},
|
277 |
+
{"role": "assistant", "content": "I am an LLM."},
|
278 |
+
{"role": "user", "content": "What is your name?"},
|
279 |
+
]
|
280 |
+
|
281 |
+
streamer.chat_response(messages=messages, iter_lines=True, verbose=True)
|
282 |
+
# python -m networks.openai_streamer
|