:gem: [Feature] Enable chat response with get_message_id

- constants/models.py  +2 -0
- networks/huggingchat_streamer.py  +20 -13
constants/models.py
CHANGED
@@ -5,6 +5,7 @@ MODEL_MAP = {
     # "openchat-3.5": "openchat/openchat-3.5-0106",
     "gemma-7b": "google/gemma-1.1-7b-it",
     "command-r-plus": "CohereForAI/c4ai-command-r-plus",
+    "llama3-70b": "meta-llama/Meta-Llama-3-70B-Instruct",
     "default": "mistralai/Mixtral-8x7B-Instruct-v0.1",
 }

@@ -27,6 +28,7 @@ TOKEN_LIMIT_MAP = {
     "gemma-7b": 8192,
     "gpt-3.5-turbo": 8192,
     "command-r-plus": 32768,
+    "llama3-70b": 8192,
 }

 TOKEN_RESERVED = 20
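For context, the sketch below (not part of this commit) shows how the new "llama3-70b" entries would be consumed: the MODEL_MAP lookup matches what HuggingchatStreamer.__init__ already does, while subtracting TOKEN_RESERVED from the TOKEN_LIMIT_MAP value is only an assumption about how the budget is applied.

# Hypothetical check of the new entries; the token-budget arithmetic is assumed.
from constants.models import MODEL_MAP, TOKEN_LIMIT_MAP, TOKEN_RESERVED

model = "llama3-70b"
model_fullname = MODEL_MAP[model]           # "meta-llama/Meta-Llama-3-70B-Instruct"
max_input_tokens = TOKEN_LIMIT_MAP[model] - TOKEN_RESERVED  # 8192 - 20 = 8172
print(model_fullname, max_input_tokens)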
networks/huggingchat_streamer.py
CHANGED
@@ -31,13 +31,13 @@ class HuggingchatStreamer:
         self.model = "mixtral-8x7b"
         self.model_fullname = MODEL_MAP[self.model]
         self.message_outputer = OpenaiStreamOutputer(model=self.model)
-        self.tokenizer = AutoTokenizer.from_pretrained(self.model_fullname)
+        # self.tokenizer = AutoTokenizer.from_pretrained(self.model_fullname)

-    def count_tokens(self, text):
-        tokens = self.tokenizer.encode(text)
-        token_count = len(tokens)
-        logger.note(f"Prompt Token Count: {token_count}")
-        return token_count
+    # def count_tokens(self, text):
+    #     tokens = self.tokenizer.encode(text)
+    #     token_count = len(tokens)
+    #     logger.note(f"Prompt Token Count: {token_count}")
+    #     return token_count

     def get_hf_chat_id(self):
         request_url = "https://huggingface.co/chat/settings"

@@ -92,7 +92,7 @@ class HuggingchatStreamer:
         self.conversation_id = conversation_id
         return conversation_id

-    def
+    def get_last_message_id(self):
         request_url = f"https://huggingface.co/chat/conversation/{self.conversation_id}/__data.json?x-sveltekit-invalidated=11"
         request_headers = HUGGINGCHAT_POST_HEADERS
         extra_headers = {

@@ -109,9 +109,14 @@ class HuggingchatStreamer:
             timeout=10,
         )
         if res.status_code == 200:
-            data = res.json()
-            #
-
+            data = res.json()["nodes"][1]["data"]
+            # find the last element which matches the format of uuid4
+            uuid_pattern = re.compile(
+                r"^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}$"
+            )
+            for item in data:
+                if type(item) == str and uuid_pattern.match(item):
+                    message_id = item
             logger.success(f"[{message_id}]")
         else:
             logger.warn(f"[{res.status_code}]")

@@ -182,7 +187,7 @@ class HuggingchatStreamer:
     ):
         self.get_hf_chat_id()
         self.get_conversation_id()
-        message_id = self.
+        message_id = self.get_last_message_id()

         request_url = f"https://huggingface.co/chat/conversation/{self.conversation_id}"
         request_headers = copy.deepcopy(HUGGINGCHAT_POST_HEADERS)

@@ -220,7 +225,9 @@ class HuggingchatStreamer:


 if __name__ == "__main__":
-
-
+    # model = "llama3-70b"
+    model = "command-r-plus"
+    streamer = HuggingchatStreamer(model=model)
+    prompt = "what is your model?"
     streamer.chat_response(prompt=prompt)
     # HF_ENDPOINT=https://hf-mirror.com python -m networks.huggingchat_streamer
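As a sanity check on the new get_last_message_id logic, here is a small self-contained sketch of the UUID scan. The sample "data" list is made up (a real __data.json payload is a larger flattened structure); the point is that the loop keeps overwriting message_id, so the last uuid4-formatted string in the list wins.

import re

# Same pattern as in get_last_message_id: a lowercase uuid4-shaped string.
uuid_pattern = re.compile(
    r"^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}$"
)

# Made-up stand-in for res.json()["nodes"][1]["data"].
data = [
    "0f1e2d3c-4b5a-6978-8796-a5b4c3d2e1f0",  # an earlier message id
    {"from": "assistant"},
    "9a8b7c6d-5e4f-4321-9876-0123456789ab",  # the latest message id
]

message_id = None
for item in data:
    if isinstance(item, str) and uuid_pattern.match(item):
        message_id = item

print(message_id)  # 9a8b7c6d-5e4f-4321-9876-0123456789ab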