import json
import re

import requests

from tclogger import logger
from transformers import AutoTokenizer

from constants.models import (
    MODEL_MAP,
    STOP_SEQUENCES_MAP,
    TOKEN_LIMIT_MAP,
    TOKEN_RESERVED,
)
from constants.envs import PROXIES
from messagers.message_outputer import OpenaiStreamOutputer


class HuggingfaceStreamer:
    def __init__(self, model: str):
        if model in MODEL_MAP:
            self.model = model
        else:
            self.model = "default"
        self.model_fullname = MODEL_MAP[self.model]
        self.message_outputer = OpenaiStreamOutputer(model=self.model)

        if self.model == "gemma-7b":
            # The repo `google/gemma-7b-it` is gated and requires authentication
            # to access, so use the mistral-7b tokenizer as a fallback
            self.tokenizer = AutoTokenizer.from_pretrained(MODEL_MAP["mistral-7b"])
        else:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_fullname)
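
    # Example of a single SSE line from the HF streaming endpoint, as consumed
    # by `parse_line` below (fields other than `token.text` are assumptions
    # based on the TGI streaming docs):
    #   data: {"token": {"id": 5, "text": " Hello", "logprob": -0.1, "special": false}}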
    def parse_line(self, line):
        line = line.decode("utf-8")
        line = re.sub(r"data:\s*", "", line)
        try:
            data = json.loads(line)
            content = data["token"]["text"]
        except (json.JSONDecodeError, KeyError, TypeError):
            # Skip malformed lines instead of returning an unbound `content`
            logger.err(line)
            content = ""
        return content

    def count_tokens(self, text):
        tokens = self.tokenizer.encode(text)
        token_count = len(tokens)
        logger.note(f"Prompt Token Count: {token_count}")
        return token_count

    def chat_response(
        self,
        prompt: str = None,
        temperature: float = 0.5,
        top_p: float = 0.95,
        max_new_tokens: int = None,
        api_key: str = None,
        use_cache: bool = False,
    ):
        # https://huggingface.co/docs/api-inference/detailed_parameters?code=curl
        # curl --proxy http://<server>:<port> https://api-inference.huggingface.co/models/<org>/<model_name> -X POST -d '{"inputs":"who are you?","parameters":{"max_new_tokens":64}}' -H 'Content-Type: application/json' -H 'Authorization: Bearer <HF_TOKEN>'
        self.request_url = (
            f"https://api-inference.huggingface.co/models/{self.model_fullname}"
        )
        self.request_headers = {
            "Content-Type": "application/json",
        }

        if api_key:
            logger.note(
                f"Using API Key: {api_key[:3]}{(len(api_key)-7)*'*'}{api_key[-4:]}"
            )
            self.request_headers["Authorization"] = f"Bearer {api_key}"
        if temperature is None or temperature < 0:
            temperature = 0.0
        # temperature must be within (0, 1) for HF LLM models
        temperature = max(temperature, 0.01)
        temperature = min(temperature, 0.99)
        top_p = max(top_p, 0.01)
        top_p = min(top_p, 0.99)

        token_limit = int(
            TOKEN_LIMIT_MAP[self.model] - TOKEN_RESERVED - self.count_tokens(prompt)
        )
        if token_limit <= 0:
            raise ValueError("Prompt exceeded token limit!")

        if max_new_tokens is None or max_new_tokens <= 0:
            max_new_tokens = token_limit
        else:
            max_new_tokens = min(max_new_tokens, token_limit)

        # References:
        #   huggingface_hub/inference/_client.py:
        #     class InferenceClient > def text_generation()
        #   huggingface_hub/inference/_text_generation.py:
        #     class TextGenerationRequest > param `stream`
        # https://huggingface.co/docs/text-generation-inference/conceptual/streaming#streaming-with-curl
        # https://huggingface.co/docs/api-inference/detailed_parameters#text-generation-task
        self.request_body = {
            "inputs": prompt,
            "parameters": {
                "temperature": temperature,
                "top_p": top_p,
                "max_new_tokens": max_new_tokens,
                "return_full_text": False,
            },
            "options": {
                "use_cache": use_cache,
            },
            "stream": True,
        }
        # Always set, so the checks in the chat_return_* methods below never
        # hit an unset attribute for models without stop sequences
        self.stop_sequences = STOP_SEQUENCES_MAP.get(self.model)
        # if self.stop_sequences:
        #     self.request_body["parameters"]["stop_sequences"] = [self.stop_sequences]

        logger.back(self.request_url)
        stream_response = requests.post(
            self.request_url,
            headers=self.request_headers,
            json=self.request_body,
            proxies=PROXIES,
            stream=True,
        )
        status_code = stream_response.status_code
        if status_code == 200:
            logger.success(status_code)
        else:
            logger.err(status_code)

        return stream_response
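
    # Note on the response returned above: on a non-200 status the API usually
    # sends a JSON error body rather than an SSE stream, e.g. (shape assumed):
    #   {"error": "Model <name> is currently loading", "estimated_time": 60.0}
    # Callers may want to surface that body instead of iterating the stream.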

    def chat_return_dict(self, stream_response):
        # https://platform.openai.com/docs/guides/text-generation/chat-completions-response-format
        final_output = self.message_outputer.default_data.copy()
        final_output["choices"] = [
            {
                "index": 0,
                "finish_reason": "stop",
                "message": {
                    "role": "assistant",
                    "content": "",
                },
            }
        ]
        logger.back(final_output)

        final_content = ""
        for line in stream_response.iter_lines():
            if not line:
                continue
            content = self.parse_line(line)

            if self.stop_sequences and content.strip() == self.stop_sequences:
                logger.success("\n[Finished]")
                break
            else:
                logger.back(content, end="")
                final_content += content

        if self.stop_sequences:
            final_content = final_content.replace(self.stop_sequences, "")
        final_content = final_content.strip()
        final_output["choices"][0]["message"]["content"] = final_content
        return final_output

    def chat_return_generator(self, stream_response):
        is_finished = False
        line_count = 0
        for line in stream_response.iter_lines():
            if line:
                line_count += 1
            else:
                continue

            content = self.parse_line(line)

            if self.stop_sequences and content.strip() == self.stop_sequences:
                content_type = "Finished"
                logger.success("\n[Finished]")
                is_finished = True
            else:
                content_type = "Completions"
                if line_count == 1:
                    content = content.lstrip()
                logger.back(content, end="")

            output = self.message_outputer.output(
                content=content, content_type=content_type
            )
            yield output

        if not is_finished:
            yield self.message_outputer.output(content="", content_type="Finished")
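

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module. The model name
    # "mistral-7b" and the `HF_TOKEN` env var are assumptions; adjust to
    # whatever MODEL_MAP and your environment actually provide.
    import os

    streamer = HuggingfaceStreamer(model="mistral-7b")
    stream_response = streamer.chat_response(
        prompt="Who are you?",
        temperature=0.5,
        max_new_tokens=64,
        api_key=os.getenv("HF_TOKEN"),
    )
    # Blocking: collect the whole reply into one OpenAI-style dict
    result = streamer.chat_return_dict(stream_response)
    print(result["choices"][0]["message"]["content"])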