import torch
import gradio as gr
from threading import Thread
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    StoppingCriteria,
    StoppingCriteriaList,
    TextIteratorStreamer,
)

# bitsandbytes must be installed for 4-bit loading. device_map="auto" already
# places the quantized model on the GPU; calling .to('cuda') on a 4-bit model
# raises a ValueError, so no manual device move is needed.
tokenizer = AutoTokenizer.from_pretrained("./model/")
model = AutoModelForCausalLM.from_pretrained(
    "./model/", device_map="auto", load_in_4bit=True
)


class StopOnTokens(StoppingCriteria):
    """Stop generation as soon as the model emits one of the stop token ids."""

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [29, 0]
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:
                return True
        return False


def chat(message, history):
    history_transformer_format = history + [[message, ""]]
    stop = StopOnTokens()

    # Flatten the chat history into the <human>/<bot> prompt format the model
    # expects; the streaming loop below strips these tags back out.
    messages = "".join(
        "".join(["\n<human>:" + item[0], "\n<bot>:" + item[1]])
        for item in history_transformer_format
    )
    model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")
    streamer = TextIteratorStreamer(
        tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
    )
    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        top_p=0.95,
        top_k=1000,
        temperature=1.0,
        num_beams=1,
        stopping_criteria=StoppingCriteriaList([stop]),
    )
    # Run generation in a background thread so tokens can be yielded to the UI
    # as the streamer produces them.
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    partial_message = ""
    for new_token in streamer:
        if new_token != "<":  # drop the leading '<' of a role tag such as <human>
            partial_message += new_token
            yield partial_message


gr.ChatInterface(chat).launch()
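# Note: newer transformers releases deprecate the bare load_in_4bit flag in
# favor of an explicit quantization config. A minimal sketch of the equivalent
# loading call, assuming transformers >= 4.30 with bitsandbytes installed:
#
#   from transformers import BitsAndBytesConfig
#
#   model = AutoModelForCausalLM.from_pretrained(
#       "./model/",
#       device_map="auto",
#       quantization_config=BitsAndBytesConfig(load_in_4bit=True),
#   )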