---
library_name: transformers
tags:
- stablelm2
datasets:
- M4-ai/LDJnr_combined_inout_format
license: cc-by-4.0
---

- Fine-tuned Stable LM 2 1.6B model using NEFTune and MixCE loss, trained for 3 epochs.
- NEFTune alpha = 5
- MixCE mixing ratio = 0.5

## Example:

```
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer, StoppingCriteria, StoppingCriteriaList
import torch

class MyStoppingCriteria(StoppingCriteria):
    """Stops generation once the target sequence (e.g. <|im_end|>) appears in the completion."""

    def __init__(self, target_sequence, prompt, tokenizer):
        self.target_sequence = target_sequence
        self.prompt = prompt
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        # Decode everything generated so far and strip the prompt,
        # so only the assistant's completion is checked.
        generated_text = self.tokenizer.decode(input_ids[0])
        generated_text = generated_text.replace(self.prompt, '')
        return self.target_sequence in generated_text

modelpath = "aloobun/stablelm-2-bun_M4-1_6b"

model = AutoModelForCausalLM.from_pretrained(
    modelpath,
    torch_dtype=torch.bfloat16,
    device_map="cuda",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(
    modelpath,
    trust_remote_code=True,
    use_fast=False,
)

# ChatML-style prompt format
prompt = "<|im_start|>user\nWhy are people all different, physically?<|im_end|>\n<|im_start|>assistant\n"
encoded_input = tokenizer(prompt, return_tensors='pt')
input_ids = encoded_input['input_ids'].cuda()
streamer = TextStreamer(tokenizer=tokenizer, skip_prompt=True)

_ = model.generate(
    input_ids,
    streamer=streamer,
    pad_token_id=tokenizer.eos_token_id,
    do_sample=True,
    temperature=0.6,
    top_p=0.8,
    max_new_tokens=512,
    stopping_criteria=StoppingCriteriaList([MyStoppingCriteria("<|im_end|>", prompt, tokenizer)]),
)
```
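If this model's tokenizer ships a ChatML chat template (not verified here), the prompt in the example above can also be built with `apply_chat_template` instead of being written by hand:

```
messages = [{"role": "user", "content": "Why are people all different, physically?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).cuda()
```

## Training sketch:

For reference, a minimal sketch of how the NEFTune and MixCE settings listed above could be reproduced with TRL's `SFTTrainer` (recent TRL/transformers versions expose `neftune_noise_alpha` directly). This is not the actual training script for this model: the base checkpoint id, the dataset column handling, and every hyperparameter other than the NEFTune alpha, the MixCE mixing ratio, and the epoch count are assumptions for illustration.

```
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM
from trl import SFTConfig, SFTTrainer


def mixce_loss(logits, labels, eta=0.5, ignore_index=-100):
    # MixCE-style objective (mixing forward and reverse cross-entropies):
    # each token's NLL is weighted by eta + (1 - eta) * p_model(gold token),
    # a self-reinforced approximation of the reverse cross-entropy term.
    logits = logits[:, :-1, :].contiguous()   # position t predicts token t+1
    labels = labels[:, 1:].contiguous()
    log_probs = torch.log_softmax(logits.float(), dim=-1)
    mask = labels.ne(ignore_index)
    gold_logp = log_probs.gather(-1, labels.masked_fill(~mask, 0).unsqueeze(-1)).squeeze(-1)
    # Detached weight: it rescales the loss but does not receive gradients itself.
    weight = eta + (1.0 - eta) * gold_logp.exp().detach()
    return -(weight * gold_logp * mask).sum() / mask.sum()


class MixCESFTTrainer(SFTTrainer):
    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
        outputs = model(**inputs)
        loss = mixce_loss(outputs.logits, inputs["labels"], eta=0.5)  # MixCE = 0.5
        return (loss, outputs) if return_outputs else loss


model = AutoModelForCausalLM.from_pretrained(
    "stabilityai/stablelm-2-1_6b",          # assumed base checkpoint
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
# Assumes the dataset exposes a text-like column SFTTrainer can consume;
# adjust with a formatting function if the schema differs.
dataset = load_dataset("M4-ai/LDJnr_combined_inout_format", split="train")

config = SFTConfig(
    output_dir="stablelm-2-bun_M4-1_6b",
    num_train_epochs=3,                     # 3 epochs, as stated above
    neftune_noise_alpha=5.0,                # NEFTune alpha = 5
    bf16=True,
)

trainer = MixCESFTTrainer(model=model, args=config, train_dataset=dataset)
trainer.train()
```

With eta = 1 this objective reduces to standard cross-entropy; smaller eta increasingly down-weights gold tokens the model assigns low probability, which is the self-reinforced behaviour MixCE uses to approximate the reverse cross-entropy.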