---
license: bigscience-bloom-rail-1.0
datasets:
- tatsu-lab/alpaca
language:
- en
pipeline_tag: text-generation
tags:
- crayon
- language-technologies
---

# Bloom 560M Finetuned on Instructions

# Training Code

```python
# coding=utf-8
# Code 99.99% copied and adapted from:
# https://github.com/bofenghuang/vigogne
# https://colab.research.google.com/drive/1jCkpikz0J2o20FBQmYmAGdiKmJGOMo-o?usp=sharing#scrollTo=DpYr24pR8T_0

import os
import sys
from dataclasses import dataclass
from typing import Dict, List, Optional, Sequence

import bitsandbytes as bnb
import fire
import torch
import transformers
from datasets import load_dataset
from peft import LoraConfig, TaskType, get_peft_model, get_peft_model_state_dict, prepare_model_for_int8_training
from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaTokenizer

IGNORE_INDEX = -100
DEFAULT_PAD_TOKEN = "[PAD]"

PROMPT_DICT = {
    "prompt_input": (
        "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
        "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
    ),
    "prompt_no_input": (
        "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
        "### Instruction:\n{instruction}\n\n### Response:\n"
    ),
}


def generate_prompt(example):
    return (
        PROMPT_DICT["prompt_input"].format_map(example)
        if example["input"]
        else PROMPT_DICT["prompt_no_input"].format_map(example)
    )


# Modified from: https://github.com/bofenghuang/stanford_alpaca/blob/eb5b171d9b103a12a8e14e0edca9cbc45fe1d512/train.py#L166-L182
# Almost the same as transformers.DataCollatorForSeq2Seq
@dataclass
class DataCollatorForSupervisedDataset(object):
    """Collate examples for supervised fine-tuning."""

    tokenizer: transformers.PreTrainedTokenizer
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
        input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels"))

        if self.pad_to_multiple_of is not None:
            max_length_index, max_length = max(enumerate([len(input_ids_) for input_ids_ in input_ids]), key=lambda x: x[1])
            # Round the batch's max length up to the next multiple of pad_to_multiple_of
            n_padding = ((max_length // self.pad_to_multiple_of) + 1) * self.pad_to_multiple_of - max_length
            # Pad the longest example to pad_to_multiple_of * N
            input_ids[max_length_index].extend([self.tokenizer.pad_token_id] * n_padding)
            labels[max_length_index].extend([IGNORE_INDEX] * n_padding)

        input_ids = [torch.LongTensor(input_ids_) for input_ids_ in input_ids]
        labels = [torch.LongTensor(labels_) for labels_ in labels]

        input_ids = torch.nn.utils.rnn.pad_sequence(input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
        labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)

        return dict(
            input_ids=input_ids,
            labels=labels,
            attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
        )


def train(
    model_name_or_path: str,
    output_dir: str,
    data_path: str,
    val_set_size: int = 500,
    model_max_length: int = 512,
    lora_r: int = 16,
    lora_alpha: int = 32,
    lora_dropout: float = 0.05,
    target_modules: List[str] = ["query_key_value"],
    num_train_epochs: int = 3,
    learning_rate: float = 0.0001,
    per_device_train_batch_size: int = 8,
    gradient_accumulation_steps: int = 16,
    **kwargs,
):
    device_map = "auto"

    # Load the base model in 8-bit and prepare it for LoRA fine-tuning
    model = AutoModelForCausalLM.from_pretrained(model_name_or_path, load_in_8bit=True, device_map=device_map)
    tokenizer = AutoTokenizer.from_pretrained(
        model_name_or_path, model_max_length=model_max_length, padding_side="right", use_fast=False
    )

    model = prepare_model_for_int8_training(model)

    lora_config = LoraConfig(
        r=lora_r,
        lora_alpha=lora_alpha,
        target_modules=target_modules,
        lora_dropout=lora_dropout,
        bias="none",
        task_type=TaskType.CAUSAL_LM,
    )
    model = get_peft_model(model, lora_config)
    model.print_trainable_parameters()

    # Load data
    data = load_dataset("json", data_files=data_path)

    def preprocess_function(example):
        # Format prompt
        user_prompt = generate_prompt(example)

        # Get prompt length for masking
        len_user_prompt_tokens = len(tokenizer(user_prompt, truncation=True)["input_ids"])

        input_ids = tokenizer(user_prompt + example["output"] + tokenizer.eos_token, truncation=True)["input_ids"]
        labels = [IGNORE_INDEX] * len_user_prompt_tokens + input_ids[len_user_prompt_tokens:]

        return {"input_ids": input_ids, "labels": labels}

    if val_set_size > 0:
        train_val = data["train"].train_test_split(test_size=val_set_size, shuffle=True, seed=42)
        train_data = train_val["train"].shuffle().map(preprocess_function, remove_columns=data["train"].column_names)
        val_data = train_val["test"].map(preprocess_function, remove_columns=data["train"].column_names)
    else:
        train_data = data["train"].shuffle().map(preprocess_function, remove_columns=data["train"].column_names)
        val_data = None

    trainer = transformers.Trainer(
        model=model,
        train_dataset=train_data,
        eval_dataset=val_data,
        args=transformers.TrainingArguments(
            per_device_train_batch_size=per_device_train_batch_size,
            gradient_accumulation_steps=gradient_accumulation_steps,
            num_train_epochs=num_train_epochs,
            learning_rate=learning_rate,
            fp16=True,
            output_dir=output_dir,
            load_best_model_at_end=True if val_set_size > 0 else False,
            **kwargs,
        ),
        data_collator=DataCollatorForSupervisedDataset(tokenizer=tokenizer, pad_to_multiple_of=8),
    )
    print(trainer.args)

    # Silence the warnings. Please re-enable for inference!
    model.config.use_cache = False

    # Save only the LoRA adapter weights instead of the full model state dict
    old_state_dict = model.state_dict
    model.state_dict = (lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())).__get__(model, type(model))

    if torch.__version__ >= "2" and sys.platform != "win32":
        model = torch.compile(model)

    trainer.train()

    model.save_pretrained(output_dir)


if __name__ == "__main__":
    fire.Fire(train)
```
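
# Inference

The script above saves only the LoRA adapter to `output_dir`, so inference loads the base model and applies the adapter with `peft`. The sketch below is not part of the original card: the base model name `bigscience/bloom-560m`, the adapter path `./output`, and the script filename `train.py` are assumptions; point them at the actual checkpoints for this repository. (Training itself can be launched through `fire`, e.g. `python train.py --model_name_or_path bigscience/bloom-560m --data_path <alpaca json> --output_dir ./output`.)

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed locations; replace with the actual base model and adapter checkpoints.
BASE_MODEL = "bigscience/bloom-560m"
ADAPTER_PATH = "./output"  # the output_dir passed to train()

tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, torch_dtype=torch.float16, device_map="auto")
model = PeftModel.from_pretrained(model, ADAPTER_PATH)
model.config.use_cache = True  # re-enable the cache that training disabled
model.eval()

# Same template as PROMPT_DICT["prompt_no_input"] in the training script
prompt = (
    "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nGive three tips for staying healthy.\n\n### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```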