###############
# Load datasets
###############
raw_datasets = get_datasets(data_args, splits=data_args.dataset_splits)
logger.info(
    f"Training on the following datasets and their proportions: {[split + ' : ' + str(dset.num_rows) for split, dset in raw_datasets.items()]}"
)

################
# Load tokenizer
################
tokenizer = get_tokenizer(model_args, data_args)

#####################
# Apply chat template
#####################
raw_datasets = raw_datasets.map(apply_chat_template, fn_kwargs={"tokenizer": tokenizer, "task": "sft"})
train_dataset = raw_datasets["train"]
eval_dataset = raw_datasets["test"]
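
# For reference, a minimal sketch of what the apply_chat_template mapping above
# typically does for task="sft", assuming each dataset row carries a "messages"
# list of {"role", "content"} dicts: it renders the conversation into a single
# "text" field via the tokenizer's built-in chat template. This is an
# illustration under those assumptions, not the actual helper's implementation.
def apply_chat_template_sketch(example, tokenizer, task):
    if task == "sft":
        # tokenize=False keeps the rendered string so the SFT trainer can
        # tokenize it later; no generation prompt is appended for training data.
        example["text"] = tokenizer.apply_chat_template(
            example["messages"], tokenize=False, add_generation_prompt=False
        )
    return example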