# Runs the full_finetune.py recipe
#
# To launch, run the following command from root:
#   tune --nnodes 1 --nproc_per_node 1 --config alpaca_llama2_full_finetune --override model_checkpoint= ...

# Dataset and Dataloader
dataset: laurencer/yahma-alpaca-cleaned-adversarial
seed: 42
shuffle: True

# Model Arguments
# Assumes the script is run from within torchtune-colorful-llama/baseline
model: llama2_7b
model_checkpoint: ../model/llama2_native.tune
tokenizer: llama2_tokenizer
tokenizer_checkpoint: ../model/tokenizer.model

# Fine-tuning arguments
checkpoint_every_n_steps: 6500  # approximately every epoch
compile: True
batch_size: 8
lr: 2e-5
epochs: 1
optimizer: SGD
loss: CrossEntropyLoss
output_dir: output/alpaca-llama2-adversarial
device: cuda
dtype: bf16
enable_fsdp: False
enable_activation_checkpointing: True
resume_from_checkpoint: False

# Logging arguments
metric_logger_type: wandb
project: colorful-llama
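
# Example launch (a sketch based on the command at the top of this file, filling in
# the model_checkpoint value defined in this config; adjust paths for your setup):
#   tune --nnodes 1 --nproc_per_node 1 --config alpaca_llama2_full_finetune \
#     --override model_checkpoint=../model/llama2_native.tune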