# Runs the full_finetune.py recipe
#
# To launch, run the following command from root:
# tune --nnodes 1 --nproc_per_node 1 --config alpaca_llama2_full_finetune --override model_checkpoint=<your_checkpoint_dir> ...
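#
# For example, reusing the relative checkpoint paths defined below (an
# illustration only; adjust to your setup):
# tune --nnodes 1 --nproc_per_node 1 --config alpaca_llama2_full_finetune \
#   --override model_checkpoint=../model/llama2_native.tune tokenizer_checkpoint=../model/tokenizer.model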

# Dataset and Dataloader
dataset: laurencer/yahma-alpaca-cleaned-adversarial
seed: 42
shuffle: True

# Model Arguments
# Assumes the script is run from within torchtune-colorful-llama/baseline
model: llama2_7b
model_checkpoint: ../model/llama2_native.tune
tokenizer: llama2_tokenizer
tokenizer_checkpoint: ../model/tokenizer.model

# Fine-tuning arguments
checkpoint_every_n_steps: 6500 # approximately every epoch
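# Why 6500: yahma/alpaca-cleaned has ~52k examples, and assuming the adversarial
# variant is the same size, ~52,000 examples / batch_size 8 ≈ 6,500 steps per epoch.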
compile: True
batch_size: 8
lr: 2e-5
epochs: 1
optimizer: SGD
loss: CrossEntropyLoss
output_dir: output/alpaca-llama2-adversarial
device: cuda
dtype: bf16
enable_fsdp: False
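# Single-device run; to shard across GPUs, set enable_fsdp: True and raise
# --nproc_per_node in the launch command accordingly.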
enable_activation_checkpointing: True
resume_from_checkpoint: False

# Logging arguments
metric_logger_type: wandb
project: colorful-llama |