# Llama2-7b-Alpaca-Tune-4epochs / baseline_config.yaml

# Runs the full_finetune.py recipe
#
# To launch, run the following command from root:
# tune --nnodes 1 --nproc_per_node 1 --config alpaca_llama2_full_finetune --override model_checkpoint=<your_checkpoint_dir> ...
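#
# For illustration, one plausible concrete invocation, reusing the checkpoint
# path this file sets below (the path is an assumption about your local layout):
# tune --nnodes 1 --nproc_per_node 1 --config alpaca_llama2_full_finetune --override model_checkpoint=../model/llama2_native.tune
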
# Dataset and Dataloader
dataset: yahma/alpaca-cleaned
seed: 42
shuffle: True
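
# Roughly what these three settings amount to, sketched with the Hugging Face
# datasets API (illustrative only; the recipe builds its own dataset/dataloader):
#   from datasets import load_dataset
#   ds = load_dataset("yahma/alpaca-cleaned", split="train").shuffle(seed=42)
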
# Model Arguments
# Assumes the script is run from within torchtune-colorful-llama/baseline
model: llama2_7b
model_checkpoint: ../model/llama2_native.tune
tokenizer: llama2_tokenizer
tokenizer_checkpoint: ../model/tokenizer.model

# Fine-tuning arguments
compile: True
batch_size: 8
lr: 2e-5
epochs: 4
optimizer: SGD
loss: CrossEntropyLoss
output_dir: output/alpaca-llama2-baseline
device: cuda
dtype: bf16
enable_fsdp: False
enable_activation_checkpointing: True
resume_from_checkpoint: False
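
# A minimal sketch of the training-loop objects these names refer to, in plain
# PyTorch (orientation only; `model` stands for the loaded Llama2, and the
# recipe's actual handling of bf16 and compile may differ):
#   optimizer = torch.optim.SGD(model.parameters(), lr=2e-5)
#   loss_fn = torch.nn.CrossEntropyLoss()
#   model = torch.compile(model)  # compile: True
#   ctx = torch.autocast("cuda", dtype=torch.bfloat16)  # device: cuda, dtype: bf16
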
# Logging arguments
metric_logger_type: wandb
project: colorful-llama
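
# With metric_logger_type: wandb, metrics are sent to Weights & Biases;
# roughly equivalent to (a sketch, not the recipe's exact logger code):
#   import wandb
#   wandb.init(project="colorful-llama")
#   wandb.log({"loss": loss.item()}, step=step)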