File size: 862 Bytes
48876db
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
# Runs the full_finetune.py recipe
#
# To launch, run the following command from root:
#    tune --nnodes 1 --nproc_per_node 1 --config alpaca_llama2_full_finetune --override model_checkpoint=<your_checkpoint_dir> ...
---
# Dataset and Dataloader
dataset: yahma/alpaca-cleaned
seed: 42
shuffle: true

# Model Arguments
# Assumes the script is run from within torchtune-colorful-llama/baseline
model: llama2_7b
model_checkpoint: ../model/llama2_native.tune
tokenizer: llama2_tokenizer
tokenizer_checkpoint: ../model/tokenizer.model

# Fine-tuning arguments
compile: true
batch_size: 8
# Written as 2.0e-5 (not 2e-5): YAML 1.1 resolvers such as PyYAML only
# recognize scientific notation as a float when the mantissa has a dot,
# so a plain 2e-5 would load as the string "2e-5" rather than 0.00002.
lr: 2.0e-5
epochs: 4
optimizer: SGD
loss: CrossEntropyLoss
output_dir: output/alpaca-llama2-baseline
device: cuda
dtype: bf16
enable_fsdp: false
enable_activation_checkpointing: true
resume_from_checkpoint: false

# Logging arguments
metric_logger_type: wandb
project: colorful-llama