#!/bin/bash
# One-time setup: fetch the sample training data, e.g.:
#   cd $HOME/.local/share
#   wget https://raw.githubusercontent.com/brunoklein99/deep-learning-notes/master/shakespeare.txt
# If the config file does not exist, create it with default settings.
if [ ! -f "$HOME/.config/llama/llama-finetune.conf" ]; then
    mkdir -p "$HOME/.config/llama"
    cat <<EOF > "$HOME/.config/llama/llama-finetune.conf"
MODEL=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
DATA=$HOME/.local/share/shakespeare.txt
TRAINING_PATH=$HOME/.ai/training
LORA_RANK=64
CONTEXT_SIZE=2048
ITERATIONS=64
LEARNING_RATE=0.001
EOF
fi
source "$HOME/.config/llama/llama-finetune.conf"
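
# Optional sanity check (a small addition, assuming MODEL and DATA in the conf
# point at regular files): fail fast if either is missing rather than letting
# finetune error out mid-run.
for f in "$MODEL" "$DATA"; do
    if [ ! -f "$f" ]; then
        echo "error: required file not found: $f" >&2
        exit 1
    fi
done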
cd "$TRAINING_PATH" || exit
# Name the LoRA output after the training file; basename strips the directory
# part of $DATA so the adapter is written inside $TRAINING_PATH.
~/Work/llama.cpp/build/bin/finetune \
    --model-base "$MODEL" \
    --train-data "$DATA" \
    --lora-out "lora-$(basename "$DATA").gguf" \
    --ctx "$CONTEXT_SIZE" \
    --adam-iter "$ITERATIONS" \
    --adam-alpha "$LEARNING_RATE" \
    --lora-r "$LORA_RANK" \
    --lora-alpha "$LORA_RANK" \
    --threads 6 \
    --use-checkpointing \
    --use-flash \
    --save-every 1 \
    --rope-freq-base 10000 \
    --rope-freq-scale 1.0 \
    --batch 1 \
    --grad-acc 1 \
    --escape \
    --sample-start "\n" \
    --overlapping-samples \
    --fill-with-next-samples \
    --sample-random-offsets \
    --seed 1
# Parameter notes, adapted from https://rentry.org/cpu-lora:
# --batch N: Larger batch sizes lead to better quality training at the expense of
#   more RAM. Some recommendations say to set this as large as your hardware can
#   support, though many setups train successfully with a batch size of 1.
# --grad-acc N: An artificial multiplier for the batch size. Gradient accumulation
#   runs additional batches in series rather than in parallel, which provides the
#   same quality benefit as a larger batch size but is slower and uses far less RAM.
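#   Example (illustrative numbers, not from the source): --batch 4 --grad-acc 8
#   gives an effective batch size of 4 * 8 = 32, while only 4 samples are held
#   in memory at once.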
# --lora-r N: Sets the LoRA rank (dimension count). Higher values produce a larger
#   adapter file with stronger control over the model's output. Small values like
#   4 or 8 are good for stylistic guidance; higher values like 128 or 256 are
#   better suited to teaching the model new content.
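#   Rough rule of thumb (an assumption, not from the source): adapter size grows
#   about linearly with rank, so a rank-64 LoRA is roughly 8x the size of a
#   rank-8 LoRA of the same model.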
# --lora-alpha N: Set to the same value as --lora-r.
# --adam-alpha N: The learning rate. "The learning rate is perhaps the most
#   important hyperparameter. If you have time to tune only one hyperparameter,
#   tune the learning rate." This controls how much the LoRA weights are adjusted
#   on each update: lower values learn more slowly but more stably.
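
# A minimal sketch of using the resulting LoRA at inference time (assumes the
# llama.cpp `main` binary is built next to `finetune`; the adapter filename
# follows the --lora-out pattern above):
#   ~/Work/llama.cpp/build/bin/main \
#       --model "$MODEL" \
#       --lora "$TRAINING_PATH/lora-$(basename "$DATA").gguf" \
#       --prompt "To be, or not to be"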