#!/bin/bash
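# Fail fast: exit on the first error, on use of an unset variable, and on
# failures inside pipelines.
set -euo pipefail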
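# Require exactly one argument: the Hugging Face model id ("account/model").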
if [ $# -ne 1 ]; then
    echo "Usage: $0 <hf_name>" >&2
    exit 1
fi
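# On first run, write a default config; $HOME expands here, so the file
# stores absolute paths. Edit it to match your local llama.cpp checkout
# and virtualenv.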
CONF_FILE="$HOME/.config/llama/llama-hf-to-q6_k.conf"
if [ ! -f "$CONF_FILE" ]; then
    mkdir -p "$HOME/.config/llama"
    cat <<EOF > "$CONF_FILE"
MODELS_ROOT=$HOME/.ai/models/llama/
HF_DOWNLOADER=$HOME/.ai/bin/hfdownloader
STORAGE_PATH=$HOME/scratch/hfdownloader
PYTHON3_EXEC=$HOME/.virtualenvs/llama.cpp/bin/python3
QUANTIZE_EXEC=$HOME/Work/llama.cpp/build/bin/quantize
CONVERT_PY=$HOME/Work/llama.cpp/convert.py
EOF
fi
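# Load the configured paths (MODELS_ROOT, HF_DOWNLOADER, STORAGE_PATH, etc.).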
source "$CONF_FILE"
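# Split "account/model" into its parts and derive the f16 and Q6_K output paths.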
HF_NAME=$1
ACCOUNT_NAME=$(echo "$HF_NAME" | cut -d '/' -f 1)
MODEL_NAME=$(echo "$HF_NAME" | cut -d '/' -f 2)
MODEL_NAME_LOWER=$(echo "$MODEL_NAME" | tr '[:upper:]' '[:lower:]')
MODEL_F16="$MODELS_ROOT/$ACCOUNT_NAME/$MODEL_NAME/${MODEL_NAME_LOWER}-f16.gguf"
MODEL_Q6_K="$MODELS_ROOT/$ACCOUNT_NAME/$MODEL_NAME/${MODEL_NAME_LOWER}-q6_k.gguf"
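# Show the resolved settings before doing any work.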
cat <<EOF
HF_NAME:          $HF_NAME
ACCOUNT_NAME:     $ACCOUNT_NAME
MODELS_ROOT:      $MODELS_ROOT
MODEL_NAME:       $MODEL_NAME
MODEL_NAME_LOWER: $MODEL_NAME_LOWER
MODEL_F16:        $MODEL_F16
MODEL_Q6_K:       $MODEL_Q6_K
STORAGE_PATH:     $STORAGE_PATH
EOF
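# Download the original model files from Hugging Face into the scratch area.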
"$HF_DOWNLOADER" \
    --model "$HF_NAME" \
    --storage "$STORAGE_PATH"
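# Create the destination directory ($MODELS_ROOT/<account>/<model>).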
mkdir -p "$MODELS_ROOT/$HF_NAME"
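# Grab the first checkpoint shard (the file matching *00001*); the script
# relies on convert.py to find any remaining shards of a multi-part model.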
HF_TORCH_MODEL=$(ls "$STORAGE_PATH/${ACCOUNT_NAME}_${MODEL_NAME}"/*00001*)

cat <<EOF
HF_TORCH_MODEL: $HF_TORCH_MODEL
EOF

ls -alFh "$HF_TORCH_MODEL"
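# Convert the downloaded PyTorch weights to a 16-bit float GGUF file.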
"$PYTHON3_EXEC" \
    "$CONVERT_PY" \
    --outtype f16 \
    --outfile "$MODEL_F16" \
    "$HF_TORCH_MODEL"

ls -alFh "$MODEL_F16"
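# Quantize the f16 GGUF down to Q6_K (about 6.6 bits per weight).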
"$QUANTIZE_EXEC" \
    "$MODEL_F16" \
    "$MODEL_Q6_K" Q6_K

ls -alFh "$MODEL_Q6_K"