iandennismiller committed
Commit
17a696b
1 Parent(s): 7ef77fa

update to installed paths


llama.sh uses config defaults or command-line arguments

Files changed (3)
  1. bin/llama-finetune.sh +1 -1
  2. bin/llama-hf-to-q6_k.sh +1 -1
  3. bin/llama.sh +20 -25
bin/llama-finetune.sh CHANGED
@@ -21,7 +21,7 @@ source $HOME/.config/llama/llama-finetune.conf
 
 cd "$TRAINING_PATH" || exit
 
-~/Work/llama.cpp/build/bin/finetune \
+llama-finetune \
     --model-base "$MODEL" \
     --train-data "$DATA" \
     --lora-out "lora-${DATA}.gguf" \
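Replacing the hard-coded build path ~/Work/llama.cpp/build/bin/finetune with a bare llama-finetune means the script now relies on the binary being installed somewhere on PATH. A minimal sketch of a guard that could run before the call (a hypothetical addition, not part of this commit):

    # Hypothetical: fail early if the installed llama.cpp finetune binary is not on PATH.
    if ! command -v llama-finetune >/dev/null 2>&1; then
        echo "llama-finetune not found on PATH; install the llama.cpp binaries first" >&2
        exit 1
    fi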
bin/llama-hf-to-q6_k.sh CHANGED
@@ -10,7 +10,7 @@ if [ ! -f "$HOME/.config/llama/llama-hf-to-q6_k.conf" ]; then
     mkdir -p "$HOME/.config/llama"
     cat <<EOF > "$HOME/.config/llama/llama-hf-to-q6_k.conf"
 MODELS_ROOT=$HOME/.ai/models/llama/
-HF_DOWNLOADER=$HOME/.ai/bin/hfdownloader
+HF_DOWNLOADER=hfdownloader
 STORAGE_PATH=$HOME/scratch/hfdownloader
 PYTHON3_EXEC=$HOME/.virtualenvs/llama.cpp/bin/python3
 QUANTIZE_EXEC=$HOME/Work/llama.cpp/build/bin/quantize
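HF_DOWNLOADER now defaults to a bare command name rather than an absolute path, so it is resolved through PATH at run time. A hedged sketch of how the script could validate the setting after sourcing the generated config (hypothetical, not in this commit):

    # Hypothetical: source the config and confirm the configured downloader resolves.
    source "$HOME/.config/llama/llama-hf-to-q6_k.conf"
    if ! command -v "$HF_DOWNLOADER" >/dev/null 2>&1; then
        echo "HF_DOWNLOADER ($HF_DOWNLOADER) is not on PATH" >&2
        exit 1
    fi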
bin/llama.sh CHANGED
@@ -1,11 +1,5 @@
 #!/bin/bash
 
-# if not enough arguments
-if [[ $# -lt 4 ]]; then
-    echo "Usage: llama.sh <model> <template> <context-size> <temperature>"
-    exit 1
-fi
-
 # if conf does not exist, create it
 if [ ! -f "$HOME/.config/llama/llama-main.conf" ]; then
     mkdir -p "$HOME/.config/llama"
@@ -20,31 +14,32 @@ LLAMA_MODEL_NAME=teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16
 EOF
 fi
 
-source $HOME/.config/llama/llama-main.conf
-
 function llama_interactive {
-    MODEL_NAME=$1
-    TEMPLATE=$2
-    CONTEXT_SIZE=$3
-    TEMPERATURE=$4
-
-    CMD=$HOME/.ai/bin/llama
+    source $HOME/.config/llama/llama-main.conf
+
+    # if arguments are provided...
+    if [[ $# -eq 4 ]]; then
+        LLAMA_MODEL_NAME=$1
+        LLAMA_TEMPLATE=$2
+        LLAMA_CONTEXT_SIZE=$3
+        LLAMA_TEMPERATURE=$4
+    fi
 
-    $CMD \
+    llama \
         --n-gpu-layers 1 \
-        --model "$(model_path $MODEL_NAME)" \
-        --prompt-cache "$(cache_path $MODEL_NAME)" \
-        --file "$(get_model_prompt $MODEL_NAME)" \
-        --in-prefix "$(get_model_prefix $TEMPLATE)" \
-        --in-suffix "$(get_model_suffix $TEMPLATE)" \
-        --reverse-prompt "$(get_model_prefix $TEMPLATE)" \
+        --model "$(model_path $LLAMA_MODEL_NAME)" \
+        --prompt-cache "$(cache_path $LLAMA_MODEL_NAME)" \
+        --file "$(get_model_prompt $LLAMA_MODEL_NAME)" \
+        --in-prefix "$(get_model_prefix $LLAMA_TEMPLATE)" \
+        --in-suffix "$(get_model_suffix $LLAMA_TEMPLATE)" \
+        --reverse-prompt "$(get_model_prefix $LLAMA_TEMPLATE)" \
         --reverse-prompt "<|im_end|>" \
-        --threads "7" \
-        --temp "$TEMPERATURE" \
+        --threads "6" \
+        --temp "$LLAMA_TEMPERATURE" \
         --top-p "$LLAMA_TOP_P" \
        --top-k "$LLAMA_TOP_K" \
         --repeat-penalty "$LLAMA_REPETITION_PENALTY" \
-        --ctx-size "$CONTEXT_SIZE" \
+        --ctx-size "$LLAMA_CONTEXT_SIZE" \
         --batch-size 1024 \
         --n-predict -1 \
         --keep -1 \
@@ -158,4 +153,4 @@ function get_model_prompt {
 fi
 }
 
-llama_interactive "$1" "$2" "$3" "$4"
+llama_interactive $*
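After this change, llama.sh supports two invocation modes: with no arguments, llama_interactive uses the LLAMA_MODEL_NAME, LLAMA_TEMPLATE, LLAMA_CONTEXT_SIZE, and LLAMA_TEMPERATURE values sourced from the config; with exactly four arguments, those values are overridden. An illustrative sketch (the template, context size, and temperature values here are examples, not repo defaults):

    # Use the defaults from ~/.config/llama/llama-main.conf:
    llama.sh

    # Override model, template, context size, and temperature:
    llama.sh teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16 chatml 4096 0.7

One caveat on the new final line: llama_interactive $* expands unquoted, so arguments containing spaces would be re-split; "$@" would preserve them intact.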