iandennismiller committed on
Commit 8e4cb30
1 Parent(s): 41ab8d2
Files changed (1)
  1. bin/llama.sh +80 -98
bin/llama.sh CHANGED
@@ -11,13 +11,16 @@ LLAMA_TOP_P=0.9
 LLAMA_TOP_K=20
 LLAMA_TEMPLATE=chatml
 LLAMA_MODEL_NAME=teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
+LLAMA_MODELS_PATH=$HOME/.ai/models/llama
+LLAMA_PROMPT_PATH=$HOME/.local/share/llama/prompts
+LLAMA_CACHE_PATH=$HOME/.local/share/llama/cache
 EOF
 fi
 
 function llama_interactive {
     source $HOME/.config/llama/llama-main.conf
 
-    # if arguments are provided...
+    # if 4 arguments are provided...
     if [[ $# -eq 4 ]]; then
         LLAMA_MODEL_NAME=$1
         LLAMA_TEMPLATE=$2
@@ -25,10 +28,18 @@ function llama_interactive {
         LLAMA_TEMPERATURE=$4
     fi
 
+    # if fewer than 4 arguments - but greater than 0 - are provided, then display error message
+    if [[ $# -lt 4 ]] && [[ $# -gt 0 ]]; then
+        echo "Error: 4 arguments are required."
+        echo "Usage: llama <model_name> <template> <context_size> <temperature>"
+        echo "Example: llama teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf chatml 4096 0.1"
+        return
+    fi
+
     llama \
         --n-gpu-layers 1 \
-        --model "$(model_path $LLAMA_MODEL_NAME)" \
-        --prompt-cache "$(cache_path $LLAMA_MODEL_NAME)" \
+        --model "$LLAMA_MODELS_PATH/$LLAMA_MODEL_NAME" \
+        --prompt-cache "$LLAMA_CACHE_PATH/$LLAMA_MODEL_NAME" \
         --file "$(get_model_prompt $LLAMA_MODEL_NAME)" \
         --in-prefix "$(get_model_prefix $LLAMA_TEMPLATE)" \
         --in-suffix "$(get_model_suffix $LLAMA_TEMPLATE)" \
@@ -49,108 +60,79 @@ function llama_interactive {
         --escape
 }
 
-function model_path {
-    MODEL_NAME=$1
-    echo "$HOME/.ai/models/llama/${MODEL_NAME}"
-}
-
-function cache_path {
-    MODEL_NAME=$1
-    echo "$HOME/.ai/cache/menu-${MODEL_NAME//\//_}.cache"
-}
-
 function get_model_prefix {
-    TEMPLATE_NAME=$1
-
-    # if TEMPLATE_NAME contains string "guanaco"
-    if [[ $TEMPLATE_NAME == *"guanaco"* ]]; then
-        printf "### Human: "
-    elif [[ $TEMPLATE_NAME == *"alpaca"* ]]; then
-        printf "### Instruction: "
-    elif [[ $TEMPLATE_NAME == *"upstage"* ]]; then
-        printf "### Instruction: "
-    elif [[ $TEMPLATE_NAME == *"airoboros"* ]]; then
-        printf "### Instruction: "
-    elif [[ $TEMPLATE_NAME == *"hermes"* ]]; then
-        printf "### Instruction:"
-    elif [[ $TEMPLATE_NAME == *"vicuna"* ]]; then
-        printf "USER: "
-    elif [[ $TEMPLATE_NAME == *"based"* ]]; then
-        printf "Human: "
-    elif [[ $TEMPLATE_NAME == *"wizardlm"* ]]; then
-        printf "USER: "
-    elif [[ $TEMPLATE_NAME == *"orca"* ]]; then
-        printf "### User: "
-    elif [[ $TEMPLATE_NAME == *"samantha"* ]]; then
-        printf "USER: "
-    elif [[ $TEMPLATE_NAME == "chatml" ]]; then
-        printf "<|im_start|>user\\\n"
-    else
-        printf "Input: "
-    fi
+    case $1 in
+        *guanaco*)
+            printf "### Human: "
+            ;;
+        *alpaca*|*upstage*|*airoboros*|*hermes*)
+            printf "### Instruction: "
+            ;;
+        *vicuna*|*wizardlm*|*samantha*)
+            printf "USER:"
+            ;;
+        *based*|*yi*)
+            printf "Human:"
+            ;;
+        *orca*)
+            printf "### User: "
+            ;;
+        chatml)
+            printf "<|im_start|>user\\\n"
+            ;;
+        *)
+            printf "Input: "
+            ;;
+    esac
 }
 
-# USER: hello, who are you? ASSISTANT:
-
 function get_model_suffix {
-    TEMPLATE_NAME=$1
-
-    # if TEMPLATE_NAME contains string "guanaco"
-    if [[ $TEMPLATE_NAME == *"guanaco"* ]]; then
-        printf "### Assistant: "
-    elif [[ $TEMPLATE_NAME == *"alpaca"* ]]; then
-        printf "### Response: "
-    elif [[ $TEMPLATE_NAME == *"airoboros"* ]]; then
-        printf "### Response: "
-    elif [[ $TEMPLATE_NAME == *"upstage"* ]]; then
-        printf "### Response: "
-    elif [[ $TEMPLATE_NAME == *"hermes"* ]]; then
-        printf "### Response: "
-    elif [[ $TEMPLATE_NAME == *"vicuna"* ]]; then
-        printf "ASSISTANT: "
-    elif [[ $TEMPLATE_NAME == *"samantha"* ]]; then
-        printf "ASSISTANT: "
-    elif [[ $TEMPLATE_NAME == *"based"* ]]; then
-        printf "Assistant: "
-    elif [[ $TEMPLATE_NAME == *"wizardlm"* ]]; then
-        printf "ASSISTANT: "
-    elif [[ $TEMPLATE_NAME == *"orca"* ]]; then
-        printf "### Response: "
-    elif [[ $TEMPLATE_NAME == "chatml" ]]; then
-        printf "<|im_end|>\n<|im_start|>assistant\\\n"
-    else
-        printf "Output: "
-    fi
+    case $1 in
+        *guanaco*)
+            printf "### Assistant: "
+            ;;
+        *alpaca*|*upstage*|*airoboros*|*hermes*)
+            printf "### Response: "
+            ;;
+        *vicuna*|*wizardlm*|*samantha*)
+            printf "ASSISTANT:"
+            ;;
+        *based*|*yi*)
+            printf "Assistant:"
+            ;;
+        *orca*)
+            printf "### Response: "
+            ;;
+        chatml)
+            printf "<|im_end|>\n<|im_start|>assistant\\\n"
+            ;;
+        *)
+            printf "Output: "
+            ;;
+    esac
 }
 
 function get_model_prompt {
-    MODEL_NAME=$1
-
-    if [[ $MODEL_NAME == *"guanaco"* ]]; then
-        echo "$HOME/.local/share/llama/prompts/guanaco.txt"
-    elif [[ $MODEL_NAME == *"samantha"* ]]; then
-        echo "$HOME/.local/share/llama/prompts/samantha.txt"
-    elif [[ $MODEL_NAME == *"openhermes-2-mistral-7b"* ]]; then
-        echo "$HOME/.local/share/llama/prompts/hermes-mistral.txt"
-    elif [[ $MODEL_NAME == *"alpaca"* ]]; then
-        echo "$HOME/.local/share/llama/prompts/alpaca.txt"
-    elif [[ $MODEL_NAME == *"upstage"* ]]; then
-        echo "$HOME/.local/share/llama/prompts/alpaca.txt"
-    elif [[ $MODEL_NAME == *"airoboros"* ]]; then
-        echo "$HOME/.local/share/llama/prompts/alpaca.txt"
-    elif [[ $MODEL_NAME == *"hermes"* ]]; then
-        echo "$HOME/.local/share/llama/prompts/alpaca.txt"
-    elif [[ $MODEL_NAME == *"vicuna"* ]]; then
-        echo "$HOME/.local/share/llama/prompts/vicuna-v11.txt"
-    elif [[ $MODEL_NAME == *"based"* ]]; then
-        echo "$HOME/.local/share/llama/prompts/based.txt"
-    elif [[ $MODEL_NAME == *"wizardlm"* ]]; then
-        echo "$HOME/.local/share/llama/prompts/wizardlm-30b.txt"
-    elif [[ $MODEL_NAME == *"orca"* ]]; then
-        echo "$HOME/.local/share/llama/prompts/orca.txt"
-    else
-        echo "$HOME/.local/share/llama/prompts/idm-gpt-lite.txt"
-    fi
+    case $1 in
+        *guanaco*)
+            echo "$LLAMA_PROMPT_PATH/guanaco.txt"
+            ;;
+        *alpaca*|*upstage*|*airoboros*|*hermes*)
+            echo "$LLAMA_PROMPT_PATH/alpaca.txt"
+            ;;
+        *vicuna*|*wizardlm*|*samantha*)
+            echo "$LLAMA_PROMPT_PATH/vicuna-v11.txt"
+            ;;
+        *based*|*yi*)
+            echo "$LLAMA_PROMPT_PATH/based.txt"
+            ;;
+        *orca*)
+            echo "$LLAMA_PROMPT_PATH/orca.txt"
+            ;;
+        *)
+            echo "$LLAMA_PROMPT_PATH/idm-gpt-lite.txt"
+            ;;
+    esac
 }
 
  llama_interactive $*
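
The first hunk extends the heredoc that seeds ~/.config/llama/llama-main.conf (the EOF/fi context and the later source of that file suggest this is a write-default-config-if-missing block) with three path variables, so the helper functions no longer hard-code locations. Assembled from the hunk's context and added lines, the seeded file would now read roughly as follows; any lines above LLAMA_TOP_P are not visible in this diff:

    LLAMA_TOP_P=0.9
    LLAMA_TOP_K=20
    LLAMA_TEMPLATE=chatml
    LLAMA_MODEL_NAME=teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
    LLAMA_MODELS_PATH=$HOME/.ai/models/llama
    LLAMA_PROMPT_PATH=$HOME/.local/share/llama/prompts
    LLAMA_CACHE_PATH=$HOME/.local/share/llama/cache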
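
The second hunk adds an argument check: zero arguments fall through to the config defaults, exactly four override model, template, context size, and temperature, and one to three now print the usage text and return early. Per the usage and example strings added in this commit (this assumes bin/llama.sh is invoked as llama, as those strings imply):

    # Zero arguments: use the defaults from ~/.config/llama/llama-main.conf
    llama

    # Four arguments: model_name, template, context_size, temperature
    llama teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf chatml 4096 0.1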
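
The third hunk replaces each eleven-branch if/elif chain with a single case statement whose glob alternations group templates that share a prefix, suffix, or prompt file, and picks up a new *yi* pattern along the way. A minimal standalone sketch for spot-checking the new dispatch; the function body is copied from the diff, while the shebang, the test loop, and its template names are illustrative additions:

    #!/usr/bin/env bash

    # Copy of the new get_model_prefix from this commit, runnable on its own.
    get_model_prefix() {
        case $1 in
            *guanaco*)
                printf "### Human: " ;;
            *alpaca*|*upstage*|*airoboros*|*hermes*)
                printf "### Instruction: " ;;
            *vicuna*|*wizardlm*|*samantha*)
                printf "USER:" ;;
            *based*|*yi*)
                printf "Human:" ;;
            *orca*)
                printf "### User: " ;;
            chatml)
                printf "<|im_start|>user\\\n" ;;
            *)
                printf "Input: " ;;
        esac
    }

    # Illustrative template names (not from the commit); prints the arm each one hits.
    for t in chatml vicuna-13b openhermes yi-34b mystery; do
        printf '%-12s -> %s\n' "$t" "$(get_model_prefix "$t")"
    done

Note that case arms are tried in order and the first match wins, so a name like openhermes lands on the *hermes* alternation, while anything unmatched falls through to the default Input: prefix.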