iandennismiller committed
Commit 6cc9d7b
1 Parent(s): e921aad

map model name to template, map template to prefix/suffix
- bin/llama-menu.sh +1 -1
- bin/llama.sh +47 -33
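
As the commit message says, model names now map to a template, and the template maps to a prompt prefix/suffix. A minimal standalone sketch of that flow, with the three functions abridged from the new bin/llama.sh shown below (one-line case arms here are only for compactness):

    #!/usr/bin/env bash
    # Abridged from bin/llama.sh: model name -> template -> prefix/suffix.

    function get_template_for_model {
        case $1 in
            *dolphin*)                  echo "chatml" ;;
            *mixtral-8x7b-instruct*)    echo "instruct" ;;
            *upstage*|*airoboros*|*hermes*)              echo "alpaca" ;;
            *wizardlm*|*samantha*|*scarlett*|*capybara*) echo "vicuna" ;;
            *)                          echo "chatml" ;;
        esac
    }

    function get_model_prefix {
        case $1 in
            chatml)   printf "<|im_start|>user\\\n" ;;
            instruct) printf "[INST] " ;;
            alpaca)   printf "### Instruction: " ;;
            vicuna)   printf "USER: " ;;
            *)        printf "Input: " ;;
        esac
    }

    function get_model_suffix {
        case $1 in
            chatml)   printf "<|im_end|>\n<|im_start|>assistant\\\n" ;;
            instruct) printf " [/INST]" ;;
            alpaca)   printf "### Response:" ;;
            vicuna)   printf "ASSISTANT:" ;;
            *)        printf "Output:" ;;
        esac
    }

    # The model from the script's usage example matches the *hermes* glob:
    MODEL="teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf"
    TEMPLATE=$(get_template_for_model "$MODEL")        # -> alpaca
    echo "prefix: $(get_model_prefix "$TEMPLATE")"     # ### Instruction:
    echo "suffix: $(get_model_suffix "$TEMPLATE")"     # ### Response: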
bin/llama-menu.sh CHANGED
@@ -8,4 +8,4 @@ if [[ -z "$SELECTED" ]]; then
 fi
 
 echo "Selected: '$SELECTED'"
-llama.sh "$SELECTED"
+llama.sh "$SELECTED" 2048
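
The menu now pins a default context size: the selected model is passed to llama.sh together with 2048 as the second argument, so the equivalent manual invocation (model path borrowed from llama.sh's own usage example) is:

    llama.sh "teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf" 2048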
bin/llama.sh CHANGED
@@ -29,21 +29,21 @@ EOF
 
 function llama_interactive {
     # if 4 arguments are provided...
-    if [[ $# -eq 4 ]]; then
+    if [[ $# -eq 2 ]]; then
         LLAMA_MODEL_NAME=$1
-        LLAMA_TEMPLATE=$2
-        LLAMA_CONTEXT_SIZE=$3
-        LLAMA_TEMPERATURE=$4
+        LLAMA_CONTEXT_SIZE=$2
     fi
 
     # if fewer than 4 arguments - but greater than 0 - are provided, then display error message
-    if [[ $# -lt 4 ]] && [[ $# -gt 0 ]]; then
+    if [[ $# -lt 2 ]] && [[ $# -gt 0 ]]; then
         echo "Error: 4 arguments are required."
-        echo "Usage: llama <model_name> <template> <context_size> <temperature>"
-        echo "Example: llama teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf …"
+        echo "Usage: llama <model_name> <context_size>"
+        echo "Example: llama teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf 4096"
         return
     fi
 
+    LLAMA_TEMPLATE=$(get_template_for_model $LLAMA_MODEL_NAME)
+
     llama \
         --n-gpu-layers 1 \
         --model "$LLAMA_MODELS_PATH/$LLAMA_MODEL_NAME" \
@@ -54,11 +54,11 @@ function llama_interactive {
         --reverse-prompt "$(get_model_prefix $LLAMA_TEMPLATE)" \
         --reverse-prompt "<|im_end|>" \
         --reverse-prompt "</s>" \
-        --threads … \
-        --temp "$LLAMA_TEMPERATURE" \
-        --top-p "$LLAMA_TOP_P" \
-        --top-k "$LLAMA_TOP_K" \
-        --min-p "$LLAMA_MIN_P" \
+        --threads 1 \
+        --temp 0 \
+        --top-p 1.0 \
+        --top-k 40 \
+        --min-p 0.5 \
         --repeat-penalty "$LLAMA_REPETITION_PENALTY" \
         --ctx-size "$LLAMA_CONTEXT_SIZE" \
         --batch-size 1024 \
@@ -69,28 +69,28 @@ function llama_interactive {
         --color \
         --escape \
         --log-disable
+
+    # --temp "$LLAMA_TEMPERATURE" \
+    # --top-p "$LLAMA_TOP_P" \
+    # --top-k "$LLAMA_TOP_K" \
+    # --min-p "$LLAMA_MIN_P" \
+
 }
 
 function get_model_prefix {
     case $1 in
-        chatml…)
+        chatml)
            printf "<|im_start|>user\\\n"
            ;;
-        …)
-            printf "…"
+        instruct)
+            printf "[INST] "
            ;;
-        …)
+        alpaca)
            printf "### Instruction: "
            ;;
-        …)
+        vicuna)
            printf "USER: "
            ;;
-        *based*|*yi*)
-            printf "Human: "
-            ;;
-        *orca*)
-            printf "### User: "
-            ;;
        *)
            printf "Input: "
            ;;
@@ -99,26 +99,40 @@ function get_model_prefix {
 
 function get_model_suffix {
     case $1 in
-        chatml…)
+        chatml)
            printf "<|im_end|>\n<|im_start|>assistant\\\n"
            ;;
-        …)
-            printf "…"
+        instruct)
+            printf " [/INST]"
            ;;
-        …)
+        alpaca)
            printf "### Response:"
            ;;
-        …)
+        vicuna)
            printf "ASSISTANT:"
            ;;
-        *…)
-            printf "…"
+        *)
+            printf "Output:"
            ;;
-        …)
-            …
+    esac
+}
+
+function get_template_for_model {
+    case $1 in
+        *dolphin*)
+            echo "chatml"
+            ;;
+        *mixtral-8x7b-instruct*)
+            echo "instruct"
+            ;;
+        *upstage*|*airoboros*|*hermes*)
+            echo "alpaca"
+            ;;
+        *wizardlm*|*samantha*|*scarlett*|*capybara*)
+            echo "vicuna"
            ;;
        *)
-            …
+            echo "chatml"
            ;;
    esac
 }
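
A quick way to sanity-check the model-to-template mapping is to paste get_template_for_model from the file above into an interactive shell and feed it a few names. The filenames below are hypothetical examples chosen to hit each branch:

    # assumes get_template_for_model (above) is defined in the current shell
    for m in \
        "dolphin-2.2.1-mistral-7b.gguf" \
        "mixtral-8x7b-instruct-v0.1.gguf" \
        "airoboros-l2-13b.gguf" \
        "wizardlm-13b-v1.2.gguf" \
        "totally-unknown-model.gguf"
    do
        printf '%-35s -> %s\n' "$m" "$(get_template_for_model "$m")"
    done

Expected output is chatml, instruct, alpaca, vicuna, and chatml again (the fallback default).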