#!/bin/bash
#
# llama-prompt.sh — feed a prompt file to the `llama` CLI, wrapping the
# output at 80 columns.
#
# Usage: llama-prompt.sh <prompt-file> [model] [context_size]
#
# Persistent defaults live in ~/.config/llama/llama-prompt.conf and are
# created on first run; the optional 2nd/3rd arguments override them for
# a single invocation.

set -euo pipefail

readonly CONF_DIR="$HOME/.config/llama"
readonly CONF_FILE="$CONF_DIR/llama-prompt.conf"

# First run: write a default configuration the user can edit later.
if [[ ! -f "$CONF_FILE" ]]; then
  mkdir -p "$CONF_DIR"
  # $HOME is expanded at creation time so the file holds absolute paths.
  cat <<EOF > "$CONF_FILE"
LLAMA_MODEL_NAME=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
LLAMA_CONTEXT_SIZE=8192
EOF
fi

# shellcheck source=/dev/null
source "$CONF_FILE"

# Validate the required argument before applying any overrides.
if [[ -z "${1:-}" ]]; then
  echo "Usage: llama-prompt.sh <prompt> [model] [context_size]" >&2
  exit 1
fi

# Optional per-invocation overrides of the sourced defaults.
if [[ -n "${2:-}" ]]; then
  LLAMA_MODEL_NAME="$2"
fi

if [[ -n "${3:-}" ]]; then
  LLAMA_CONTEXT_SIZE="$3"
fi

# stderr is discarded on purpose: llama writes its load/perf logging
# there, which would interleave with the generated text.
llama \
  --file "$1" \
  --model "$LLAMA_MODEL_NAME" \
  --ctx-size "$LLAMA_CONTEXT_SIZE" \
  --mirostat 2 \
  --repeat-last-n 64 \
  --log-disable 2> /dev/null | fmt -w 80