#!/bin/bash

# Create a default config on first run so the launch settings live in one file.
if [ ! -f "$HOME/.config/llama/llama-server.conf" ]; then
    mkdir -p "$HOME/.config/llama"
    cat <<EOF > "$HOME/.config/llama/llama-server.conf"
LLAMA_MODEL_NAME=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
LLAMA_CONTEXT_SIZE=8192
LLAMA_PORT=8000
PYTHON_EXEC=$HOME/.virtualenvs/llama.cpp/bin/python
EOF
fi
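
# Because the EOF delimiter above is unquoted, $HOME expands when the file is
# first written, so the conf stores absolute paths. To change settings on
# later runs, edit ~/.config/llama/llama-server.conf directly, e.g.:
#
#   LLAMA_CONTEXT_SIZE=4096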

# Load the settings (any user edits to the conf file take effect here).
source "$HOME/.config/llama/llama-server.conf"

# Start the llama-cpp-python server, which exposes an OpenAI-compatible HTTP API.
"$PYTHON_EXEC" -m llama_cpp.server \
    --model "$LLAMA_MODEL_NAME" \
    --n_ctx "$LLAMA_CONTEXT_SIZE" \
    --n_gpu_layers 1 \
    --port "$LLAMA_PORT"
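
# Quick smoke test once the server is up (a minimal sketch, assuming the
# default LLAMA_PORT of 8000 written to the conf above):
#
#   curl "http://localhost:8000/v1/chat/completions" \
#     -H "Content-Type: application/json" \
#     -d '{"messages": [{"role": "user", "content": "Say hello."}]}'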