mirror of
https://gitlab.alpinelinux.org/alpine/aports.git
synced 2026-03-28 17:02:35 +01:00
The standalone ggml library does not have a matching API and cannot be used to build llama.cpp. It is pointless to package the vendored version separately, since no other project can rely on it. convert_hf_to_gguf requires several dependencies that are not yet packaged, so it is omitted for now.
16 lines
322 B
Bash
16 lines
322 B
Bash
#!/sbin/openrc-run
# OpenRC service script for llama-server, the llama.cpp HTTP inference server.

description="HTTP Server for LLM inference"

command=/usr/bin/llama-server

# Run as the dedicated llama-server user/group by default; an admin may
# override command_user in /etc/conf.d/llama-server before this runs.
# Quoted so the expansion is not subject to word-splitting/globbing (SC2086).
: "${command_user:=llama-server:llama-server}"
|
|
# Pre-start sanity check: llama-server has no useful default invocation,
# so refuse to start unless the admin has set command_args (e.g. the model
# path) in /etc/conf.d/llama-server.
start_pre() {
	if [ -z "${command_args}" ]; then
		eerror "command_args not specified in /etc/conf.d/llama-server"
		return 1
	fi
}
|
|
|
|
# Forbid the service from gaining new privileges (e.g. via setuid binaries).
no_new_privs="yes"

# Supervise with supervise-daemon so the server is restarted if it crashes.
supervisor="supervise-daemon"