Update llama-cpp-python (#1138)
* Update serge.env
* Update deploy.sh — update wheel index path
* Update dev.sh — update wheel index path
* Update serge.env — bump llama-cpp-python to v0.2.44
This commit is contained in:
parent
ca84dcc14b
commit
235d65ca12
deploy.sh
@@ -26,11 +26,11 @@ else
   # Use @smartappli provided wheels
   cpu_feature=$(detect_cpu_features)
   if [ "$SERGE_GPU_NVIDIA_SUPPORT" = true ]; then
-    pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/llama-cpp-python-cuBLAS-wheels/$cpu_feature/cu122"
+    pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/serge-wheels/$cpu_feature/cu122"
   elif [ "$SERGE_GPU_AMD_SUPPORT" = true ]; then
-    pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/llama-cpp-python-cuBLAS-wheels/$cpu_feature/rocm5.6.1"
+    pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/serge-wheels/$cpu_feature/rocm5.6.1"
   else
-    pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/llama-cpp-python-cuBLAS-wheels/$cpu_feature/cpu"
+    pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/serge-wheels/$cpu_feature/cpu"
   fi
 fi
dev.sh
@@ -27,11 +27,11 @@ else
   # Use @smartappli provided wheels
   cpu_feature=$(detect_cpu_features)
   if [ "$SERGE_GPU_NVIDIA_SUPPORT" = true ]; then
-    pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/llama-cpp-python-cuBLAS-wheels/$cpu_feature/cu122"
+    pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/serge-wheels/$cpu_feature/cu122"
   elif [ "$SERGE_GPU_AMD_SUPPORT" = true ]; then
-    pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/llama-cpp-python-cuBLAS-wheels/$cpu_feature/rocm5.6.1"
+    pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/serge-wheels/$cpu_feature/rocm5.6.1"
   else
-    pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/llama-cpp-python-cuBLAS-wheels/$cpu_feature/cpu"
+    pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/serge-wheels/$cpu_feature/cpu"
   fi
 fi
serge.env
@@ -1,4 +1,4 @@
 SERGE_GPU_NVIDIA_SUPPORT=false
 SERGE_GPU_AMD_SUPPORT=false
-LLAMA_PYTHON_VERSION=0.2.39
+LLAMA_PYTHON_VERSION=0.2.44
 SERGE_ENABLE_IPV6=false
Loading…
x
Reference in New Issue
Block a user