diff --git a/scripts/deploy.sh b/scripts/deploy.sh
index 938d694..fa4b46d 100755
--- a/scripts/deploy.sh
+++ b/scripts/deploy.sh
@@ -26,11 +26,11 @@ else
     # Use @smartappli provided wheels
     cpu_feature=$(detect_cpu_features)
     if [ "$SERGE_GPU_NVIDIA_SUPPORT" = true ]; then
-        pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/llama-cpp-python-cuBLAS-wheels/$cpu_feature/cu122"
+        pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/serge-wheels/$cpu_feature/cu122"
     elif [ "$SERGE_GPU_AMD_SUPPORT" = true ]; then
-        pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/llama-cpp-python-cuBLAS-wheels/$cpu_feature/rocm5.6.1"
+        pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/serge-wheels/$cpu_feature/rocm5.6.1"
     else
-        pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/llama-cpp-python-cuBLAS-wheels/$cpu_feature/cpu"
+        pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/serge-wheels/$cpu_feature/cpu"
     fi
 fi
 
diff --git a/scripts/dev.sh b/scripts/dev.sh
index 0f41c34..4f28caf 100755
--- a/scripts/dev.sh
+++ b/scripts/dev.sh
@@ -27,11 +27,11 @@ else
     # Use @smartappli provided wheels
     cpu_feature=$(detect_cpu_features)
     if [ "$SERGE_GPU_NVIDIA_SUPPORT" = true ]; then
-        pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/llama-cpp-python-cuBLAS-wheels/$cpu_feature/cu122"
+        pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/serge-wheels/$cpu_feature/cu122"
     elif [ "$SERGE_GPU_AMD_SUPPORT" = true ]; then
-        pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/llama-cpp-python-cuBLAS-wheels/$cpu_feature/rocm5.6.1"
+        pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/serge-wheels/$cpu_feature/rocm5.6.1"
     else
-        pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/llama-cpp-python-cuBLAS-wheels/$cpu_feature/cpu"
+        pip_command="python -m pip install -v llama-cpp-python==$LLAMA_PYTHON_VERSION --only-binary=:all: --extra-index-url=https://smartappli.github.io/serge-wheels/$cpu_feature/cpu"
     fi
 fi
 
diff --git a/scripts/serge.env b/scripts/serge.env
index cd90006..4884fc6 100644
--- a/scripts/serge.env
+++ b/scripts/serge.env
@@ -1,4 +1,4 @@
 SERGE_GPU_NVIDIA_SUPPORT=false
 SERGE_GPU_AMD_SUPPORT=false
-LLAMA_PYTHON_VERSION=0.2.39
-SERGE_ENABLE_IPV6=false
\ No newline at end of file
+LLAMA_PYTHON_VERSION=0.2.44
+SERGE_ENABLE_IPV6=false