# Build and Release LlamaCpp libraries
# CI workflow: builds shared llama.cpp libraries for macOS (arm64/x64),
# Ubuntu, and Windows (CPU variants, CUDA, HIP), then publishes a release.
name: Build and Release LlamaCpp libraries

# Triggered when the vendored llama.cpp submodule pointer changes,
# or manually via workflow_dispatch (optionally creating a release).
on:
  push:
    paths:
      - vendor/llama.cpp
  pull_request:
    paths:
      - vendor/llama.cpp
  workflow_dispatch: # allows manual triggering
    inputs:
      create_release:
        description: 'Create new release'
        required: true
        type: boolean

permissions:
  contents: write # for creating release

env:
  # head_ref is set for pull requests; ref_name covers push/dispatch events.
  BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
  GGML_NLOOP: 3
  GGML_N_THREADS: 1
  LLAMA_LOG_COLORS: 1
  LLAMA_LOG_PREFIX: 1
  LLAMA_LOG_TIMESTAMPS: 1

jobs:
macOS-latest-cmake-arm64: | |
runs-on: macos-14 | |
steps: | |
- name: Checkout repository and submodules | |
uses: actions/checkout@v3 | |
with: | |
submodules: recursive | |
fetch-depth: 0 | |
- name: Dependencies | |
id: depends | |
continue-on-error: true | |
run: | | |
brew update | |
- name: Build | |
id: cmake_build | |
working-directory: vendor/llama.cpp | |
run: | | |
sysctl -a | |
mkdir build | |
cd build | |
cmake .. \ | |
-DBUILD_SHARED_LIBS=ON \ | |
-DLLAMA_FATAL_WARNINGS=ON \ | |
-DLLAMA_CURL=ON \ | |
-DGGML_METAL_USE_BF16=ON \ | |
-DGGML_METAL_EMBED_LIBRARY=ON \ | |
-DGGML_RPC=ON | |
cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) | |
- name: Copy Shared Libraries | |
working-directory: vendor/llama.cpp | |
run: | | |
mkdir -p lib | |
cd build | |
find $(pwd) -type f \( -name "*.dylib" -o -name "*.so" \) -exec cp {} ../lib \; | |
- name: Determine tag name | |
id: tag | |
working-directory: vendor/llama.cpp | |
shell: bash | |
run: | | |
BUILD_NUMBER="$(git rev-list --count HEAD)" | |
SHORT_HASH="$(git rev-parse --short=7 HEAD)" | |
if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then | |
echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT | |
else | |
SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-') | |
echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT | |
fi | |
- name: Pack artifacts | |
id: pack_artifacts | |
working-directory: vendor/llama.cpp | |
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} | |
run: | | |
#cp LICENSE ./build/bin/ | |
#cp examples/run/linenoise.cpp/LICENSE ./build/bin/LICENSE.linenoise.cpp | |
zip -r llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip ./lib/* | |
- name: Upload artifacts | |
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} | |
uses: actions/upload-artifact@v4 | |
with: | |
path: vendor/llama.cpp/llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip | |
name: llama-bin-macos-arm64.zip | |
macOS-latest-cmake-x64: | |
runs-on: macos-13 | |
steps: | |
- name: Checkout repository and submodules | |
uses: actions/checkout@v3 | |
with: | |
submodules: recursive | |
fetch-depth: 0 | |
- name: Dependencies | |
id: depends | |
continue-on-error: true | |
run: | | |
brew update | |
- name: Build | |
id: cmake_build | |
working-directory: vendor/llama.cpp | |
run: | | |
sysctl -a | |
# Metal is disabled due to intermittent failures with Github runners not having a GPU: | |
# https://github.com/ggerganov/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313 | |
cmake -B build \ | |
-DLLAMA_FATAL_WARNINGS=ON \ | |
-DLLAMA_CURL=ON \ | |
-DGGML_METAL=OFF \ | |
-DGGML_RPC=ON | |
cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) | |
- name: Copy Shared Libraries | |
working-directory: vendor/llama.cpp | |
run: | | |
mkdir -p lib | |
cd build | |
find $(pwd) -type f \( -name "*.dylib" -o -name "*.so" \) -exec cp {} ../lib \; | |
- name: Determine tag name | |
id: tag | |
working-directory: vendor/llama.cpp | |
shell: bash | |
run: | | |
BUILD_NUMBER="$(git rev-list --count HEAD)" | |
SHORT_HASH="$(git rev-parse --short=7 HEAD)" | |
if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then | |
echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT | |
else | |
SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-') | |
echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT | |
fi | |
- name: Pack artifacts | |
id: pack_artifacts | |
working-directory: vendor/llama.cpp | |
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} | |
run: | | |
#cp LICENSE ./build/bin/ | |
#cp examples/run/linenoise.cpp/LICENSE ./build/bin/LICENSE.linenoise.cpp | |
zip -r llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip ./lib/* | |
- name: Upload artifacts | |
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} | |
uses: actions/upload-artifact@v4 | |
with: | |
path: vendor/llama.cpp/llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip | |
name: llama-bin-macos-x64.zip | |
ubuntu-latest-cmake: | |
runs-on: ubuntu-latest | |
steps: | |
- name: Checkout repository and submodules | |
uses: actions/checkout@v3 | |
with: | |
submodules: recursive | |
fetch-depth: 0 | |
- name: Dependencies | |
id: depends | |
run: | | |
sudo apt-get update | |
sudo apt-get install build-essential libcurl4-openssl-dev | |
- name: Build | |
id: cmake_build | |
working-directory: vendor/llama.cpp | |
run: | | |
mkdir build | |
cd build | |
cmake .. -DBUILD_SHARED_LIBS=ON \ | |
-DLLAMA_FATAL_WARNINGS=ON \ | |
-DLLAMA_CURL=ON \ | |
-DGGML_RPC=ON | |
cmake --build . --config Release -j $(nproc) | |
- name: Copy Shared Libraries | |
working-directory: vendor/llama.cpp | |
run: | | |
mkdir lib | |
cd build | |
find $(pwd) -type f -name "*.so" -exec cp {} ../lib \; | |
- name: Determine tag name | |
id: tag | |
working-directory: vendor/llama.cpp | |
shell: bash | |
run: | | |
BUILD_NUMBER="$(git rev-list --count HEAD)" | |
SHORT_HASH="$(git rev-parse --short=7 HEAD)" | |
if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then | |
echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT | |
else | |
SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-') | |
echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT | |
fi | |
- name: Pack artifacts | |
id: pack_artifacts | |
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} | |
working-directory: vendor/llama.cpp | |
run: | | |
#cp LICENSE ./build/bin/ | |
#cp examples/run/linenoise.cpp/LICENSE ./build/bin/LICENSE.linenoise.cpp | |
zip -r llama-${{ steps.tag.outputs.name }}-bin-ubuntu-x64.zip ./lib/* | |
- name: Upload artifacts | |
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} | |
uses: actions/upload-artifact@v4 | |
with: | |
path: vendor/llama.cpp/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-x64.zip | |
name: llama-bin-ubuntu-x64.zip | |
windows-latest-cmake: | |
runs-on: windows-latest | |
env: | |
OPENBLAS_VERSION: 0.3.23 | |
SDE_VERSION: 9.33.0-2024-01-07 | |
VULKAN_VERSION: 1.3.261.1 | |
strategy: | |
matrix: | |
include: | |
- build: 'noavx-x64' | |
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF' | |
- build: 'avx2-x64' | |
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON' | |
- build: 'avx-x64' | |
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX2=OFF' | |
- build: 'avx512-x64' | |
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX512=ON' | |
- build: 'openblas-x64' | |
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"' | |
- build: 'kompute-x64' | |
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON' | |
- build: 'vulkan-x64' | |
defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_VULKAN=ON' | |
- build: 'llvm-arm64' | |
defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON' | |
- build: 'msvc-arm64' | |
defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON' | |
- build: 'llvm-arm64-opencl-adreno' | |
defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON' | |
steps: | |
- name: Checkout repository and submodules | |
uses: actions/checkout@v3 | |
with: | |
submodules: recursive | |
fetch-depth: 0 | |
- name: Clone Kompute submodule | |
id: clone_kompute | |
working-directory: vendor/llama.cpp | |
if: ${{ matrix.build == 'kompute-x64' }} | |
run: | | |
git submodule update --init ggml/src/ggml-kompute/kompute | |
- name: Download OpenBLAS | |
id: get_openblas | |
working-directory: vendor/llama.cpp | |
if: ${{ matrix.build == 'openblas-x64' }} | |
run: | | |
curl.exe -o $env:RUNNER_TEMP/openblas.zip -L "https://github.com/xianyi/OpenBLAS/releases/download/v${env:OPENBLAS_VERSION}/OpenBLAS-${env:OPENBLAS_VERSION}-x64.zip" | |
curl.exe -o $env:RUNNER_TEMP/OpenBLAS.LICENSE.txt -L "https://github.com/xianyi/OpenBLAS/raw/v${env:OPENBLAS_VERSION}/LICENSE" | |
mkdir $env:RUNNER_TEMP/openblas | |
tar.exe -xvf $env:RUNNER_TEMP/openblas.zip -C $env:RUNNER_TEMP/openblas | |
$vcdir = $(vswhere -latest -products * -requires Microsoft.VisualStudio.Component.VC.Tools.x86.x64 -property installationPath) | |
$msvc = $(join-path $vcdir $('VC\Tools\MSVC\'+$(gc -raw $(join-path $vcdir 'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt')).Trim())) | |
$lib = $(join-path $msvc 'bin\Hostx64\x64\lib.exe') | |
& $lib /machine:x64 "/def:${env:RUNNER_TEMP}/openblas/lib/libopenblas.def" "/out:${env:RUNNER_TEMP}/openblas/lib/openblas.lib" /name:openblas.dll | |
- name: Install Vulkan SDK | |
id: get_vulkan | |
working-directory: vendor/llama.cpp | |
if: ${{ matrix.build == 'kompute-x64' || matrix.build == 'vulkan-x64' }} | |
run: | | |
curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/VulkanSDK-${env:VULKAN_VERSION}-Installer.exe" | |
& "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install | |
Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}" | |
Add-Content $env:GITHUB_PATH "C:\VulkanSDK\${env:VULKAN_VERSION}\bin" | |
- name: Install Ninja | |
id: install_ninja | |
run: | | |
choco install ninja | |
- name: Install OpenCL Headers and Libs | |
id: install_opencl | |
working-directory: vendor/llama.cpp | |
if: ${{ matrix.build == 'llvm-arm64-opencl-adreno' }} | |
run: | | |
git clone https://github.com/KhronosGroup/OpenCL-Headers | |
cd OpenCL-Headers | |
mkdir build && cd build | |
cmake .. ` | |
-DBUILD_TESTING=OFF ` | |
-DOPENCL_HEADERS_BUILD_TESTING=OFF ` | |
-DOPENCL_HEADERS_BUILD_CXX_TESTS=OFF ` | |
-DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release" | |
cmake --build . --target install | |
git clone https://github.com/KhronosGroup/OpenCL-ICD-Loader | |
cd OpenCL-ICD-Loader | |
mkdir build-arm64-release && cd build-arm64-release | |
cmake .. ` | |
-A arm64 ` | |
-DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" ` | |
-DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release" | |
cmake --build . --target install --config release | |
- name: Build | |
id: cmake_build | |
working-directory: vendor/llama.cpp | |
run: | | |
cmake -S . -B build -DBUILD_SHARED_LIBS=ON ${{ matrix.defines }} | |
cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS} | |
- name: Add libopenblas.dll | |
id: add_libopenblas_dll | |
working-directory: vendor/llama.cpp | |
if: ${{ matrix.build == 'openblas-x64' }} | |
run: | | |
cp $env:RUNNER_TEMP/openblas/bin/libopenblas.dll ./build/bin/Release/openblas.dll | |
cp $env:RUNNER_TEMP/OpenBLAS.LICENSE.txt ./build/bin/Release/OpenBLAS-${env:OPENBLAS_VERSION}.txt | |
- name: Check AVX512F support | |
id: check_avx512f | |
working-directory: vendor/llama.cpp | |
if: ${{ matrix.build == 'avx512-x64' }} | |
continue-on-error: true | |
run: | | |
cd build | |
$vcdir = $(vswhere -latest -products * -requires Microsoft.VisualStudio.Component.VC.Tools.x86.x64 -property installationPath) | |
$msvc = $(join-path $vcdir $('VC\Tools\MSVC\'+$(gc -raw $(join-path $vcdir 'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt')).Trim())) | |
$cl = $(join-path $msvc 'bin\Hostx64\x64\cl.exe') | |
echo 'int main(void){unsigned int a[4];__cpuid(a,7);return !(a[1]&65536);}' >> avx512f.c | |
& $cl /O2 /GS- /kernel avx512f.c /link /nodefaultlib /entry:main | |
.\avx512f.exe && echo "AVX512F: YES" && ( echo HAS_AVX512F=1 >> $env:GITHUB_ENV ) || echo "AVX512F: NO" | |
- name: Copy Shared Libraries | |
working-directory: vendor/llama.cpp | |
run: | | |
mkdir lib | |
powershell -Command " | |
Get-ChildItem -Path build -Recurse -Filter '*.dll' | Copy-Item -Destination lib | |
" | |
- name: Determine tag name | |
id: tag | |
working-directory: vendor/llama.cpp | |
shell: bash | |
run: | | |
BUILD_NUMBER="$(git rev-list --count HEAD)" | |
SHORT_HASH="$(git rev-parse --short=7 HEAD)" | |
if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then | |
echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT | |
else | |
SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-') | |
echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT | |
fi | |
- name: Pack artifacts | |
id: pack_artifacts | |
working-directory: vendor/llama.cpp | |
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} | |
run: | | |
#Copy-Item LICENSE .\build\bin\Release\llama.cpp.txt | |
#Copy-Item .\examples\run\linenoise.cpp\LICENSE .\build\bin\Release\linenoise.cpp.txt | |
7z a llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}.zip .\lib\* | |
- name: Upload artifacts | |
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} | |
uses: actions/upload-artifact@v4 | |
with: | |
path: vendor/llama.cpp\llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}.zip | |
name: llama-bin-win-${{ matrix.build }}.zip | |
windows-2019-cmake-cuda: | |
runs-on: windows-2019 | |
strategy: | |
matrix: | |
cuda: ['12.4', '11.7'] | |
build: ['cuda'] | |
steps: | |
- name: Checkout repository and submodules | |
uses: actions/checkout@v3 | |
with: | |
submodules: recursive | |
fetch-depth: 0 | |
- name: Install Cuda Toolkit 11.7 | |
if: ${{ matrix.cuda == '11.7' }} | |
working-directory: vendor/llama.cpp | |
run: | | |
mkdir -p "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" | |
choco install unzip -y | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cudart/windows-x86_64/cuda_cudart-windows-x86_64-11.7.99-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvcc/windows-x86_64/cuda_nvcc-windows-x86_64-11.7.99-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvrtc/windows-x86_64/cuda_nvrtc-windows-x86_64-11.7.99-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/libcublas/windows-x86_64/libcublas-windows-x86_64-11.7.4.6-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvtx/windows-x86_64/cuda_nvtx-windows-x86_64-11.7.91-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/visual_studio_integration/windows-x86_64/visual_studio_integration-windows-x86_64-11.7.91-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvprof/windows-x86_64/cuda_nvprof-windows-x86_64-11.7.101-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cccl/windows-x86_64/cuda_cccl-windows-x86_64-11.7.91-archive.zip" | |
unzip '*.zip' -d "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_cudart-windows-x86_64-11.7.99-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvcc-windows-x86_64-11.7.99-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvrtc-windows-x86_64-11.7.99-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\libcublas-windows-x86_64-11.7.4.6-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvtx-windows-x86_64-11.7.91-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\visual_studio_integration-windows-x86_64-11.7.91-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvprof-windows-x86_64-11.7.101-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_cccl-windows-x86_64-11.7.91-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y | |
echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append | |
echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\libnvvp" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append | |
echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 | |
echo "CUDA_PATH_V11_7=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 | |
- name: Install Cuda Toolkit 12.4 | |
if: ${{ matrix.cuda == '12.4' }} | |
working-directory: vendor/llama.cpp | |
run: | | |
mkdir -p "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" | |
choco install unzip -y | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cudart/windows-x86_64/cuda_cudart-windows-x86_64-12.4.127-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvcc/windows-x86_64/cuda_nvcc-windows-x86_64-12.4.131-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvrtc/windows-x86_64/cuda_nvrtc-windows-x86_64-12.4.127-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/libcublas/windows-x86_64/libcublas-windows-x86_64-12.4.5.8-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvtx/windows-x86_64/cuda_nvtx-windows-x86_64-12.4.127-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_profiler_api/windows-x86_64/cuda_profiler_api-windows-x86_64-12.4.127-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/visual_studio_integration/windows-x86_64/visual_studio_integration-windows-x86_64-12.4.127-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvprof/windows-x86_64/cuda_nvprof-windows-x86_64-12.4.127-archive.zip" | |
curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cccl/windows-x86_64/cuda_cccl-windows-x86_64-12.4.127-archive.zip" | |
unzip '*.zip' -d "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_cudart-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvcc-windows-x86_64-12.4.131-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvrtc-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\libcublas-windows-x86_64-12.4.5.8-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvtx-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_profiler_api-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\visual_studio_integration-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvprof-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y | |
xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_cccl-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y | |
echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append | |
echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\libnvvp" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append | |
echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 | |
echo "CUDA_PATH_V12_4=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 | |
- name: Install ccache | |
uses: hendrikmuhs/[email protected] | |
with: | |
key: ${{ github.job }}-${{ matrix.cuda }}-${{ matrix.build }} | |
- name: Install Ninja | |
id: install_ninja | |
run: | | |
choco install ninja | |
- name: Build | |
id: cmake_build | |
working-directory: vendor/llama.cpp | |
shell: cmd | |
run: | | |
call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat" | |
cmake -S . -B build -G "Ninja Multi-Config" -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_CUDA=ON -DGGML_RPC=ON | |
set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1 | |
cmake --build build --config Release -j %NINJA_JOBS% -t ggml | |
cmake --build build --config Release | |
- name: Determine tag name | |
id: tag | |
working-directory: vendor/llama.cpp | |
shell: bash | |
run: | | |
BUILD_NUMBER="$(git rev-list --count HEAD)" | |
SHORT_HASH="$(git rev-parse --short=7 HEAD)" | |
if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then | |
echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT | |
else | |
SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-') | |
echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT | |
fi | |
- name: Copy Shared Libraries | |
working-directory: vendor/llama.cpp | |
run: | | |
mkdir lib | |
powershell -Command " | |
Get-ChildItem -Path build -Recurse -Filter '*.dll' | Copy-Item -Destination lib | |
" | |
- name: Pack artifacts | |
id: pack_artifacts | |
working-directory: vendor/llama.cpp | |
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} | |
run: | | |
7z a llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-cu${{ matrix.cuda }}-x64.zip .\lib\* | |
- name: Upload artifacts | |
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} | |
uses: actions/upload-artifact@v4 | |
with: | |
path: vendor/llama.cpp/llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-cu${{ matrix.cuda }}-x64.zip | |
name: llama-bin-win-cu${{ matrix.cuda }}-x64.zip | |
- name: Copy and pack Cuda runtime | |
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} | |
working-directory: vendor/llama.cpp | |
run: | | |
echo "Cuda install location: ${{ env.CUDA_PATH }}" | |
$dst='.\build\bin\cudart\' | |
robocopy "${{env.CUDA_PATH}}\bin" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll | |
robocopy "${{env.CUDA_PATH}}\lib" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll | |
7z a cudart-llama-bin-win-cu${{ matrix.cuda }}-x64.zip $dst\* | |
- name: Upload Cuda runtime | |
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} | |
uses: actions/upload-artifact@v4 | |
with: | |
path: vendor/llama.cpp/cudart-llama-bin-win-cu${{ matrix.cuda }}-x64.zip | |
name: cudart-llama-bin-win-cu${{ matrix.cuda }}-x64.zip | |
windows-latest-cmake-hip: | |
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} | |
runs-on: windows-latest | |
strategy: | |
matrix: | |
gpu_target: [gfx1100, gfx1101, gfx1030] | |
steps: | |
- name: Checkout repository and submodules | |
uses: actions/checkout@v3 | |
with: | |
submodules: recursive | |
fetch-depth: 0 | |
- name: Install | |
id: depends | |
run: | | |
$ErrorActionPreference = "Stop" | |
write-host "Downloading AMD HIP SDK Installer" | |
Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe" | |
write-host "Installing AMD HIP SDK" | |
Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait | |
write-host "Completed AMD HIP SDK installation" | |
- name: Verify ROCm | |
id: verify | |
run: | | |
& 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version | |
- name: Build | |
id: cmake_build | |
working-directory: vendor/llama.cpp | |
run: | | |
$env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path) | |
$env:CMAKE_PREFIX_PATH="${env:HIP_PATH}" | |
cmake -G "Unix Makefiles" -B build -S . -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" -DGGML_HIP=ON -DCMAKE_BUILD_TYPE=Release -DAMDGPU_TARGETS=${{ matrix.gpu_target }} -DGGML_RPC=ON | |
cmake --build build -j ${env:NUMBER_OF_PROCESSORS} | |
md "build\bin\rocblas\library\" | |
cp "${env:HIP_PATH}\bin\hipblas.dll" "build\bin\" | |
cp "${env:HIP_PATH}\bin\rocblas.dll" "build\bin\" | |
cp "${env:HIP_PATH}\bin\rocblas\library\*" "build\bin\rocblas\library\" | |
- name: Determine tag name | |
id: tag | |
working-directory: vendor/llama.cpp | |
shell: bash | |
run: | | |
BUILD_NUMBER="$(git rev-list --count HEAD)" | |
SHORT_HASH="$(git rev-parse --short=7 HEAD)" | |
if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then | |
echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT | |
else | |
SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-') | |
echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT | |
fi | |
- name: Pack artifacts | |
id: pack_artifacts | |
working-directory: vendor/llama.cpp | |
run: | | |
7z a llama-${{ steps.tag.outputs.name }}-bin-win-hip-x64-${{ matrix.gpu_target }}.zip .\build\bin\* | |
- name: Upload artifacts | |
uses: actions/upload-artifact@v4 | |
with: | |
path: vendor/llama.cpp/llama-${{ steps.tag.outputs.name }}-bin-win-hip-x64-${{ matrix.gpu_target }}.zip | |
name: llama-bin-win-hip-x64-${{ matrix.gpu_target }}.zip | |
release: | |
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} | |
runs-on: ubuntu-latest | |
needs: | |
- macOS-latest-cmake-arm64 | |
- macOS-latest-cmake-x64 | |
- ubuntu-latest-cmake | |
- windows-latest-cmake | |
- windows-2019-cmake-cuda | |
- windows-latest-cmake-hip | |
steps: | |
- name: Clone | |
id: checkout | |
uses: actions/checkout@v4 | |
with: | |
fetch-depth: 0 | |
- name: Determine tag name | |
id: tag | |
shell: bash | |
run: | | |
BUILD_NUMBER="$(git rev-list --count HEAD)" | |
SHORT_HASH="$(git rev-parse --short=7 HEAD)" | |
if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then | |
echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT | |
else | |
SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-') | |
echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT | |
fi | |
- name: Download artifacts | |
id: download-artifact | |
uses: actions/download-artifact@v4 | |
with: | |
path: ./artifact | |
- name: Move artifacts | |
id: move_artifacts | |
run: mkdir -p ./artifact/release && mv ./artifact/*/*.zip ./artifact/release | |
- name: Create Release | |
id: create_release | |
uses: actions/create-release@v1 | |
env: | |
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |
with: | |
tag_name: ${{ steps.tag.outputs.name }} | |
release_name: "Release ${{ steps.tag.outputs.name }}" | |
draft: false | |
prerelease: false | |
- name: Upload Assets to Release | |
run: | | |
for file in ./artifact/release/*.zip; do | |
echo "Uploading $file" | |
gh release upload "${{ steps.tag.outputs.name }}" "$file" | |
done | |
env: | |
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |