-
Notifications
You must be signed in to change notification settings - Fork 10.1k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge branch 'master' into tokenizer-bpe-fixes
- Loading branch information
Showing 159 changed files with 3,752 additions and 2,634 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,35 @@ | ||
ARG UBUNTU_VERSION=22.04
# This needs to generally match the container host's environment.
ARG CUDA_VERSION=11.7.1
# Target the CUDA build image
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
# Target the CUDA runtime image
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

FROM ${BASE_CUDA_DEV_CONTAINER} AS build

# Unless otherwise specified, we make a fat build (all CUDA archs).
ARG CUDA_DOCKER_ARCH=all

# Install build tools; drop the apt lists in the same layer so the
# package cache is not baked into the image (hadolint DL3009).
RUN apt-get update && \
    apt-get install -y build-essential git && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /app

COPY . .

# Set nvcc architecture (consumed by the Makefile's CUDA flags)
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
# Enable CUDA
ENV LLAMA_CUDA=1

RUN make -j$(nproc) llama-cli

FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime

# libgomp1: OpenMP runtime needed by the binary built above.
RUN apt-get update && \
    apt-get install -y libgomp1 && \
    rm -rf /var/lib/apt/lists/*

COPY --from=build /app/llama-cli /llama-cli

ENTRYPOINT [ "/llama-cli" ]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,26 @@ | ||
ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04

FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build

# Build FP16 SYCL kernels only when explicitly requested.
ARG LLAMA_SYCL_F16=OFF

# Clean the apt lists in the same layer (hadolint DL3009).
RUN apt-get update && \
    apt-get install -y git && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /app

COPY . .

# Translate the build arg into the extra cmake flag; icx/icpx are the
# Intel oneAPI compilers shipped with the basekit image.
RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
        echo "LLAMA_SYCL_F16 is set" && \
        export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
    fi && \
    cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
    cmake --build build --config Release --target llama-cli

# Runtime reuses the basekit image so the SYCL runtime libs are present.
FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime

COPY --from=build /app/build/bin/llama-cli /llama-cli

ENV LC_ALL=C.utf8

ENTRYPOINT [ "/llama-cli" ]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,45 @@ | ||
ARG UBUNTU_VERSION=22.04

# This needs to generally match the container host's environment.
ARG ROCM_VERSION=5.6

# Target the ROCm build image
ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete

FROM ${BASE_ROCM_DEV_CONTAINER} AS build

# Unless otherwise specified, we make a fat build.
# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
# This is mostly tied to rocBLAS supported archs.
ARG ROCM_DOCKER_ARCH=\
    gfx803 \
    gfx900 \
    gfx906 \
    gfx908 \
    gfx90a \
    gfx1010 \
    gfx1030 \
    gfx1100 \
    gfx1101 \
    gfx1102

# Copy only the dependency manifests first so the pip layer is cached
# until the requirements themselves change.
COPY requirements.txt requirements.txt
COPY requirements requirements

# --no-cache-dir keeps pip's download cache out of the image (DL3042).
RUN pip install --no-cache-dir --upgrade pip setuptools wheel \
    && pip install --no-cache-dir -r requirements.txt

WORKDIR /app

COPY . .

# Set the HIP target architectures (consumed by the Makefile)
ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
# Enable ROCm
ENV LLAMA_HIPBLAS=1
ENV CC=/opt/rocm/llvm/bin/clang
ENV CXX=/opt/rocm/llvm/bin/clang++

RUN make -j$(nproc) llama-cli

ENTRYPOINT [ "/app/llama-cli" ]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,27 @@ | ||
ARG UBUNTU_VERSION=jammy

FROM ubuntu:$UBUNTU_VERSION AS build

# Install build tools. Use apt-get (not apt, whose CLI is not
# script-stable — hadolint DL3027) and clean the lists in-layer.
RUN apt-get update && \
    apt-get install -y build-essential cmake git libgomp1 wget && \
    rm -rf /var/lib/apt/lists/*

# Install Vulkan SDK from LunarG. The signing key is dropped into
# /etc/apt/trusted.gpg.d instead of going through the deprecated
# `apt-key add` (removed in apt 2.4+ workflows).
RUN wget -qO /etc/apt/trusted.gpg.d/lunarg.asc https://packages.lunarg.com/lunarg-signing-key-pub.asc && \
    wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
    apt-get update && \
    apt-get install -y vulkan-sdk && \
    rm -rf /var/lib/apt/lists/*

# Build it
WORKDIR /app
COPY . .
RUN cmake -B build -DLLAMA_VULKAN=1 && \
    cmake --build build --config Release --target llama-cli

# Clean up the sources. NOTE(review): this is a single-stage image, so
# the build tools installed above remain in the final image.
WORKDIR /
RUN cp /app/build/bin/llama-cli /llama-cli && \
    rm -rf /app

ENV LC_ALL=C.utf8

ENTRYPOINT [ "/llama-cli" ]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,23 @@ | ||
ARG UBUNTU_VERSION=22.04

FROM ubuntu:$UBUNTU_VERSION AS build

# Install build tools; drop the apt lists in the same layer so the
# package cache is not baked into the image (hadolint DL3009).
RUN apt-get update && \
    apt-get install -y build-essential git && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /app

COPY . .

RUN make -j$(nproc) llama-cli

FROM ubuntu:$UBUNTU_VERSION AS runtime

# libgomp1: OpenMP runtime needed by the binary built above.
RUN apt-get update && \
    apt-get install -y libgomp1 && \
    rm -rf /var/lib/apt/lists/*

COPY --from=build /app/llama-cli /llama-cli

ENV LC_ALL=C.utf8

ENTRYPOINT [ "/llama-cli" ]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,37 @@ | ||
ARG UBUNTU_VERSION=22.04
# This needs to generally match the container host's environment.
ARG CUDA_VERSION=11.7.1
# Target the CUDA build image
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
# Target the CUDA runtime image
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

FROM ${BASE_CUDA_DEV_CONTAINER} AS build

# Unless otherwise specified, we make a fat build (all CUDA archs).
ARG CUDA_DOCKER_ARCH=all

# libcurl headers are needed at build time for LLAMA_CURL=1 below.
RUN apt-get update && \
    apt-get install -y build-essential git libcurl4-openssl-dev && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /app

COPY . .

# Set nvcc architecture (consumed by the Makefile's CUDA flags)
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
# Enable CUDA
ENV LLAMA_CUDA=1
# Enable cURL (model download support in the server)
ENV LLAMA_CURL=1

RUN make -j$(nproc) llama-server

FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime

# The runtime image only needs the shared libraries, not the -dev
# headers: libcurl4 for cURL, libgomp1 for OpenMP.
RUN apt-get update && \
    apt-get install -y libcurl4 libgomp1 && \
    rm -rf /var/lib/apt/lists/*

COPY --from=build /app/llama-server /llama-server

ENTRYPOINT [ "/llama-server" ]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,29 @@ | ||
ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04

FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build

# Build FP16 SYCL kernels only when explicitly requested.
ARG LLAMA_SYCL_F16=OFF

# libcurl headers are needed at build time for -DLLAMA_CURL=ON below.
RUN apt-get update && \
    apt-get install -y git libcurl4-openssl-dev && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /app

COPY . .

# Translate the build arg into the extra cmake flag; icx/icpx are the
# Intel oneAPI compilers shipped with the basekit image.
RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
        echo "LLAMA_SYCL_F16 is set" && \
        export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
    fi && \
    cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
    cmake --build build --config Release --target llama-server

# Runtime reuses the basekit image so the SYCL runtime libs are present.
FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime

# Only the libcurl shared library is needed at runtime, not the headers.
RUN apt-get update && \
    apt-get install -y libcurl4 && \
    rm -rf /var/lib/apt/lists/*

COPY --from=build /app/build/bin/llama-server /llama-server

ENV LC_ALL=C.utf8

ENTRYPOINT [ "/llama-server" ]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,50 @@ | ||
ARG UBUNTU_VERSION=22.04

# This needs to generally match the container host's environment.
ARG ROCM_VERSION=5.6

# Target the ROCm build image
ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete

FROM ${BASE_ROCM_DEV_CONTAINER} AS build

# Unless otherwise specified, we make a fat build.
# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
# This is mostly tied to rocBLAS supported archs.
ARG ROCM_DOCKER_ARCH=\
    gfx803 \
    gfx900 \
    gfx906 \
    gfx908 \
    gfx90a \
    gfx1010 \
    gfx1030 \
    gfx1100 \
    gfx1101 \
    gfx1102

# Copy only the dependency manifests first so the pip layer is cached
# until the requirements themselves change.
COPY requirements.txt requirements.txt
COPY requirements requirements

# --no-cache-dir keeps pip's download cache out of the image (DL3042).
RUN pip install --no-cache-dir --upgrade pip setuptools wheel \
    && pip install --no-cache-dir -r requirements.txt

WORKDIR /app

COPY . .

# Set the HIP target architectures (consumed by the Makefile)
ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
# Enable ROCm
ENV LLAMA_HIPBLAS=1
ENV CC=/opt/rocm/llvm/bin/clang
ENV CXX=/opt/rocm/llvm/bin/clang++

# Enable cURL (model download support in the server); the headers are
# needed at build time, and the apt lists are dropped in-layer (DL3009).
ENV LLAMA_CURL=1
RUN apt-get update && \
    apt-get install -y libcurl4-openssl-dev && \
    rm -rf /var/lib/apt/lists/*

RUN make -j$(nproc) llama-server

ENTRYPOINT [ "/app/llama-server" ]
Oops, something went wrong.