Commit b81f898 (1 parent: 140006a)

* Upgrade to use vLLM
* Improve API
* Isolated Docker image for LLMs

Showing 13 changed files with 698 additions and 453 deletions.
@@ -14,4 +14,5 @@
     is_turbo_model,
     split_prompt,
     validate_torch_device,
+    get_max_memory,
 )
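The hunk above only adds get_max_memory to the module's exports; its implementation is not rendered in this view. As a rough sketch of what such a helper commonly does (an assumption: the real implementation in this commit may differ, this version simply reports per-GPU memory via torch.cuda):

import torch

def get_max_memory() -> dict:
    # Hypothetical sketch; the actual get_max_memory in this commit is not shown.
    # Maps each visible CUDA device index to its total memory, e.g. {0: "24GiB"}.
    return {
        i: f"{torch.cuda.get_device_properties(i).total_memory // 1024**3}GiB"
        for i in range(torch.cuda.device_count())
    }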
@@ -0,0 +1,74 @@
# Based on https://github.com/huggingface/api-inference-community/blob/main/docker_images/diffusers/Dockerfile

FROM nvidia/cuda:12.1.1-cudnn8-runtime-ubuntu20.04
LABEL maintainer="Yondon Fu <[email protected]>"

# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y

ENV DEBIAN_FRONTEND=noninteractive

# Install prerequisites
RUN apt-get update && \
    apt-get install -y build-essential libssl-dev zlib1g-dev libbz2-dev \
    libreadline-dev libsqlite3-dev wget curl llvm libncurses5-dev libncursesw5-dev \
    xz-utils tk-dev libffi-dev liblzma-dev python3-openssl git \
    ffmpeg

# Install pyenv
RUN curl https://pyenv.run | bash

# Set environment variables for pyenv
ENV PYENV_ROOT /root/.pyenv
ENV PATH $PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH

# Install your desired Python version
ARG PYTHON_VERSION=3.11
RUN pyenv install $PYTHON_VERSION && \
    pyenv global $PYTHON_VERSION && \
    pyenv rehash

# Upgrade pip and install your desired packages
ARG PIP_VERSION=24.2
RUN pip install --no-cache-dir --upgrade pip==${PIP_VERSION} setuptools==69.5.1 wheel==0.43.0 && \
    pip install --no-cache-dir torch==2.4.0 torchvision torchaudio pip-tools

WORKDIR /app

COPY ./requirements.llm.in /app
RUN pip-compile requirements.llm.in -o requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Most DL models are quite large in terms of memory; using multiple workers is a
# HUGE slowdown because of the fork and the Python GIL.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/models
ENV DIFFUSERS_CACHE=/models
ENV MODEL_DIR=/models
# This ensures compatibility with how GPUs are addressed within go-livepeer
ENV CUDA_DEVICE_ORDER=PCI_BUS_ID

# vLLM configuration
ENV USE_8BIT=false
ENV MAX_NUM_BATCHED_TOKENS=8192
ENV MAX_NUM_SEQS=128
ENV MAX_MODEL_LEN=8192
ENV GPU_MEMORY_UTILIZATION=0.85
ENV TENSOR_PARALLEL_SIZE=1
ENV PIPELINE_PARALLEL_SIZE=1
# To use multiple GPUs, set TENSOR_PARALLEL_SIZE and PIPELINE_PARALLEL_SIZE.
# Total GPUs used = TENSOR_PARALLEL_SIZE × PIPELINE_PARALLEL_SIZE
# Example for 4 GPUs:
# - Option 1: TENSOR_PARALLEL_SIZE=2, PIPELINE_PARALLEL_SIZE=2
# - Option 2: TENSOR_PARALLEL_SIZE=4, PIPELINE_PARALLEL_SIZE=1
# - Option 3: TENSOR_PARALLEL_SIZE=1, PIPELINE_PARALLEL_SIZE=4

COPY app/ /app/app
COPY images/ /app/images
COPY bench.py /app/bench.py
COPY example_data/ /app/example_data

CMD ["uvicorn", "app.main:app", "--log-config", "app/cfg/uvicorn_logging_config.json", "--host", "0.0.0.0", "--port", "8000"]