diff --git a/Dockerfile.rocm b/Dockerfile.rocm
index 02d37cdc6a1c8..e97762163e2eb 100644
--- a/Dockerfile.rocm
+++ b/Dockerfile.rocm
@@ -46,12 +46,12 @@ WORKDIR ${APP_MOUNT}
 RUN python3 -m pip install --upgrade pip
 # Remove sccache so it doesn't interfere with ccache
 # TODO: implement sccache support across components
-RUN apt-get purge -y sccache; pip uninstall -y sccache; rm -f "$(which sccache)"
+RUN apt-get purge -y sccache; python3 -m pip uninstall -y sccache; rm -f "$(which sccache)"
 # Install torch == 2.5.0 on ROCm
 RUN case "$(ls /opt | grep -Po 'rocm-[0-9]\.[0-9]')" in \
         *"rocm-6.1"*) \
-            pip uninstall -y torch torchaudio torchvision \
-            && pip install --no-cache-dir --pre \
+            python3 -m pip uninstall -y torch torchaudio torchvision \
+            && python3 -m pip install --no-cache-dir --pre \
                torch==2.5.0.dev20240710 torchaudio==2.4.0.dev20240710 \
                torchvision==0.20.0.dev20240710 \
                --index-url https://download.pytorch.org/whl/nightly/rocm6.1;; \
@@ -70,7 +70,7 @@ ENV CCACHE_DIR=/root/.cache/ccache
 FROM base AS build_amdsmi
 # Build amdsmi wheel always
 RUN cd /opt/rocm/share/amd_smi \
-    && pip wheel . --wheel-dir=/install
+    && python3 -m pip wheel . --wheel-dir=/install
 
 
 ### Flash-Attention wheel build stage
@@ -126,7 +126,7 @@ RUN case "$(which python3)" in \
 
 # Package upgrades for useful functionality or to avoid dependency issues
 RUN --mount=type=cache,target=/root/.cache/pip \
-    pip install --upgrade numba scipy huggingface-hub[cli]
+    python3 -m pip install --upgrade numba scipy huggingface-hub[cli]
 
 # Make sure punica kernels are built (for LoRA)
 ENV VLLM_INSTALL_PUNICA_KERNELS=1
@@ -137,7 +137,7 @@ ENV TOKENIZERS_PARALLELISM=false
 
 RUN --mount=type=cache,target=${CCACHE_DIR} \
     --mount=type=cache,target=/root/.cache/pip \
-    pip install -Ur requirements-rocm.txt \
+    python3 -m pip install -Ur requirements-rocm.txt \
     && case "$(ls /opt | grep -Po 'rocm-[0-9]\.[0-9]')" in \
         *"rocm-6.1"*) \
             # Bring in upgrades to HIP graph earlier than ROCm 6.2 for vLLM
@@ -153,7 +153,7 @@ RUN --mount=type=bind,from=build_amdsmi,src=/install,target=/install \
     mkdir -p libs \
     && cp /install/*.whl libs \
     # Preemptively uninstall to avoid same-version no-installs
-    && pip uninstall -y amdsmi;
+    && python3 -m pip uninstall -y amdsmi;
 
 # Copy triton wheel(s) into final image if they were built
 RUN --mount=type=bind,from=build_triton,src=/install,target=/install \
@@ -161,7 +161,7 @@ RUN --mount=type=bind,from=build_triton,src=/install,target=/install \
     && if ls /install/*.whl; then \
         cp /install/*.whl libs \
         # Preemptively uninstall to avoid same-version no-installs
-        && pip uninstall -y triton; fi
+        && python3 -m pip uninstall -y triton; fi
 
 # Copy flash-attn wheel(s) into final image if they were built
 RUN --mount=type=bind,from=build_fa,src=/install,target=/install \
@@ -169,11 +169,11 @@ RUN --mount=type=bind,from=build_fa,src=/install,target=/install \
     && if ls /install/*.whl; then \
         cp /install/*.whl libs \
         # Preemptively uninstall to avoid same-version no-installs
-        && pip uninstall -y flash-attn; fi
+        && python3 -m pip uninstall -y flash-attn; fi
 
 # Install wheels that were built to the final image
 RUN --mount=type=cache,target=/root/.cache/pip \
     if ls libs/*.whl; then \
-        pip install libs/*.whl; fi
+        python3 -m pip install libs/*.whl; fi
 
 CMD ["/bin/bash"]
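
The patch consistently swaps bare `pip` for `python3 -m pip`, so every install and uninstall is bound to the interpreter that `python3` resolves to rather than whatever `pip` shim happens to be first on PATH. A possible sanity check after building the image, not part of this diff and using an assumed image tag (`vllm-rocm:dev`):

    docker run --rm vllm-rocm:dev python3 -m pip --version
    docker run --rm vllm-rocm:dev sh -c 'command -v pip && pip --version || echo "no standalone pip on PATH"'

The first command reports which interpreter the module-style invocation is tied to; comparing it with the second shows whether a stray `pip` script on PATH could still point at a different Python.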