Dockerfile-CUDA11.8
FROM nvidia/cuda:11.8.0-devel-ubuntu20.04
LABEL github="https://github.com/mlcommons/GaNDLF"
LABEL docs="https://mlcommons.github.io/GaNDLF/"
LABEL version=1.0
# Install instructions for NVIDIA Container Toolkit allowing you to use the host's GPU: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html
# Note that to do this on a Windows host you need experimental feature "CUDA on WSL" -- not yet stable.
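# Example host-side invocation once the toolkit is installed (the image tag "gandlf:cuda11.8" is illustrative):
#   docker run --gpus all gandlf:cuda11.8 run --help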
ENV DEBIAN_FRONTEND=noninteractive
# Explicitly install python3.9 (the PyTorch 2.3.1 wheels installed below are built against CUDA 11.8)
RUN apt-get update && apt-get install -y software-properties-common
RUN add-apt-repository ppa:deadsnakes/ppa
RUN apt-get update && apt-get install -y python3.9 python3-pip libjpeg8-dev zlib1g-dev python3-dev libpython3.9-dev libffi-dev libgl1
RUN python3.9 -m pip install --upgrade pip==24.0
RUN python3.9 -m pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cu118
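# Optional sanity check (left commented out so the build is unchanged); it would print the CUDA version the wheels were built against, e.g. "11.8":
# RUN python3.9 -c "import torch; print(torch.version.cuda)"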
RUN python3.9 -m pip install openvino-dev==2023.0.1 opencv-python-headless mlcube_docker
# Do some dependency installation separately here to make layer caching more efficient
COPY ./setup.py ./setup.py
RUN python3.9 -c "from setup import requirements; file = open('requirements.txt', 'w'); file.writelines([req + '\n' for req in requirements]); file.close()" \
&& python3.9 -m pip install -r ./requirements.txt
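# Because only setup.py is copied above, edits to the rest of the GaNDLF source do not invalidate
# this cached dependency layer; only the COPY and install steps below are rerun.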
COPY . /GaNDLF
WORKDIR /GaNDLF
RUN python3.9 -m pip install -e .
# ENTRYPOINT forces all commands given via "docker run" to go through the gandlf CLI; CMD supplies the default argument ("run"), so the container runs "gandlf run" by default.
# If a user calls "docker run gandlf:[tag] anonymize", it resolves to running "gandlf anonymize" instead.
# CMD is overridden by any args passed to "docker run"; the ENTRYPOINT stays constant.
ENTRYPOINT ["gandlf"]
CMD ["run"]
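# Concretely (the image tag is illustrative):
#   docker run gandlf:cuda11.8             -> gandlf run
#   docker run gandlf:cuda11.8 anonymize   -> gandlf anonymize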
# The below force the container commands to run as a nonroot user with UID > 10000.
# This greatly reduces UID collision probability between container and host, helping prevent privilege escalation attacks.
# As a side benefit, this also reduces the chance that users on a cluster end up unable to access their files.
# See https://github.com/hexops/dockerfile as a best practices guide.
#RUN addgroup --gid 10001 --system nonroot \
# && adduser --uid 10000 --system --ingroup nonroot --home /home/nonroot nonroot
#USER nonroot
# Prepare the container for possible model embedding later.
RUN mkdir /embedded_model
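# A downstream image could then embed a trained model, e.g. (hypothetical tag and paths):
#   FROM gandlf:cuda11.8
#   COPY ./trained_model /embedded_model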