docker-cuda: move recipe to reusable makefile target deps-cuda #1055

Merged · 3 commits · Jun 9, 2023
Changes from 1 commit
55 changes: 8 additions & 47 deletions Dockerfile.cuda
@@ -4,58 +4,19 @@ FROM $BASE_IMAGE
ENV MAMBA_EXE=/usr/local/bin/conda
ENV MAMBA_ROOT_PREFIX=/conda
ENV PATH=$MAMBA_ROOT_PREFIX/bin:$PATH
ENV CONDA_EXE=$MAMBA_EXE
ENV CONDA_PREFIX=$MAMBA_ROOT_PREFIX
ENV CONDA_SHLVL='1'

WORKDIR $MAMBA_ROOT_PREFIX

RUN curl -Ls https://micro.mamba.pm/api/micromamba/linux-64/latest | tar -xvj bin/micromamba
RUN mv bin/micromamba $MAMBA_EXE
RUN hash -r
RUN mkdir -p $CONDA_PREFIX/lib $CONDA_PREFIX/include
RUN echo $CONDA_PREFIX/lib >> /etc/ld.so.conf.d/conda.conf
# Get CUDA toolkit, including compiler and libraries with dev.
# The nvidia channels do not provide (recent) cudnn (needed for Torch, TF etc):
#RUN conda install -c nvidia/label/cuda-11.8.0 cuda && conda clean -a
# The conda-forge channel has cudnn but no cudatoolkit-dev anymore,
# so let's combine nvidia and conda-forge (will be same lib versions, no waste of space):
RUN conda install -c nvidia/label/cuda-11.8.0 \
cuda-nvcc \
cuda-cccl \
&& conda clean -a \
&& find $CONDA_PREFIX -name "*_static.a" -delete
# cuda-cudart-dev \
# cuda-libraries-dev \
#RUN conda install -c conda-forge \
# cudatoolkit=11.8.0 \
# cudnn=8.8.* && \
# conda clean -a && \
# find $CONDA_PREFIX -name "*_static.a" -delete
# Since Torch will pull in the CUDA libraries (as Python pkgs) anyway,
# let's jump the shark and pull these via NGC index directly,
# but then share them with the rest of the system so native compilation/linking
# works, too:
RUN pip3 install nvidia-pyindex \
&& pip3 install nvidia-cudnn-cu11==8.6.0.163 \
nvidia-cublas-cu11 \
nvidia-cusparse-cu11 \
nvidia-cusolver-cu11 \
nvidia-curand-cu11 \
nvidia-cufft-cu11 \
nvidia-cuda-runtime-cu11 \
nvidia-cuda-nvrtc-cu11 \
&& for pkg in cudnn cublas cusparse cusolver curand cufft cuda_runtime cuda_nvrtc; do \
for lib in /usr/local/lib/python3.8/site-packages/nvidia/$pkg/lib/lib*.so.*; do \
base=$(basename $lib); \
ln -s $lib $CONDA_PREFIX/lib/$base.so; \
ln -s $lib $CONDA_PREFIX/lib/${base%.so.*}.so; \
done \
&& ln -s /usr/local/lib/python3.8/site-packages/nvidia/$pkg/include/* $CONDA_PREFIX/include/; \
done \
&& ldconfig
# gputil/nvidia-smi would be nice, too – but that drags in Python as a conda dependency...
WORKDIR /build

COPY Makefile .

RUN make deps-cuda

WORKDIR /data

RUN rm -fr /build

CMD ["/usr/local/bin/ocrd", "--help"]

55 changes: 54 additions & 1 deletion Makefile
@@ -19,7 +19,8 @@ help:
@echo ""
@echo " Targets"
@echo ""
@echo " deps-ubuntu Dependencies for deployment in an ubuntu/debian linux"
@echo " deps-cuda Dependencies for deployment with GPU support via Conda"
@echo " deps-ubuntu Dependencies for deployment in an Ubuntu/Debian Linux"
@echo " deps-test Install test python deps via pip"
@echo " install (Re)install the tool"
@echo " install-dev Install with pip install -e"
@@ -47,6 +48,58 @@ help:
# pip install command. Default: $(PIP_INSTALL)
PIP_INSTALL = $(PIP) install
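
Since PIP, PYTHON and PIP_INSTALL are ordinary Make variables, a caller can override them per invocation without editing the Makefile. A purely illustrative call (the values are examples, not project defaults):

	make deps-cuda PYTHON=python3.8 PIP="python3.8 -m pip"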

deps-cuda: CONDA_EXE ?= /usr/local/bin/conda
deps-cuda: export CONDA_PREFIX ?= /conda
deps-cuda: PYTHON_PREFIX != $(PYTHON) -c 'import sysconfig; print(sysconfig.get_paths()["purelib"])'
deps-cuda:
curl -Ls https://micro.mamba.pm/api/micromamba/linux-64/latest | tar -xvj bin/micromamba
A contributor commented on lines +53 to +55:
These lines cause build warnings in the latest code (where line numbers changed):

Makefile:63: warning: overriding commands for target `deps-cuda'
Makefile:61: warning: ignoring old commands for target `deps-cuda'
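
The warnings quoted above are GNU Make's standard complaint when two rules both supply a recipe for the same target: the later recipe silently replaces the earlier one, so steps can be dropped without an error. A minimal standalone sketch reproducing the effect (not from this repository; the exact wording, "commands" vs. "recipe", depends on the Make version):

demo:
	@echo first recipe

demo:
	@echo second recipe

# `make demo` warns about overriding/ignoring the recipe for target 'demo'
# and then runs only the second recipe.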

mv bin/micromamba $(CONDA_EXE)
# Install Conda system-wide (for interactive / login shells)
echo 'export MAMBA_EXE=$(CONDA_EXE) MAMBA_ROOT_PREFIX=$(CONDA_PREFIX) CONDA_PREFIX=$(CONDA_PREFIX) PATH=$(CONDA_PREFIX)/bin:$$PATH' >> /etc/profile.d/98-conda.sh
mkdir -p $(CONDA_PREFIX)/lib $(CONDA_PREFIX)/include
echo $(CONDA_PREFIX)/lib >> /etc/ld.so.conf.d/conda.conf
# Get the CUDA toolkit, including the compiler and the dev libraries.
# However, the Nvidia channels do not provide (recent) cudnn (needed for Torch, TF etc.):
#conda install -c nvidia/label/cuda-11.8.0 cuda && conda clean -a
#
# The conda-forge channel has cudnn and cudatoolkit but no cudatoolkit-dev anymore (and we need both!),
# so let's combine nvidia and conda-forge (will be same lib versions, no waste of space),
# but omitting cuda-cudart-dev and cuda-libraries-dev (as these will be pulled by pip for torch anyway):
conda install -c nvidia/label/cuda-11.8.0 \
cuda-nvcc \
cuda-cccl \
&& conda clean -a \
&& find $(CONDA_PREFIX) -name "*_static.a" -delete
#conda install -c conda-forge \
# cudatoolkit=11.8.0 \
# cudnn=8.8.* && \
#conda clean -a && \
#find $(CONDA_PREFIX) -name "*_static.a" -delete
#
# Since Torch will pull in the CUDA libraries (as Python pkgs) anyway,
# let's jump the shark and pull these via NGC index directly,
# but then share them with the rest of the system so native compilation/linking
# works, too:
$(PIP) install nvidia-pyindex \
&& $(PIP) install nvidia-cudnn-cu11==8.6.0.163 \
nvidia-cublas-cu11 \
nvidia-cusparse-cu11 \
nvidia-cusolver-cu11 \
nvidia-curand-cu11 \
nvidia-cufft-cu11 \
nvidia-cuda-runtime-cu11 \
nvidia-cuda-nvrtc-cu11 \
&& for pkg in cudnn cublas cusparse cusolver curand cufft cuda_runtime cuda_nvrtc; do \
for lib in $(PYTHON_PREFIX)/nvidia/$$pkg/lib/lib*.so.*; do \
base=`basename $$lib`; \
ln -s $$lib $(CONDA_PREFIX)/lib/$$base.so; \
ln -s $$lib $(CONDA_PREFIX)/lib/$${base%.so.*}.so; \
done \
&& ln -s $(PYTHON_PREFIX)/nvidia/$$pkg/include/* $(CONDA_PREFIX)/include/; \
done \
&& ldconfig
# gputil/nvidia-smi would be nice, too – but that drags in Python as a conda dependency...
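
Because CONDA_EXE and CONDA_PREFIX are assigned with ?=, any values already exported in the environment (such as the ENV settings kept in Dockerfile.cuda) take precedence over the /usr/local/bin/conda and /conda defaults. After the target has run, the symlinked NVIDIA libraries should be resolvable system-wide via the ld.so.conf.d entry; a hypothetical smoke-test target (not part of this PR) could check that:

check-cuda:
	ldconfig -p | grep -e libcudnn -e libcublas -e libcudart
	nvcc --version    # assumes the conda bin directory is on PATH, as Dockerfile.cuda arranges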

# Dependencies for deployment in an ubuntu/debian linux
deps-ubuntu:
apt-get install -y python3 imagemagick libgeos-dev