Skip to content
This repository has been archived by the owner on Oct 31, 2023. It is now read-only.

add the option to use a FORCE_CUDA to force cuda installation on docker #612

Merged
Merged 12 commits into the base branch on Mar 31, 2019
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions docker/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
ARG CUDA="9.0"
ARG CUDNN="7"
ARG FORCE_CUDA="1"
obendidi marked this conversation as resolved.
Show resolved Hide resolved

FROM nvidia/cuda:${CUDA}-cudnn${CUDNN}-devel-ubuntu16.04

Expand Down Expand Up @@ -32,7 +33,6 @@ RUN conda install -y ipython
RUN pip install ninja yacs cython matplotlib opencv-python

# Install PyTorch 1.0 Nightly
ARG CUDA
obendidi marked this conversation as resolved.
Show resolved Hide resolved
RUN echo conda install pytorch-nightly cudatoolkit=${CUDA} -c pytorch \
&& conda clean -ya

Expand All @@ -47,8 +47,8 @@ RUN git clone https://github.com/cocodataset/cocoapi.git \
&& python setup.py build_ext install

# install PyTorch Detection
RUN git clone https://github.com/facebookresearch/maskrcnn-benchmark.git \
RUN git clone https://github.com/bendidi/maskrcnn-benchmark.git \
obendidi marked this conversation as resolved.
Show resolved Hide resolved
&& cd maskrcnn-benchmark \
&& python setup.py build develop
&& FORCE_CUDA=1 python setup.py build develop

WORKDIR /maskrcnn-benchmark
3 changes: 2 additions & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,8 @@ def get_extensions():
extra_compile_args = {"cxx": []}
define_macros = []

if torch.cuda.is_available() and CUDA_HOME is not None:
if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1":
print("\n\nINSTALLING WITH CUDA SUPPORT\n\n")
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
Expand Down