diff --git a/INSTALL.md b/INSTALL.md
index 8365be8f9..4db4b5bb6 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -58,13 +58,17 @@ unset INSTALL_DIR
 
 ### Option 2: Docker Image (Requires CUDA, Linux only)
 
-Build image with defaults (`CUDA=9.0`, `CUDNN=7`):
+Build image with defaults (`CUDA=9.0`, `CUDNN=7`, `FORCE_CUDA=1`):
 
     nvidia-docker build -t maskrcnn-benchmark docker/
 
 Build image with other CUDA and CUDNN versions:
 
-    nvidia-docker build -t maskrcnn-benchmark --build-arg CUDA=9.2 --build-arg CUDNN=7 docker/
+    nvidia-docker build -t maskrcnn-benchmark --build-arg CUDA=9.2 --build-arg CUDNN=7 docker/
+
+Build image with FORCE_CUDA disabled:
+
+    nvidia-docker build -t maskrcnn-benchmark --build-arg FORCE_CUDA=0 docker/
 
 Build and run image with built-in jupyter notebook(note that the password is used to log in jupyter notebook):
 
diff --git a/demo/README.md b/demo/README.md
index 393a064b0..5926f8d35 100644
--- a/demo/README.md
+++ b/demo/README.md
@@ -38,7 +38,8 @@ docker run --rm -it \
     -v /tmp/.X11-unix:/tmp/.X11-unix \
     --device=/dev/video0:/dev/video0 \
     --ipc=host maskrcnn-benchmark \
-    python demo/webcam.py --min-image-size 300
+    python demo/webcam.py --min-image-size 300 \
+    --config-file configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml
 ```
 
 **DISCLAIMER:** *This was tested for an Ubuntu 16.04 machine,
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 58b924cf4..ba92f2215 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -47,6 +47,8 @@ RUN git clone https://github.com/cocodataset/cocoapi.git \
  && python setup.py build_ext install
 
 # install PyTorch Detection
+ARG FORCE_CUDA="1"
+ENV FORCE_CUDA=${FORCE_CUDA}
 RUN git clone https://github.com/facebookresearch/maskrcnn-benchmark.git \
  && cd maskrcnn-benchmark \
  && python setup.py build develop
diff --git a/setup.py b/setup.py
index bfb6845e5..837c2cd15 100644
--- a/setup.py
+++ b/setup.py
@@ -28,7 +28,7 @@ def get_extensions():
     extra_compile_args = {"cxx": []}
     define_macros = []
 
-    if torch.cuda.is_available() and CUDA_HOME is not None:
+    if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1":
         extension = CUDAExtension
         sources += source_cuda
         define_macros += [("WITH_CUDA", None)]
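
The same flag can also be exported when building from a source checkout outside Docker, for example on a build host that has the CUDA toolkit installed but no GPU visible at build time (the situation `docker build` is in). A minimal sketch, assuming a local clone of maskrcnn-benchmark and that `nvcc`/`CUDA_HOME` are available:

    # Sketch: compile the CUDA extensions even though torch.cuda.is_available()
    # reports False during the build (no GPU visible). Requires the CUDA toolkit,
    # since setup.py will select CUDAExtension when FORCE_CUDA=1 is set.
    cd maskrcnn-benchmark
    FORCE_CUDA=1 python setup.py build develop

This mirrors what the Dockerfile change does: with the default `FORCE_CUDA="1"` build arg, the ARG/ENV pair exports the variable before `python setup.py build develop` runs, so the image can still ship GPU-enabled extensions even though no GPU is typically attached during `docker build`.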