From c7ad68db9784293e6e005de3457eb35cb4eddc50 Mon Sep 17 00:00:00 2001 From: AllentDan Date: Wed, 12 Jan 2022 15:07:04 +0800 Subject: [PATCH 01/16] add gpu and cpu dockerfile --- docker/CPU/Dockerfile | 110 ++++++++++++++++++++++++++++++++++++++++++ docker/GPU/Dockerfile | 108 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 218 insertions(+) create mode 100644 docker/CPU/Dockerfile create mode 100644 docker/GPU/Dockerfile diff --git a/docker/CPU/Dockerfile b/docker/CPU/Dockerfile new file mode 100644 index 0000000000..213b2fe35c --- /dev/null +++ b/docker/CPU/Dockerfile @@ -0,0 +1,110 @@ +ARG OS_VERSION=18.04 +FROM ubuntu:${OS_VERSION} as official +ARG PYTHON_VERSION=3.8 +ARG TORCH_VERSION=1.8.0 +ARG TORCHVISION_VERSION=0.9.0 +ARG ONNXRUNTIME_VERSION=1.8.1 +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates \ + build-essential \ + libjpeg-dev \ + libpng-dev \ + ccache \ + cmake \ + gcc \ + g++ \ + git \ + vim \ + wget \ + curl \ + && rm -rf /var/lib/apt/lists/* +### update apt and install libs +#RUN apt-get update &&\ +# apt-get install -y vim cmake libsm6 libxext6 libxrender-dev libgl1-mesa-glx git wget + +RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ + chmod +x ~/miniconda.sh && \ + ~/miniconda.sh -b -p /opt/conda && \ + rm ~/miniconda.sh && \ + /opt/conda/bin/conda install -y python=${PYTHON_VERSION} conda-build pyyaml numpy ipython cython typing typing_extensions mkl mkl-include ninja && \ + /opt/conda/bin/conda clean -ya + +### pytorch +#RUN /opt/conda/bin/conda install pytorch==${TORCH_VERSION} torchvision==${TORCHVISION_VERSION} -c pytorch +RUN /opt/conda/bin/pip install torch==${TORCH_VERSION}+cpu torchvision==${TORCHVISION_VERSION}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html +ENV PATH /opt/conda/bin:$PATH + +### install open-mim +RUN /opt/conda/bin/pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch${TORCH_VERSION}/index.html + +#### git mmdetection +#RUN git clone --depth=1 https://github.com/open-mmlab/mmdetection.git /root/space/mmdetection + +### install mmdetection +#RUN cd /root/space/mmdetection &&\ +# pip3 install -r requirements.txt &&\ +# python3 setup.py develop + +WORKDIR /root/workspace +RUN git clone https://github.com/open-mmlab/mmclassification + + +### get onnxruntime +RUN wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz \ + && tar -zxvf onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz + +ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION} + +### update cmake to 20 +RUN apt-get update && apt-get install libssl-dev &&\ + wget https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0.tar.gz &&\ + tar -zxvf cmake-3.20.0.tar.gz &&\ + cd cmake-3.20.0 &&\ + ./bootstrap &&\ + make &&\ + make install &&\ + rm -rf /var/lib/apt/lists/* + +## install onnxruntme and openvino +RUN /opt/conda/bin/pip install onnxruntime==${ONNXRUNTIME_VERSION} openvino-dev + +## build ncnn +RUN apt-get update && apt-get install libprotobuf-dev protobuf-compiler -y --no-install-recommends &&\ + git clone https://github.com/Tencent/ncnn.git &&\ + cd ncnn &&\ + export NCNN_DIR=$(pwd) &&\ + git submodule update --init &&\ + mkdir -p build && cd build &&\ + cmake -DNCNN_VULKAN=OFF -DNCNN_SYSTEM_GLSLANG=ON -DNCNN_BUILD_EXAMPLES=ON -DNCNN_PYTHON=ON -DNCNN_BUILD_TOOLS=ON 
-DNCNN_BUILD_BENCHMARK=ON -DNCNN_BUILD_TESTS=ON .. &&\ + make install &&\ + cd ${NCNN_DIR} # To NCNN root directory &&\ + cd python &&\ + pip install -e . + +## build ppl.nn +WORKDIR /root/workspace +RUN git clone https://github.com/openppl-public/ppl.nn &&\ + cd ppl.nn &&\ + ./build.sh -DPPLNN_ENABLE_PYTHON_API=ON -DHPCC_USE_X86_64=ON &&\ + rm -rf /tmp/pyppl-package &&\ + cp -r python/package /tmp/pyppl-package &&\ + cp -r pplnn-build/install/lib/pyppl/* /tmp/pyppl-package/pyppl &&\ + cd /tmp/pyppl-package &&\ + pip3 install . + +### install mmdeploy +WORKDIR /root/workspace +RUN git clone https://github.com/open-mmlab/mmdeploy &&\ + git submodule update --init --recursive &&\ + cd mmdeploy &&\ + rm -rf build &&\ + mkdir build &&\ + cd build &&\ + cmake -DMMDEPLOY_TARGET_BACKENDS=ncnn -Dncnn_DIR=/root/workspace/build/install/lib/cmake/ncnn .. &&\ + make -j$(nproc) &&\ + cmake -DMMDEPLOY_TARGET_BACKENDS=ort .. &&\ + make -j$(nproc) &&\ + cd .. &&\ + pip install -e . + diff --git a/docker/GPU/Dockerfile b/docker/GPU/Dockerfile new file mode 100644 index 0000000000..55a382c5c4 --- /dev/null +++ b/docker/GPU/Dockerfile @@ -0,0 +1,108 @@ +FROM nvcr.io/nvidia/tensorrt:21.04-py3 + +ARG CUDA=10.2 +ARG PYTHON_VERSION=3.8 +ARG TORCH_VERSION=1.8.0 +ARG TORCHVISION_VERSION=0.9.0 +ARG ONNXRUNTIME_VERSION=1.8.1 +ENV FORCE_CUDA="1" + +ENV DEBIAN_FRONTEND=noninteractive + +### update apt and install libs +RUN apt-get update &&\ + apt-get install -y vim cmake libsm6 libxext6 libxrender-dev libgl1-mesa-glx git wget + +RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ + chmod +x ~/miniconda.sh && \ + ~/miniconda.sh -b -p /opt/conda && \ + rm ~/miniconda.sh && \ + /opt/conda/bin/conda install -y python=${PYTHON_VERSION} conda-build pyyaml numpy ipython cython typing typing_extensions mkl mkl-include ninja && \ + /opt/conda/bin/conda clean -ya + +### pytorch +RUN /opt/conda/bin/conda install pytorch==${TORCH_VERSION} torchvision==${TORCHVISION_VERSION} cudatoolkit=${CUDA} -c pytorch +ENV PATH /opt/conda/bin:$PATH +#RUN pip3 install torch==${TORCH_VERSION} torchvision==${TORCHVISION_VERSION} + +RUN ls /opt/conda/bin/ +### install open-mim +RUN /opt/conda/bin/pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu${CUDA//./}/torch${TORCH_VERSION}/index.html + +### git mmdetection +RUN git clone --depth=1 https://github.com/open-mmlab/mmdetection.git /root/space/mmdetection + +### install mmdetection +RUN cd /root/space/mmdetection &&\ + pip3 install -r requirements.txt &&\ + python3 setup.py develop + +WORKDIR /root/workspace +RUN git clone https://github.com/open-mmlab/mmclassification + + +### get onnxruntime +RUN wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz \ + && tar -zxvf onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz &&\ + pip install onnxruntime-gpu + +ENV PATH_TO_ONNXRUNTIME=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION} +ENV PATH_TO_TENSORRT=/workspace/tensorrt + +### cp trt from pip to conda +RUN cp -r /usr/local/lib/python3.8/dist-packages/tensorrt* /opt/conda/lib/python3.8/site-packages/ + +### update cmake to 20 +RUN apt-get install -y libssl-dev &&\ + wget https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0.tar.gz &&\ + tar -zxvf cmake-3.20.0.tar.gz &&\ + cd cmake-3.20.0 &&\ + ./bootstrap &&\ + make &&\ + make install + +### build ppl.nn +WORKDIR /root/workspace +RUN git clone 
https://github.com/openppl-public/ppl.nn &&\ + cd ppl.nn &&\ + ./build.sh -DPPLNN_ENABLE_PYTHON_API=ON -DHPCC_USE_X86_64=ON -DHPCC_USE_CUDA=ON &&\ + rm -rf /tmp/pyppl-package &&\ + cp -r python/package /tmp/pyppl-package &&\ + cp -r pplnn-build/install/lib/pyppl/* /tmp/pyppl-package/pyppl &&\ + cd /tmp/pyppl-package &&\ + pip3 install . + +### install mmdeploy +WORKDIR /root/workspace +ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION} +ENV TENSORRT_DIR=/workspace/tensorrt +RUN git clone https://github.com/open-mmlab/mmdeploy &&\ + cd mmdeploy &&\ + git submodule update --init --recursive &&\ + rm -rf build &&\ + mkdir build &&\ + cd build &&\ + cmake -DMMDEPLOY_TARGET_BACKENDS=ort .. &&\ + make -j$(nproc) &&\ + cmake -DMMDEPLOY_TARGET_BACKENDS=trt .. &&\ + make -j$(nproc) &&\ + cd .. &&\ + pip install -e . + +### build sdk +RUN apt-get update && apt-get install libopencv-dev libspdlog-dev -y --no-install-recommends &&\ + rm -rf /var/lib/apt/lists/* +RUN git clone https://github.com/openppl-public/ppl.cv.git &&\ + cd ppl.cv &&\ + ./build.sh cuda +RUN cd /root/workspace/mmdeploy &&\ + mkdir -p sdk-build && cd sdk-build &&\ + cmake .. \ + -DMMDEPLOY_BUILD_SDK=ON \ + -DCMAKE_CXX_COMPILER=g++ \ + -Dpplcv_DIR=/root/workspace/ppl.cv/cuda-build/install/lib/cmake/ppl \ + -DTENSORRT_DIR=${TENSORRT_DIR} \ + -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \ + -DMMDEPLOY_TARGET_BACKENDS=trt \ + -DMMDEPLOY_CODEBASES=all &&\ + cmake --build . -- -j$(nproc) && cmake --install . From 206aa667549d644b0fb6c130b364a4f8e82f4395 Mon Sep 17 00:00:00 2001 From: AllentDan Date: Wed, 12 Jan 2022 15:13:22 +0800 Subject: [PATCH 02/16] fix lint --- docker/CPU/Dockerfile | 26 +++++++------------------- docker/GPU/Dockerfile | 40 ++++++++++++++++++++-------------------- 2 files changed, 27 insertions(+), 39 deletions(-) diff --git a/docker/CPU/Dockerfile b/docker/CPU/Dockerfile index 213b2fe35c..b0f6d55d00 100644 --- a/docker/CPU/Dockerfile +++ b/docker/CPU/Dockerfile @@ -1,4 +1,4 @@ -ARG OS_VERSION=18.04 +ARG OS_VERSION=18.04 FROM ubuntu:${OS_VERSION} as official ARG PYTHON_VERSION=3.8 ARG TORCH_VERSION=1.8.0 @@ -19,15 +19,12 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ wget \ curl \ && rm -rf /var/lib/apt/lists/* -### update apt and install libs -#RUN apt-get update &&\ -# apt-get install -y vim cmake libsm6 libxext6 libxrender-dev libgl1-mesa-glx git wget - -RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ - chmod +x ~/miniconda.sh && \ - ~/miniconda.sh -b -p /opt/conda && \ - rm ~/miniconda.sh && \ - /opt/conda/bin/conda install -y python=${PYTHON_VERSION} conda-build pyyaml numpy ipython cython typing typing_extensions mkl mkl-include ninja && \ + +RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ + chmod +x ~/miniconda.sh && \ + ~/miniconda.sh -b -p /opt/conda && \ + rm ~/miniconda.sh && \ + /opt/conda/bin/conda install -y python=${PYTHON_VERSION} conda-build pyyaml numpy ipython cython typing typing_extensions mkl mkl-include ninja && \ /opt/conda/bin/conda clean -ya ### pytorch @@ -38,14 +35,6 @@ ENV PATH /opt/conda/bin:$PATH ### install open-mim RUN /opt/conda/bin/pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch${TORCH_VERSION}/index.html -#### git mmdetection -#RUN git clone --depth=1 https://github.com/open-mmlab/mmdetection.git /root/space/mmdetection - -### install mmdetection -#RUN cd /root/space/mmdetection 
&&\ -# pip3 install -r requirements.txt &&\ -# python3 setup.py develop - WORKDIR /root/workspace RUN git clone https://github.com/open-mmlab/mmclassification @@ -107,4 +96,3 @@ RUN git clone https://github.com/open-mmlab/mmdeploy &&\ make -j$(nproc) &&\ cd .. &&\ pip install -e . - diff --git a/docker/GPU/Dockerfile b/docker/GPU/Dockerfile index 55a382c5c4..91142eae07 100644 --- a/docker/GPU/Dockerfile +++ b/docker/GPU/Dockerfile @@ -1,23 +1,23 @@ -FROM nvcr.io/nvidia/tensorrt:21.04-py3 - -ARG CUDA=10.2 -ARG PYTHON_VERSION=3.8 -ARG TORCH_VERSION=1.8.0 -ARG TORCHVISION_VERSION=0.9.0 -ARG ONNXRUNTIME_VERSION=1.8.1 -ENV FORCE_CUDA="1" - -ENV DEBIAN_FRONTEND=noninteractive - -### update apt and install libs -RUN apt-get update &&\ - apt-get install -y vim cmake libsm6 libxext6 libxrender-dev libgl1-mesa-glx git wget - -RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ - chmod +x ~/miniconda.sh && \ - ~/miniconda.sh -b -p /opt/conda && \ - rm ~/miniconda.sh && \ - /opt/conda/bin/conda install -y python=${PYTHON_VERSION} conda-build pyyaml numpy ipython cython typing typing_extensions mkl mkl-include ninja && \ +FROM nvcr.io/nvidia/tensorrt:21.04-py3 + +ARG CUDA=10.2 +ARG PYTHON_VERSION=3.8 +ARG TORCH_VERSION=1.8.0 +ARG TORCHVISION_VERSION=0.9.0 +ARG ONNXRUNTIME_VERSION=1.8.1 +ENV FORCE_CUDA="1" + +ENV DEBIAN_FRONTEND=noninteractive + +### update apt and install libs +RUN apt-get update &&\ + apt-get install -y vim cmake libsm6 libxext6 libxrender-dev libgl1-mesa-glx git wget + +RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ + chmod +x ~/miniconda.sh && \ + ~/miniconda.sh -b -p /opt/conda && \ + rm ~/miniconda.sh && \ + /opt/conda/bin/conda install -y python=${PYTHON_VERSION} conda-build pyyaml numpy ipython cython typing typing_extensions mkl mkl-include ninja && \ /opt/conda/bin/conda clean -ya ### pytorch From 88b4d2862c3d5c3eeaca71e1ad23fa627d3f468d Mon Sep 17 00:00:00 2001 From: AllentDan Date: Thu, 13 Jan 2022 11:45:32 +0800 Subject: [PATCH 03/16] fix cpu docker and remove redundant --- docker/CPU/Dockerfile | 5 ++--- docker/GPU/Dockerfile | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/docker/CPU/Dockerfile b/docker/CPU/Dockerfile index b0f6d55d00..39041260c2 100644 --- a/docker/CPU/Dockerfile +++ b/docker/CPU/Dockerfile @@ -28,7 +28,6 @@ RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Mini /opt/conda/bin/conda clean -ya ### pytorch -#RUN /opt/conda/bin/conda install pytorch==${TORCH_VERSION} torchvision==${TORCHVISION_VERSION} -c pytorch RUN /opt/conda/bin/pip install torch==${TORCH_VERSION}+cpu torchvision==${TORCHVISION_VERSION}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html ENV PATH /opt/conda/bin:$PATH @@ -85,12 +84,12 @@ RUN git clone https://github.com/openppl-public/ppl.nn &&\ ### install mmdeploy WORKDIR /root/workspace RUN git clone https://github.com/open-mmlab/mmdeploy &&\ - git submodule update --init --recursive &&\ cd mmdeploy &&\ + git submodule update --init --recursive &&\ rm -rf build &&\ mkdir build &&\ cd build &&\ - cmake -DMMDEPLOY_TARGET_BACKENDS=ncnn -Dncnn_DIR=/root/workspace/build/install/lib/cmake/ncnn .. &&\ + cmake -DMMDEPLOY_TARGET_BACKENDS=ncnn -Dncnn_DIR=/root/workspace/ncnn/build/install/lib/cmake/ncnn .. &&\ make -j$(nproc) &&\ cmake -DMMDEPLOY_TARGET_BACKENDS=ort .. 
&&\ make -j$(nproc) &&\ diff --git a/docker/GPU/Dockerfile b/docker/GPU/Dockerfile index 91142eae07..20c09e7778 100644 --- a/docker/GPU/Dockerfile +++ b/docker/GPU/Dockerfile @@ -23,7 +23,6 @@ RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Mini ### pytorch RUN /opt/conda/bin/conda install pytorch==${TORCH_VERSION} torchvision==${TORCHVISION_VERSION} cudatoolkit=${CUDA} -c pytorch ENV PATH /opt/conda/bin:$PATH -#RUN pip3 install torch==${TORCH_VERSION} torchvision==${TORCHVISION_VERSION} RUN ls /opt/conda/bin/ ### install open-mim From 17720c3d83abf71e97cb61e7126105209fded896 Mon Sep 17 00:00:00 2001 From: AllentDan Date: Thu, 13 Jan 2022 11:47:25 +0800 Subject: [PATCH 04/16] use pip instead --- docker/CPU/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/CPU/Dockerfile b/docker/CPU/Dockerfile index 39041260c2..994ca00768 100644 --- a/docker/CPU/Dockerfile +++ b/docker/CPU/Dockerfile @@ -79,7 +79,7 @@ RUN git clone https://github.com/openppl-public/ppl.nn &&\ cp -r python/package /tmp/pyppl-package &&\ cp -r pplnn-build/install/lib/pyppl/* /tmp/pyppl-package/pyppl &&\ cd /tmp/pyppl-package &&\ - pip3 install . + pip install . ### install mmdeploy WORKDIR /root/workspace From 924d90295d9849065c3bd3e3ae6872e6ca911847 Mon Sep 17 00:00:00 2001 From: AllentDan Date: Mon, 17 Jan 2022 11:14:49 +0800 Subject: [PATCH 05/16] add build arg and readme --- docker/CPU/Dockerfile | 58 +++++++++++++++++++++++-------------------- docker/GPU/Dockerfile | 53 +++++++++++---------------------------- docker/README.md | 29 ++++++++++++++++++++++ 3 files changed, 75 insertions(+), 65 deletions(-) create mode 100644 docker/README.md diff --git a/docker/CPU/Dockerfile b/docker/CPU/Dockerfile index 994ca00768..c2aa8621a4 100644 --- a/docker/CPU/Dockerfile +++ b/docker/CPU/Dockerfile @@ -1,12 +1,15 @@ -ARG OS_VERSION=18.04 -FROM ubuntu:${OS_VERSION} as official -ARG PYTHON_VERSION=3.8 +FROM openvino/ubuntu18_dev:2021.4.2 +ARG PYTHON_VERSION=3.7 ARG TORCH_VERSION=1.8.0 ARG TORCHVISION_VERSION=0.9.0 ARG ONNXRUNTIME_VERSION=1.8.1 -ENV DEBIAN_FRONTEND=noninteractive +USER root RUN apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ + libopencv-dev libspdlog-dev \ + gnupg \ + libssl-dev \ + libprotobuf-dev protobuf-compiler \ build-essential \ libjpeg-dev \ libpng-dev \ @@ -32,7 +35,7 @@ RUN /opt/conda/bin/pip install torch==${TORCH_VERSION}+cpu torchvision==${TORCHV ENV PATH /opt/conda/bin:$PATH ### install open-mim -RUN /opt/conda/bin/pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch${TORCH_VERSION}/index.html +RUN /opt/conda/bin/pip install mmcv-full==1.4.0 -f https://download.openmmlab.com/mmcv/dist/cpu/torch${TORCH_VERSION}/index.html WORKDIR /root/workspace RUN git clone https://github.com/open-mmlab/mmclassification @@ -45,46 +48,33 @@ RUN wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTI ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION} ### update cmake to 20 -RUN apt-get update && apt-get install libssl-dev &&\ - wget https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0.tar.gz &&\ +RUN wget https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0.tar.gz &&\ tar -zxvf cmake-3.20.0.tar.gz &&\ cd cmake-3.20.0 &&\ ./bootstrap &&\ make &&\ - make install &&\ - rm -rf /var/lib/apt/lists/* + make install -## install onnxruntme and openvino +### install onnxruntme and openvino RUN /opt/conda/bin/pip install 
onnxruntime==${ONNXRUNTIME_VERSION} openvino-dev -## build ncnn -RUN apt-get update && apt-get install libprotobuf-dev protobuf-compiler -y --no-install-recommends &&\ - git clone https://github.com/Tencent/ncnn.git &&\ +### build ncnn +RUN git clone https://github.com/Tencent/ncnn.git &&\ cd ncnn &&\ export NCNN_DIR=$(pwd) &&\ git submodule update --init &&\ mkdir -p build && cd build &&\ cmake -DNCNN_VULKAN=OFF -DNCNN_SYSTEM_GLSLANG=ON -DNCNN_BUILD_EXAMPLES=ON -DNCNN_PYTHON=ON -DNCNN_BUILD_TOOLS=ON -DNCNN_BUILD_BENCHMARK=ON -DNCNN_BUILD_TESTS=ON .. &&\ make install &&\ - cd ${NCNN_DIR} # To NCNN root directory &&\ - cd python &&\ + cd /root/workspace/ncnn/python &&\ pip install -e . -## build ppl.nn -WORKDIR /root/workspace -RUN git clone https://github.com/openppl-public/ppl.nn &&\ - cd ppl.nn &&\ - ./build.sh -DPPLNN_ENABLE_PYTHON_API=ON -DHPCC_USE_X86_64=ON &&\ - rm -rf /tmp/pyppl-package &&\ - cp -r python/package /tmp/pyppl-package &&\ - cp -r pplnn-build/install/lib/pyppl/* /tmp/pyppl-package/pyppl &&\ - cd /tmp/pyppl-package &&\ - pip install . - ### install mmdeploy WORKDIR /root/workspace -RUN git clone https://github.com/open-mmlab/mmdeploy &&\ +ARG VERSION +RUN git clone https://github.com/open-mmlab/mmdeploy.git &&\ cd mmdeploy &&\ + if [ -z ${VERSION} ] ; then echo "No MMDeploy version passed in, building on master" ; else git checkout tags/v${VERSION} -b tag_v${VERSION} ; fi &&\ git submodule update --init --recursive &&\ rm -rf build &&\ mkdir build &&\ @@ -95,3 +85,17 @@ RUN git clone https://github.com/open-mmlab/mmdeploy &&\ make -j$(nproc) &&\ cd .. &&\ pip install -e . + +### build SDK +RUN cd mmdeploy && rm -rf build/CM* && mkdir -p build && cd build && cmake .. \ + -DMMDEPLOY_BUILD_SDK=ON \ + -DCMAKE_CXX_COMPILER=g++-7 \ + -DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR} \ + -Dncnn_DIR=/root/workspace/ncnn/build/install/lib/cmake/ncnn \ + -DInferenceEngine_DIR=/opt/intel/openvino/deployment_tools/inference_engine/share \ + -DMMDEPLOY_TARGET_DEVICES=cpu \ + -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \ + -DMMDEPLOY_TARGET_BACKENDS="ort;ncnn;openvino" \ + -DMMDEPLOY_CODEBASES=all &&\ + cmake --build . -- -j$(nproc) && cmake --install . &&\ + if [ -z ${VERSION} ] ; then echo "Build MMDeploy master for CPU devices succeed!" ; else echo "Build MMDeploy version v${VERSION} for CPU devices succeed!" 
; fi diff --git a/docker/GPU/Dockerfile b/docker/GPU/Dockerfile index 20c09e7778..d2241ed8b5 100644 --- a/docker/GPU/Dockerfile +++ b/docker/GPU/Dockerfile @@ -11,7 +11,8 @@ ENV DEBIAN_FRONTEND=noninteractive ### update apt and install libs RUN apt-get update &&\ - apt-get install -y vim cmake libsm6 libxext6 libxrender-dev libgl1-mesa-glx git wget + apt-get install -y vim libsm6 libxext6 libxrender-dev libgl1-mesa-glx git wget libssl-dev libopencv-dev libspdlog-dev --no-install-recommends &&\ + rm -rf /var/lib/apt/lists/* RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ chmod +x ~/miniconda.sh && \ @@ -24,59 +25,34 @@ RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Mini RUN /opt/conda/bin/conda install pytorch==${TORCH_VERSION} torchvision==${TORCHVISION_VERSION} cudatoolkit=${CUDA} -c pytorch ENV PATH /opt/conda/bin:$PATH -RUN ls /opt/conda/bin/ -### install open-mim -RUN /opt/conda/bin/pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu${CUDA//./}/torch${TORCH_VERSION}/index.html - -### git mmdetection -RUN git clone --depth=1 https://github.com/open-mmlab/mmdetection.git /root/space/mmdetection - -### install mmdetection -RUN cd /root/space/mmdetection &&\ - pip3 install -r requirements.txt &&\ - python3 setup.py develop +### install mmcv-full +RUN /opt/conda/bin/pip install mmcv-full==1.4.0 -f https://download.openmmlab.com/mmcv/dist/cu${CUDA//./}/torch${TORCH_VERSION}/index.html &&\ + /opt/conda/bin/pip install mmdet==2.19.0 WORKDIR /root/workspace -RUN git clone https://github.com/open-mmlab/mmclassification - - ### get onnxruntime RUN wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz \ && tar -zxvf onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz &&\ - pip install onnxruntime-gpu - -ENV PATH_TO_ONNXRUNTIME=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION} -ENV PATH_TO_TENSORRT=/workspace/tensorrt + pip install onnxruntime-gpu==${ONNXRUNTIME_VERSION} ### cp trt from pip to conda RUN cp -r /usr/local/lib/python3.8/dist-packages/tensorrt* /opt/conda/lib/python3.8/site-packages/ ### update cmake to 20 -RUN apt-get install -y libssl-dev &&\ - wget https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0.tar.gz &&\ +RUN wget https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0.tar.gz &&\ tar -zxvf cmake-3.20.0.tar.gz &&\ cd cmake-3.20.0 &&\ ./bootstrap &&\ make &&\ make install -### build ppl.nn -WORKDIR /root/workspace -RUN git clone https://github.com/openppl-public/ppl.nn &&\ - cd ppl.nn &&\ - ./build.sh -DPPLNN_ENABLE_PYTHON_API=ON -DHPCC_USE_X86_64=ON -DHPCC_USE_CUDA=ON &&\ - rm -rf /tmp/pyppl-package &&\ - cp -r python/package /tmp/pyppl-package &&\ - cp -r pplnn-build/install/lib/pyppl/* /tmp/pyppl-package/pyppl &&\ - cd /tmp/pyppl-package &&\ - pip3 install . - ### install mmdeploy -WORKDIR /root/workspace ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION} ENV TENSORRT_DIR=/workspace/tensorrt +ARG VERSION RUN git clone https://github.com/open-mmlab/mmdeploy &&\ cd mmdeploy &&\ + if [ -z ${VERSION} ] ; then echo "No MMDeploy version passed in, building on master" ; else git checkout tags/v${VERSION} -b tag_v${VERSION} ; fi &&\ git submodule update --init --recursive &&\ rm -rf build &&\ mkdir build &&\ @@ -89,19 +65,20 @@ RUN git clone https://github.com/open-mmlab/mmdeploy &&\ pip install -e . 
### build sdk -RUN apt-get update && apt-get install libopencv-dev libspdlog-dev -y --no-install-recommends &&\ - rm -rf /var/lib/apt/lists/* RUN git clone https://github.com/openppl-public/ppl.cv.git &&\ cd ppl.cv &&\ ./build.sh cuda RUN cd /root/workspace/mmdeploy &&\ - mkdir -p sdk-build && cd sdk-build &&\ + rm -rf build/CM* &&\ + mkdir -p build && cd build &&\ cmake .. \ -DMMDEPLOY_BUILD_SDK=ON \ -DCMAKE_CXX_COMPILER=g++ \ -Dpplcv_DIR=/root/workspace/ppl.cv/cuda-build/install/lib/cmake/ppl \ -DTENSORRT_DIR=${TENSORRT_DIR} \ + -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \ -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \ - -DMMDEPLOY_TARGET_BACKENDS=trt \ + -DMMDEPLOY_TARGET_BACKENDS="trt" \ -DMMDEPLOY_CODEBASES=all &&\ - cmake --build . -- -j$(nproc) && cmake --install . + cmake --build . -- -j$(nproc) && cmake --install . &&\ + if [ -z ${VERSION} ] ; then echo "Build MMDeploy master for GPU devices succeed!" ; else echo "Build MMDeploy version v${VERSION} for GPU devices succeed!" ; fi diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 0000000000..968c31c5be --- /dev/null +++ b/docker/README.md @@ -0,0 +1,29 @@ +## Docker usage + +We provide two dockerfile for CPU and GPU respectively. For CPU users, we install MMDeploy with ONNXRuntime, ncnn and OpenVINO backends. For GPU users, we install MMDeploy with TensorRT backend. Besides, users can install mmdeploy with different versions when building the docker image. + +### Build docker image + +For CPU users, we can build the docker image with the latest MMDeploy through: +``` +cd mmdeploy +docker build docker/CPU/ -t mmdeploy:master +``` +For GPU users, we can build the docker image with the latest MMDeploy through: +``` +cd mmdeploy +docker build docker/GPU/ -t mmdeploy:master +``` + +For installing MMDeploy with a specific version, we can append `--build-arg VERSION=${VERSION}` to build command. GPU for example: +``` +cd mmdeploy +docker build docker/GPU/ -t mmdeploy:0.1.0 --build-arg VERSION=0.1.0 +``` + +### Run docker container + +After building docker image succeed, we can use `docker run` to launch the docker service. GPU docker image for example: +``` +docker run --gpus all -it -p 8080:8081 mmdeploy:master +``` From 5d75ea34b1cf152b1eff83f42809eb602a117068 Mon Sep 17 00:00:00 2001 From: AllentDan Date: Mon, 17 Jan 2022 11:19:11 +0800 Subject: [PATCH 06/16] fix grammar --- docker/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/README.md b/docker/README.md index 968c31c5be..4ac0b32530 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,6 +1,6 @@ ## Docker usage -We provide two dockerfile for CPU and GPU respectively. For CPU users, we install MMDeploy with ONNXRuntime, ncnn and OpenVINO backends. For GPU users, we install MMDeploy with TensorRT backend. Besides, users can install mmdeploy with different versions when building the docker image. +We provide two dockerfiles for CPU and GPU respectively. For CPU users, we install MMDeploy with ONNXRuntime, ncnn and OpenVINO backends. For GPU users, we install MMDeploy with TensorRT backend. Besides, users can install mmdeploy with different versions when building the docker image. ### Build docker image @@ -23,7 +23,7 @@ docker build docker/GPU/ -t mmdeploy:0.1.0 --build-arg VERSION=0.1.0 ### Run docker container -After building docker image succeed, we can use `docker run` to launch the docker service. GPU docker image for example: +After building the docker image succeed, we can use `docker run` to launch the docker service. 
GPU docker image for example: ``` docker run --gpus all -it -p 8080:8081 mmdeploy:master ``` From 21f76bc85cda29913d4752b3549639ad02c16275 Mon Sep 17 00:00:00 2001 From: AllentDan Date: Tue, 18 Jan 2022 11:39:42 +0800 Subject: [PATCH 07/16] update readme --- docker/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/README.md b/docker/README.md index 4ac0b32530..89bab190d4 100644 --- a/docker/README.md +++ b/docker/README.md @@ -7,12 +7,12 @@ We provide two dockerfiles for CPU and GPU respectively. For CPU users, we insta For CPU users, we can build the docker image with the latest MMDeploy through: ``` cd mmdeploy -docker build docker/CPU/ -t mmdeploy:master +docker build docker/CPU/ -t mmdeploy:master-cpu ``` For GPU users, we can build the docker image with the latest MMDeploy through: ``` cd mmdeploy -docker build docker/GPU/ -t mmdeploy:master +docker build docker/GPU/ -t mmdeploy:master-gpu ``` For installing MMDeploy with a specific version, we can append `--build-arg VERSION=${VERSION}` to build command. GPU for example: @@ -25,5 +25,5 @@ docker build docker/GPU/ -t mmdeploy:0.1.0 --build-arg VERSION=0.1.0 After building the docker image succeed, we can use `docker run` to launch the docker service. GPU docker image for example: ``` -docker run --gpus all -it -p 8080:8081 mmdeploy:master +docker run --gpus all -it -p 8080:8081 mmdeploy:master-gpu ``` From a7e87051d73831170b48bb87f5714e4a75a17ab8 Mon Sep 17 00:00:00 2001 From: AllentDan Date: Thu, 20 Jan 2022 09:51:11 +0800 Subject: [PATCH 08/16] add chinese doc for dockerfile and add docker build to build.md --- docs/en/build.md | 3 ++ .../en/tutorials/how_to_use_docker.md | 0 docs/zh_cn/build.md | 2 ++ docs/zh_cn/tutorials/how_to_use_docker.md | 29 +++++++++++++++++++ 4 files changed, 34 insertions(+) rename docker/README.md => docs/en/tutorials/how_to_use_docker.md (100%) create mode 100644 docs/zh_cn/tutorials/how_to_use_docker.md diff --git a/docs/en/build.md b/docs/en/build.md index 9ed5364a51..cd83b1ccbc 100644 --- a/docs/en/build.md +++ b/docs/en/build.md @@ -1,5 +1,8 @@ ## Build MMDeploy +We provide both physical and virtual machine building methods. For virtual machine building methods, please refer to +[how to use docker](tutorials/how_to_use_docker.md). For physical machine, please follow the steps below. 
+ ### Preparation - Download MMDeploy diff --git a/docker/README.md b/docs/en/tutorials/how_to_use_docker.md similarity index 100% rename from docker/README.md rename to docs/en/tutorials/how_to_use_docker.md diff --git a/docs/zh_cn/build.md b/docs/zh_cn/build.md index c0056e9a02..bd0fd4321c 100644 --- a/docs/zh_cn/build.md +++ b/docs/zh_cn/build.md @@ -1,5 +1,7 @@ ## 安装 MMdeploy +我们提供物理机和虚拟机构建方法。虚拟机搭建方法请参考[如何使用docker](tutorials/how_to_use_docker.md)。对于物理机,请按照以下步骤操作 + ### 准备工作 - 下载代码仓库 MMDeploy diff --git a/docs/zh_cn/tutorials/how_to_use_docker.md b/docs/zh_cn/tutorials/how_to_use_docker.md new file mode 100644 index 0000000000..bda56cefe5 --- /dev/null +++ b/docs/zh_cn/tutorials/how_to_use_docker.md @@ -0,0 +1,29 @@ +## Docker的使用 + +我们分别为 CPU 和 GPU 提供了两个 dockerfile。对于 CPU 用户,我们对接 ONNXRuntime、ncnn 和 OpenVINO 后端安装 MMDeploy。对于 GPU 用户,我们安装带有 TensorRT 后端的 MMDeploy。此外,用户可以在构建 docker 镜像时安装不同版本的 mmdeploy。 + +### 构建镜像 + +对于 CPU 用户,我们可以通过以下方式使用最新的 MMDeploy 构建 docker 镜像: +``` +cd mmdeploy +docker build docker/CPU/ -t mmdeploy:master-cpu +``` +对于 GPU 用户,我们可以通过以下方式使用最新的 MMDeploy 构建 docker 镜像: +``` +cd mmdeploy +docker build docker/GPU/ -t mmdeploy:master-gpu +``` + +要安装具有特定版本的 MMDeploy,我们可以将 `--build-arg VERSION=${VERSION}` 附加到构建命令中。以 GPU 为例: +``` +cd mmdeploy +docker build docker/GPU/ -t mmdeploy:0.1.0 --build-arg VERSION=0.1.0 +``` + +### 运行 docker 容器 + +构建 docker 镜像成功后,我们可以使用 `docker run` 启动 docker 服务。 GPU 镜像为例: +``` +docker run --gpus all -it -p 8080:8081 mmdeploy:master-gpu +``` From 3d705f190751f05edbde9ac4ff82262219420e98 Mon Sep 17 00:00:00 2001 From: AllentDan Date: Thu, 20 Jan 2022 09:53:10 +0800 Subject: [PATCH 09/16] grammar --- docs/en/build.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/build.md b/docs/en/build.md index cd83b1ccbc..deb72d4e10 100644 --- a/docs/en/build.md +++ b/docs/en/build.md @@ -1,6 +1,6 @@ ## Build MMDeploy -We provide both physical and virtual machine building methods. For virtual machine building methods, please refer to +We provide building methods for both physical and virtual machines. For virtual machine building methods, please refer to [how to use docker](tutorials/how_to_use_docker.md). For physical machine, please follow the steps below. 
### Preparation From aef6fb56b25c23f4b4a4dabcd98af32d3eb30b2b Mon Sep 17 00:00:00 2001 From: AllentDan Date: Thu, 20 Jan 2022 18:53:11 +0800 Subject: [PATCH 10/16] refine dockerfiles --- docker/CPU/Dockerfile | 12 +++++++----- docker/GPU/Dockerfile | 17 +++++++++-------- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/docker/CPU/Dockerfile b/docker/CPU/Dockerfile index c2aa8621a4..2b7915fadd 100644 --- a/docker/CPU/Dockerfile +++ b/docker/CPU/Dockerfile @@ -3,6 +3,8 @@ ARG PYTHON_VERSION=3.7 ARG TORCH_VERSION=1.8.0 ARG TORCHVISION_VERSION=0.9.0 ARG ONNXRUNTIME_VERSION=1.8.1 +ARG MMCV_VERSION=1.4.0 +ARG CMAKE_VERSION=3.20.0 USER root RUN apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ @@ -35,7 +37,7 @@ RUN /opt/conda/bin/pip install torch==${TORCH_VERSION}+cpu torchvision==${TORCHV ENV PATH /opt/conda/bin:$PATH ### install open-mim -RUN /opt/conda/bin/pip install mmcv-full==1.4.0 -f https://download.openmmlab.com/mmcv/dist/cpu/torch${TORCH_VERSION}/index.html +RUN /opt/conda/bin/pip install mmcv-full==${MMCV_VERSION} -f https://download.openmmlab.com/mmcv/dist/cpu/torch${TORCH_VERSION}/index.html WORKDIR /root/workspace RUN git clone https://github.com/open-mmlab/mmclassification @@ -48,9 +50,9 @@ RUN wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTI ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION} ### update cmake to 20 -RUN wget https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0.tar.gz &&\ - tar -zxvf cmake-3.20.0.tar.gz &&\ - cd cmake-3.20.0 &&\ +RUN wget https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz &&\ + tar -zxvf cmake-${CMAKE_VERSION}.tar.gz &&\ + cd cmake-${CMAKE_VERSION} &&\ ./bootstrap &&\ make &&\ make install @@ -98,4 +100,4 @@ RUN cd mmdeploy && rm -rf build/CM* && mkdir -p build && cd build && cmake .. \ -DMMDEPLOY_TARGET_BACKENDS="ort;ncnn;openvino" \ -DMMDEPLOY_CODEBASES=all &&\ cmake --build . -- -j$(nproc) && cmake --install . &&\ - if [ -z ${VERSION} ] ; then echo "Build MMDeploy master for CPU devices succeed!" ; else echo "Build MMDeploy version v${VERSION} for CPU devices succeed!" ; fi + if [ -z ${VERSION} ] ; then echo "Built MMDeploy master for CPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for CPU devices successfully!" 
; fi diff --git a/docker/GPU/Dockerfile b/docker/GPU/Dockerfile index d2241ed8b5..931ad9a9d6 100644 --- a/docker/GPU/Dockerfile +++ b/docker/GPU/Dockerfile @@ -5,6 +5,8 @@ ARG PYTHON_VERSION=3.8 ARG TORCH_VERSION=1.8.0 ARG TORCHVISION_VERSION=0.9.0 ARG ONNXRUNTIME_VERSION=1.8.1 +ARG MMCV_VERSION=1.4.0 +ARG CMAKE_VERSION=3.20.0 ENV FORCE_CUDA="1" ENV DEBIAN_FRONTEND=noninteractive @@ -26,8 +28,7 @@ RUN /opt/conda/bin/conda install pytorch==${TORCH_VERSION} torchvision==${TORCHV ENV PATH /opt/conda/bin:$PATH ### install mmcv-full -RUN /opt/conda/bin/pip install mmcv-full==1.4.0 -f https://download.openmmlab.com/mmcv/dist/cu${CUDA//./}/torch${TORCH_VERSION}/index.html &&\ - /opt/conda/bin/pip install mmdet==2.19.0 +RUN /opt/conda/bin/pip install mmcv-full==${MMCV_VERSION} -f https://download.openmmlab.com/mmcv/dist/cu${CUDA//./}/torch${TORCH_VERSION}/index.html WORKDIR /root/workspace ### get onnxruntime @@ -36,12 +37,12 @@ RUN wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTI pip install onnxruntime-gpu==${ONNXRUNTIME_VERSION} ### cp trt from pip to conda -RUN cp -r /usr/local/lib/python3.8/dist-packages/tensorrt* /opt/conda/lib/python3.8/site-packages/ +RUN cp -r /usr/local/lib/python${PYTHON_VERSION}/dist-packages/tensorrt* /opt/conda/lib/python${PYTHON_VERSION}/site-packages/ -### update cmake to 20 -RUN wget https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0.tar.gz &&\ - tar -zxvf cmake-3.20.0.tar.gz &&\ - cd cmake-3.20.0 &&\ +### update cmake +RUN wget https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz &&\ + tar -zxvf cmake-${CMAKE_VERSION}.tar.gz &&\ + cd cmake-${CMAKE_VERSION} &&\ ./bootstrap &&\ make &&\ make install @@ -81,4 +82,4 @@ RUN cd /root/workspace/mmdeploy &&\ -DMMDEPLOY_TARGET_BACKENDS="trt" \ -DMMDEPLOY_CODEBASES=all &&\ cmake --build . -- -j$(nproc) && cmake --install . &&\ - if [ -z ${VERSION} ] ; then echo "Build MMDeploy master for GPU devices succeed!" ; else echo "Build MMDeploy version v${VERSION} for GPU devices succeed!" ; fi + if [ -z ${VERSION} ] ; then echo "Built MMDeploy master for GPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for GPU devices successfully!" ; fi From f79dc7b938cf888aea1f277bbf1860e1b9a50067 Mon Sep 17 00:00:00 2001 From: AllentDan Date: Thu, 20 Jan 2022 18:59:16 +0800 Subject: [PATCH 11/16] add FAQs --- docs/en/index.rst | 2 ++ docs/en/tutorials/how_to_use_docker.md | 6 ++++++ docs/zh_cn/index.rst | 1 + docs/zh_cn/tutorials/how_to_use_docker.md | 6 ++++++ 4 files changed, 15 insertions(+) diff --git a/docs/en/index.rst b/docs/en/index.rst index e97e6d848c..c43bab629f 100644 --- a/docs/en/index.rst +++ b/docs/en/index.rst @@ -23,6 +23,8 @@ You can switch between Chinese and English documents in the lower-left corner of tutorials/how_to_support_new_backends.md tutorials/how_to_add_test_units_for_backend_ops.md tutorials/how_to_test_rewritten_models.md + tutorials/how_to_use_docker.md + tutorials/how_to_write_config.md .. toctree:: :maxdepth: 1 diff --git a/docs/en/tutorials/how_to_use_docker.md b/docs/en/tutorials/how_to_use_docker.md index 89bab190d4..0c217c2609 100644 --- a/docs/en/tutorials/how_to_use_docker.md +++ b/docs/en/tutorials/how_to_use_docker.md @@ -27,3 +27,9 @@ After building the docker image succeed, we can use `docker run` to launch the d ``` docker run --gpus all -it -p 8080:8081 mmdeploy:master-gpu ``` + +### AFQs + +1. 
CUDA error: the provided PTX was compiled with an unsupported toolchain: + + As described [here](https://forums.developer.nvidia.com/t/cuda-error-the-provided-ptx-was-compiled-with-an-unsupported-toolchain/185754), update the GPU driver to the latest one for your GPU. diff --git a/docs/zh_cn/index.rst b/docs/zh_cn/index.rst index 1ba0191f8d..aff5c3a792 100644 --- a/docs/zh_cn/index.rst +++ b/docs/zh_cn/index.rst @@ -15,6 +15,7 @@ :caption: 教程 tutorials/how_to_convert_model.md + tutorials/how_to_use_docker.md .. toctree:: :maxdepth: 1 diff --git a/docs/zh_cn/tutorials/how_to_use_docker.md b/docs/zh_cn/tutorials/how_to_use_docker.md index bda56cefe5..0dc53f7e9a 100644 --- a/docs/zh_cn/tutorials/how_to_use_docker.md +++ b/docs/zh_cn/tutorials/how_to_use_docker.md @@ -27,3 +27,9 @@ docker build docker/GPU/ -t mmdeploy:0.1.0 --build-arg VERSION=0.1.0 ``` docker run --gpus all -it -p 8080:8081 mmdeploy:master-gpu ``` + +### 常见问答 + +1. CUDA error: the provided PTX was compiled with an unsupported toolchain: + + 如 [这里](https://forums.developer.nvidia.com/t/cuda-error-the-provided-ptx-was-compiled-with-an-unsupported-toolchain/185754)所说,更新 GPU 的驱动到你的GPU能使用的最新版本。 From 487030eafc30629eef39f0751ca8bca55dbb1c8f Mon Sep 17 00:00:00 2001 From: AllentDan Date: Thu, 20 Jan 2022 19:05:43 +0800 Subject: [PATCH 12/16] update Dpplcv_DIR for SDK building --- docs/en/build.md | 2 +- docs/zh_cn/build.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/build.md b/docs/en/build.md index deb72d4e10..7c7fff77dc 100644 --- a/docs/en/build.md +++ b/docs/en/build.md @@ -212,7 +212,7 @@ Each package's installation command is given based on Ubuntu 18.04. cmake .. \ -DMMDEPLOY_BUILD_SDK=ON \ -DCMAKE_CXX_COMPILER=g++-7 \ - -Dpplcv_DIR=/path/to/ppl.cv/install/lib/cmake/ppl \ + -Dpplcv_DIR=/path/to/ppl.cv/cuda-build/install/lib/cmake/ppl \ -DTENSORRT_DIR=/path/to/tensorrt \ -DCUDNN_DIR=/path/to/cudnn \ -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \ diff --git a/docs/zh_cn/build.md b/docs/zh_cn/build.md index bd0fd4321c..e83e369131 100644 --- a/docs/zh_cn/build.md +++ b/docs/zh_cn/build.md @@ -207,7 +207,7 @@ pip install -e . cmake .. 
\ -DMMDEPLOY_BUILD_SDK=ON \ -DCMAKE_CXX_COMPILER=g++-7 \ - -Dpplcv_DIR=/path/to/ppl.cv/install/lib/cmake/ppl \ + -Dpplcv_DIR=/path/to/ppl.cv/cuda-build/install/lib/cmake/ppl \ -DTENSORRT_DIR=/path/to/tensorrt \ -DCUDNN_DIR=/path/to/cudnn \ -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \ From a3e4458702ef4fa12ec8fc28a1ca2dcfcda63463 Mon Sep 17 00:00:00 2001 From: AllentDan Date: Thu, 20 Jan 2022 19:46:02 +0800 Subject: [PATCH 13/16] remove mmcls --- docker/CPU/Dockerfile | 2 -- 1 file changed, 2 deletions(-) diff --git a/docker/CPU/Dockerfile b/docker/CPU/Dockerfile index 2b7915fadd..d9a98fe6db 100644 --- a/docker/CPU/Dockerfile +++ b/docker/CPU/Dockerfile @@ -40,8 +40,6 @@ ENV PATH /opt/conda/bin:$PATH RUN /opt/conda/bin/pip install mmcv-full==${MMCV_VERSION} -f https://download.openmmlab.com/mmcv/dist/cpu/torch${TORCH_VERSION}/index.html WORKDIR /root/workspace -RUN git clone https://github.com/open-mmlab/mmclassification - ### get onnxruntime RUN wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz \ From 51afea045d636b1b43b7f7c967b13ff34e1a3fd4 Mon Sep 17 00:00:00 2001 From: AllentDan Date: Fri, 21 Jan 2022 14:46:21 +0800 Subject: [PATCH 14/16] add sdk demos --- docker/CPU/Dockerfile | 5 +++++ docker/GPU/Dockerfile | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/docker/CPU/Dockerfile b/docker/CPU/Dockerfile index 2b7915fadd..cab4f8c6b8 100644 --- a/docker/CPU/Dockerfile +++ b/docker/CPU/Dockerfile @@ -89,6 +89,7 @@ RUN git clone https://github.com/open-mmlab/mmdeploy.git &&\ pip install -e . ### build SDK +ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:/opt/intel/openvino/deployment_tools/ngraph/lib:/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64:${LD_LIBRARY_PATH}" RUN cd mmdeploy && rm -rf build/CM* && mkdir -p build && cd build && cmake .. \ -DMMDEPLOY_BUILD_SDK=ON \ -DCMAKE_CXX_COMPILER=g++-7 \ @@ -100,4 +101,8 @@ RUN cd mmdeploy && rm -rf build/CM* && mkdir -p build && cd build && cmake .. \ -DMMDEPLOY_TARGET_BACKENDS="ort;ncnn;openvino" \ -DMMDEPLOY_CODEBASES=all &&\ cmake --build . -- -j$(nproc) && cmake --install . &&\ + cd install/example && mkdir -p build && cd build &&\ + cmake -DMMDeploy_DIR=/root/workspace/mmdeploy/build/install/lib/cmake/MMDeploy .. &&\ + cmake --build . && export SPDLOG_LEVEL=warn &&\ if [ -z ${VERSION} ] ; then echo "Built MMDeploy master for CPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for CPU devices successfully!" ; fi + diff --git a/docker/GPU/Dockerfile b/docker/GPU/Dockerfile index 931ad9a9d6..8a4ae86a35 100644 --- a/docker/GPU/Dockerfile +++ b/docker/GPU/Dockerfile @@ -82,4 +82,10 @@ RUN cd /root/workspace/mmdeploy &&\ -DMMDEPLOY_TARGET_BACKENDS="trt" \ -DMMDEPLOY_CODEBASES=all &&\ cmake --build . -- -j$(nproc) && cmake --install . &&\ + cd install/example && mkdir -p build && cd build &&\ + cmake -DMMDeploy_DIR=/root/workspace/mmdeploy/build/install/lib/cmake/MMDeploy .. &&\ + cmake --build . && export SPDLOG_LEVEL=warn &&\ if [ -z ${VERSION} ] ; then echo "Built MMDeploy master for GPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for GPU devices successfully!" 
; fi
+
+ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${LD_LIBRARY_PATH}"
+

From 60f15ebebea9d99353560fb0ba94bd53c0ab8562 Mon Sep 17 00:00:00 2001
From: AllentDan
Date: Fri, 21 Jan 2022 14:50:21 +0800
Subject: [PATCH 15/16] fix typo and lint

---
 docker/CPU/Dockerfile                  | 1 -
 docker/GPU/Dockerfile                  | 3 +--
 docs/en/tutorials/how_to_use_docker.md | 2 +-
 3 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/docker/CPU/Dockerfile b/docker/CPU/Dockerfile
index 502b806879..43de6b1fcd 100644
--- a/docker/CPU/Dockerfile
+++ b/docker/CPU/Dockerfile
@@ -103,4 +103,3 @@ RUN cd mmdeploy && rm -rf build/CM* && mkdir -p build && cd build && cmake .. \
     cmake -DMMDeploy_DIR=/root/workspace/mmdeploy/build/install/lib/cmake/MMDeploy .. &&\
     cmake --build . && export SPDLOG_LEVEL=warn &&\
     if [ -z ${VERSION} ] ; then echo "Built MMDeploy master for CPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for CPU devices successfully!" ; fi
-
diff --git a/docker/GPU/Dockerfile b/docker/GPU/Dockerfile
index 8a4ae86a35..60d9c75ea1 100644
--- a/docker/GPU/Dockerfile
+++ b/docker/GPU/Dockerfile
@@ -87,5 +87,4 @@ RUN cd /root/workspace/mmdeploy &&\
     cmake --build . && export SPDLOG_LEVEL=warn &&\
     if [ -z ${VERSION} ] ; then echo "Built MMDeploy master for GPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for GPU devices successfully!" ; fi
 
-ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${LD_LIBRARY_PATH}"
-
+ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${LD_LIBRARY_PATH}"
diff --git a/docs/en/tutorials/how_to_use_docker.md b/docs/en/tutorials/how_to_use_docker.md
index 0c217c2609..855354cc57 100644
--- a/docs/en/tutorials/how_to_use_docker.md
+++ b/docs/en/tutorials/how_to_use_docker.md
@@ -28,7 +28,7 @@ After building the docker image succeed, we can use `docker run` to launch the d
 docker run --gpus all -it -p 8080:8081 mmdeploy:master-gpu
 ```
 
-### AFQs
+### FAQs
 
 1. CUDA error: the provided PTX was compiled with an unsupported toolchain:
 

From 8144262da5a014149665e0bbc7a62ffab9f2154c Mon Sep 17 00:00:00 2001
From: AllentDan
Date: Mon, 24 Jan 2022 11:41:38 +0800
Subject: [PATCH 16/16] update FAQs

---
 docs/en/tutorials/how_to_use_docker.md    | 10 ++++++++++
 docs/zh_cn/tutorials/how_to_use_docker.md | 11 +++++++++++
 2 files changed, 21 insertions(+)

diff --git a/docs/en/tutorials/how_to_use_docker.md b/docs/en/tutorials/how_to_use_docker.md
index 855354cc57..2294baf273 100644
--- a/docs/en/tutorials/how_to_use_docker.md
+++ b/docs/en/tutorials/how_to_use_docker.md
@@ -33,3 +33,13 @@ docker run --gpus all -it -p 8080:8081 mmdeploy:master-gpu
 1. CUDA error: the provided PTX was compiled with an unsupported toolchain:
 
    As described [here](https://forums.developer.nvidia.com/t/cuda-error-the-provided-ptx-was-compiled-with-an-unsupported-toolchain/185754), update the GPU driver to the latest one for your GPU.
+2. docker: Error response from daemon: could not select device driver "" with capabilities: [[gpu]].
+   ```
+   # Add the package repositories
+   distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
+   curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
+   curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
+
+   sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
+   sudo systemctl restart docker
+   ```
diff --git a/docs/zh_cn/tutorials/how_to_use_docker.md b/docs/zh_cn/tutorials/how_to_use_docker.md
index 0dc53f7e9a..6a7eb10670 100644
--- a/docs/zh_cn/tutorials/how_to_use_docker.md
+++ b/docs/zh_cn/tutorials/how_to_use_docker.md
@@ -33,3 +33,14 @@ docker run --gpus all -it -p 8080:8081 mmdeploy:master-gpu
 1. CUDA error: the provided PTX was compiled with an unsupported toolchain:
 
    如 [这里](https://forums.developer.nvidia.com/t/cuda-error-the-provided-ptx-was-compiled-with-an-unsupported-toolchain/185754)所说，更新 GPU 的驱动到你的GPU能使用的最新版本。
+
+2. docker: Error response from daemon: could not select device driver "" with capabilities: [[gpu]].
+   ```
+   # Add the package repositories
+   distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
+   curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
+   curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
+
+   sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
+   sudo systemctl restart docker
+   ```