From 3fc3fadae4546760f70bff65823c16293b33d6ba Mon Sep 17 00:00:00 2001
From: Chen Xin
Date: Mon, 28 Mar 2022 17:28:29 +0800
Subject: [PATCH] [Enhancement] fix-cmake-relocatable (#223)

* require user to specify xxx_dir
* fix line ending
* fix end-of-file-fixer
* try to fix ld cudart cublas
* add ENV var search
* fix CMAKE_CUDA_COMPILER
* cpu, cuda should all work well
* remove commented code
---
 CMakeLists.txt                              |  4 ++
 cmake/MMDeployConfig.cmake.in               | 17 +++++++
 cmake/modules/FindCUDNN.cmake               | 36 +++++++++++++++
 cmake/modules/FindONNXRUNTIME.cmake         | 36 +++++++++++++++
 cmake/modules/FindTENSORRT.cmake            | 49 +++++++++++++++++++++
 cmake/tensorrt.cmake                        |  2 +
 csrc/apis/python/CMakeLists.txt             |  4 ++
 csrc/backend_ops/onnxruntime/CMakeLists.txt |  3 +-
 csrc/backend_ops/tensorrt/CMakeLists.txt    |  3 +-
 csrc/device/cuda/CMakeLists.txt             |  3 +-
 csrc/net/ort/CMakeLists.txt                 |  4 +-
 csrc/net/trt/CMakeLists.txt                 |  3 +-
 csrc/preprocess/cuda/CMakeLists.txt         |  2 +-
 tests/test_csrc/CMakeLists.txt              |  3 ++
 14 files changed, 159 insertions(+), 10 deletions(-)
 create mode 100644 cmake/modules/FindCUDNN.cmake
 create mode 100644 cmake/modules/FindONNXRUNTIME.cmake
 create mode 100644 cmake/modules/FindTENSORRT.cmake

diff --git a/CMakeLists.txt b/CMakeLists.txt
index c5b93d4a30..adff9a9137 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -104,6 +104,10 @@ if (MMDEPLOY_BUILD_SDK)
         ${CMAKE_CURRENT_SOURCE_DIR}/cmake/loader.cpp.in
         DESTINATION lib/cmake/MMDeploy
     )
+    install(DIRECTORY
+        ${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules
+        DESTINATION lib/cmake/MMDeploy
+    )
     install(DIRECTORY ${CMAKE_SOURCE_DIR}/demo/csrc/ DESTINATION example)
 endif ()
 
diff --git a/cmake/MMDeployConfig.cmake.in b/cmake/MMDeployConfig.cmake.in
index 4bd05489e4..36ff3e7d6b 100644
--- a/cmake/MMDeployConfig.cmake.in
+++ b/cmake/MMDeployConfig.cmake.in
@@ -12,12 +12,29 @@ set(MMDEPLOY_BUILD_SHARED @BUILD_SHARED_LIBS@)
 
 if (NOT MMDEPLOY_BUILD_SHARED)
     if ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES)
+        find_package(CUDA REQUIRED)
+        if(MSVC)
+            set(CMAKE_CUDA_COMPILER ${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc.exe)
+        else()
+            set(CMAKE_CUDA_COMPILER ${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc)
+        endif()
         set(CMAKE_CUDA_RUNTIME_LIBRARY Shared)
         enable_language(CUDA)
         find_package(pplcv REQUIRED)
     endif ()
 endif ()
 
+set(MMDEPLOY_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/modules")
+list(APPEND CMAKE_MODULE_PATH ${MMDEPLOY_MODULE_PATH})
+if ("trt" IN_LIST MMDEPLOY_TARGET_BACKENDS)
+    find_package(CUDNN)
+    find_package(TENSORRT)
+endif()
+if ("ort" IN_LIST MMDEPLOY_TARGET_BACKENDS)
+    find_package(ONNXRUNTIME)
+endif()
+list(POP_BACK CMAKE_MODULE_PATH)
+
 find_package(spdlog REQUIRED)
 find_package(OpenCV REQUIRED)
diff --git a/cmake/modules/FindCUDNN.cmake b/cmake/modules/FindCUDNN.cmake
new file mode 100644
index 0000000000..3f3f9b893a
--- /dev/null
+++ b/cmake/modules/FindCUDNN.cmake
@@ -0,0 +1,36 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+if (NOT DEFINED CUDNN_DIR)
+    set(CUDNN_DIR $ENV{CUDNN_DIR})
+endif ()
+
+find_path(
+    CUDNN_INCLUDE_DIR cudnn.h
+    HINTS ${CUDNN_DIR} ${CUDA_TOOLKIT_ROOT_DIR}
+    PATH_SUFFIXES include)
+
+find_library(
+    CUDNN_LIBRARY_CUDNN_PATH cudnn
+    HINTS ${CUDNN_DIR} ${CUDA_TOOLKIT_ROOT_DIR}
+    PATH_SUFFIXES lib lib64 lib/x64)
+
+if (NOT (CUDNN_INCLUDE_DIR AND CUDNN_LIBRARY_CUDNN_PATH))
+    message(FATAL_ERROR "Couldn't find cuDNN in CUDNN_DIR: ${CUDNN_DIR}, "
+        "or in CUDA_TOOLKIT_ROOT_DIR: ${CUDA_TOOLKIT_ROOT_DIR}, "
+        "please check if the path is correct.")
+endif()
+
+add_library(cudnn SHARED IMPORTED)
+set_property(TARGET cudnn APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+if (MSVC)
+    set_target_properties(cudnn PROPERTIES
+        IMPORTED_IMPLIB_RELEASE ${CUDNN_LIBRARY_CUDNN_PATH}
+        INTERFACE_INCLUDE_DIRECTORIES ${CUDNN_INCLUDE_DIR}
+    )
+
+else()
+    set_target_properties(cudnn PROPERTIES
+        IMPORTED_LOCATION_RELEASE ${CUDNN_LIBRARY_CUDNN_PATH}
+        INTERFACE_INCLUDE_DIRECTORIES ${CUDNN_INCLUDE_DIR}
+    )
+endif()
diff --git a/cmake/modules/FindONNXRUNTIME.cmake b/cmake/modules/FindONNXRUNTIME.cmake
new file mode 100644
index 0000000000..63ea176595
--- /dev/null
+++ b/cmake/modules/FindONNXRUNTIME.cmake
@@ -0,0 +1,36 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+if (NOT DEFINED ONNXRUNTIME_DIR)
+    set(ONNXRUNTIME_DIR $ENV{ONNXRUNTIME_DIR})
+endif ()
+if (NOT ONNXRUNTIME_DIR)
+    message(FATAL_ERROR "Please set ONNXRUNTIME_DIR with cmake -D option.")
+endif()
+
+find_path(
+    ONNXRUNTIME_INCLUDE_DIR onnxruntime_cxx_api.h
+    HINTS ${ONNXRUNTIME_DIR}
+    PATH_SUFFIXES include)
+find_library(
+    ONNXRUNTIME_LIBRARY_ONNXRUNTIME_PATH onnxruntime
+    HINTS ${ONNXRUNTIME_DIR}
+    PATH_SUFFIXES lib lib64 lib/x64)
+if (NOT (ONNXRUNTIME_INCLUDE_DIR AND ONNXRUNTIME_LIBRARY_ONNXRUNTIME_PATH))
+    message(FATAL_ERROR "Couldn't find onnxruntime in ONNXRUNTIME_DIR: "
+        "${ONNXRUNTIME_DIR}, please check if the path is correct.")
+endif()
+
+add_library(onnxruntime SHARED IMPORTED)
+set_property(TARGET onnxruntime APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+if (MSVC)
+    set_target_properties(onnxruntime PROPERTIES
+        IMPORTED_IMPLIB_RELEASE ${ONNXRUNTIME_LIBRARY_ONNXRUNTIME_PATH}
+        INTERFACE_INCLUDE_DIRECTORIES ${ONNXRUNTIME_INCLUDE_DIR}
+    )
+
+else()
+    set_target_properties(onnxruntime PROPERTIES
+        IMPORTED_LOCATION_RELEASE ${ONNXRUNTIME_LIBRARY_ONNXRUNTIME_PATH}
+        INTERFACE_INCLUDE_DIRECTORIES ${ONNXRUNTIME_INCLUDE_DIR}
+    )
+endif()
diff --git a/cmake/modules/FindTENSORRT.cmake b/cmake/modules/FindTENSORRT.cmake
new file mode 100644
index 0000000000..0786413e79
--- /dev/null
+++ b/cmake/modules/FindTENSORRT.cmake
@@ -0,0 +1,49 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+if (NOT DEFINED TENSORRT_DIR)
+    set(TENSORRT_DIR $ENV{TENSORRT_DIR})
+endif ()
+if (NOT TENSORRT_DIR)
+    message(FATAL_ERROR "Please set TENSORRT_DIR with cmake -D option.")
+endif()
+
+find_path(
+    TENSORRT_INCLUDE_DIR NvInfer.h
+    HINTS ${TENSORRT_DIR}
+    PATH_SUFFIXES include)
+
+if (NOT TENSORRT_INCLUDE_DIR)
+    message(FATAL_ERROR "Cannot find TensorRT header NvInfer.h, "
+        "please check if the path is correct")
+endif ()
+
+set(__TENSORRT_LIB_COMPONENTS nvinfer;nvinfer_plugin)
+foreach(__component ${__TENSORRT_LIB_COMPONENTS})
+    find_library(
+        __component_path ${__component}
+        HINTS ${TENSORRT_DIR}
+        PATH_SUFFIXES lib lib64 lib/x64)
+    if (NOT __component_path)
+        message(FATAL_ERROR "Cannot find TensorRT lib ${__component}, "
+            "please check if the path is correct")
+    endif()
+
+    add_library(${__component} SHARED IMPORTED)
+    set_property(TARGET ${__component} APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+    if (MSVC)
+        set_target_properties(
+            ${__component} PROPERTIES
+            IMPORTED_IMPLIB_RELEASE ${__component_path}
+            INTERFACE_INCLUDE_DIRECTORIES ${TENSORRT_INCLUDE_DIR}
+        )
+    else()
+        set_target_properties(
+            ${__component} PROPERTIES
+            IMPORTED_LOCATION_RELEASE ${__component_path}
+            INTERFACE_INCLUDE_DIRECTORIES ${TENSORRT_INCLUDE_DIR}
+        )
+    endif()
+    unset(__component_path CACHE)
+endforeach()
+
+set(TENSORRT_LIBS ${__TENSORRT_LIB_COMPONENTS})
diff --git a/cmake/tensorrt.cmake b/cmake/tensorrt.cmake
index 6bfd99e969..af9f6e226b 100644
--- a/cmake/tensorrt.cmake
+++ b/cmake/tensorrt.cmake
@@ -1,5 +1,7 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 include(${CMAKE_SOURCE_DIR}/cmake/cuda.cmake)
+include(${CMAKE_SOURCE_DIR}/cmake/modules/FindTENSORRT.cmake)
+include(${CMAKE_SOURCE_DIR}/cmake/modules/FindCUDNN.cmake)
 find_path(
     TENSORRT_INCLUDE_DIR NvInfer.h
     HINTS ${TENSORRT_DIR} ${CUDA_TOOLKIT_ROOT_DIR}
diff --git a/csrc/apis/python/CMakeLists.txt b/csrc/apis/python/CMakeLists.txt
index 0730268f07..4421995733 100644
--- a/csrc/apis/python/CMakeLists.txt
+++ b/csrc/apis/python/CMakeLists.txt
@@ -2,6 +2,10 @@ cmake_minimum_required(VERSION 3.14)
 
 project(mmdeploy_python)
 
+if ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES)
+    include(${CMAKE_SOURCE_DIR}/cmake/cuda.cmake)
+endif()
+
 if (NOT TARGET pybind11)
     add_subdirectory(${CMAKE_SOURCE_DIR}/third_party/pybind11 pybind11)
 endif ()
diff --git a/csrc/backend_ops/onnxruntime/CMakeLists.txt b/csrc/backend_ops/onnxruntime/CMakeLists.txt
index b136781401..5dfa8176b0 100644
--- a/csrc/backend_ops/onnxruntime/CMakeLists.txt
+++ b/csrc/backend_ops/onnxruntime/CMakeLists.txt
@@ -3,6 +3,7 @@ cmake_minimum_required(VERSION 3.14)
 project(mmdeploy_onnxruntime_ops)
 
 include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake)
+include(${CMAKE_SOURCE_DIR}/cmake/modules/FindONNXRUNTIME.cmake)
 
 # add plugin source
 file(GLOB_RECURSE ORT_OPS_SRCS *.cpp)
@@ -16,8 +17,6 @@ target_include_directories(${PROJECT_NAME}_obj PUBLIC
     $
    $
    $)
-target_link_directories(${PROJECT_NAME}_obj PUBLIC
-    ${ONNXRUNTIME_DIR}/lib)
 target_link_libraries(${PROJECT_NAME}_obj PUBLIC onnxruntime)
 
 mmdeploy_add_library(${PROJECT_NAME} SHARED EXCLUDE "")
diff --git a/csrc/backend_ops/tensorrt/CMakeLists.txt b/csrc/backend_ops/tensorrt/CMakeLists.txt
index c656882ac3..14db917dd3 100644
--- a/csrc/backend_ops/tensorrt/CMakeLists.txt
+++ b/csrc/backend_ops/tensorrt/CMakeLists.txt
@@ -27,9 +27,8 @@ target_include_directories(${PROJECT_NAME}_obj
 target_include_directories(${PROJECT_NAME}_obj PRIVATE ${TENSORRT_INCLUDE_DIR})
 target_include_directories(${PROJECT_NAME}_obj PRIVATE ${CUDNN_DIR}/include)
 target_include_directories(${PROJECT_NAME}_obj PRIVATE ${CUB_ROOT_DIR})
-target_link_directories(${PROJECT_NAME}_obj PUBLIC ${CUDNN_DIR}/lib64 ${CUDNN_DIR}/lib/x64)
 target_link_libraries(${PROJECT_NAME}_obj
-    PUBLIC ${TENSORRT_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} cudnn)
+    PUBLIC ${TENSORRT_LIBS} cublas cudnn)
 mmdeploy_export(${PROJECT_NAME}_obj)
 
 # Build module library. It is used to convert onnx model to tensorrt engine
diff --git a/csrc/device/cuda/CMakeLists.txt b/csrc/device/cuda/CMakeLists.txt
index 04f392d855..7fdddd5ed9 100644
--- a/csrc/device/cuda/CMakeLists.txt
+++ b/csrc/device/cuda/CMakeLists.txt
@@ -10,7 +10,6 @@
 set(SRCS cuda_device.cpp cuda_builtin_kernels.cu)
 
 mmdeploy_add_module(${PROJECT_NAME} "${SRCS}")
-target_include_directories(${PROJECT_NAME} PUBLIC ${CUDA_INCLUDE_DIRS})
-target_link_directories(${PROJECT_NAME} PUBLIC ${CUDA_TOOLKIT_ROOT_DIR}/lib64)
+target_include_directories(${PROJECT_NAME} PRIVATE ${CUDA_INCLUDE_DIRS})
 target_link_libraries(${PROJECT_NAME} PRIVATE cudart cuda)
 add_library(mmdeploy::device::cuda ALIAS ${PROJECT_NAME})
diff --git a/csrc/net/ort/CMakeLists.txt b/csrc/net/ort/CMakeLists.txt
index b4b78eff47..5d4740db69 100644
--- a/csrc/net/ort/CMakeLists.txt
+++ b/csrc/net/ort/CMakeLists.txt
@@ -2,12 +2,14 @@ cmake_minimum_required(VERSION 3.14)
 
 project(mmdeploy_ort_net)
 
+include(${CMAKE_SOURCE_DIR}/cmake/modules/FindONNXRUNTIME.cmake)
+
 if ("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES)
     include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake)
     mmdeploy_add_module(${PROJECT_NAME} ort_net.cpp)
     target_include_directories(${PROJECT_NAME} PRIVATE ${ONNXRUNTIME_DIR}/include)
-    target_link_directories(${PROJECT_NAME} PUBLIC ${ONNXRUNTIME_DIR}/lib)
     target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy_onnxruntime_ops_obj)
+    target_link_libraries(${PROJECT_NAME} PUBLIC onnxruntime)
     add_library(mmdeploy::ort_net ALIAS ${PROJECT_NAME})
 else ()
     message(ERROR "'ort_net' is NOT supported in target devices: ${MMDEPLOY_TARGET_DEVICES}")
diff --git a/csrc/net/trt/CMakeLists.txt b/csrc/net/trt/CMakeLists.txt
index 8c71bd46c1..9ceb49006e 100644
--- a/csrc/net/trt/CMakeLists.txt
+++ b/csrc/net/trt/CMakeLists.txt
@@ -11,8 +11,7 @@
 target_include_directories(${PROJECT_NAME} PRIVATE ${TENSORRT_INCLUDE_DIR})
 target_include_directories(${PROJECT_NAME} PRIVATE ${CUDNN_DIR}/include)
 target_include_directories(${PROJECT_NAME} PRIVATE ${CUDA_TOOLKIT_ROOT_DIR}/include)
 
-target_link_directories(${PROJECT_NAME} PUBLIC ${CUDNN_DIR}/lib64 ${CUDNN_DIR}/lib/x64)
 target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy_tensorrt_ops_obj)
-target_link_libraries(${PROJECT_NAME} PUBLIC ${TENSORRT_LIBRARY} cudnn)
+target_link_libraries(${PROJECT_NAME} PUBLIC ${TENSORRT_LIBS} cudnn)
 add_library(mmdeploy::trt_net ALIAS ${PROJECT_NAME})
diff --git a/csrc/preprocess/cuda/CMakeLists.txt b/csrc/preprocess/cuda/CMakeLists.txt
index 2ccf77638d..1c83cf309b 100644
--- a/csrc/preprocess/cuda/CMakeLists.txt
+++ b/csrc/preprocess/cuda/CMakeLists.txt
@@ -23,5 +23,5 @@ mmdeploy_add_module(${PROJECT_NAME} "${SRCS}")
 target_link_libraries(${PROJECT_NAME}
     PRIVATE mmdeploy::transform ${PPLCV_LIBRARIES})
 target_include_directories(${PROJECT_NAME}
-    PUBLIC ${CUDA_TOOLKIT_ROOT_DIR}/include ${PPLCV_INCLUDE_DIRS})
+    PRIVATE ${CUDA_TOOLKIT_ROOT_DIR}/include ${PPLCV_INCLUDE_DIRS})
 add_library(mmdeploy::transform_impl::cuda ALIAS ${PROJECT_NAME})
diff --git a/tests/test_csrc/CMakeLists.txt b/tests/test_csrc/CMakeLists.txt
index 34cc0349dd..d7026ffec2 100644
--- a/tests/test_csrc/CMakeLists.txt
+++ b/tests/test_csrc/CMakeLists.txt
@@ -2,6 +2,9 @@ cmake_minimum_required(VERSION 3.14)
 
 project(tests)
 
+if ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES)
+    include(${CMAKE_SOURCE_DIR}/cmake/cuda.cmake)
+endif()
 include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake)
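
Usage sketch (not part of the original patch): a minimal downstream CMakeLists.txt
showing how the find modules that this change installs under
lib/cmake/MMDeploy/modules might be consumed. The install prefix /opt/mmdeploy,
the target name demo and the source file demo.cpp are placeholder assumptions;
TENSORRT_DIR and CUDNN_DIR may be passed as -D options or set as environment
variables, which the modules now read via $ENV{...}.

    # Hypothetical consumer project; prefix, target and source names are placeholders.
    cmake_minimum_required(VERSION 3.14)
    project(demo)

    # Point CMake at the find modules shipped with the installed SDK.
    list(APPEND CMAKE_MODULE_PATH /opt/mmdeploy/lib/cmake/MMDeploy/modules)

    # TENSORRT_DIR / CUDNN_DIR come from -D options or the environment.
    find_package(TENSORRT)   # creates imported targets nvinfer, nvinfer_plugin; sets TENSORRT_LIBS
    find_package(CUDNN)      # creates imported target cudnn

    add_executable(demo demo.cpp)
    target_link_libraries(demo PRIVATE ${TENSORRT_LIBS} cudnn)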