[Enhancement] fix-cmake-relocatable (open-mmlab#223)
* require user to specify xxx_dir for each backend (see the configure sketch below)

* fix line endings

* fix end-of-file-fixer

* fix linking of cudart and cublas

* add ENV var search for the xxx_dir variables

* fix CMAKE_CUDA_COMPILER

* CPU and CUDA builds both work

* remove commented-out code
irexyc authored and lvhan028 committed Mar 28, 2022
1 parent d586af9 commit 3fc3fad
Showing 14 changed files with 159 additions and 10 deletions.
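To make the new flow concrete, here is a configure sketch; the paths, the MyApp project name, and the install prefix are illustrative assumptions, not part of this commit. Backend roots are passed as xxx_DIR cache variables at configure time (or picked up from identically named environment variables), and a downstream project locates the installed SDK through the config file under lib/cmake/MMDeploy.

    # Building the SDK (example paths; each xxx_DIR may also come from the environment):
    #   cmake .. -DMMDEPLOY_BUILD_SDK=ON \
    #            -DMMDEPLOY_TARGET_DEVICES="cuda" -DMMDEPLOY_TARGET_BACKENDS="trt;ort" \
    #            -DTENSORRT_DIR=/opt/TensorRT -DCUDNN_DIR=/opt/cudnn \
    #            -DONNXRUNTIME_DIR=/opt/onnxruntime
    #
    # A hypothetical downstream consumer, configured with
    #   -DMMDeploy_DIR=<install-prefix>/lib/cmake/MMDeploy and the same xxx_DIR flags:
    cmake_minimum_required(VERSION 3.14)
    project(MyApp)
    find_package(MMDeploy REQUIRED)  # loads MMDeployConfig.cmake, which re-finds CUDA,
                                     # cuDNN, TensorRT and ONNX Runtime for static builds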
CMakeLists.txt (4 changes: 4 additions, 0 deletions)
@@ -104,6 +104,10 @@ if (MMDEPLOY_BUILD_SDK)
${CMAKE_CURRENT_SOURCE_DIR}/cmake/loader.cpp.in
DESTINATION lib/cmake/MMDeploy
)
install(DIRECTORY
${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules
DESTINATION lib/cmake/MMDeploy
)

install(DIRECTORY ${CMAKE_SOURCE_DIR}/demo/csrc/ DESTINATION example)
endif ()
cmake/MMDeployConfig.cmake.in (17 changes: 17 additions, 0 deletions)
@@ -12,12 +12,29 @@ set(MMDEPLOY_BUILD_SHARED @BUILD_SHARED_LIBS@)

if (NOT MMDEPLOY_BUILD_SHARED)
    if ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES)
        find_package(CUDA REQUIRED)
        if(MSVC)
            set(CMAKE_CUDA_COMPILER ${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc.exe)
        else()
            set(CMAKE_CUDA_COMPILER ${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc)
        endif()
        set(CMAKE_CUDA_RUNTIME_LIBRARY Shared)
        enable_language(CUDA)
        find_package(pplcv REQUIRED)
    endif ()
endif ()

set(MMDEPLOY_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/modules")
list(APPEND CMAKE_MODULE_PATH ${MMDEPLOY_MODULE_PATH})
if ("trt" IN_LIST MMDEPLOY_TARGET_BACKENDS)
    find_package(CUDNN)
    find_package(TENSORRT)
endif()
if ("ort" IN_LIST MMDEPLOY_TARGET_BACKENDS)
    find_package(ONNXRUNTIME)
endif()
list(POP_BACK CMAKE_MODULE_PATH)

find_package(spdlog REQUIRED)
find_package(OpenCV REQUIRED)

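The list(APPEND CMAKE_MODULE_PATH ...) / list(POP_BACK CMAKE_MODULE_PATH) pair above keeps the bundled Find modules visible only while MMDeployConfig.cmake runs, so they do not leak into the consumer's own module path. A minimal sketch of the same pattern, assuming a hypothetical package MyFoo; note that list(POP_BACK) needs CMake 3.15 or newer:

    list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/modules")
    find_package(MyFoo)               # resolved from the bundled FindMyFoo.cmake
    list(POP_BACK CMAKE_MODULE_PATH)  # restore the caller's module path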
cmake/modules/FindCUDNN.cmake (36 changes: 36 additions, 0 deletions)
@@ -0,0 +1,36 @@
# Copyright (c) OpenMMLab. All rights reserved.

if (NOT DEFINED CUDNN_DIR)
    set(CUDNN_DIR $ENV{CUDNN_DIR})
endif ()

find_path(
    CUDNN_INCLUDE_DIR cudnn.h
    HINTS ${CUDNN_DIR} ${CUDA_TOOLKIT_ROOT_DIR}
    PATH_SUFFIXES include)

find_library(
    CUDNN_LIBRARY_CUDNN_PATH cudnn
    HINTS ${CUDNN_DIR} ${CUDA_TOOLKIT_ROOT_DIR}
    PATH_SUFFIXES lib lib64 lib/x64)

if (NOT (CUDNN_INCLUDE_DIR AND CUDNN_LIBRARY_CUDNN_PATH))
    message(FATAL_ERROR "Couldn't find cuDNN in CUDNN_DIR: ${CUDNN_DIR}, "
            "or in CUDA_TOOLKIT_ROOT_DIR: ${CUDA_TOOLKIT_ROOT_DIR}, "
            "please check if the path is correct.")
endif()

add_library(cudnn SHARED IMPORTED)
set_property(TARGET cudnn APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
if (MSVC)
    set_target_properties(cudnn PROPERTIES
        IMPORTED_IMPLIB_RELEASE ${CUDNN_LIBRARY_CUDNN_PATH}
        INTERFACE_INCLUDE_DIRECTORIES ${CUDNN_INCLUDE_DIR}
    )
else()
    set_target_properties(cudnn PROPERTIES
        IMPORTED_LOCATION_RELEASE ${CUDNN_LIBRARY_CUDNN_PATH}
        INTERFACE_INCLUDE_DIRECTORIES ${CUDNN_INCLUDE_DIR}
    )
endif()
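A usage sketch for this module (the my_op target and the /opt/cudnn path are assumptions): since cudnn is exported as an imported target that carries both the library location and the include directory, consumers link it by name and no target_link_directories call is needed.

    # cmake -DCUDNN_DIR=/opt/cudnn ..      (or: export CUDNN_DIR=/opt/cudnn)
    list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules)
    find_package(CUDNN)                    # defines the imported target `cudnn`
    add_library(my_op SHARED my_op.cpp)
    target_link_libraries(my_op PRIVATE cudnn)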
cmake/modules/FindONNXRUNTIME.cmake (36 changes: 36 additions, 0 deletions)
@@ -0,0 +1,36 @@
# Copyright (c) OpenMMLab. All rights reserved.

if (NOT DEFINED ONNXRUNTIME_DIR)
    set(ONNXRUNTIME_DIR $ENV{ONNXRUNTIME_DIR})
endif ()
if (NOT ONNXRUNTIME_DIR)
    message(FATAL_ERROR "Please set ONNXRUNTIME_DIR with cmake -D option.")
endif()

find_path(
    ONNXRUNTIME_INCLUDE_DIR onnxruntime_cxx_api.h
    HINTS ${ONNXRUNTIME_DIR}
    PATH_SUFFIXES include)
find_library(
    ONNXRUNTIME_LIBRARY_ONNXRUNTIME_PATH onnxruntime
    HINTS ${ONNXRUNTIME_DIR}
    PATH_SUFFIXES lib lib64 lib/x64)
if (NOT (ONNXRUNTIME_INCLUDE_DIR AND ONNXRUNTIME_LIBRARY_ONNXRUNTIME_PATH))
    message(FATAL_ERROR "Couldn't find onnxruntime in ONNXRUNTIME_DIR: "
            "${ONNXRUNTIME_DIR}, please check if the path is correct.")
endif()

add_library(onnxruntime SHARED IMPORTED)
set_property(TARGET onnxruntime APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
if (MSVC)
    set_target_properties(onnxruntime PROPERTIES
        IMPORTED_IMPLIB_RELEASE ${ONNXRUNTIME_LIBRARY_ONNXRUNTIME_PATH}
        INTERFACE_INCLUDE_DIRECTORIES ${ONNXRUNTIME_INCLUDE_DIR}
    )
else()
    set_target_properties(onnxruntime PROPERTIES
        IMPORTED_LOCATION_RELEASE ${ONNXRUNTIME_LIBRARY_ONNXRUNTIME_PATH}
        INTERFACE_INCLUDE_DIRECTORIES ${ONNXRUNTIME_INCLUDE_DIR}
    )
endif()
cmake/modules/FindTENSORRT.cmake (49 changes: 49 additions, 0 deletions)
@@ -0,0 +1,49 @@
# Copyright (c) OpenMMLab. All rights reserved.

if (NOT DEFINED TENSORRT_DIR)
    set(TENSORRT_DIR $ENV{TENSORRT_DIR})
endif ()
if (NOT TENSORRT_DIR)
    message(FATAL_ERROR "Please set TENSORRT_DIR with cmake -D option.")
endif()

find_path(
    TENSORRT_INCLUDE_DIR NvInfer.h
    HINTS ${TENSORRT_DIR}
    PATH_SUFFIXES include)

if (NOT TENSORRT_INCLUDE_DIR)
    message(FATAL_ERROR "Cannot find TensorRT header NvInfer.h, "
            "please check if the path is correct")
endif ()

set(__TENSORRT_LIB_COMPONENTS nvinfer;nvinfer_plugin)
foreach(__component ${__TENSORRT_LIB_COMPONENTS})
    find_library(
        __component_path ${__component}
        HINTS ${TENSORRT_DIR}
        PATH_SUFFIXES lib lib64 lib/x64)
    if (NOT __component_path)
        message(FATAL_ERROR "Cannot find TensorRT lib ${__component}, "
                "please check if the path is correct")
    endif()

    add_library(${__component} SHARED IMPORTED)
    set_property(TARGET ${__component} APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
    if (MSVC)
        set_target_properties(
            ${__component} PROPERTIES
            IMPORTED_IMPLIB_RELEASE ${__component_path}
            INTERFACE_INCLUDE_DIRECTORIES ${TENSORRT_INCLUDE_DIR}
        )
    else()
        set_target_properties(
            ${__component} PROPERTIES
            IMPORTED_LOCATION_RELEASE ${__component_path}
            INTERFACE_INCLUDE_DIRECTORIES ${TENSORRT_INCLUDE_DIR}
        )
    endif()
    unset(__component_path CACHE)
endforeach()

set(TENSORRT_LIBS ${__TENSORRT_LIB_COMPONENTS})
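The module exports one imported target per component plus the TENSORRT_LIBS list, so a consumer can link every requested TensorRT component in a single call; the my_net target and the /opt/TensorRT path below are placeholders.

    # cmake -DTENSORRT_DIR=/opt/TensorRT ..   (or: export TENSORRT_DIR=/opt/TensorRT)
    list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules)
    find_package(TENSORRT)                    # defines nvinfer, nvinfer_plugin and TENSORRT_LIBS
    add_library(my_net SHARED my_net.cpp)
    target_link_libraries(my_net PRIVATE ${TENSORRT_LIBS})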
cmake/tensorrt.cmake (2 changes: 2 additions, 0 deletions)
@@ -1,5 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
include(${CMAKE_SOURCE_DIR}/cmake/cuda.cmake)
include(${CMAKE_SOURCE_DIR}/cmake/modules/FindTENSORRT.cmake)
include(${CMAKE_SOURCE_DIR}/cmake/modules/FindCUDNN.cmake)
find_path(
TENSORRT_INCLUDE_DIR NvInfer.h
HINTS ${TENSORRT_DIR} ${CUDA_TOOLKIT_ROOT_DIR}
csrc/apis/python/CMakeLists.txt (4 changes: 4 additions, 0 deletions)
@@ -2,6 +2,10 @@
cmake_minimum_required(VERSION 3.14)
project(mmdeploy_python)

if ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES)
include(${CMAKE_SOURCE_DIR}/cmake/cuda.cmake)
endif()

if (NOT TARGET pybind11)
add_subdirectory(${CMAKE_SOURCE_DIR}/third_party/pybind11 pybind11)
endif ()
csrc/backend_ops/onnxruntime/CMakeLists.txt (3 changes: 1 addition, 2 deletions)
@@ -3,6 +3,7 @@ cmake_minimum_required(VERSION 3.14)
project(mmdeploy_onnxruntime_ops)

include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake)
include(${CMAKE_SOURCE_DIR}/cmake/modules/FindONNXRUNTIME.cmake)

# add plugin source
file(GLOB_RECURSE ORT_OPS_SRCS *.cpp)
@@ -16,8 +17,6 @@ target_include_directories(${PROJECT_NAME}_obj PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/common>
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../common>
$<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}/csrc>)
-target_link_directories(${PROJECT_NAME}_obj PUBLIC
-    ${ONNXRUNTIME_DIR}/lib)
target_link_libraries(${PROJECT_NAME}_obj PUBLIC onnxruntime)

mmdeploy_add_library(${PROJECT_NAME} SHARED EXCLUDE "")
csrc/backend_ops/tensorrt/CMakeLists.txt (3 changes: 1 addition, 2 deletions)
@@ -27,9 +27,8 @@ target_include_directories(${PROJECT_NAME}_obj
target_include_directories(${PROJECT_NAME}_obj PRIVATE ${TENSORRT_INCLUDE_DIR})
target_include_directories(${PROJECT_NAME}_obj PRIVATE ${CUDNN_DIR}/include)
target_include_directories(${PROJECT_NAME}_obj PRIVATE ${CUB_ROOT_DIR})
-target_link_directories(${PROJECT_NAME}_obj PUBLIC ${CUDNN_DIR}/lib64 ${CUDNN_DIR}/lib/x64)
target_link_libraries(${PROJECT_NAME}_obj
-    PUBLIC ${TENSORRT_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} cudnn)
    PUBLIC ${TENSORRT_LIBS} cublas cudnn)
mmdeploy_export(${PROJECT_NAME}_obj)

# Build module library. It is used to convert onnx model to tensorrt engine
csrc/device/cuda/CMakeLists.txt (3 changes: 1 addition, 2 deletions)
@@ -10,7 +10,6 @@ set(SRCS
cuda_device.cpp
cuda_builtin_kernels.cu)
mmdeploy_add_module(${PROJECT_NAME} "${SRCS}")
-target_include_directories(${PROJECT_NAME} PUBLIC ${CUDA_INCLUDE_DIRS})
-target_link_directories(${PROJECT_NAME} PUBLIC ${CUDA_TOOLKIT_ROOT_DIR}/lib64)
target_include_directories(${PROJECT_NAME} PRIVATE ${CUDA_INCLUDE_DIRS})
target_link_libraries(${PROJECT_NAME} PRIVATE cudart cuda)
add_library(mmdeploy::device::cuda ALIAS ${PROJECT_NAME})
csrc/net/ort/CMakeLists.txt (4 changes: 3 additions, 1 deletion)
@@ -2,12 +2,14 @@
cmake_minimum_required(VERSION 3.14)
project(mmdeploy_ort_net)

include(${CMAKE_SOURCE_DIR}/cmake/modules/FindONNXRUNTIME.cmake)

if ("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES)
include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake)
mmdeploy_add_module(${PROJECT_NAME} ort_net.cpp)
target_include_directories(${PROJECT_NAME} PRIVATE ${ONNXRUNTIME_DIR}/include)
target_link_directories(${PROJECT_NAME} PUBLIC ${ONNXRUNTIME_DIR}/lib)
target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy_onnxruntime_ops_obj)
target_link_libraries(${PROJECT_NAME} PUBLIC onnxruntime)
add_library(mmdeploy::ort_net ALIAS ${PROJECT_NAME})
else ()
message(ERROR "'ort_net' is NOT supported in target devices: ${MMDEPLOY_TARGET_DEVICES}")
csrc/net/trt/CMakeLists.txt (3 changes: 1 addition, 2 deletions)
@@ -11,8 +11,7 @@ target_include_directories(${PROJECT_NAME} PRIVATE
${TENSORRT_INCLUDE_DIR})
target_include_directories(${PROJECT_NAME} PRIVATE ${CUDNN_DIR}/include)
target_include_directories(${PROJECT_NAME} PRIVATE ${CUDA_TOOLKIT_ROOT_DIR}/include)
-target_link_directories(${PROJECT_NAME} PUBLIC ${CUDNN_DIR}/lib64 ${CUDNN_DIR}/lib/x64)
target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy_tensorrt_ops_obj)
-target_link_libraries(${PROJECT_NAME} PUBLIC ${TENSORRT_LIBRARY} cudnn)
target_link_libraries(${PROJECT_NAME} PUBLIC ${TENSORRT_LIBS} cudnn)

add_library(mmdeploy::trt_net ALIAS ${PROJECT_NAME})
csrc/preprocess/cuda/CMakeLists.txt (2 changes: 1 addition, 1 deletion)
@@ -23,5 +23,5 @@ mmdeploy_add_module(${PROJECT_NAME} "${SRCS}")
target_link_libraries(${PROJECT_NAME} PRIVATE
mmdeploy::transform ${PPLCV_LIBRARIES})
target_include_directories(${PROJECT_NAME}
-    PUBLIC ${CUDA_TOOLKIT_ROOT_DIR}/include ${PPLCV_INCLUDE_DIRS})
    PRIVATE ${CUDA_TOOLKIT_ROOT_DIR}/include ${PPLCV_INCLUDE_DIRS})
add_library(mmdeploy::transform_impl::cuda ALIAS ${PROJECT_NAME})
tests/test_csrc/CMakeLists.txt (3 changes: 3 additions, 0 deletions)
@@ -2,6 +2,9 @@
cmake_minimum_required(VERSION 3.14)
project(tests)

if ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES)
include(${CMAKE_SOURCE_DIR}/cmake/cuda.cmake)
endif()
include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake)


