# Mirror of https://github.com/saymrwulf/onnxruntime.git (synced 2026-05-16).
# See https://github.com/microsoft/onnxruntime/pull/17468 — that PR did not
# fully fix the issue for some environments; this file carries the follow-up fix.
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

# Global definitions and variables shared by the TensorRT execution provider
# build below. NOTE: directory-scoped add_definitions/set calls are kept as-is
# because later targets (including the fetched onnx-tensorrt subproject) rely
# on inheriting them.
add_definitions(-DUSE_TENSORRT=1)
if (onnxruntime_TENSORRT_PLACEHOLDER_BUILDER)
  add_definitions(-DORT_TENSORRT_PLACEHOLDER_BUILDER)
endif()
# Tell the onnx-tensorrt subproject to build only its library (no tools).
set(BUILD_LIBRARY_ONLY 1)
add_definitions("-DONNX_ML=1")
add_definitions("-DONNX_NAMESPACE=onnx")
set(CUDA_INCLUDE_DIRS ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
set(TENSORRT_ROOT ${onnxruntime_TENSORRT_HOME})
# Saved so the original flags can be restored after the onnx-tensorrt
# subproject (which needs relaxed warnings) has been configured.
set(OLD_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
set(PROTOBUF_LIBRARY ${PROTOBUF_LIB})
|
|
# Relax compiler warnings so the third-party onnx-tensorrt sources build
# cleanly; the original flags are restored later from OLD_CMAKE_CXX_FLAGS.
if (WIN32)
  add_definitions(-D_SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING=1)
  set(OLD_CMAKE_CUDA_FLAGS ${CMAKE_CUDA_FLAGS})
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4099 /wd4551 /wd4505 /wd4515 /wd4706 /wd4456 /wd4324 /wd4701 /wd4804 /wd4702 /wd4458 /wd4703")
  if (CMAKE_BUILD_TYPE STREQUAL "Debug")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4805")
  endif()
  # Force-include <algorithm> for CUDA sources (needed by onnx-tensorrt on MSVC).
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -include algorithm")
  set(DISABLED_WARNINGS_FOR_TRT /wd4456)
endif()
if (CMAKE_COMPILER_IS_GNUCC)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter -Wno-missing-field-initializers")
endif()
set(CXX_VERSION_DEFINED TRUE)
|
|
|
|
# There is an issue when running "Debug build" TRT EP with "Release build" TRT builtin parser on Windows.
# We enforce following workaround for now until the real fix.
# NOTE(review): testing CMAKE_BUILD_TYPE is unreliable under multi-config
# generators (it can be empty); kept as-is to match upstream behavior.
if (WIN32 AND CMAKE_BUILD_TYPE STREQUAL "Debug")
  set(onnxruntime_USE_TENSORRT_BUILTIN_PARSER OFF)
  message(STATUS "[Note] There is an issue when running \"Debug build\" TRT EP with \"Release build\" TRT built-in parser on Windows. This build will use tensorrt oss parser instead.")
endif()
|
|
|
|
# Locate the TensorRT libraries. Two modes:
#   - builtin parser: use the nvonnxparser shipped with the TensorRT install;
#   - otherwise: fetch and build the open-source onnx-tensorrt parser.
if (onnxruntime_USE_TENSORRT_BUILTIN_PARSER)
  # Add TensorRT library
  find_path(TENSORRT_INCLUDE_DIR NvInfer.h
    HINTS ${TENSORRT_ROOT}
    PATH_SUFFIXES include)
  message(STATUS "Found TensorRT headers at ${TENSORRT_INCLUDE_DIR}")
  find_library(TENSORRT_LIBRARY_INFER nvinfer
    HINTS ${TENSORRT_ROOT}
    PATH_SUFFIXES lib lib64 lib/x64)
  find_library(TENSORRT_LIBRARY_INFER_PLUGIN nvinfer_plugin
    HINTS ${TENSORRT_ROOT}
    PATH_SUFFIXES lib lib64 lib/x64)
  find_library(TENSORRT_LIBRARY_NVONNXPARSER nvonnxparser
    HINTS ${TENSORRT_ROOT}
    PATH_SUFFIXES lib lib64 lib/x64)
  set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER} ${TENSORRT_LIBRARY_INFER_PLUGIN} ${TENSORRT_LIBRARY_NVONNXPARSER})
  message(STATUS "Find TensorRT libs at ${TENSORRT_LIBRARY}")
else()
  # Build the open-source onnx-tensorrt parser from source.
  FetchContent_Declare(
    onnx_tensorrt
    URL ${DEP_URL_onnx_tensorrt}
    URL_HASH SHA1=${DEP_SHA1_onnx_tensorrt}
  )
  if (NOT CUDA_INCLUDE_DIR)
    set(CUDA_INCLUDE_DIR ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}) # onnx-tensorrt repo needs this variable to build
  endif()
  # The onnx_tensorrt repo contains a test program, getSupportedAPITest, which doesn't support Windows. It uses
  # unistd.h. So we must exclude it from our build. onnxruntime_fetchcontent_makeavailable is for the purpose.
  onnxruntime_fetchcontent_makeavailable(onnx_tensorrt)
  include_directories(${onnx_tensorrt_SOURCE_DIR})
  # Restore the project's original C++ flags now that the subproject is configured.
  set(CMAKE_CXX_FLAGS ${OLD_CMAKE_CXX_FLAGS})
  if (CMAKE_COMPILER_IS_GNUCC)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter")
  endif()
  if (WIN32)
    set(CMAKE_CUDA_FLAGS ${OLD_CMAKE_CUDA_FLAGS})
    unset(PROTOBUF_LIBRARY)
    unset(OLD_CMAKE_CXX_FLAGS)
    unset(OLD_CMAKE_CUDA_FLAGS)
    set_target_properties(nvonnxparser PROPERTIES LINK_FLAGS "/ignore:4199")
    # /FIio.h force-includes io.h for the parser targets on Windows.
    target_compile_options(nvonnxparser_static PRIVATE /FIio.h /wd4100)
    target_compile_options(nvonnxparser PRIVATE /FIio.h /wd4100)
  endif()
  set(onnxparser_link_libs nvonnxparser_static)
endif()
|
|
|
|
include_directories(${TENSORRT_INCLUDE_DIR})
# ${TENSORRT_LIBRARY} is empty if we link nvonnxparser_static.
# nvonnxparser_static is linked against tensorrt libraries in onnx-tensorrt
# See https://github.com/onnx/onnx-tensorrt/blob/8af13d1b106f58df1e98945a5e7c851ddb5f0791/CMakeLists.txt#L121
set(trt_link_libs cudnn cublas ${CMAKE_DL_LIBS} ${TENSORRT_LIBRARY})
|
|
|
|
# Collect the provider's sources. Globbing is the project-wide convention
# here; CONFIGURE_DEPENDS makes the glob re-run at build time so new files
# are picked up without a manual reconfigure.
file(GLOB_RECURSE onnxruntime_providers_tensorrt_cc_srcs CONFIGURE_DEPENDS
  "${ONNXRUNTIME_ROOT}/core/providers/tensorrt/*.h"
  "${ONNXRUNTIME_ROOT}/core/providers/tensorrt/*.cc"
  "${ONNXRUNTIME_ROOT}/core/providers/shared_library/*.h"
  "${ONNXRUNTIME_ROOT}/core/providers/shared_library/*.cc"
  # A few CUDA provider sources are compiled directly into this provider.
  "${ONNXRUNTIME_ROOT}/core/providers/cuda/cuda_stream_handle.h"
  "${ONNXRUNTIME_ROOT}/core/providers/cuda/cuda_stream_handle.cc"
  "${ONNXRUNTIME_ROOT}/core/providers/cuda/cuda_graph.h"
  "${ONNXRUNTIME_ROOT}/core/providers/cuda/cuda_graph.cc"
)

source_group(TREE ${ONNXRUNTIME_ROOT}/core FILES ${onnxruntime_providers_tensorrt_cc_srcs})
|
|
# Define the TensorRT execution provider as a shared library module and wire
# up its dependencies and include paths.
onnxruntime_add_shared_library_module(onnxruntime_providers_tensorrt ${onnxruntime_providers_tensorrt_cc_srcs})
onnxruntime_add_include_to_target(onnxruntime_providers_tensorrt onnxruntime_common onnx flatbuffers::flatbuffers Boost::mp11 safeint_interface)
add_dependencies(onnxruntime_providers_tensorrt onnxruntime_providers_shared ${onnxruntime_EXTERNAL_DEPENDENCIES})
if (onnxruntime_USE_TENSORRT_BUILTIN_PARSER)
  target_link_libraries(onnxruntime_providers_tensorrt PRIVATE ${trt_link_libs} cudart ${ONNXRUNTIME_PROVIDERS_SHARED} ${PROTOBUF_LIB} flatbuffers::flatbuffers Boost::mp11 safeint_interface ${ABSEIL_LIBS})
else()
  # ${onnxparser_link_libs} (nvonnxparser_static) carries the parser; see note above trt_link_libs.
  target_link_libraries(onnxruntime_providers_tensorrt PRIVATE ${onnxparser_link_libs} ${trt_link_libs} cudart ${ONNXRUNTIME_PROVIDERS_SHARED} ${PROTOBUF_LIB} flatbuffers::flatbuffers ${ABSEIL_LIBS})
endif()
target_include_directories(onnxruntime_providers_tensorrt PRIVATE ${ONNXRUNTIME_ROOT} ${CMAKE_CURRENT_BINARY_DIR} ${eigen_INCLUDE_DIRS} PUBLIC ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
if(onnxruntime_CUDNN_HOME)
  target_include_directories(onnxruntime_providers_tensorrt PRIVATE ${onnxruntime_CUDNN_HOME}/include)
endif()
|
|
|
|
# ${CMAKE_CURRENT_BINARY_DIR} is so that #include "onnxruntime_config.h" inside tensor_shape.h is found
set_target_properties(onnxruntime_providers_tensorrt PROPERTIES LINKER_LANGUAGE CUDA)
set_target_properties(onnxruntime_providers_tensorrt PROPERTIES FOLDER "ONNXRuntime")
target_compile_definitions(onnxruntime_providers_tensorrt PRIVATE ONNXIFI_BUILD_LIBRARY=1)
# DISABLED_WARNINGS_FOR_TRT is only set on WIN32 (see flag setup above); it is
# empty elsewhere, which makes this a no-op on non-Windows builds.
target_compile_options(onnxruntime_providers_tensorrt PRIVATE ${DISABLED_WARNINGS_FOR_TRT})
if (WIN32)
  target_compile_options(onnxruntime_providers_tensorrt INTERFACE /wd4456)
endif()
|
|
|
|
# Needed for the provider interface, as it includes training headers when training is enabled
if (onnxruntime_ENABLE_TRAINING_OPS)
  target_include_directories(onnxruntime_providers_tensorrt PRIVATE ${ORTTRAINING_ROOT})
  if (onnxruntime_ENABLE_TRAINING_TORCH_INTEROP)
    onnxruntime_add_include_to_target(onnxruntime_providers_tensorrt Python::Module)
  endif()
endif()
|
|
|
|
# Restrict the module's exported symbols per platform so only the provider
# entry points are visible to the loader.
if(APPLE)
  set_property(TARGET onnxruntime_providers_tensorrt APPEND_STRING PROPERTY LINK_FLAGS "-Xlinker -exported_symbols_list ${ONNXRUNTIME_ROOT}/core/providers/tensorrt/exported_symbols.lst")
  target_link_libraries(onnxruntime_providers_tensorrt PRIVATE nsync::nsync_cpp)
elseif(UNIX)
  set_property(TARGET onnxruntime_providers_tensorrt APPEND_STRING PROPERTY COMPILE_FLAGS "-Wno-deprecated-declarations")
  # --gc-sections drops unreferenced sections; the version script hides all
  # symbols except those listed.
  set_property(TARGET onnxruntime_providers_tensorrt APPEND_STRING PROPERTY LINK_FLAGS "-Xlinker --version-script=${ONNXRUNTIME_ROOT}/core/providers/tensorrt/version_script.lds -Xlinker --gc-sections")
  target_link_libraries(onnxruntime_providers_tensorrt PRIVATE nsync::nsync_cpp stdc++fs)
elseif(WIN32)
  set_property(TARGET onnxruntime_providers_tensorrt APPEND_STRING PROPERTY LINK_FLAGS "-DEF:${ONNXRUNTIME_ROOT}/core/providers/tensorrt/symbols.def")
else()
  message(FATAL_ERROR "onnxruntime_providers_tensorrt unknown platform, need to specify shared library exports for it")
endif()
|
|
|
|
# Install the provider module into the standard lib/bin locations.
install(TARGETS onnxruntime_providers_tensorrt
  ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
  LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
  RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})