Mirror of https://github.com/saymrwulf/onnxruntime.git (synced 2026-05-14 20:48:00 +00:00)
### Description
Added the cuDNN frontend and used it for NHWC convolutions, optionally fusing the activation.

#### Backward compatible
- Models that already contain FusedConv can still run.
- If ORT is built with cuDNN 8, the cuDNN frontend is not built into the binary, and the old kernels (using cuDNN backend APIs) are used.

#### Major Changes
- For cuDNN 9, the cuDNN frontend is enabled to fuse convolution and bias when the provider option `fuse_conv_bias=1` is set.
- Removed the FusedConv fusion from the graph transformer for the CUDA provider, so FusedConv will no longer be added to graphs for the CUDA EP.
- Updated the cmake files regarding cuDNN settings. The build searches for a cuDNN installation in the following order (see the sketch after this description):
  * the environment variable `CUDNN_PATH`
  * the `onnxruntime_CUDNN_HOME` cmake extra define. If a build starts from build.py/build.sh, the user can pass it through the `--cudnn_home` parameter, or via the environment variable `CUDNN_HOME` if `--cudnn_home` is not used.
  * the cudnn python package installation directory, like python3.xx/site-packages/nvidia/cudnn
  * the CUDA installation path

#### Potential Issues
- If ORT is built with cuDNN 8, the FusedConv fusion is no longer applied automatically, so some models might see a performance regression. Users who still want the FusedConv operator for performance reasons have several workarounds: use an older version of onnxruntime, or use an older version of ORT to save the optimized onnx model and then run it with the latest version of ORT. We believe the majority of users will have moved to cuDNN 9 by the 1.20 release (the default in ORT and PyTorch will have been cuDNN 9 for three months by then), so the impact is small.
- A cuDNN graph uses TF32 by default, and the user cannot disable TF32 through the `use_tf32` cuda provider option. If a user encounters an accuracy issue (for example in testing), they have to set the environment variable `NVIDIA_TF32_OVERRIDE=0` to disable TF32. The documentation of `use_tf32` needs to be updated later.

#### Follow ups
This is one of the PRs that aim to enable NHWC convolution in the CUDA EP by default when the device supports it. Other changes will follow to make that possible:
(1) Enable `prefer_nhwc` by default for devices with sm >= 70.
(2) Make `fuse_conv_bias=1` the default after more testing.
(3) Add other NHWC operators (like Resize or UpSample).

### Motivation and Context
The new cuDNN frontend library provides the functionality to fuse operations and new heuristics for kernel selection. Here it fuses the convolution with the pointwise bias operation. On the [NVIDIA ResNet50](https://pytorch.org/hub/nvidia_deeplearningexamples_resnet50/) we get a performance boost from 49.1144 ms to 42.4643 ms per inference on a 2560x1440 input (`onnxruntime_perf_test -e cuda -I -q -r 100 -d 1 -i 'prefer_nhwc|1' resnet50.onnx`).

---------

Co-authored-by: Tianlei Wu <tlwu@microsoft.com>
Co-authored-by: Maximilian Mueller <maximilianm@nvidia.com>
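For reference, the cuDNN lookup order above can be sketched in CMake roughly as follows. This is a minimal illustration, not the repository's actual module logic; it assumes `FindPython3` and `FindCUDAToolkit` have already run so that `Python3_SITELIB` and `CUDAToolkit_LIBRARY_ROOT` are defined, and the `CUDNN_INCLUDE_DIR` result name is only an example.

```cmake
# Minimal sketch of the described cuDNN search order (illustrative only).
# Assumes find_package(Python3) and find_package(CUDAToolkit) have run, so
# Python3_SITELIB and CUDAToolkit_LIBRARY_ROOT are defined; unset hints are
# simply skipped by find_path.
find_path(CUDNN_INCLUDE_DIR cudnn.h
  HINTS
    $ENV{CUDNN_PATH}                     # 1. environment variable
    ${onnxruntime_CUDNN_HOME}            # 2. cmake extra define (--cudnn_home / CUDNN_HOME)
    ${Python3_SITELIB}/nvidia/cudnn      # 3. cudnn python package directory
    ${CUDAToolkit_LIBRARY_ROOT}          # 4. CUDA installation path
  PATH_SUFFIXES include)
```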
135 lines · 6.5 KiB · CMake
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

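# NOTE: CONFIGURE_DEPENDS makes CMake re-check this glob at build time and
# re-run configuration when matching files are added or removed.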
file(GLOB_RECURSE onnxruntime_framework_srcs CONFIGURE_DEPENDS
  "${ONNXRUNTIME_INCLUDE_DIR}/core/framework/*.h"
  "${ONNXRUNTIME_ROOT}/core/framework/*.h"
  "${ONNXRUNTIME_ROOT}/core/framework/*.cc"
)

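# Training-enabled builds append extra framework sources for the torch
# interop and/or Triton executors below.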
if (onnxruntime_ENABLE_TRAINING_TORCH_INTEROP)
  file(GLOB_RECURSE onnxruntime_training_framework_torch_srcs CONFIGURE_DEPENDS
    "${ORTTRAINING_SOURCE_DIR}/core/framework/torch/*.h"
    "${ORTTRAINING_SOURCE_DIR}/core/framework/torch/*.cc"
  )
  list(APPEND onnxruntime_framework_srcs ${onnxruntime_training_framework_torch_srcs})
  if (onnxruntime_ENABLE_TRITON)
    file(GLOB_RECURSE onnxruntime_training_framework_triton_srcs CONFIGURE_DEPENDS
      "${ORTTRAINING_SOURCE_DIR}/core/framework/triton/*.h"
      "${ORTTRAINING_SOURCE_DIR}/core/framework/triton/*.cc"
    )
    list(APPEND onnxruntime_framework_srcs ${onnxruntime_training_framework_triton_srcs})
  endif()
elseif(onnxruntime_ENABLE_TRITON)
  # The Triton executor shares some code with torch_interop, such as the Python and dlpack related files.
  # When torch_interop is enabled, all of these dependencies are already included;
  # if not, we need to include them explicitly.
  file(GLOB_RECURSE onnxruntime_training_framework_triton_srcs CONFIGURE_DEPENDS
    "${ORTTRAINING_SOURCE_DIR}/core/framework/torch/dlpack_python.h"
    "${ORTTRAINING_SOURCE_DIR}/core/framework/torch/dlpack_python.cc"
    "${ORTTRAINING_SOURCE_DIR}/core/framework/torch/gil.h"
    "${ORTTRAINING_SOURCE_DIR}/core/framework/torch/python_common.h"
    "${ORTTRAINING_SOURCE_DIR}/core/framework/triton/*.h"
    "${ORTTRAINING_SOURCE_DIR}/core/framework/triton/*.cc"
  )
  list(APPEND onnxruntime_framework_srcs ${onnxruntime_training_framework_triton_srcs})
endif()

if (onnxruntime_MINIMAL_BUILD)
  set(onnxruntime_framework_src_exclude
    "${ONNXRUNTIME_ROOT}/core/framework/fallback_cpu_capability.h"
    "${ONNXRUNTIME_ROOT}/core/framework/fallback_cpu_capability.cc"
  )

  # custom ops support must be explicitly enabled in a minimal build. exclude if not.
  if (NOT onnxruntime_MINIMAL_BUILD_CUSTOM_OPS)
    list(APPEND onnxruntime_framework_src_exclude
      "${ONNXRUNTIME_INCLUDE_DIR}/core/framework/customregistry.h"
      "${ONNXRUNTIME_ROOT}/core/framework/customregistry.cc"
    )
  endif()

  list(REMOVE_ITEM onnxruntime_framework_srcs ${onnxruntime_framework_src_exclude})
endif()

source_group(TREE ${REPO_ROOT} FILES ${onnxruntime_framework_srcs})

onnxruntime_add_static_library(onnxruntime_framework ${onnxruntime_framework_srcs})

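# On MSVC, attach the natvis debugger-visualization file as an INTERFACE
# source; the BUILD_INTERFACE generator expression limits it to the build
# tree so it does not leak into installed or exported targets.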
if (MSVC)
  set(ORT_FRAMEWORK_NATVIS_FILE "onnxruntime_framework.natvis")
  target_sources(
    onnxruntime_framework
    INTERFACE $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/${ORT_FRAMEWORK_NATVIS_FILE}>)
endif()

if(onnxruntime_ENABLE_INSTRUMENT)
  target_compile_definitions(onnxruntime_framework PRIVATE ONNXRUNTIME_ENABLE_INSTRUMENT)
endif()

if(onnxruntime_USE_TENSORRT OR onnxruntime_USE_NCCL)
  # TODO: for now, core framework depends on CUDA. It should be moved to TensorRT EP.
  # TODO: provider_bridge_ort.cc should not include nccl.h
  target_include_directories(onnxruntime_framework PRIVATE ${ONNXRUNTIME_ROOT} ${eigen_INCLUDE_DIRS} PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
else()
  target_include_directories(onnxruntime_framework PRIVATE ${ONNXRUNTIME_ROOT} ${eigen_INCLUDE_DIRS} PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
endif()

# Needed for the provider interface, as it includes training headers when training is enabled.
if (onnxruntime_ENABLE_TRAINING_OPS)
  target_include_directories(onnxruntime_framework PRIVATE ${ORTTRAINING_ROOT})
  if (onnxruntime_ENABLE_TRAINING_TORCH_INTEROP OR onnxruntime_ENABLE_TRITON)
    onnxruntime_add_include_to_target(onnxruntime_framework Python::Module)
    target_include_directories(onnxruntime_framework PRIVATE ${dlpack_SOURCE_DIR}/include)
  endif()
endif()

if (onnxruntime_USE_MPI)
  target_include_directories(onnxruntime_framework PUBLIC ${MPI_CXX_INCLUDE_DIRS})
endif()

if (onnxruntime_ENABLE_ATEN)
  # DLPack is a header-only dependency.
  set(DLPACK_INCLUDE_DIR ${dlpack_SOURCE_DIR}/include)
  target_include_directories(onnxruntime_framework PRIVATE ${DLPACK_INCLUDE_DIR})
endif()

onnxruntime_add_include_to_target(onnxruntime_framework onnxruntime_common onnx onnx_proto ${PROTOBUF_LIB} flatbuffers::flatbuffers safeint_interface Boost::mp11 nlohmann_json::nlohmann_json)

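# Optionally link mimalloc as the allocator; mimalloc-static is the static
# library target provided by the mimalloc project.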
if (onnxruntime_USE_MIMALLOC)
  target_link_libraries(onnxruntime_framework mimalloc-static)
endif()

if (CMAKE_SYSTEM_NAME STREQUAL "Emscripten")
  target_link_libraries(onnxruntime_framework ${ABSEIL_LIBS})
endif()

set_target_properties(onnxruntime_framework PROPERTIES FOLDER "ONNXRuntime")

# onnx must build first to generate the headers that this project includes.
add_dependencies(onnxruntime_framework ${onnxruntime_EXTERNAL_DEPENDENCIES})

# In order to find the shared provider libraries, we need to add the origin to the rpath for all executables we build.
# For the shared onnxruntime library this is set in onnxruntime.cmake through CMAKE_SHARED_LINKER_FLAGS,
# but our test binaries don't use the shared library, so it must be set for them here.
# On Win32 an absolute path to the shared providers is generated from the location of the executable/onnxruntime.dll.
if (UNIX AND NOT APPLE AND NOT onnxruntime_MINIMAL_BUILD AND NOT CMAKE_SYSTEM_NAME STREQUAL "Emscripten" AND NOT ${CMAKE_SYSTEM_NAME} MATCHES "AIX")
  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-rpath='$ORIGIN'")
endif()

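# Optional debug feature: dump node inputs/outputs to a SQLite database.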
if (onnxruntime_DEBUG_NODE_INPUTS_OUTPUTS_ENABLE_DUMP_TO_SQLDB)
  find_package(SQLite3 REQUIRED)
  include_directories(${SQLite3_INCLUDE_DIR})
  target_link_libraries(onnxruntime_framework ${SQLite3_LIBRARY})
  target_compile_definitions(onnxruntime_framework PRIVATE DEBUG_NODE_INPUTS_OUTPUTS_ENABLE_DUMP_TO_SQLDB)
endif()

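# _SCL_SECURE_NO_WARNINGS suppresses MSVC's checked-iterator ("unsafe"
# standard library call) warnings.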
if (WIN32)
  target_compile_definitions(onnxruntime_framework PRIVATE _SCL_SECURE_NO_WARNINGS)
endif()

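# Shared-library builds install only the public provider_options.h header;
# static builds install the full framework headers plus the library itself.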
if (onnxruntime_BUILD_SHARED_LIB)
  install(FILES ${PROJECT_SOURCE_DIR}/../include/onnxruntime/core/framework/provider_options.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/onnxruntime/)
else()
  install(DIRECTORY ${PROJECT_SOURCE_DIR}/../include/onnxruntime/core/framework DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/onnxruntime/core)
  install(TARGETS onnxruntime_framework
          ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
          LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
          RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
          FRAMEWORK DESTINATION ${CMAKE_INSTALL_BINDIR})
endif()