STVM, NUPHAR, remove tvm from submodules list, checks pointers are not null. (#10211)
* STVM, checks pointers are not null.
* removes submodules tvm
* add missing include(FetchContent)
* add target tvm
* fix stvm test
* extend cgmanifest with dependencies of tvm
commit 481b96d32a (parent ec4362f8f3)
15 changed files with 332 additions and 101 deletions
.gitmodules (vendored, 6 changes)

@@ -10,9 +10,6 @@
 [submodule "cmake/external/onnx"]
 	path = cmake/external/onnx
 	url = https://github.com/onnx/onnx.git
-[submodule "cmake/external/tvm"]
-	path = cmake/external/tvm
-	url = https://github.com/microsoft/onnxruntime-tvm.git
 [submodule "cmake/external/date"]
 	path = cmake/external/date
 	url = https://github.com/HowardHinnant/date.git

@@ -78,6 +75,3 @@
 [submodule "cmake/external/onnx-tensorrt"]
 	path = cmake/external/onnx-tensorrt
 	url = https://github.com/onnx/onnx-tensorrt.git
-[submodule "cmake/external/tvm_update"]
-	path = cmake/external/tvm_update
-	url = https://github.com/apache/tvm

cgmanifest.json
@@ -38,7 +38,78 @@
       "git": {
         "commitHash": "638d7d2407de27f98f542f61a37a33c90a2e75a9",
         "repositoryUrl": "https://github.com/microsoft/onnxruntime-tvm.git"
-      }
+      },
+      "comments": "needed for nuphar"
+    }
+  },
+  {
+    "component": {
+      "type": "git",
+      "git": {
+        "commitHash": "7b3a22e465dd6aca4729504a19beb4bc23312755",
+        "repositoryUrl": "https://github.com/apache/tvm.git"
+      },
+      "comments": "needed for EP STVM"
+    }
+  },
+  {
+    "component": {
+      "type": "git",
+      "git": {
+        "commitHash": "cabe04d6d6b05356fa8f9741704924788f0dd762",
+        "repositoryUrl": "https://github.com/agauniyal/rang.git"
+      },
+      "comments": "dependency from tvm"
+    }
+  },
+  {
+    "component": {
+      "type": "git",
+      "git": {
+        "commitHash": "a3bcc6981d5dad3afb212689e2c7853d1b1ee45d",
+        "repositoryUrl": "https://github.com/NVIDIA/cutlass.git"
+      },
+      "comments": "dependency from tvm"
+    }
+  },
+  {
+    "component": {
+      "type": "git",
+      "git": {
+        "commitHash": "08f7c7e69f8ea61a0c4151359bc8023be8e9217b",
+        "repositoryUrl": "https://github.com/tlc-pack/libbacktrace.git"
+      },
+      "comments": "dependency from tvm"
+    }
+  },
+  {
+    "component": {
+      "type": "git",
+      "git": {
+        "commitHash": "36a91576edf633479c78649e050f18dd2ddc8103",
+        "repositoryUrl": "https://github.com/apache/incubator-tvm-vta.git"
+      },
+      "comments": "dependency from tvm"
+    }
+  },
+  {
+    "component": {
+      "type": "git",
+      "git": {
+        "commitHash": "111c9be5188f7350c2eac9ddaedd8cca3d7bf394",
+        "repositoryUrl": "https://github.com/kazuho/picojson.git"
+      },
+      "comments": "dependency from tvm"
+    }
+  },
+  {
+    "component": {
+      "type": "git",
+      "git": {
+        "commitHash": "b5e4186d7ab63458e79084842dced166be2ca5b5",
+        "repositoryUrl": "https://github.com/lammertb/libcrc.git"
+      },
+      "comments": "dependency from tvm"
     }
   },
   {

cmake/CMakeLists.txt
@@ -22,6 +22,7 @@ set(CMAKE_C_STANDARD 99)
 include(CheckCXXCompilerFlag)
 include(CheckLanguage)
 include(CMakeDependentOption)
+include(FetchContent)
 
 set(CMAKE_CXX_STANDARD 17)

@@ -1330,27 +1331,65 @@ endif()
 
 # TVM
 if (onnxruntime_USE_TVM)
+  if (NOT TARGET tvm)
+    message(STATUS "Include TVM(*).")
+    include(tvm)
+  endif()
   if (onnxruntime_USE_CUDA)
-    set(USE_CUDA ON)
+    if (onnxruntime_USE_STVM)
+      set(USE_CUDA ${onnxruntime_CUDA_HOME} CACHE BOOL "Only defined for TVM")
+      set(USE_MKLDNN ON CACHE BOOL "Only defined for TVM")
+      set(USE_CUDNN ON CACHE BOOL "Only defined for TVM")
+    endif()
+    if (onnxruntime_USE_NUPHAR)
+      set(USE_CUDA ON CACHE BOOL "Only defined for TVM")
+    endif()
   endif()
   if (onnxruntime_USE_LLVM)
-    set(USE_LLVM ON)
+    set(USE_LLVM ON CACHE BOOL "Only defined for TVM")
     add_definitions(-DUSE_TVM_WITH_LLVM)
   endif()
 
-  add_subdirectory(${PROJECT_SOURCE_DIR}/external/tvm EXCLUDE_FROM_ALL)
-  set_target_properties(tvm PROPERTIES FOLDER "External/tvm")
-  set_target_properties(tvm_topi PROPERTIES FOLDER "External/tvm")
-  set_target_properties(tvm_runtime PROPERTIES FOLDER "External/tvm")
+  if (onnxruntime_USE_STVM)
+    set(USE_OPENMP gnu CACHE STRING "Only defined for TVM")
+    set(USE_MICRO OFF CACHE BOOL "Only defined for TVM")
+  endif()
+  message(STATUS "TVM BEFORE USE_LLVM=${USE_LLVM} USE_OPENMP=${USE_OPENMP} USE_MICRO=${USE_MICRO} CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} USE_CUDA=${USE_CUDA} USE_GTEST=${USE_GTEST} CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}")
+  message(STATUS "tvm_SOURCE_DIR=${tvm_SOURCE_DIR}")
+  message(STATUS "tvm_BINARY_DIR=${tvm_BINARY_DIR}")
+  add_subdirectory(${tvm_SOURCE_DIR} ${tvm_BINARY_DIR} EXCLUDE_FROM_ALL)
+  message(STATUS "TVM AFTER USE_LLVM=${USE_LLVM} USE_OPENMP=${USE_OPENMP} USE_MICRO=${USE_MICRO} CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} USE_CUDA=${USE_CUDA} USE_GTEST=${USE_GTEST} CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}")
+
+  if (onnxruntime_USE_NUPHAR)
+    set_target_properties(tvm PROPERTIES FOLDER ${tvm_SOURCE_DIR})
+    set_target_properties(tvm_topi PROPERTIES FOLDER ${tvm_SOURCE_DIR})
+    set_target_properties(tvm_runtime PROPERTIES FOLDER ${tvm_SOURCE_DIR})
+
+    set(TVM_INCLUDES ${tvm_SOURCE_DIR}/include
+      ${tvm_SOURCE_DIR}/3rdparty/dmlc-core/include
+      ${tvm_SOURCE_DIR}/3rdparty/dlpack/include
+      $<TARGET_PROPERTY:tvm,INTERFACE_INCLUDE_DIRECTORIES>
+      $<TARGET_PROPERTY:tvm_topi,INTERFACE_INCLUDE_DIRECTORIES>)
+  endif()
+
+  if (onnxruntime_USE_STVM)
+    set_target_properties(tvm PROPERTIES FOLDER ${tvm_SOURCE_DIR})
+
+    set(TVM_INCLUDES ${tvm_SOURCE_DIR}/include
+      ${tvm_SOURCE_DIR}/3rdparty/dmlc-core/include
+      ${tvm_SOURCE_DIR}/3rdparty/dlpack/include
+      $<TARGET_PROPERTY:tvm,INTERFACE_INCLUDE_DIRECTORIES>)
+  endif()
 
-  set(TVM_INCLUDES ${PROJECT_SOURCE_DIR}/external/tvm/include
-    ${PROJECT_SOURCE_DIR}/external/tvm/3rdparty/dmlc-core/include
-    ${PROJECT_SOURCE_DIR}/external/tvm/3rdparty/dlpack/include
-    $<TARGET_PROPERTY:tvm,INTERFACE_INCLUDE_DIRECTORIES>
-    $<TARGET_PROPERTY:tvm_topi,INTERFACE_INCLUDE_DIRECTORIES>)
   add_definitions(-DUSE_TVM)
 
-  set(onnxruntime_tvm_libs onnxruntime_codegen_tvm)
+  if (onnxruntime_USE_NUPHAR)
+    set(onnxruntime_tvm_libs onnxruntime_codegen_tvm)
+  endif()
+  if (onnxruntime_USE_STVM)
+    set(onnxruntime_tvm_libs onnxruntime_providers_stvm)
+  endif()
 
   # needs to link with stdc++fs in Linux
   if (UNIX)
     if (NOT APPLE)

@@ -1683,7 +1722,9 @@ if (onnxruntime_USE_TVM)
       list(APPEND DISABLED_WARNINGS_FOR_TVM "-Wno-error=catch-value")
     endif()
   endif()
-  include(onnxruntime_codegen.cmake)
+  if (onnxruntime_USE_NUPHAR)
+    include(onnxruntime_codegen.cmake)
+  endif()
 endif()
 
 if (onnxruntime_ENABLE_MICROSOFT_INTERNAL)
cmake/external/tvm (vendored, 1 change)

@@ -1 +0,0 @@
-Subproject commit 9ec2b92d180dff8877e402018b97baa574031b8b
cmake/external/tvm.cmake (vendored, new file, 38 lines)

@@ -0,0 +1,38 @@
+if (onnxruntime_USE_STVM)
+  message(STATUS "onnxruntime_USE_STVM: Fetch tvm for STVM.")
+
+  FetchContent_Declare(
+    tvm
+    GIT_REPOSITORY https://github.com/apache/tvm.git
+    GIT_TAG v0.8.0
+  )
+
+  FetchContent_GetProperties(tvm)
+  if(NOT tvm_POPULATED)
+    FetchContent_Populate(tvm)
+  endif()
+
+  set(tvm_INCLUDE_DIRS ${tvm_SOURCE_DIR}/include)
+  set(onnxruntime_STVM_HOME ${tvm_SOURCE_DIR})
+  message(STATUS "Define onnxruntime_STVM_HOME.")
+  message(STATUS ${onnxruntime_STVM_HOME})
+
+endif()
+
+if (onnxruntime_USE_NUPHAR)
+  message(STATUS "onnxruntime_USE_NUPHAR: Fetch onnxruntime-tvm for NUPHAR.")
+
+  FetchContent_Declare(
+    tvm
+    GIT_REPOSITORY https://github.com/microsoft/onnxruntime-tvm.git
+    GIT_TAG 9ec2b92d180dff8877e402018b97baa574031b8b
+  )
+
+  FetchContent_GetProperties(tvm)
+  if(NOT tvm_POPULATED)
+    FetchContent_Populate(tvm)
+  endif()
+
+  set(tvm_INCLUDE_DIRS ${tvm_SOURCE_DIR}/include)
+
+endif()
cmake/external/tvm_update (vendored, 1 change)

@@ -1 +0,0 @@
-Subproject commit 094a73d4e43a25651555bab9b1fd6373262208c3

cmake/onnxruntime_providers.cmake
@@ -1275,6 +1275,10 @@ if (onnxruntime_USE_ROCM)
 endif()
 
 if (onnxruntime_USE_STVM)
+  if (NOT TARGET tvm)
+    message(STATUS "Include TVM.")
+    include(tvm)
+  endif()
   add_definitions(-DUSE_STVM=1)
 
   file (GLOB_RECURSE onnxruntime_providers_stvm_cc_srcs CONFIGURE_DEPENDS

@@ -1293,20 +1297,16 @@ if (onnxruntime_USE_STVM)
     ${onnxruntime_STVM_HOME}/3rdparty/dlpack/include
     ${onnxruntime_STVM_HOME}/3rdparty/dmlc-core/include
     ${PYTHON_INLCUDE_DIRS})
-  onnxruntime_add_include_to_target(onnxruntime_providers_stvm onnxruntime_common onnx)
+  onnxruntime_add_include_to_target(onnxruntime_providers_stvm onnxruntime_common onnx tvm)
 
   add_dependencies(onnxruntime_providers_stvm ${onnxruntime_EXTERNAL_DEPENDENCIES})
 
   target_link_libraries(onnxruntime_providers_stvm PRIVATE
     onnx
+    tvm
     onnxruntime_common
     onnxruntime_framework
   )
-  if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
-    target_link_libraries(onnxruntime_providers_stvm PRIVATE ${onnxruntime_STVM_HOME}/build/libtvm.dylib)
-  else()
-    target_link_libraries(onnxruntime_providers_stvm PRIVATE ${onnxruntime_STVM_HOME}/build/libtvm.so)
-  endif()
 
   set_target_properties(onnxruntime_providers_stvm PROPERTIES FOLDER "ONNXRuntime")
   set_target_properties(onnxruntime_providers_stvm PROPERTIES LINKER_LANGUAGE CXX)

cmake/onnxruntime_python.cmake
@@ -643,19 +643,16 @@ if (onnxruntime_USE_ROCM)
   )
 endif()
 
-if (onnxruntime_USE_TVM)
+if (onnxruntime_USE_NUPHAR)
   add_custom_command(
     TARGET onnxruntime_pybind11_state POST_BUILD
     COMMAND ${CMAKE_COMMAND} -E copy
       $<TARGET_FILE:tvm>
      $<TARGET_FILE_DIR:${build_output_target}>/onnxruntime/capi/
   )
-endif()
-
-if (onnxruntime_USE_NUPHAR)
   file(GLOB onnxruntime_python_nuphar_python_srcs CONFIGURE_DEPENDS
     "${ONNXRUNTIME_ROOT}/core/providers/nuphar/scripts/*"
   )
   add_custom_command(
     TARGET onnxruntime_pybind11_state POST_BUILD
     COMMAND ${CMAKE_COMMAND} -E make_directory $<TARGET_FILE_DIR:${build_output_target}>/onnxruntime/nuphar

@@ -679,7 +676,24 @@ if (onnxruntime_USE_STVM)
     COMMAND ${CMAKE_COMMAND} -E copy
       $<TARGET_FILE:onnxruntime_providers_stvm>
       $<TARGET_FILE_DIR:${build_output_target}>/onnxruntime/capi/
+    COMMAND ${CMAKE_COMMAND} -E copy
+      ${tvm_BINARY_DIR}/libtvm*
+      ${tvm_SOURCE_DIR}/python/tvm
+  )
+
+  add_custom_command(
+    TARGET onnxruntime_pybind11_state POST_BUILD
+    WORKING_DIRECTORY ${tvm_SOURCE_DIR}/python
+    COMMAND ${Python_EXECUTABLE} setup.py build_ext --inplace
+    COMMAND ${CMAKE_COMMAND} -E rm
+      ${tvm_SOURCE_DIR}/python/tvm/*.so
+    COMMAND ${CMAKE_COMMAND} -E env TVM_LIBRARY_PATH=${tvm_BINARY_DIR}
+      ${Python_EXECUTABLE} setup.py bdist_wheel
+    COMMAND ${CMAKE_COMMAND} -E copy
+      ${tvm_BINARY_DIR}/libtvm*
+      ${tvm_SOURCE_DIR}/python/tvm
   )
 
 endif()
 
 if (onnxruntime_USE_DML)

cmake/onnxruntime_unittests.cmake
@@ -459,12 +459,12 @@ if(onnxruntime_USE_COREML)
   endif()
 endif()
 
-file(GLOB_RECURSE onnxruntime_test_tvm_src CONFIGURE_DEPENDS
-  "${TEST_SRC_DIR}/tvm/*.h"
-  "${TEST_SRC_DIR}/tvm/*.cc"
-  )
-
 if(onnxruntime_USE_NUPHAR)
+  file(GLOB_RECURSE onnxruntime_test_tvm_src CONFIGURE_DEPENDS
+    "${TEST_SRC_DIR}/tvm/*.h"
+    "${TEST_SRC_DIR}/tvm/*.cc"
+    )
+
   list(APPEND onnxruntime_test_framework_src_patterns ${TEST_SRC_DIR}/framework/nuphar/*)
   list(APPEND onnxruntime_test_framework_libs onnxruntime_providers_nuphar)
   list(APPEND onnxruntime_test_providers_dependencies onnxruntime_providers_nuphar)

@@ -644,7 +644,7 @@ if (onnxruntime_ENABLE_TRAINING)
   list(APPEND all_tests ${onnxruntime_test_training_src})
 endif()
 
-if (onnxruntime_USE_TVM)
+if (onnxruntime_USE_NUPHAR)
   list(APPEND all_tests ${onnxruntime_test_tvm_src})
 endif()

STVM Execution Provider documentation
@@ -15,13 +15,7 @@
 STVM is an execution provider for ONNX Runtime that is built on top of Apache TVM. It enables ONNX Runtime users to leverage Apache TVM model optimizations.
 STVM EP is currently in "Preview". It's been tested to work on a handful of models on Linux, but not on Windows or MacOS.
 
-## Build
+### Build ONNX Runtime with the STVM Execution Provider
 
-To use the STVM EP in ONNX Runtime (ORT), users first need to build Apache TVM and ONNX Runtime.
-
-Note: some python packages may need to be upgraded/downgraded because both TVM and ORT with the STVM EP use the Python API. Alternatively, use modify PYTHONPATH to solve these conflicts.
-
-### Build and configure TVM
-
 Install the minimal pre-requisites on Ubuntu/Debian like linux operating systems:
 ```
@@ -29,29 +23,7 @@ apt-get install -y python3 python3-dev python3-pip python3-setuptools gcc libtin
 pip3 install numpy decorator attrs
 ```
 
-Clone this repo using the `--recursive` flag to pull all associated dependencies
+Clone this repo.
 
-Build TVM from the tvm_update folder:
-
-```
-cd onnxruntime/cmake/external/tvm_update/
-mkdir build
-cd ./build
-cmake -DCMAKE_BUILD_TYPE=Release -DUSE_LLVM=ON -DUSE_OPENMP=gnu -DUSE_MICRO=ON (If your machine is CUDA enabled -DUSE_CUDA=ON) ..
-make -j <number of threads in build machine>
-```
-
-Set the environment variable PYTHONPATH to tell python where to find the TVM library:
-
-```
-export TVM_HOME=<path_to_onnx_runtime>/cmake/external/tvm_update
-export PYTHONPATH=$TVM_HOME/python:${PYTHONPATH}
-```
-
-For more details on installing Apache TVM click [here](https://tvm.apache.org/docs/install/from_source.html)
-
-### Build ONNX Runtime with the STVM Execution Provider
-
 In order to build ONNXRT you will need to have CMake 3.18 or higher. In Ubuntu 20.04 you can use the following commands to install the latest version of CMake:
@@ -75,22 +47,22 @@ Build ONNX Runtime:
 ./build.sh --config Release --enable_pybind --build_wheel --skip_tests --parallel --use_stvm --skip_onnx_tests
 ```
 
+This command builds both TVM and onnxruntime-stvm. It creates two wheel, one for each project.
 Build the python API for ONNX Runtime instead of using the standard package:
 ```
 cd <path_to_onnx_runtime>
-pip3 uninstall onnxruntime onnxruntime-stvm -y
+pip3 uninstall onnxruntime onnxruntime-stvm tvm -y
 whl_path=$(find ./build/Linux/Release/dist -name "*.whl")
 python3 -m pip install $whl_path
 ```
-Alternatively, you can set PYTHONPATH to tell python where to find the ONNXRT library:
+Alternatively, you can set PYTHONPATH to tell python where to find the ONNXRT library and the TVM library.
 ```
-export ORT_PYTHON_HOME=<path_to_onnx_runtime>/build/Linux/Release
-export PYTHONPATH=$ORT_PYTHON_HOME:${PYTHONPATH}
+export PYTHONPATH=$ORT_PYTHON_HOME:$TVM_PYTHON_HOME:${PYTHONPATH}
 ```
 
 ## Configuration options
 STVM Executor Provider can be configured with the following provider options:
-```
+```python
 po = [dict(target=client_target,
            target_host=client_target_host,
            opt_level=client_opt_level,
@@ -109,7 +81,7 @@ stvm_session = onnxruntime.InferenceSession(model_path, providers=["StvmExecutio
 - `tuning_file_path` is path to AutoTVM or Ansor tuning file which gives specifications for given model and target for the best performance. (See below for more details).
 
 TVM supports models with fixed graph only. If your model has unknown dimensions in input shapes (excluding batch size) you must provide the shape using the `input_names` and `input_shapes` provider options. Below is an example of what must be passed to `provider_options`:
-```
+```python
 input_names = "input_1 input_2"
 input_shapes = "[1 3 224 224] [1 2]"
 ```
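Putting the options above together, the flow in Python looks roughly as follows. This is a minimal sketch, not text from the commit: the model file name and the shape string are placeholders, and the target/opt_level values mirror those used by the new onnxruntime_test_python_stvm.py test.

```python
import onnxruntime

# A plain CPU session is a convenient way to query input names (hypothetical model file).
cpu_sess = onnxruntime.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])

provider_options = dict(
    target="llvm -mcpu=core-avx2",       # illustrative, as in the new STVM test
    target_host="llvm -mcpu=core-avx2",
    opt_level=3,
    # space-separated strings, in the format the doc above describes
    input_names=" ".join(i.name for i in cpu_sess.get_inputs()),
    input_shapes="[1 3 224 224] [1 2]")  # placeholder; must match the real model

stvm_session = onnxruntime.InferenceSession(
    "model.onnx",
    providers=["StvmExecutionProvider"],
    provider_options=[provider_options])
```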
@@ -149,4 +121,26 @@ pip3 install protobuf==3.19.1
 
 The following pair of ONNX and protobuf versions have been found to be compatible:
 - 3.17.3 and 1.8.0
 - 3.19.1 and 1.10.1
+
+When use onnxruntime-stvm after it was build from the source, the following error may happen:
+
+```
+terminate called after throwing an instance of 'tvm::runtime::InternalError'
+  what(): [12:01:11] ..._deps/tvm-src/src/runtime/registry.cc:69:
+---------------------------------------------------------------
+An error occurred during the execution of TVM.
+For more information, please see: https://tvm.apache.org/docs/errors.html
+---------------------------------------------------------------
+  Check failed: (can_override) is false: Global PackedFunc arith.CreateAnalyzer is already registered
+
+Aborted (core dumped)
+```
+
+It means both onnxruntime and tvm loaded a different dynamic library ``libtvm.[so|dll]``.
+To solve that, `tvm` must be imported first:
+
+```python
+import tvm
+import onnxruntime
+```
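After installing (or pointing PYTHONPATH at) the freshly built wheels, a quick sanity check confirms that the interpreter resolves both libraries from the build tree and that the provider is registered. A sketch, assuming a build with --use_stvm; the printed paths will differ per machine:

```python
import tvm          # import tvm first, per the troubleshooting note above
import onnxruntime

print(tvm.__file__)           # should resolve to the freshly built TVM package
print(onnxruntime.__file__)   # should resolve to the freshly built ORT package
assert "StvmExecutionProvider" in onnxruntime.get_available_providers()
```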

stvm_api.cc
@@ -2,6 +2,7 @@
 // Licensed under the MIT License.
 
 #include "stvm_api.h"
+#include "core/common/common.h"
 
 #include <tvm/runtime/registry.h>
 #include <tvm/target/codegen.h>

@@ -32,6 +33,7 @@ tvm::runtime::Module TVMCompile(const std::string& onnx_txt,
   }
 
   const tvm::PackedFunc* compile = tvm::runtime::Registry::Get("tvm_onnx_import_and_compile");
+  ORT_ENFORCE(compile != nullptr, "Unable to retrieve 'tvm_onnx_import_and_compile'.");
   tvm::runtime::Module mod = (*compile)(
       TVMByteArray{onnx_txt.data(), onnx_txt.size()},
       model_path,

@@ -84,6 +86,7 @@ void TVMRun(tvm::runtime::Module& mod,
             [[maybe_unused]] tvm::runtime::TVMRetValue *ret)
 {
   const tvm::PackedFunc* run = tvm::runtime::Registry::Get("tvm_run");
+  ORT_ENFORCE(run != nullptr, "Unable to retrieve 'tvm_run'.");
   (*run)(mod);
 
   tvm::PackedFunc get_output = mod.GetFunction("get_output", false);

onnxruntime/test/python/onnxruntime_test_python.py
@@ -18,6 +18,10 @@ from onnxruntime.capi.onnxruntime_pybind11_state import Fail
 if platform.system() == 'Windows' and sys.version_info.major >= 3 and sys.version_info.minor >= 8:
     os.add_dll_directory(os.getcwd())
 
+available_providers = [
+    provider for provider in onnxrt.get_available_providers()
+    if provider not in {'StvmExecutionProvider'}]
+
 
 class TestInferenceSession(unittest.TestCase):
 
@@ -28,6 +32,12 @@ class TestInferenceSession(unittest.TestCase):
         output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
         np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
 
+    def testTvmImported(self):
+        if "StvmExecutionProvider" not in onnxrt.get_available_providers():
+            return
+        import tvm
+        self.assertTrue(tvm is not None)
+
     def testModelSerialization(self):
         try:
             so = onnxrt.SessionOptions()

@@ -308,7 +318,7 @@ class TestInferenceSession(unittest.TestCase):
         self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
 
     def testRunModel(self):
-        sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=onnxrt.get_available_providers())
+        sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=available_providers)
         x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
         input_name = sess.get_inputs()[0].name
         self.assertEqual(input_name, "X")

@@ -416,7 +426,7 @@ class TestInferenceSession(unittest.TestCase):
         self.assertTrue('CPU' in device or 'GPU' in device)
 
     def testRunModelSymbolicInput(self):
-        sess = onnxrt.InferenceSession(get_name("matmul_2.onnx"), providers=onnxrt.get_available_providers())
+        sess = onnxrt.InferenceSession(get_name("matmul_2.onnx"), providers=available_providers)
         x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
         input_name = sess.get_inputs()[0].name
         self.assertEqual(input_name, "X")

@@ -433,7 +443,7 @@ class TestInferenceSession(unittest.TestCase):
         np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
 
     def testBooleanInputs(self):
-        sess = onnxrt.InferenceSession(get_name("logicaland.onnx"), providers=onnxrt.get_available_providers())
+        sess = onnxrt.InferenceSession(get_name("logicaland.onnx"), providers=available_providers)
         a = np.array([[True, True], [False, False]], dtype=bool)
         b = np.array([[True, False], [True, False]], dtype=bool)

@@ -507,7 +517,7 @@ class TestInferenceSession(unittest.TestCase):
         np.testing.assert_equal(x, res[0])
 
     def testInputBytes(self):
-        sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=onnxrt.get_available_providers())
+        sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers)
         x = np.array([b'this', b'is', b'identity', b'test']).reshape((2, 2))
 
         x_name = sess.get_inputs()[0].name

@@ -528,7 +538,7 @@ class TestInferenceSession(unittest.TestCase):
         np.testing.assert_equal(x, res[0].astype('|S8'))
 
     def testInputObject(self):
-        sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=onnxrt.get_available_providers())
+        sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers)
         x = np.array(['this', 'is', 'identity', 'test'], object).reshape((2, 2))
 
         x_name = sess.get_inputs()[0].name

@@ -549,7 +559,7 @@ class TestInferenceSession(unittest.TestCase):
         np.testing.assert_equal(x, res[0])
 
     def testInputVoid(self):
-        sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=onnxrt.get_available_providers())
+        sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers)
         # numpy 1.20+ doesn't automatically pad the bytes based entries in the array when dtype is np.void,
         # so we use inputs where that is the case
         x = np.array([b'must', b'have', b'same', b'size'], dtype=np.void).reshape((2, 2))

@@ -636,7 +646,7 @@ class TestInferenceSession(unittest.TestCase):
         opt.graph_optimization_level = onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
         self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED)
         sess = onnxrt.InferenceSession(get_name("logicaland.onnx"), sess_options=opt,
-                                       providers=onnxrt.get_available_providers())
+                                       providers=available_providers)
         a = np.array([[True, True], [False, False]], dtype=bool)
         b = np.array([[True, False], [True, False]], dtype=bool)

@@ -666,7 +676,7 @@ class TestInferenceSession(unittest.TestCase):
 
     def testSequenceConstruct(self):
         sess = onnxrt.InferenceSession(get_name("sequence_construct.onnx"),
-                                       providers=onnxrt.get_available_providers())
+                                       providers=available_providers)
 
         self.assertEqual(sess.get_inputs()[0].type, 'tensor(int64)')
         self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')

@@ -1129,4 +1139,4 @@ class TestInferenceSession(unittest.TestCase):
         print("Create session with customize execution provider successfully!")
 
 if __name__ == '__main__':
-    unittest.main()
+    unittest.main(verbosity=1)
onnxruntime/test/python/onnxruntime_test_python_stvm.py (new file, 55 lines)

@@ -0,0 +1,55 @@
+import numpy
+from numpy.testing import assert_almost_equal
+from onnx import numpy_helper, TensorProto
+from onnx.helper import (
+    make_model, make_node, set_model_props, make_tensor,
+    make_graph, make_tensor_value_info)
+import onnxruntime
+
+if "StvmExecutionProvider" not in onnxruntime.get_available_providers():
+    raise AssertionError(
+        "Unable to find 'StvmExecutionProvider' in %r." % onnxruntime.get_available_providers())
+
+X = make_tensor_value_info('X', TensorProto.FLOAT, [None, None])
+A = make_tensor_value_info('A', TensorProto.FLOAT, [None, None])
+B = make_tensor_value_info('B', TensorProto.FLOAT, [None, None])
+Y = make_tensor_value_info('Y', TensorProto.FLOAT, [None, None])
+node1 = make_node('MatMul', ['X', 'A'], ['XA'])
+node2 = make_node('Add', ['XA', 'B'], ['Y'])
+graph = make_graph([node1, node2], 'lr', [X, A, B], [Y])
+onnx_model = make_model(graph)
+
+a = numpy.random.randn(2, 2).astype(numpy.float32)
+b = numpy.random.randn(1, 2).astype(numpy.float32)
+x = numpy.random.randn(1, 2).astype(numpy.float32)
+data = {'A': a, 'B': b, 'X': x}
+
+sess = onnxruntime.InferenceSession(
+    onnx_model.SerializeToString(), providers=['CPUExecutionProvider'])
+
+y = sess.run(None, data)[0]
+
+provider_options = dict(
+    target="llvm -mcpu=core-avx2",
+    target_host="llvm -mcpu=core-avx2",
+    opt_level=3,
+    freeze_weights=True,
+    tuning_file_path="",
+    tuning_type="Ansor",
+    input_names=" ".join(i.name for i in sess.get_inputs()),
+    input_shapes=" ".join(str(numpy.array(data[i.name].shape))
+                          for i in sess.get_inputs()))
+
+so = onnxruntime.SessionOptions()
+so.log_severity_level = 0
+so.log_verbosity_level = 0
+so.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
+
+sess = onnxruntime.InferenceSession(
+    onnx_model.SerializeToString(), so,
+    providers=["StvmExecutionProvider"],
+    provider_options=[provider_options])
+
+y_tvm = sess.run(None, data)[0]
+
+assert_almost_equal(y, y_tvm)
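The test validates the STVM EP by comparing its output against the CPU EP on the same random inputs. assert_almost_equal defaults to decimal=7; if a target introduces small floating-point divergence between the two providers, the standard decimal argument of numpy.testing can relax the comparison. A sketch reusing the test's y and y_tvm:

```python
from numpy.testing import assert_almost_equal

# decimal=5 tolerates small per-element FP differences between providers.
assert_almost_equal(y, y_tvm, decimal=5)
```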
setup.py (2 changes)

@@ -235,7 +235,7 @@ try:
             self._rewrite_ld_preload(to_preload)
             self._rewrite_ld_preload_cuda(to_preload_cuda)
             self._rewrite_ld_preload_tensorrt(to_preload_tensorrt)
-            if package_name == 'onnxruntime-tvm':
+            if package_name == 'onnxruntime-stvm':
                 self._rewrite_ld_preload_tvm()
             _bdist_wheel.run(self)
             if is_manylinux and not disable_auditwheel_repair:

tools/ci_build/build.py
@@ -439,8 +439,6 @@ def parse_arguments():
         "--use_nuphar", action='store_true', help="Build with nuphar")
     parser.add_argument(
         "--use_stvm", action='store_true', help="Build with standalone TVM")
-    parser.add_argument(
-        "--stvm_home", help="Path to TVM installation for the standalone TVM execution provider.")
     parser.add_argument(
         "--use_tensorrt", action='store_true', help="Build with TensorRT")
     parser.add_argument(

@@ -763,8 +761,8 @@ def generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home
                              args.android or (args.ios and is_macOS())
                              or args.use_rknpu)
                   else "OFF"),
-        "-Donnxruntime_USE_TVM=" + ("ON" if args.use_nuphar else "OFF"),
-        "-Donnxruntime_USE_LLVM=" + ("ON" if args.use_nuphar else "OFF"),
+        "-Donnxruntime_USE_TVM=" + ("ON" if (args.use_nuphar or args.use_stvm) else "OFF"),
+        "-Donnxruntime_USE_LLVM=" + ("ON" if (args.use_nuphar or args.use_stvm) else "OFF"),
         "-Donnxruntime_ENABLE_MICROSOFT_INTERNAL=" + ("ON" if args.enable_msinternal else "OFF"),
         "-Donnxruntime_USE_VITISAI=" + ("ON" if args.use_vitisai else "OFF"),
         "-Donnxruntime_USE_NUPHAR=" + ("ON" if args.use_nuphar else "OFF"),

@@ -772,7 +770,6 @@ def generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home
         "-Donnxruntime_TENSORRT_HOME=" + (tensorrt_home if args.use_tensorrt else ""),
         # set vars for standalone TVM
         "-Donnxruntime_USE_STVM=" + ("ON" if args.use_stvm else "OFF"),
-        "-Donnxruntime_STVM_HOME=" + (os.path.join(source_dir, "cmake", "external", "tvm_update")),
         # set vars for migraphx
         "-Donnxruntime_USE_MIGRAPHX=" + ("ON" if args.use_migraphx else "OFF"),
         "-Donnxruntime_MIGRAPHX_HOME=" + (migraphx_home if args.use_migraphx else ""),

@@ -932,7 +929,7 @@ def generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home
         "-DProtobuf_USE_STATIC_LIBS=ON"
     ]
 
-    if args.use_nuphar and args.llvm_path is not None:
+    if (args.use_nuphar or args.use_stvm) and args.llvm_path is not None:
         cmake_args += ["-DLLVM_DIR=%s" % args.llvm_path]
 
     if args.use_cuda and not is_windows():

@@ -1128,15 +1125,16 @@ def generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home
     for config in configs:
         config_build_dir = get_config_build_dir(build_dir, config)
         os.makedirs(config_build_dir, exist_ok=True)
-        if args.use_nuphar:
-            os.environ["PATH"] = os.path.join(
-                config_build_dir, "external", "tvm",
-                config) + os.pathsep + os.path.dirname(sys.executable) + os.pathsep + os.environ["PATH"]
+        if args.use_nuphar or args.use_stvm:
+            os.environ["PATH"] = (
+                os.path.join(config_build_dir, "_deps", "tvm-build") + os.pathsep +
+                os.path.join(config_build_dir, "_deps", "tvm-src") + os.pathsep +
+                os.path.dirname(sys.executable) + os.pathsep + os.environ["PATH"])
 
         run_subprocess(
             cmake_args + [
                 "-Donnxruntime_ENABLE_MEMLEAK_CHECKER=" +
-                ("ON" if config.lower() == 'debug' and not args.use_nuphar and not
+                ("ON" if config.lower() == 'debug' and not (args.use_nuphar or args.use_stvm) and not
                  args.use_openvino and not
                  args.enable_msvc_static_runtime and not
                  args.disable_memleak_checker

@@ -1535,8 +1533,7 @@ def run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs):
             continue
         dll_path_list = []
         if args.use_nuphar:
-            dll_path_list.append(os.path.join(
-                build_dir, config, "external", "tvm", config))
+            dll_path_list.append(os.path.join(build_dir, "_deps", "tvm-build"))
         if args.use_tensorrt:
             dll_path_list.append(os.path.join(args.tensorrt_home, 'lib'))
         # Adding the torch lib path for loading DLLs for onnxruntime in eager mode

@@ -1700,12 +1697,25 @@ def nuphar_run_python_tests(build_dir, configs):
         cwd = get_config_build_dir(build_dir, config)
         if is_windows():
             cwd = os.path.join(cwd, config)
-        dll_path = os.path.join(build_dir, config, "external", "tvm", config)
+        dll_path = os.path.join(build_dir, config, "_deps", "tvm-build", config)
         run_subprocess(
             [sys.executable, 'onnxruntime_test_python_nuphar.py'],
             cwd=cwd, dll_path=dll_path)
 
 
+def stvm_run_python_tests(build_dir, configs):
+    for config in configs:
+        if config == 'Debug':
+            continue
+        cwd = get_config_build_dir(build_dir, config)
+        if is_windows():
+            cwd = os.path.join(cwd, config)
+        dll_path = os.path.join(build_dir, config, "_deps", "tvm-build", config)
+        run_subprocess(
+            [sys.executable, 'onnxruntime_test_python_stvm.py'],
+            cwd=cwd, dll_path=dll_path)
+
+
 def run_nodejs_tests(nodejs_binding_dir):
     args = ['npm', 'test', '--', '--timeout=30000']
     if is_windows():

@@ -2324,6 +2334,9 @@ def main():
         if args.enable_pybind and not args.skip_onnx_tests and args.use_nuphar:
             nuphar_run_python_tests(build_dir, configs)
 
+        if args.enable_pybind and not args.skip_onnx_tests and args.use_stvm:
+            stvm_run_python_tests(build_dir, configs)
+
         # run node.js binding tests
         if args.build_nodejs and not args.skip_nodejs_tests:
             nodejs_binding_dir = os.path.normpath(os.path.join(source_dir, "js", "node"))