# onnxruntime/cmake/onnxruntime_python.cmake
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
include(pybind11)
find_package(NumPy)

# 1. Resolve the Python include directory (for Python.h) by asking the
#    interpreter itself, unless the caller already provided one.
#    Uses execute_process; exec_program has been deprecated since CMake 3.0.
if(NOT PYTHON_INCLUDE_DIR)
  execute_process(
    COMMAND "${PYTHON_EXECUTABLE}" -c
            "import distutils.sysconfig; print(distutils.sysconfig.get_python_inc())"
    OUTPUT_VARIABLE PYTHON_INCLUDE_DIR
    OUTPUT_STRIP_TRAILING_WHITESPACE
    RESULT_VARIABLE PYTHON_NOT_FOUND)
  # RESULT_VARIABLE is 0 on success, which if() treats as false.
  if(PYTHON_NOT_FOUND)
    message(FATAL_ERROR
            "Cannot get Python include directory. Is distutils installed?")
  endif()
endif()
# 2. Resolve the installed version of NumPy (for numpy/arrayobject.h) by
#    asking the interpreter, unless the caller already provided a directory.
#    Uses execute_process; exec_program has been deprecated since CMake 3.0.
if(NOT NUMPY_INCLUDE_DIR)
  execute_process(
    COMMAND "${PYTHON_EXECUTABLE}" -c "import numpy; print(numpy.get_include())"
    OUTPUT_VARIABLE NUMPY_INCLUDE_DIR
    OUTPUT_STRIP_TRAILING_WHITESPACE
    RESULT_VARIABLE NUMPY_NOT_FOUND)
  # RESULT_VARIABLE is 0 on success, which if() treats as false.
  if(NUMPY_NOT_FOUND)
    message(FATAL_ERROR
            "Cannot get NumPy include directory: Is NumPy installed?")
  endif()
endif()
# ---[ Python + Numpy
# Source patterns for the onnxruntime_pybind11_state extension module.
set(onnxruntime_pybind_srcs_pattern
    "${ONNXRUNTIME_ROOT}/python/*.cc"
    "${ONNXRUNTIME_ROOT}/python/*.h"
)
if (onnxruntime_ENABLE_TRAINING)
  # Training builds additionally compile the orttraining bindings.
  list(APPEND onnxruntime_pybind_srcs_pattern
    "${ORTTRAINING_ROOT}/orttraining/python/*.cc"
    "${ORTTRAINING_ROOT}/orttraining/python/*.h"
  )
endif()

file(GLOB onnxruntime_pybind_srcs CONFIGURE_DEPENDS
  ${onnxruntime_pybind_srcs_pattern}
)
# The Python extension module itself (loaded by the interpreter at runtime,
# hence MODULE rather than SHARED).
add_library(onnxruntime_pybind11_state MODULE ${onnxruntime_pybind_srcs})
if(MSVC)
  # Treat all sources as UTF-8; for CUDA sources the flag must be forwarded
  # through nvcc to the host compiler.
  target_compile_options(onnxruntime_pybind11_state PRIVATE
    "$<$<COMPILE_LANGUAGE:CUDA>:SHELL:--compiler-options /utf-8>"
    "$<$<NOT:$<COMPILE_LANGUAGE:CUDA>>:/utf-8>")
endif()

if(HAS_CAST_FUNCTION_TYPE)
  # pybind11's function-pointer casts trigger -Wcast-function-type on GCC 8+.
  target_compile_options(onnxruntime_pybind11_state PRIVATE "-Wno-cast-function-type")
endif()
if(onnxruntime_PYBIND_EXPORT_OPSCHEMA)
  target_compile_definitions(onnxruntime_pybind11_state PRIVATE onnxruntime_PYBIND_EXPORT_OPSCHEMA)
endif()
if (onnxruntime_USE_DNNL)
  target_compile_definitions(onnxruntime_pybind11_state PRIVATE USE_DNNL=1)
endif()
if (MSVC AND NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
  # TODO: fix the warnings instead of suppressing C4244 (narrowing
  # conversion) on 32-bit MSVC builds.
  target_compile_options(onnxruntime_pybind11_state PRIVATE "/wd4244")
endif()
target_include_directories(onnxruntime_pybind11_state PRIVATE
  ${ONNXRUNTIME_ROOT} ${PYTHON_INCLUDE_DIR} ${NUMPY_INCLUDE_DIR} ${pybind11_INCLUDE_DIRS})

if(onnxruntime_USE_CUDA)
  target_include_directories(onnxruntime_pybind11_state PRIVATE ${onnxruntime_CUDNN_HOME}/include)
endif()
if (onnxruntime_ENABLE_TRAINING)
  target_include_directories(onnxruntime_pybind11_state PRIVATE ${ORTTRAINING_ROOT})
endif()

# Platform-specific linker flags restricting which symbols the module exports:
# an exported-symbols list on macOS, a version script on other Unixes, and a
# module-definition (.def) file on Windows.
if(APPLE)
  set(ONNXRUNTIME_SO_LINK_FLAG "-Xlinker -exported_symbols_list ${ONNXRUNTIME_ROOT}/python/exported_symbols.lst")
elseif(UNIX)
  set(ONNXRUNTIME_SO_LINK_FLAG "-Xlinker --version-script=${ONNXRUNTIME_ROOT}/python/version_script.lds -Xlinker --gc-sections")
else()
  set(ONNXRUNTIME_SO_LINK_FLAG "-DEF:${ONNXRUNTIME_ROOT}/python/pybind.def")
endif()
# Libraries linked into the pybind11 extension module. The ${PROVIDERS_*}
# variables are empty unless the corresponding execution provider was enabled
# at configure time, so disabled providers contribute nothing to the list.
set(onnxruntime_pybind11_state_libs
    onnxruntime_session
    ${onnxruntime_libs}
    ${PROVIDERS_CUDA}
    ${PROVIDERS_DNNL}
    ${PROVIDERS_TENSORRT}
    ${PROVIDERS_MIGRAPHX}
    ${PROVIDERS_NGRAPH}
    ${PROVIDERS_OPENVINO}
    ${PROVIDERS_NUPHAR}
    ${PROVIDERS_VITISAI}
    ${PROVIDERS_NNAPI}
    ${PROVIDERS_RKNPU}
    ${PROVIDERS_DML}
    ${PROVIDERS_ACL}
    ${PROVIDERS_ARMNN}
    onnxruntime_optimizer
    onnxruntime_providers
    # NOTE(review): onnxruntime_util is listed twice — presumably required by
    # static-link ordering; confirm before deduplicating.
    onnxruntime_util
    ${onnxruntime_tvm_libs}
    onnxruntime_framework
    onnxruntime_util
    onnxruntime_graph
    onnxruntime_common
    onnxruntime_mlas
    ${pybind11_lib}
)
if (onnxruntime_ENABLE_LANGUAGE_INTEROP_OPS)
  list(APPEND onnxruntime_pybind11_state_libs onnxruntime_language_interop onnxruntime_pyop)
endif()

if (onnxruntime_ENABLE_TRAINING)
  # Insert immediately after onnxruntime_session (index 1) rather than
  # appending, so link order stays correct.
  list(INSERT onnxruntime_pybind11_state_libs 1 onnxruntime_training)
endif()

set(onnxruntime_pybind11_state_dependencies
    ${onnxruntime_EXTERNAL_DEPENDENCIES}
    ${pybind11_dep}
)
set_property(TARGET onnxruntime_pybind11_state APPEND_STRING PROPERTY LINK_FLAGS ${ONNXRUNTIME_SO_LINK_FLAG} ${onnxruntime_DELAYLOAD_FLAGS})
add_dependencies(onnxruntime_pybind11_state ${onnxruntime_pybind11_state_dependencies})
if (MSVC)
  set_target_properties(onnxruntime_pybind11_state PROPERTIES LINK_FLAGS "${ONNXRUNTIME_SO_LINK_FLAG}")
  # On MSVC, pybind11 looks for the release version of the python lib
  # (pybind11/detail/common.h undefs _DEBUG), so link PYTHON_LIBRARY_RELEASE.
  # PRIVATE added for consistency with the else() branch below.
  target_link_libraries(onnxruntime_pybind11_state PRIVATE ${onnxruntime_pybind11_state_libs}
          ${PYTHON_LIBRARY_RELEASE} ${onnxruntime_EXTERNAL_LIBRARIES})
elseif (APPLE)
  # -undefined dynamic_lookup defers resolution of Python symbols to the
  # hosting interpreter at load time instead of link time.
  set_target_properties(onnxruntime_pybind11_state PROPERTIES LINK_FLAGS "${ONNXRUNTIME_SO_LINK_FLAG} -undefined dynamic_lookup")
  target_link_libraries(onnxruntime_pybind11_state PRIVATE ${onnxruntime_pybind11_state_libs} ${onnxruntime_EXTERNAL_LIBRARIES})
  set_target_properties(onnxruntime_pybind11_state PROPERTIES
    INSTALL_RPATH "@loader_path"
    BUILD_WITH_INSTALL_RPATH TRUE
    INSTALL_RPATH_USE_LINK_PATH FALSE)
else()
  target_link_libraries(onnxruntime_pybind11_state PRIVATE ${onnxruntime_pybind11_state_libs} ${onnxruntime_EXTERNAL_LIBRARIES})
  # Let the module find sibling shared libraries next to itself.
  set_property(TARGET onnxruntime_pybind11_state APPEND_STRING PROPERTY LINK_FLAGS " -Xlinker -rpath=\$ORIGIN")
endif()

# No "lib" prefix: the module must be importable by its exact name.
set_target_properties(onnxruntime_pybind11_state PROPERTIES PREFIX "")
set_target_properties(onnxruntime_pybind11_state PROPERTIES FOLDER "ONNXRuntime")
if(onnxruntime_ENABLE_LTO)
  set_target_properties(onnxruntime_pybind11_state PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE TRUE)
  set_target_properties(onnxruntime_pybind11_state PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELWITHDEBINFO TRUE)
endif()

# Python extension suffix: .pyd on Windows, .so elsewhere.
if (MSVC)
  set_target_properties(onnxruntime_pybind11_state PROPERTIES SUFFIX ".pyd")
else()
  set_target_properties(onnxruntime_pybind11_state PROPERTIES SUFFIX ".so")
endif()
# Python files that make up the onnxruntime package.
file(GLOB onnxruntime_backend_srcs CONFIGURE_DEPENDS
    "${ONNXRUNTIME_ROOT}/python/backend/*.py"
)
if (onnxruntime_ENABLE_TRAINING)
  file(GLOB onnxruntime_python_srcs CONFIGURE_DEPENDS
    "${ONNXRUNTIME_ROOT}/python/*.py"
    "${ORTTRAINING_SOURCE_DIR}/python/*.py"
  )
else()
  file(GLOB onnxruntime_python_srcs CONFIGURE_DEPENDS
    "${ONNXRUNTIME_ROOT}/python/*.py"
  )
endif()
if (onnxruntime_ENABLE_TRAINING)
  # Training builds take the capi/training sources from orttraining and also
  # ship the experimental frontend (trainer, amp, optim) plus the PyTorch
  # custom-op exporter helper.
  file(GLOB onnxruntime_python_capi_training_srcs CONFIGURE_DEPENDS
    "${ORTTRAINING_SOURCE_DIR}/python/training/*.py"
  )
  file(GLOB onnxruntime_python_root_srcs CONFIGURE_DEPENDS
    "${ORTTRAINING_SOURCE_DIR}/python/experimental/*.py"
  )
  file(GLOB onnxruntime_python_amp_srcs CONFIGURE_DEPENDS
    "${ORTTRAINING_SOURCE_DIR}/python/experimental/amp/*.py"
  )
  file(GLOB onnxruntime_python_optim_srcs CONFIGURE_DEPENDS
    "${ORTTRAINING_SOURCE_DIR}/python/experimental/optim/*.py"
  )
  file(GLOB onnxruntime_python_train_tools_srcs CONFIGURE_DEPENDS
    "${REPO_ROOT}/tools/python/register_custom_ops_pytorch_exporter.py"
  )
else()
  file(GLOB onnxruntime_python_capi_training_srcs CONFIGURE_DEPENDS
    "${ONNXRUNTIME_ROOT}/python/training/*.py"
  )
endif()
# Test, tooling, quantization and dataset files staged alongside the package.
file(GLOB onnxruntime_python_test_srcs CONFIGURE_DEPENDS
    "${ONNXRUNTIME_ROOT}/test/python/*.py"
    "${ORTTRAINING_SOURCE_DIR}/test/python/*.py"
)
file(GLOB onnxruntime_python_tools_srcs CONFIGURE_DEPENDS
    "${ONNXRUNTIME_ROOT}/python/tools/*.py"
)
file(GLOB onnxruntime_python_tools_featurizers_src CONFIGURE_DEPENDS
    "${ONNXRUNTIME_ROOT}/python/tools/featurizer_ops/*.py"
)
file(GLOB onnxruntime_python_quantization_src CONFIGURE_DEPENDS
    "${ONNXRUNTIME_ROOT}/python/tools/quantization/*.py"
)
# test_calibrate.py is a test, not part of the shipped quantization package.
list(REMOVE_ITEM onnxruntime_python_quantization_src
    "${ONNXRUNTIME_ROOT}/python/tools/quantization/test_calibrate.py")
file(GLOB onnxruntime_python_datasets_srcs CONFIGURE_DEPENDS
    "${ONNXRUNTIME_ROOT}/python/datasets/*.py"
)
file(GLOB onnxruntime_python_datasets_data CONFIGURE_DEPENDS
    "${ONNXRUNTIME_ROOT}/python/datasets/*.pb"
    "${ONNXRUNTIME_ROOT}/python/datasets/*.onnx"
)
# Stage the Python package files into the output directory of this target.
set(test_data_target onnxruntime_test_all)
# After every build of the pybind11 extension module, assemble an importable
# onnxruntime/ Python package layout next to the test binary: create the
# package directories, then copy license/metadata files, the Python sources
# gathered by the globs above, and the freshly built extension module itself.
add_custom_command(
  TARGET onnxruntime_pybind11_state POST_BUILD
  COMMAND ${CMAKE_COMMAND} -E make_directory $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/backend
  COMMAND ${CMAKE_COMMAND} -E make_directory $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/capi
  COMMAND ${CMAKE_COMMAND} -E make_directory $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/capi/training
  COMMAND ${CMAKE_COMMAND} -E make_directory $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/datasets
  COMMAND ${CMAKE_COMMAND} -E make_directory $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/tools
  COMMAND ${CMAKE_COMMAND} -E make_directory $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/tools/featurizer_ops
  COMMAND ${CMAKE_COMMAND} -E make_directory $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/quantization
  # Package root: __init__.py plus legal/metadata files.
  COMMAND ${CMAKE_COMMAND} -E copy
      ${ONNXRUNTIME_ROOT}/__init__.py
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/
  COMMAND ${CMAKE_COMMAND} -E copy
      ${REPO_ROOT}/ThirdPartyNotices.txt
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/
  COMMAND ${CMAKE_COMMAND} -E copy
      ${REPO_ROOT}/docs/Privacy.md
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/
  COMMAND ${CMAKE_COMMAND} -E copy
      ${REPO_ROOT}/LICENSE
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/
  # Test scripts go next to the test binary, not inside the package.
  COMMAND ${CMAKE_COMMAND} -E copy
      ${onnxruntime_python_test_srcs}
      $<TARGET_FILE_DIR:${test_data_target}>
  COMMAND ${CMAKE_COMMAND} -E copy
      ${onnxruntime_backend_srcs}
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/backend/
  COMMAND ${CMAKE_COMMAND} -E copy
      ${onnxruntime_python_srcs}
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/capi/
  COMMAND ${CMAKE_COMMAND} -E copy
      ${onnxruntime_python_capi_training_srcs}
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/capi/training/
  # The built extension module itself lives in capi/.
  COMMAND ${CMAKE_COMMAND} -E copy
      $<TARGET_FILE:onnxruntime_pybind11_state>
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/capi/
  COMMAND ${CMAKE_COMMAND} -E copy
      ${onnxruntime_python_datasets_srcs}
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/datasets/
  COMMAND ${CMAKE_COMMAND} -E copy
      ${onnxruntime_python_datasets_data}
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/datasets/
  COMMAND ${CMAKE_COMMAND} -E copy
      ${onnxruntime_python_tools_srcs}
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/tools/
  COMMAND ${CMAKE_COMMAND} -E copy
      ${onnxruntime_python_tools_featurizers_src}
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/tools/featurizer_ops/
  COMMAND ${CMAKE_COMMAND} -E copy
      ${onnxruntime_python_quantization_src}
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/quantization/
  COMMAND ${CMAKE_COMMAND} -E copy
      ${REPO_ROOT}/VERSION_NUMBER
      $<TARGET_FILE_DIR:${test_data_target}>
  VERBATIM
)
# Training builds additionally stage the experimental training Python package:
# its root modules plus the amp and optim subpackages.
if (onnxruntime_ENABLE_TRAINING)
  add_custom_command(
    TARGET onnxruntime_pybind11_state POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E make_directory $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/experimental
    COMMAND ${CMAKE_COMMAND} -E make_directory $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/experimental/amp
    COMMAND ${CMAKE_COMMAND} -E make_directory $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/experimental/optim
    COMMAND ${CMAKE_COMMAND} -E copy
        ${onnxruntime_python_root_srcs}
        $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/experimental/
    COMMAND ${CMAKE_COMMAND} -E copy
        ${onnxruntime_python_amp_srcs}
        $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/experimental/amp/
    COMMAND ${CMAKE_COMMAND} -E copy
        ${onnxruntime_python_optim_srcs}
        $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/experimental/optim/
    COMMAND ${CMAKE_COMMAND} -E copy
        ${onnxruntime_python_train_tools_srcs}
        $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/experimental/
    VERBATIM
  )
endif()
# DNNL EP: copy the DNNL runtime library and the provider shared libraries
# into capi/ so the Python extension module can locate them at import time.
if (onnxruntime_USE_DNNL)
  add_custom_command(
    TARGET onnxruntime_pybind11_state POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E copy
        ${DNNL_DLL_PATH} $<TARGET_FILE:onnxruntime_providers_dnnl>
        $<TARGET_FILE:onnxruntime_providers_shared>
        $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/capi/
    VERBATIM
  )
endif()
# TensorRT EP: copy the provider shared libraries into capi/ so the Python
# extension module can locate them at import time.
if (onnxruntime_USE_TENSORRT)
  add_custom_command(
    TARGET onnxruntime_pybind11_state POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E copy
      # NOTE(review): ${DNNL_DLL_PATH} looks copy-pasted from the DNNL branch
      # above; presumably it is empty/unset in TensorRT builds so it expands
      # to nothing — confirm, and drop it if so.
      ${DNNL_DLL_PATH} $<TARGET_FILE:onnxruntime_providers_tensorrt>
      $<TARGET_FILE:onnxruntime_providers_shared>
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/capi/
  )
endif()
# nGraph EP: copy the nGraph runtime and its dependency libraries (codegen,
# CPU backend, OpenMP, MKL-DNN/MKLML, TBB) from ${ngraph_LIBRARIES} into
# capi/ so the Python extension module can load them.
if (onnxruntime_USE_NGRAPH)
  add_custom_command(
    TARGET onnxruntime_pybind11_state POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E copy
      ${ngraph_LIBRARIES}/${NGRAPH_SHARED_LIB}
      ${ngraph_LIBRARIES}/${NGRAPH_CODEGEN_SHARED_LIB}
      ${ngraph_LIBRARIES}/${NGRAPH_CPU_BACKEND_SHARED_LIB}
      ${ngraph_LIBRARIES}/${NGRAPH_IOMP5MD_SHARED_LIB}
      ${ngraph_LIBRARIES}/${NGRAPH_MKLDNN_SHARED_LIB}
      ${ngraph_LIBRARIES}/${NGRAPH_MKLML_SHARED_LIB}
      ${ngraph_LIBRARIES}/${NGRAPH_TBB_SHARED_LIB}
      ${ngraph_LIBRARIES}/${NGRAPH_TBB_SHARED_LIB_2}
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/capi/
  )
endif()
if (onnxruntime_USE_OPENVINO)
* Applying FP16 transformation only for GPU FP16 * Fixed GPU FP32 python tests * automatically use full protobuf * disable onnxrt server for now * Disabled upsample * update dockerfile instructions * Removed MO paths and added ngraph path * Remove OVEP from ORT Server docs Will put it back in after validation * Updated path to Ngraph lib * Disabled Resize and some other python tests * Removed unnecesary header files * Use commit SHA to fetch ngraph repo * Avoid un-needed file changes due to version update * Fixed clip tests * Fixed Pow, max and min onnx tests * build.md doc typo * Update cmake patch command for ngraph src * remove dead cmake code for onnxruntime_USE_OPENVINO_BINARY * use spaces instead of tab * remove commented code * Add info about protobuf version * edit debug env var and enable for WIN32 * specify only version tag of 2020.2 for dockerbuilds * remove unnecessary file changes * Pass empty string as default argument to C# tests * Use ${OPENVINO_VERSION} to name openvino install directory in CI builds * Enabled unnecessarily disabled tests * Fixed ngraph protobuf patch * Fixed error in protobuf patch * Revert "Use ${OPENVINO_VERSION} to name openvino install directory in CI builds" This reverts commit 89e72adb8bf3b9712f5c81c5e13fe68c6c0df002. * Remove unsetting OPTIONAL macro This is no longer used in recent ONNX update onnx/onnx@da13be2, so this unset workaround is no longer necessary. * Use a null string default argument for C# API * Set OpenVINO version yml files and pass to CI Docker builds Git Tag info for DLDT as well as install directory are set using this value. This reverts commit 9fa9c20348ed72ae360a95c98e9b074d2f9fafc5. 
* Documentation: recommendation and instructions for disabling ORT graph optimizations * more doc updates * Reduced the number of models according to CI time constraints Co-authored-by: ynimmaga <yamini.nimmagadda@intel.com> Co-authored-by: suryasidd <surya.siddharth.pemmaraju@intel.com> Co-authored-by: Mikhail Treskin <mikhail.treskin@intel.com> Co-authored-by: mbencer <mateusz.bencer@intel.com> Co-authored-by: Aravind <aravindx.gunda@intel.com> Co-authored-by: suryasidd <48925384+suryasidd@users.noreply.github.com>
2020-04-24 11:06:02 +00:00
if(NOT WIN32)
  # Stage the nGraph shared library next to the Python extension module
  # (onnxruntime/capi) so it can be resolved at import time on Linux/macOS.
  # NOTE(review): assumes ${ngraph_LIBRARIES} is the library *directory* and
  # ${NGRAPH_SHARED_LIB} the file name — confirm against the ngraph cmake setup.
  add_custom_command(
    TARGET onnxruntime_pybind11_state POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E copy
      ${ngraph_LIBRARIES}/${NGRAPH_SHARED_LIB}
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/capi/
    VERBATIM  # escape arguments portably instead of platform-dependent quoting
  )
endif()
endif()
2018-11-20 00:48:22 +00:00
if (onnxruntime_USE_TVM)
  # Copy the built TVM and NNVM compiler shared libraries into the Python
  # package staging directory (onnxruntime/capi) so the pybind11 extension
  # can load them at runtime. $<TARGET_FILE:...> resolves the per-config
  # artifact path at generate time.
  add_custom_command(
    TARGET onnxruntime_pybind11_state POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E copy
      $<TARGET_FILE:tvm> $<TARGET_FILE:nnvm_compiler>
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/capi/
    VERBATIM  # escape arguments portably instead of platform-dependent quoting
  )
endif()
if (onnxruntime_USE_MKLML)
  # Stage the MKL-ML and Intel OpenMP (iomp5md) runtime libraries alongside
  # the Python extension module so they can be found at import time.
  # (A stray blame-view timestamp line that had been pasted into the middle
  # of this command — a cmake syntax error — has been removed.)
  add_custom_command(
    TARGET onnxruntime_pybind11_state POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E copy
      ${MKLML_LIB_DIR}/${MKLML_SHARED_LIB} ${MKLML_LIB_DIR}/${IOMP5MD_SHARED_LIB}
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/capi/
    VERBATIM  # escape arguments portably instead of platform-dependent quoting
  )
endif()
if (onnxruntime_USE_NUPHAR)
  # Collect the Nuphar helper scripts. CONFIGURE_DEPENDS makes the generator
  # re-check the glob on each build so newly added scripts are picked up
  # without a manual reconfigure (at the cost of a stat per build).
  file(GLOB onnxruntime_python_nuphar_python_srcs CONFIGURE_DEPENDS
    "${ONNXRUNTIME_ROOT}/core/providers/nuphar/scripts/*"
  )
  # Stage the scripts under onnxruntime/nuphar in the Python package layout.
  add_custom_command(
    TARGET onnxruntime_pybind11_state POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E make_directory $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/nuphar
    COMMAND ${CMAKE_COMMAND} -E copy
      ${onnxruntime_python_nuphar_python_srcs}
      $<TARGET_FILE_DIR:${test_data_target}>/onnxruntime/nuphar/
    VERBATIM  # escape arguments portably instead of platform-dependent quoting
  )
endif()
if (onnxruntime_ENABLE_LANGUAGE_INTEROP_OPS)
  # Pull in the build rules for the language interop ops support.
  # (A stray blame-view timestamp line between if() and include() — a cmake
  # syntax error — has been removed.)
  include(onnxruntime_language_interop_ops.cmake)
endif()