# onnxruntime/setup.py
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
from setuptools import setup, find_packages, Extension
from distutils import log as logger
from distutils.command.build_ext import build_ext as _build_ext
from glob import glob
from os import path, getcwd, environ, remove, walk, makedirs, listdir
from shutil import copyfile, copytree, rmtree
import platform
import subprocess
import sys
import datetime
nightly_build = False
featurizers_build = False
package_name = 'onnxruntime'
wheel_name_suffix = None

def parse_arg_remove_boolean(argv, arg_name):
    arg_value = False
    if arg_name in argv:
        arg_value = True
        argv.remove(arg_name)
    return arg_value


def parse_arg_remove_string(argv, arg_name_equal):
    arg_value = None
    for arg in argv[1:]:
        if arg.startswith(arg_name_equal):
            arg_value = arg[len(arg_name_equal):]
            argv.remove(arg)
            break
    return arg_value

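# Both helpers mutate the passed-in argument list (sys.argv) in place: they strip the custom
# build flags out before setuptools parses the command line, so setuptools only ever sees the
# standard options.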
# Any combination of the following arguments can be applied
featurizers_build = parse_arg_remove_boolean(sys.argv, '--use_featurizers')
if parse_arg_remove_boolean(sys.argv, '--nightly_build'):
    package_name = 'ort-nightly'
    nightly_build = True
wheel_name_suffix = parse_arg_remove_string(sys.argv, '--wheel_name_suffix=')
cuda_version = None
rocm_version = None
# The following arguments are mutually exclusive
if parse_arg_remove_boolean(sys.argv, '--use_tensorrt'):
    package_name = 'onnxruntime-gpu-tensorrt' if not nightly_build else 'ort-trt-nightly'
elif parse_arg_remove_boolean(sys.argv, '--use_cuda'):
    package_name = 'onnxruntime-gpu' if not nightly_build else 'ort-gpu-nightly'
    cuda_version = parse_arg_remove_string(sys.argv, '--cuda_version=')
elif parse_arg_remove_boolean(sys.argv, '--use_rocm'):
    package_name = 'onnxruntime-rocm' if not nightly_build else 'ort-rocm-nightly'
    rocm_version = parse_arg_remove_string(sys.argv, '--rocm_version=')
elif parse_arg_remove_boolean(sys.argv, '--use_openvino'):
    package_name = 'onnxruntime-openvino'
elif parse_arg_remove_boolean(sys.argv, '--use_dnnl'):
    package_name = 'onnxruntime-dnnl'
elif parse_arg_remove_boolean(sys.argv, '--use_nuphar'):
    package_name = 'onnxruntime-nuphar'
elif parse_arg_remove_boolean(sys.argv, '--use_vitisai'):
    package_name = 'onnxruntime-vitisai'
elif parse_arg_remove_boolean(sys.argv, '--use_acl'):
    package_name = 'onnxruntime-acl'
elif parse_arg_remove_boolean(sys.argv, '--use_armnn'):
    package_name = 'onnxruntime-armnn'
elif parse_arg_remove_boolean(sys.argv, '--use_dml'):
    package_name = 'onnxruntime-dml'
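# A hypothetical invocation combining the flags above (the version value and suffix are
# illustrative only, not prescribed by this script):
#   python setup.py bdist_wheel --nightly_build --use_cuda --cuda_version=11.1 --wheel_name_suffix=local
# would select the 'ort-gpu-nightly' package name and later append '_local' to it.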
# PEP 513 defined manylinux1_x86_64 and manylinux1_i686
# PEP 571 defined manylinux2010_x86_64 and manylinux2010_i686
# PEP 599 defines the following platform tags:
# manylinux2014_x86_64
# manylinux2014_i686
# manylinux2014_aarch64
# manylinux2014_armv7l
# manylinux2014_ppc64
# manylinux2014_ppc64le
# manylinux2014_s390x
manylinux_tags = [
    'manylinux1_x86_64',
    'manylinux1_i686',
    'manylinux2010_x86_64',
    'manylinux2010_i686',
    'manylinux2014_x86_64',
    'manylinux2014_i686',
    'manylinux2014_aarch64',
    'manylinux2014_armv7l',
    'manylinux2014_ppc64',
    'manylinux2014_ppc64le',
    'manylinux2014_s390x',
]
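# AUDITWHEEL_PLAT is normally exported by the pypa manylinux build images
# (e.g. AUDITWHEEL_PLAT=manylinux2014_x86_64), so the check below is only expected
# to be true when this script runs inside such a container.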
is_manylinux = environ.get('AUDITWHEEL_PLAT', None) in manylinux_tags
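# The native extension is prebuilt by the C++ build, so this build_ext override simply copies
# the existing binary to the location setuptools expects instead of compiling anything.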
class build_ext(_build_ext):
    def build_extension(self, ext):
        dest_file = self.get_ext_fullpath(ext.name)
        logger.info('copying %s -> %s', ext.sources[0], dest_file)
        copyfile(ext.sources[0], dest_file)
try:
    from wheel.bdist_wheel import bdist_wheel as _bdist_wheel

    class bdist_wheel(_bdist_wheel):
        def finalize_options(self):
            _bdist_wheel.finalize_options(self)
            if not is_manylinux:
                self.root_is_pure = False

        def _rewrite_ld_preload(self, to_preload):
            with open('onnxruntime/capi/_ld_preload.py', 'rt') as f:
                ld_preload = f.read().splitlines()
            with open('onnxruntime/capi/_ld_preload.py', 'wt') as f:
                for line in ld_preload:
                    f.write(line)
                    f.write('\n')
                    if 'LD_PRELOAD_BEGIN_MARK' in line:
                        break
                if len(to_preload) > 0:
                    f.write('from ctypes import CDLL, RTLD_GLOBAL\n')
                    for library in to_preload:
                        f.write('_{} = CDLL("{}", mode=RTLD_GLOBAL)\n'.format(library.split('.')[0], library))

        def run(self):
            if is_manylinux:
                source = 'onnxruntime/capi/onnxruntime_pybind11_state.so'
                dest = 'onnxruntime/capi/onnxruntime_pybind11_state_manylinux1.so'
                logger.info('copying %s -> %s', source, dest)
                copyfile(source, dest)
                result = subprocess.run(['patchelf', '--print-needed', dest], check=True, stdout=subprocess.PIPE, universal_newlines=True)
                cuda_dependencies = ['libcublas.so', 'libcudnn.so', 'libcudart.so', 'libcurand.so', 'libcufft.so', 'libnvToolsExt.so']
                cuda_dependencies.extend(['librccl.so', 'libamdhip64.so', 'librocblas.so', 'libMIOpen.so', 'libhsa-runtime64.so', 'libhsakmt.so'])
                to_preload = []
                args = ['patchelf', '--debug']
                for line in result.stdout.split('\n'):
                    for dependency in cuda_dependencies:
                        if dependency in line:
                            to_preload.append(line)
                            args.extend(['--remove-needed', line])
                args.append(dest)
                if len(to_preload) > 0:
                    subprocess.run(args, check=True, stdout=subprocess.PIPE)
                self._rewrite_ld_preload(to_preload)
            _bdist_wheel.run(self)
            if is_manylinux:
                file = glob(path.join(self.dist_dir, '*linux*.whl'))[0]
                logger.info('repairing %s for manylinux1', file)
                try:
                    subprocess.run(['auditwheel', 'repair', '-w', self.dist_dir, file], check=True, stdout=subprocess.PIPE)
                finally:
                    logger.info('removing %s', file)
                    remove(file)

except ImportError as error:
    print("Error importing dependencies:")
    print(error)
    bdist_wheel = None
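# For reference, the lines _rewrite_ld_preload() appends to onnxruntime/capi/_ld_preload.py look
# roughly like the following (the library name is illustrative, taken from the dependency list above):
#   from ctypes import CDLL, RTLD_GLOBAL
#   _libcublas = CDLL("libcublas.so.11", mode=RTLD_GLOBAL)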
# Additional binaries
if platform.system() == 'Linux':
    libs = ['onnxruntime_pybind11_state.so', 'libdnnl.so.2', 'libmklml_intel.so', 'libmklml_gnu.so', 'libiomp5.so', 'mimalloc.so']
    # DNNL, TensorRT & OpenVINO EPs are built as shared libs
    libs.extend(['libonnxruntime_providers_shared.so'])
    libs.extend(['libonnxruntime_providers_dnnl.so'])
    libs.extend(['libonnxruntime_providers_tensorrt.so'])
    libs.extend(['libonnxruntime_providers_openvino.so'])
    # Nuphar Libs
    libs.extend(['libtvm.so.0.5.1'])
    if nightly_build:
        libs.extend(['libonnxruntime_pywrapper.so'])
elif platform.system() == "Darwin":
    libs = ['onnxruntime_pybind11_state.so', 'libdnnl.2.dylib', 'mimalloc.so']  # TODO: add libmklml and libiomp5 later.
    # DNNL & TensorRT EPs are built as shared libs
    libs.extend(['libonnxruntime_providers_shared.dylib'])
    libs.extend(['libonnxruntime_providers_dnnl.dylib'])
    libs.extend(['libonnxruntime_providers_tensorrt.dylib'])
    if nightly_build:
        libs.extend(['libonnxruntime_pywrapper.dylib'])
else:
    libs = ['onnxruntime_pybind11_state.pyd', 'dnnl.dll', 'mklml.dll', 'libiomp5md.dll']
    # DNNL, TensorRT & OpenVINO EPs are built as shared libs
    libs.extend(['onnxruntime_providers_shared.dll'])
    libs.extend(['onnxruntime_providers_dnnl.dll'])
    libs.extend(['onnxruntime_providers_tensorrt.dll'])
    libs.extend(['onnxruntime_providers_openvino.dll'])
    # DirectML Libs
    libs.extend(['DirectML.dll'])
    # Nuphar Libs
    libs.extend(['tvm.dll'])
    if nightly_build:
        libs.extend(['onnxruntime_pywrapper.dll'])
if is_manylinux:
    data = ['capi/libonnxruntime_pywrapper.so'] if nightly_build else []
    ext_modules = [
        Extension(
            'onnxruntime.capi.onnxruntime_pybind11_state',
            ['onnxruntime/capi/onnxruntime_pybind11_state_manylinux1.so'],
        ),
    ]
else:
    data = [path.join('capi', x) for x in libs if path.isfile(path.join('onnxruntime', 'capi', x))]
    ext_modules = []
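# On manylinux builds the prebuilt pybind11 module is registered as an Extension, so the wheel is
# treated as platform specific and is repaired by the auditwheel step in the bdist_wheel override
# above; on other platforms the native binaries ship as plain package data instead.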
# Additional examples
examples_names = ["mul_1.onnx", "logreg_iris.onnx", "sigmoid.onnx"]
examples = [path.join('datasets', x) for x in examples_names]
# Extra files such as EULA and ThirdPartyNotices
extra = ["LICENSE", "ThirdPartyNotices.txt", "Privacy.md"]
# Description
README = path.join(getcwd(), "docs/python/README.rst")
if not path.exists(README):
    this = path.dirname(__file__)
    README = path.join(this, "docs/python/README.rst")
    if not path.exists(README):
        raise FileNotFoundError("Unable to find 'README.rst'")

with open(README) as f:
    long_description = f.read()
packages = [
    'onnxruntime',
    'onnxruntime.backend',
    'onnxruntime.capi',
    'onnxruntime.capi.training',
    'onnxruntime.datasets',
    'onnxruntime.tools',
    'onnxruntime.tools.ort_format_model',
    'onnxruntime.tools.ort_format_model.ort_flatbuffers_py',
    'onnxruntime.tools.ort_format_model.ort_flatbuffers_py.experimental',
    'onnxruntime.tools.ort_format_model.ort_flatbuffers_py.experimental.fbs',
    'onnxruntime.quantization',
    'onnxruntime.quantization.operators',
    'onnxruntime.quantization.CalTableFlatBuffers',
    'onnxruntime.transformers',
    'onnxruntime.transformers.longformer',
]
requirements_file = "requirements.txt"
local_version = None
enable_training = parse_arg_remove_boolean(sys.argv, '--enable_training')
if enable_training:
    packages.extend(['onnxruntime.training',
                     'onnxruntime.training.amp',
                     'onnxruntime.training.optim',
                     'onnxruntime.training.ortmodule'])
    requirements_file = "requirements-training.txt"
    # with training, we want to follow this naming convention:
    # stable:
    #   onnxruntime-training-1.7.0+cu111-cp36-cp36m-linux_x86_64.whl
    # nightly:
    #   onnxruntime-training-1.7.0.dev20210408+cu111-cp36-cp36m-linux_x86_64.whl
    # this is needed immediately by pytorch/ort so that the user is able to
    # install an onnxruntime training package with a matching torch CUDA version.
    package_name = 'onnxruntime-training'

    if cuda_version:
        # remove the '.' so the CUDA version number is in the same form as PyTorch uses (e.g. cu111).
        cuda_version = cuda_version.replace('.', '')
        local_version = '+cu' + cuda_version
    if rocm_version:
        # remove the '.' so the ROCm version number follows the same convention.
        rocm_version = rocm_version.replace('.', '')
        local_version = '+rocm' + rocm_version
package_data = {}
data_files = []
if package_name == 'onnxruntime-nuphar':
    packages += ["onnxruntime.nuphar"]
    extra += [path.join('nuphar', 'NUPHAR_CACHE_VERSION')]

if featurizers_build:
    # Copy the featurizer data from its current directory into the onnx runtime directory so that the
    # content can be included as module data.

    # Apparently, the root_dir is different based on how the script is invoked
    source_root_dir = None
    dest_root_dir = None
    for potential_source_prefix, potential_dest_prefix in [
        (getcwd(), getcwd()),
        (path.dirname(__file__), path.dirname(__file__)),
        (path.join(getcwd(), ".."), getcwd()),
    ]:
        potential_dir = path.join(potential_source_prefix, "external", "FeaturizersLibrary", "Data")
        if path.isdir(potential_dir):
            source_root_dir = potential_source_prefix
            dest_root_dir = potential_dest_prefix
            break

    if source_root_dir is None:
        raise Exception("Unable to find the build root dir")

    assert dest_root_dir is not None

    featurizer_source_dir = path.join(source_root_dir, "external", "FeaturizersLibrary", "Data")
    assert path.isdir(featurizer_source_dir), featurizer_source_dir

    featurizer_dest_dir = path.join(dest_root_dir, "onnxruntime", "FeaturizersLibrary", "Data")
    if path.isdir(featurizer_dest_dir):
        rmtree(featurizer_dest_dir)
    for item in listdir(featurizer_source_dir):
        this_featurizer_source_fullpath = path.join(featurizer_source_dir, item)
        assert path.isdir(this_featurizer_source_fullpath), this_featurizer_source_fullpath

        copytree(this_featurizer_source_fullpath, path.join(featurizer_dest_dir, item))

        packages.append("onnxruntime.FeaturizersLibrary.Data.{}".format(item))
        package_data[packages[-1]] = listdir(path.join(featurizer_dest_dir, item))
package_data["onnxruntime"] = data + examples + extra
version_number = ''
with open('VERSION_NUMBER') as f:
    version_number = f.readline().strip()

if nightly_build:
    # https://docs.microsoft.com/en-us/azure/devops/pipelines/build/variables
    build_suffix = environ.get('BUILD_BUILDNUMBER')
    if build_suffix is None:
        # The following line is only for local testing
        build_suffix = str(datetime.datetime.now().date().strftime("%Y%m%d"))
    else:
        build_suffix = build_suffix.replace('.', '')
    if enable_training:
        from packaging import version
        from packaging.version import Version

        # with training package, we need to bump up version minor number so that
        # nightly releases take precedence over the latest release when --pre is used during pip install.
        # eventually this shall be the behavior of all onnxruntime releases.
        # alternatively we may bump up version number right after every release.
        ort_version = version.parse(version_number)
        if isinstance(ort_version, Version):
            version_number = '{major}.{minor}.{macro}'.format(
                major=ort_version.major,
                minor=ort_version.minor + 1,
                macro=ort_version.micro)

    version_number = version_number + ".dev" + build_suffix

if local_version:
    version_number = version_number + local_version

if wheel_name_suffix:
    package_name = "{}_{}".format(package_name, wheel_name_suffix)
cmd_classes = {}
if bdist_wheel is not None:
    cmd_classes['bdist_wheel'] = bdist_wheel
cmd_classes['build_ext'] = build_ext
requirements_path = path.join(getcwd(), requirements_file)
if not path.exists(requirements_path):
    this = path.dirname(__file__)
    requirements_path = path.join(this, requirements_file)
    if not path.exists(requirements_path):
        raise FileNotFoundError("Unable to find " + requirements_file)

with open(requirements_path) as f:
    install_requires = f.read().splitlines()
# Setup
setup(
    name=package_name,
    version=version_number,
    description='ONNX Runtime is a runtime accelerator for Machine Learning models',
    long_description=long_description,
    author='Microsoft Corporation',
    author_email='onnxruntime@microsoft.com',
    cmdclass=cmd_classes,
    license="MIT License",
    packages=packages,
    ext_modules=ext_modules,
    package_data=package_data,
    url="https://onnxruntime.ai",
    download_url='https://github.com/microsoft/onnxruntime/tags',
    data_files=data_files,
    install_requires=install_requires,
    keywords='onnx machine learning',
    entry_points={
        'console_scripts': [
            'onnxruntime_test = onnxruntime.tools.onnxruntime_test:main',
        ]
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX :: Linux',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
)