ONNX 1.15 integration (#17125)

### Description
This is for ORT 1.17.0: it makes ORT use the ONNX 1.15.0 release branch. The pin will be updated to the release tag once ONNX 1.15.0 is released.


### Motivation and Context
Prepares for the ORT 1.17.0 release. With this in place, work can start on new and updated ONNX ops in ORT.
---------

Signed-off-by: Liqun Fu <liqfu@microsoft.com>
liqun Fu 2023-09-26 14:44:48 -07:00 committed by GitHub
parent 37dcefb5b7
commit 2be4dc6d04
29 changed files with 278 additions and 245 deletions

View file

@@ -90,55 +90,6 @@ def add_github_dep(name, parsed_url):
git_deps[dep] = name
with open(
os.path.join(REPO_DIR, "tools", "ci_build", "github", "linux", "docker", "Dockerfile.manylinux2_28_cuda11"),
) as f:
for line in f:
if not line.strip():
package_name = None
package_filename = None
package_url = None
if package_filename is None:
m = re.match(r"RUN\s+export\s+(.+?)_ROOT=(\S+).*", line)
if m is not None:
package_name = m.group(1)
package_filename = m.group(2)
else:
m = re.match(r"RUN\s+export\s+(.+?)_VERSION=(\S+).*", line)
if m is not None:
package_name = m.group(1)
package_filename = m.group(2)
elif package_url is None:
m = re.match(r"(.+?)_DOWNLOAD_URL=(\S+)", line)
if m is not None:
package_url = m.group(2)
if package_name == "LIBXCRYPT":
package_url = m.group(2) + "/v" + package_filename + ".tar.gz"
elif package_name == "CMAKE":
package_url = m.group(2) + "/v" + package_filename + "/cmake-" + package_filename + ".tar.gz"
else:
package_url = m.group(2) + "/" + package_filename + ".tar.gz"
parsed_url = urlparse(package_url)
if parsed_url.hostname == "github.com":
add_github_dep("manylinux dependency " + package_name, parsed_url)
else:
registration = {
"Component": {
"Type": "other",
"other": {
"Name": package_name.lower(),
"Version": package_filename.split("-")[-1],
"DownloadUrl": package_url,
},
"comments": "manylinux dependency",
}
}
registrations.append(registration)
package_name = None
package_filename = None
package_url = None
def normalize_path_separators(path):
return path.replace(os.path.sep, "/")

View file

@@ -2,112 +2,6 @@
"$schema": "https://json.schemastore.org/component-detection-manifest.json",
"Version": 1,
"Registrations": [
{
"Component": {
"Type": "other",
"other": {
"Name": "autoconf",
"Version": "2.71",
"DownloadUrl": "http://ftp.gnu.org/gnu/autoconf/autoconf-2.71.tar.gz"
},
"comments": "manylinux dependency"
}
},
{
"Component": {
"Type": "other",
"other": {
"Name": "automake",
"Version": "1.16.5",
"DownloadUrl": "http://ftp.gnu.org/gnu/automake/automake-1.16.5.tar.gz"
},
"comments": "manylinux dependency"
}
},
{
"Component": {
"Type": "other",
"other": {
"Name": "libtool",
"Version": "2.4.7",
"DownloadUrl": "http://ftp.gnu.org/gnu/libtool/libtool-2.4.7.tar.gz"
},
"comments": "manylinux dependency"
}
},
{
"Component": {
"Type": "other",
"other": {
"Name": "git",
"Version": "2.36.2",
"DownloadUrl": "https://www.kernel.org/pub/software/scm/git/git-2.36.2.tar.gz"
},
"comments": "manylinux dependency"
}
},
{
"Component": {
"Type": "other",
"other": {
"Name": "sqlite_autoconf",
"Version": "3390200",
"DownloadUrl": "https://www.sqlite.org/2022/sqlite-autoconf-3390200.tar.gz"
},
"comments": "manylinux dependency"
}
},
{
"Component": {
"Type": "other",
"other": {
"Name": "openssl",
"Version": "1.1.1q",
"DownloadUrl": "https://www.openssl.org/source/openssl-1.1.1q.tar.gz"
},
"comments": "manylinux dependency"
}
},
{
"component": {
"type": "git",
"git": {
"commitHash": "50cf2b6dd4fdf04309445f2eec8de7051d953abf",
"repositoryUrl": "https://github.com/besser82/libxcrypt.git"
},
"comments": "manylinux dependency LIBXCRYPT"
}
},
{
"component": {
"type": "git",
"git": {
"commitHash": "a896e3d066448b3530dbcaa48869fafefd738f57",
"repositoryUrl": "https://github.com/emscripten-core/emsdk.git"
},
"comments": "git submodule at cmake/external/emsdk"
}
},
{
"component": {
"type": "git",
"git": {
"commitHash": "7a2ed51a6b682a83e345ff49fc4cfd7ca47550db",
"repositoryUrl": "https://github.com/google/libprotobuf-mutator.git"
},
"comments": "git submodule at cmake/external/libprotobuf-mutator"
}
},
{
"component": {
"type": "git",
"git": {
"commitHash": "e2525550194ce3d8a2c4a3af451c9d9b3ae6650e",
"repositoryUrl": "https://github.com/onnx/onnx.git"
},
"comments": "git submodule at cmake/external/onnx"
}
},
{
"component": {
"type": "git",
@@ -268,6 +162,16 @@
"comments": "mp11"
}
},
{
"component": {
"type": "git",
"git": {
"commitHash": "fdefbe85ed9c362b95b9b401cd19db068a76141f",
"repositoryUrl": "https://github.com/onnx/onnx.git"
},
"comments": "onnx"
}
},
{
"component": {
"type": "git",

View file

@@ -24,7 +24,7 @@ microsoft_gsl;https://github.com/microsoft/GSL/archive/refs/tags/v4.0.0.zip;cf36
microsoft_wil;https://github.com/microsoft/wil/archive/refs/tags/v1.0.230629.1.zip;e4a542a323c070376f7c2d1973d0f7ddbc1d2fa5
mimalloc;https://github.com/microsoft/mimalloc/archive/refs/tags/v2.1.1.zip;d5ee7d34223d0567892db5179849939c8769dc41
mp11;https://github.com/boostorg/mp11/archive/refs/tags/boost-1.82.0.zip;9bc9e01dffb64d9e0773b2e44d2f22c51aace063
onnx;https://github.com/onnx/onnx/archive/e2525550194ce3d8a2c4a3af451c9d9b3ae6650e.zip;782f23d788185887f520a90535513e244218e928
onnx;https://github.com/onnx/onnx/archive/14303de049144035dfd94ace5f7a3b44773b1aad.zip;250eab9690392b248d75b56e605fb49eca373442
#use the commit of supporting all the plugins and TRT 8.6-GA (https://github.com/onnx/onnx-tensorrt/commit/0462dc31ae78f48744b6141ae376df1f96d3f459)
onnx_tensorrt;https://github.com/onnx/onnx-tensorrt/archive/0462dc31ae78f48744b6141ae376df1f96d3f459.zip;5ff086361956cceb81ed17453a1fd8db2aa4328d
protobuf;https://github.com/protocolbuffers/protobuf/archive/refs/tags/v21.12.zip;7cf2733949036c7d52fda017badcab093fe73bfa
@@ -44,4 +44,4 @@ tensorboard;https://github.com/tensorflow/tensorboard/archive/373eb09e4c5d2b3cc2
cutlass;https://github.com/NVIDIA/cutlass/archive/refs/tags/v3.0.0.zip;0f95b3c1fc1bd1175c4a90b2c9e39074d1bccefd
utf8_range;https://github.com/protocolbuffers/utf8_range/archive/72c943dea2b9240cd09efde15191e144bc7c7d38.zip;9925739c9debc0efa2adcb194d371a35b6a03156
extensions;https://github.com/microsoft/onnxruntime-extensions/archive/94142d8391c9791ec71c38336436319a2d4ac7a0.zip;4365ac5140338b4cb75a39944a4be276e3829b3c
composable_kernel;https://github.com/ROCmSoftwarePlatform/composable_kernel/archive/d52ec01652b7d620386251db92455968d8d90bdc.zip;6b5ce8edf3625f8817086c194fbf94b664e1b0e0

View file

@@ -0,0 +1,56 @@
# in case deps.txt is updated, run this file to update and upload the dependencies so that CI can use them.
# Before running the script, increase the version number found at:
# https://aiinfra.visualstudio.com/Lotus/_artifacts/feed/Lotus/UPack/onnxruntime_build_dependencies/versions
# Run without --do-upload once to verify downloading. Use --do-upload when you are ready to publish.
# python cmake/deps_update_and_upload.py --root-path C:/temp/onnxruntime_deps --version 1.0.82 --do-upload
# update version number in tools\ci_build\github\azure-pipelines\templates\download-deps.yml
import re
import subprocess
import os
import argparse
import tempfile
parser = argparse.ArgumentParser(description="Update dependencies and publish to Azure Artifacts")
parser.add_argument(
"--root-path", type=str, default=tempfile.gettempdir(), help="Target root path for downloaded files"
)
parser.add_argument("--version", type=str, default="1.0.82", help="Package version to publish")
parser.add_argument("--do-upload", action="store_true", help="Upload the package to Azure Artifacts")
args = parser.parse_args()
with open("cmake/deps.txt") as file:
text = file.read()
lines = [line for line in text.split("\n") if not line.startswith("#") and ";" in line]
root_path = args.root_path
for line in lines:
url = re.sub("^[^;]+?;https://([^;]+?);.*", r"https://\1", line)
filename = re.sub("^[^;]+?;https://([^;]+?);.*", r"\1", line)
full_path = os.path.join(root_path, filename)
subprocess.run(["curl", "-sSL", "--create-dirs", "-o", full_path, url])
package_name = "onnxruntime_build_dependencies"
version = args.version
# Check if the user is logged in to Azure
result = subprocess.run("az account show", shell=True, capture_output=True, text=True)
if "No subscriptions found" in result.stderr:
# Prompt the user to log in to Azure
print("You are not logged in to Azure. Please log in to continue.")
subprocess.run("az login", shell=True)
# Publish the package to Azure Artifacts if --do-upload is specified
cmd = f'az artifacts universal publish --organization https://dev.azure.com/onnxruntime --feed onnxruntime --name {package_name} --version {version} --description "onnxruntime build time dependencies" --path {root_path}'
if args.do_upload:
subprocess.run(cmd, shell=True)
else:
print("would have run: " + cmd)
cmd = f'az artifacts universal publish --organization https://dev.azure.com/aiinfra --feed Lotus --name {package_name} --version {version} --description "onnxruntime build time dependencies" --path {root_path}'
if args.do_upload:
subprocess.run(cmd, shell=True)
else:
print("would have run: " + cmd)

View file

@@ -64,16 +64,3 @@ index 0aab3e26..0f859267 100644
+#endif
+
#endif // ! ONNX_ONNX_PB_H
diff --git a/onnx/checker.cc b/onnx/checker.cc
index 8fdaf037..1beb1b88 100644
--- a/onnx/checker.cc
+++ b/onnx/checker.cc
@@ -190,7 +190,7 @@ void check_tensor(const TensorProto& tensor, const CheckerContext& ctx) {
}
std::string data_path = path_join(ctx.get_model_dir(), relative_path);
// use stat64 to check whether the file exists
-#ifdef __APPLE__
+#if defined(__APPLE__) || defined(__wasm__)
struct stat buffer; // APPLE does not have stat64
if (stat((data_path).c_str(), &buffer) != 0) {
#else

View file

@@ -67,7 +67,8 @@ Do not modify directly.*
|||[11, 12]|**T** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(string), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|||[4, 10]|**T** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(string), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|ConcatFromSequence|*in* input_sequence:**S**<br> *out* concat_result:**T**|11+|**S** = seq(tensor(bfloat16)), seq(tensor(bool)), seq(tensor(double)), seq(tensor(float)), seq(tensor(float16)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(int8)), seq(tensor(string)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(uint8))|
|ConstantOfShape|*in* input:**T1**<br> *out* output:**T2**|9+|**T1** = tensor(int64)<br/> **T2** = tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|ConstantOfShape|*in* input:**T1**<br> *out* output:**T2**|20+|**T1** = tensor(int64)<br/> **T2** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|||[9, 19]|**T1** = tensor(int64)<br/> **T2** = tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|Conv|*in* X:**T**<br> *in* W:**T**<br> *in* B:**T**<br> *out* Y:**T**|11+|**T** = tensor(float)|
|||[1, 10]|**T** = tensor(float)|
|ConvInteger|*in* x:**T1**<br> *in* w:**T2**<br> *in* x_zero_point:**T1**<br> *in* w_zero_point:**T2**<br> *out* y:**T3**|10+|**T1** = tensor(uint8)<br/> **T2** = tensor(uint8)<br/> **T3** = tensor(int32)|
@@ -78,7 +79,7 @@ Do not modify directly.*
|Crop|*in* input:**T**<br> *out* output:**T**|1+|**T** = tensor(float)|
|CumSum|*in* x:**T**<br> *in* axis:**T2**<br> *out* y:**T**|14+|**T** = tensor(double), tensor(float), tensor(int32), tensor(int64)<br/> **T2** = tensor(int32), tensor(int64)|
|||[11, 13]|**T** = tensor(double), tensor(float), tensor(int32), tensor(int64)<br/> **T2** = tensor(int32), tensor(int64)|
|DFT|*in* input:**T1**<br> *in* dft_length:**T2**<br> *out* output:**T1**|17+|**T1** = tensor(double), tensor(float)<br/> **T2** = tensor(int32), tensor(int64)|
|DFT|*in* input:**T1**<br> *in* dft_length:**T2**<br> *in* axis:**tensor(int64)**<br> *out* output:**T1**<br><br>or<br><br>*in* input:**T1**<br> *in* dft_length:**T2**<br> *out* output:**T1**|17+|**T1** = tensor(double), tensor(float)<br/> **T2** = tensor(int32), tensor(int64)|
|DepthToSpace|*in* input:**T**<br> *out* output:**T**|13+|**T** = tensor(double), tensor(float)|
|||[11, 12]|**T** = tensor(double), tensor(float)|
|||[1, 10]|**T** = tensor(double), tensor(float)|
@@ -935,7 +936,7 @@ Do not modify directly.*
|Crop|*in* input:**T**<br> *out* output:**T**|1+|**T** = tensor(float), tensor(float16)|
|CumSum|*in* x:**T**<br> *in* axis:**T2**<br> *out* y:**T**|14+|**T** = tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
|||11+|**T** = tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
|DFT|*in* input:**T1**<br> *in* dft_length:**T2**<br> *out* output:**T1**|17+|**T1** = tensor(float), tensor(float16)<br/> **T2** = tensor(int64)|
|DFT|*in* input:**T1**<br> *in* dft_length:**T2**<br> *in* axis:**tensor(int64)**<br> *out* output:**T1**<br><br>or<br><br>*in* input:**T1**<br> *in* dft_length:**T2**<br> *out* output:**T1**|17+|**T1** = tensor(float), tensor(float16)<br/> **T2** = tensor(int64)|
|DepthToSpace|*in* input:**T**<br> *out* output:**T**|13+|**T** = tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|||11+|**T** = tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|||1+|**T** = tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|

View file

@@ -442,7 +442,7 @@ class GraphRef {
} // namespace api
constexpr int64_t kMinSupportedOpset = 7;
constexpr int64_t kMaxSupportedOpset = 19;
constexpr int64_t kMaxSupportedOpset = 20;
// enum of results that a CostCheckFn can return.
enum class CostCheckResult {

View file

@@ -51,7 +51,7 @@ class BaseOpBuilder : public IOpBuilder {
virtual bool HasSupportedInputsImpl(const Node& node, const logging::Logger& logger) const;
virtual int GetMinSupportedOpSet(const Node& /* node */) const { return 1; }
virtual int GetMaxSupportedOpSet(const Node& /* node */) const { return 19; }
virtual int GetMaxSupportedOpSet(const Node& /* node */) const { return 20; }
private:
bool HasSupportedOpSet(const Node& node, const logging::Logger& logger) const;

View file

@@ -273,7 +273,7 @@ class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDoma
// Opset 9
class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 10, Compress);
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, ConstantOfShape);
class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 19, ConstantOfShape);
class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 12, MeanVarianceNormalization);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 12, float, Greater);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 12, double, Greater);
@@ -958,6 +958,9 @@ class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain,
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 19, Scan);
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 19, Shape);
// Opset 20
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 20, ConstantOfShape);
// !!PLEASE READ BELOW!! Following that, add new entries above this comment
/* *** IMPORTANT! ***
@@ -1332,7 +1335,7 @@ Status RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) {
// Opset 9
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 10,
Compress)>,
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, ConstantOfShape)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 19, ConstantOfShape)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 12,
MeanVarianceNormalization)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, 12, float,
@@ -2383,6 +2386,9 @@ Status RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) {
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 19, uint8_t, Resize)>,
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 19, Scan)>,
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 19, Shape)>,
// Opset 20
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 20, ConstantOfShape)>,
};
for (auto& function_table_entry : function_table) {

View file

@@ -11,11 +11,16 @@ ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPE_LIST_ALL_OPSETS(
kCpuExecutionProvider, kOnnxDomain, ConstantOfShape, Output, 0,
ConstantOfShapeDefaultOutputTypes);
ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPE_LIST(
kCpuExecutionProvider, kOnnxDomain, ConstantOfShape, 20, Output, 0,
ConstantOfShapeDefaultOutputTypesOpset20);
// pytorch converter uses ConstantOfShape with int64 to create Pad input
// https://github.com/pytorch/pytorch/blob/044b519a80459f6787f6723c1c091a18b153d184/torch/onnx/symbolic_opset11.py#L449
ORT_SPECIFY_OP_KERNEL_ARG_REQUIRED_TYPES_ALL_OPSETS(
kCpuExecutionProvider, kOnnxDomain, ConstantOfShape, Output, 0,
int64_t);
} // namespace op_kernel_type_control
namespace {
@@ -24,6 +29,10 @@ using EnabledOutputTypes =
ORT_OP_KERNEL_ARG_ENABLED_TYPE_LIST_ALL_OPSETS(
kCpuExecutionProvider, kOnnxDomain, ConstantOfShape, Output, 0);
using EnabledOutputTypesOpset20 =
ORT_OP_KERNEL_ARG_ENABLED_TYPE_LIST(
kCpuExecutionProvider, kOnnxDomain, ConstantOfShape, 20, Output, 0);
class ConstantOfShape final : public ConstantOfShapeBase<EnabledOutputTypes>, public OpKernel {
public:
explicit ConstantOfShape(const OpKernelInfo& info) : ConstantOfShapeBase(info), OpKernel(info) {}
@@ -66,13 +75,22 @@ Status ConstantOfShape::Compute(OpKernelContext* ctx) const {
} // namespace
ONNX_CPU_OPERATOR_KERNEL(
ONNX_CPU_OPERATOR_VERSIONED_KERNEL(
ConstantOfShape,
9,
19,
KernelDefBuilder()
.TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>())
.TypeConstraint("T2",
BuildKernelDefConstraintsFromTypeList<EnabledOutputTypes>()),
ConstantOfShape);
ONNX_CPU_OPERATOR_KERNEL(
ConstantOfShape,
20,
KernelDefBuilder()
.TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>())
.TypeConstraint("T2",
BuildKernelDefConstraintsFromTypeList<EnabledOutputTypesOpset20>()),
ConstantOfShape);
} // namespace onnxruntime
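With the registrations above (the versioned 9–19 kernel plus the new opset-20 entry and its wider type list), an opset-20 model using ConstantOfShape runs on the CPU EP. A hypothetical end-to-end sketch, assuming onnx>=1.15 for opset-20 support and an ORT build that includes this change; this snippet is illustrative and not part of the PR:

```python
import numpy as np
import onnx
from onnx import TensorProto, helper
import onnxruntime as ort

# Build a one-node opset-20 model: y = ConstantOfShape(shape), value = float16 1.0.
value = helper.make_tensor("value", TensorProto.FLOAT16, [1], [1.0])
node = helper.make_node("ConstantOfShape", ["shape"], ["y"], value=value)
graph = helper.make_graph(
    [node],
    "constantofshape_opset20",
    [helper.make_tensor_value_info("shape", TensorProto.INT64, [2])],
    [helper.make_tensor_value_info("y", TensorProto.FLOAT16, None)],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 20)])
onnx.checker.check_model(model)

# Dispatches to the opset-20 ConstantOfShape kernel registered above.
sess = ort.InferenceSession(model.SerializeToString(), providers=["CPUExecutionProvider"])
print(sess.run(None, {"shape": np.array([2, 3], dtype=np.int64)})[0])  # 2x3 float16 ones
```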

View file

@@ -23,6 +23,18 @@ using ConstantOfShapeDefaultOutputTypes =
uint8_t, uint16_t, uint32_t, uint64_t,
bool>;
using ConstantOfShapeDefaultOutputTypesOpset20 =
TypeList<
BFloat16,
MLFloat16,
float, double,
#if !defined(DISABLE_FLOAT8_TYPES)
Float8E4M3FN, Float8E4M3FNUZ, Float8E5M2, Float8E5M2FNUZ,
#endif
int8_t, int16_t, int32_t, int64_t,
uint8_t, uint16_t, uint32_t, uint64_t,
bool>;
template <typename EnabledOutputTypeList = ConstantOfShapeDefaultOutputTypes>
class ConstantOfShapeBase {
protected:

View file

@@ -46,7 +46,7 @@ class BaseOpBuilder : public IOpBuilder {
// We still set the mininal supported opset to 1 as we couldn't
// get the model opset version at this stage.
virtual int GetMinSupportedOpSet(const Node& /* node */) const { return 1; }
virtual int GetMaxSupportedOpSet(const Node& /* node */) const { return 19; }
virtual int GetMaxSupportedOpSet(const Node& /* node */) const { return 20; }
private:
bool HasSupportedOpSet(const Node& node, const logging::Logger& logger) const;

View file

@@ -6,6 +6,7 @@
#include "onnx/defs/parser.h"
#include "core/common/span_utils.h"
#include "core/framework/float8.h"
#include "core/graph/model.h"
#include "core/providers/cpu/cpu_execution_provider.h"
#include "core/session/inference_session.h"
@@ -69,7 +70,9 @@ static void Check(const char* source,
float threshold = 0.001f;
for (size_t i = 0; i < size; ++i) {
ASSERT_NEAR(data[i], output_values[i], threshold) << "at position i:" << i;
if (!std::isnan(data[i]) && !std::isnan(output_values[i])) {
ASSERT_NEAR(data[i], output_values[i], threshold) << "at position i:" << i;
}
}
}
@@ -389,25 +392,13 @@ TEST(FunctionTest, AttrSaturateNan) {
>
agraph (float[N] x) => (float[N] y)
{
y0 = local.myfun <a = 1e6> (x)
y1 = local.myfun (x)
y = Add (y0, y1)
}
<
opset_import: [ "" : 19 ],
domain: "local"
>
myfun <a: float=1.0> (x) => (y) {
x2 = Constant <value_float: float=@a>()
x2_ = Cast<to=18>(x2)
x3 = CastLike<saturate=0>(x2, x2_)
x3_ = Cast<to=1>(x3)
y = Add (x, x3_)
x_E4M3FNUZ = Cast<to=18>(x)
x_E4M3FNUZ_2 = CastLike<saturate=0>(x, x_E4M3FNUZ) # NaN when OOR
y = Cast<to=1>(x_E4M3FNUZ_2)
}
)";
Check(code, "x", {1.0, 2.0, 1e6}, "y", {243.0, 245.0, 2000241}); // std::numeric_limits<float>::quiet_NaN()});
Check(code, "x", {1.0, 2.0, 1e6}, "y", {1.0, 2.0, std::numeric_limits<float>::quiet_NaN()});
}
#endif

View file

@@ -2056,7 +2056,7 @@ TEST(InferenceSessionTests, TestStrictShapeInference) {
ASSERT_STATUS_OK(session_options.config_options.AddConfigEntry(kOrtSessionOptionsConfigStrictShapeTypeInference, "1"));
tester.Run(session_options, OpTester::ExpectResult::kExpectFailure,
"Mismatch between number of source and target dimensions. Source=1 Target=2",
"Mismatch between number of inferred and declared dimensions. inferred=1 declared=2",
excluded_provider_types);
}

View file

@@ -6280,7 +6280,7 @@ TEST_F(GraphTransformationTests, ConstantSharing_ShouldNotShareForGraphOutput) {
TEST_F(GraphTransformationTests, GatherToSplitFusion) {
auto build_test_case = [&](ModelTestBuilder& builder) {
auto* data_arg = builder.MakeInput<float>({{54}});
auto* shape_arg = builder.MakeInput<int64_t>({{1}});
auto* shape_arg = builder.MakeInput<int64_t>({{4}});
auto* reshape_out = builder.MakeIntermediate<float>({{2, 3, 3, 3}});
auto* gather_index_1 = builder.MakeInitializer<int64_t>({}, {static_cast<int64_t>(0)});
auto* gather_index_2 = builder.MakeInitializer<int64_t>({}, {static_cast<int64_t>(1)});
@@ -6393,7 +6393,7 @@ TEST_F(GraphTransformationTests, GatherToSplitFusion) {
TEST_F(GraphTransformationTests, GatherToSplitFusion_NoSqueeze) {
auto build_test_case = [&](ModelTestBuilder& builder) {
auto* data_arg = builder.MakeInput<float>({{54}});
auto* shape_arg = builder.MakeInput<int64_t>({{1}});
auto* shape_arg = builder.MakeInput<int64_t>({{4}});
auto* reshape_out = builder.MakeIntermediate<float>({{2, 3, 3, 3}});
auto* gather_index_1 = builder.MakeInitializer<int64_t>({1}, {static_cast<int64_t>(0)});
auto* gather_index_2 = builder.MakeInitializer<int64_t>({1}, {static_cast<int64_t>(1)});

View file

@@ -578,7 +578,7 @@ TEST(Scan9, DISABLED_BadShape) {
ShortSequenceOneInBatchOneLoopStateVar(
options,
"Node:concat Output:concat_out_1 [ShapeInferenceError] Mismatch between number of source and target dimensions. "
"Source=2 Target=1");
"inferred=2 declared=1");
}
TEST(Scan8, ShortSequenceTwoInBatchOneLoopStateVar) {

View file

@@ -109,7 +109,7 @@ TEST(ConvFp16Test, Conv1D_Invalid_Input_Shape) {
TestConvFp16Op(attrs, {X, dummy_vals}, {X_shape, dummy_shape}, dummy_vals, dummy_shape, false,
OpTester::ExpectResult::kExpectFailure,
"Node:node1 Output:Y [ShapeInferenceError] Can't merge shape info. "
"Both source and target dimension have values but they differ. Source=0 Target=2 Dimension=2",
"Both inferred and declared dimension have values but they differ. Inferred=0 Declared=2 Dimension=2",
-1); // use latest opset for shape inferencing errors
}
@@ -132,7 +132,7 @@ TEST(ConvFp16Test, Conv2D_Invalid_Input_Shape) {
TestConvFp16Op(attrs, {X, dummy_vals}, {X_shape, dummy_shape}, dummy_vals, dummy_shape, false,
OpTester::ExpectResult::kExpectFailure,
"Node:node1 Output:Y [ShapeInferenceError] Can't merge shape info. "
"Both source and target dimension have values but they differ. Source=1 Target=2 Dimension=0",
"Both inferred and declared dimension have values but they differ. Inferred=1 Declared=2 Dimension=0",
-1); // use latest opset for shape inferencing errors
}

View file

@@ -249,7 +249,7 @@ TEST(ConvTest, Conv1D_Invalid_Input_Shape) {
TestConvOp(attrs, {X, dummy_vals}, {X_shape, dummy_shape}, dummy_vals, dummy_shape, false,
OpTester::ExpectResult::kExpectFailure,
"Node:node1 Output:Y [ShapeInferenceError] Can't merge shape info. "
"Both source and target dimension have values but they differ. Source=0 Target=2 Dimension=2",
"Both inferred and declared dimension have values but they differ. Inferred=0 Declared=2 Dimension=2",
-1); // use latest opset for shape inferencing errors
}
@@ -272,7 +272,7 @@ TEST(ConvTest, Conv2D_Invalid_Input_Shape) {
TestConvOp(attrs, {X, dummy_vals}, {X_shape, dummy_shape}, dummy_vals, dummy_shape, false,
OpTester::ExpectResult::kExpectFailure,
"Node:node1 Output:Y [ShapeInferenceError] Can't merge shape info. "
"Both source and target dimension have values but they differ. Source=1 Target=2 Dimension=0",
"Both inferred and declared dimension have values but they differ. Inferred=1 Declared=2 Dimension=0",
-1); // use latest opset for shape inferencing errors
}

View file

@@ -91,7 +91,7 @@ TEST(TfIdfVectorizerTest, Int32_TF_onlyBigrams_Skip0_Empty_Dim1Fail) {
test.Run(OpTester::ExpectResult::kExpectFailure,
"Can't merge shape info. "
"Both source and target dimension have values but they differ. Source=7 Target=0 Dimension=0");
"Both inferred and declared dimension have values but they differ. Inferred=7 Declared=0 Dimension=0");
}
TEST(TfIdfVectorizerTest, Int32_TF_onlyBigrams_Skip0_Empty_Dim1Success) {
@@ -136,7 +136,7 @@ TEST(TfIdfVectorizerTest, Int32_TF_onlyBigrams_Skip0_Empty_Dim2) {
test.AddOutput<float>("Y", out_dims, output);
test.Run(OpTester::ExpectResult::kExpectFailure,
"Mismatch between number of source and target dimensions. Source=2 Target=1");
"Mismatch between number of inferred and declared dimensions. inferred=2 declared=1");
}
TEST(TfIdfVectorizerTest, Int32_TF_onlyBigrams_Skip01_Empty_Dim2) {
@@ -159,7 +159,7 @@ TEST(TfIdfVectorizerTest, Int32_TF_onlyBigrams_Skip01_Empty_Dim2) {
test.AddOutput<float>("Y", out_dims, output);
test.Run(OpTester::ExpectResult::kExpectFailure,
"Mismatch between number of source and target dimensions. Source=2 Target=1");
"Mismatch between number of inferred and declared dimensions. inferred=2 declared=1");
}
TEST(TfIdfVectorizerTest, Int32_TF_onlyBigrams_Skip0_Empty_Dim2N) {

View file

@@ -37,7 +37,7 @@ TEST(TransposeOpTest, PermRankDoesNotMatchTensorRank) {
// This failure comes from shape inference, because in this case it knows the input dims.
// But in the real world, the model can supply different input dims at runtime.
test.Run(OpTester::ExpectResult::kExpectFailure,
"Node:node1 Output:Y [ShapeInferenceError] Mismatch between number of source and target dimensions. Source=3 Target=4");
"Node:node1 Output:Y [ShapeInferenceError] Mismatch between number of inferred and declared dimensions. inferred=3 declared=4");
}
// Some of the tests can't run on TensorrtExecutionProvider because of errors.

View file

@@ -279,6 +279,9 @@ def check_model_correctness(
ops_set = set(node.op_type for node in model_onnx.graph.node)
check_reference_evaluator = not (ops_set & {"EmbedLayerNormalization", "Conv", "Attention", "Transpose"})
with open(model_path_to_check, "rb") as f:
model_check = onnx.load(f)
if check_reference_evaluator and onnx_recent_enough:
ref = ReferenceEvaluator(model_path_origin)
ref_origin_results = ref.run(None, inputs)
@@ -289,7 +292,7 @@ def check_model_correctness(
output,
rtol=rtol,
atol=atol,
err_msg=f"Model {model_path_to_check!r} failed for providers={providers!r}.",
err_msg=f"Model {model_path_origin!r} failed for providers={providers!r}.",
)
# Verifies the shapes in the quantized model.
@@ -301,40 +304,52 @@ def check_model_correctness(
expected_shapes[init.name] = tuple(init.dims)
checked = 0
f8_quantization = False
with open(model_path_to_check, "rb") as f:
model_check = onnx.load(f)
for init in model_check.graph.initializer:
if init.name.endswith("_quantized"):
name = init.name.replace("_quantized", "")
expected = expected_shapes[name]
shape = tuple(init.dims)
if not dynamic and expected != shape:
raise AssertionError(
f"Shape mismatch for initializer {init.name!r} from {init.name!r}, "
f"shape={shape} != {expected} (expected)."
)
else:
checked += 1
if "zero_point" in init.name:
dt = init.data_type
f8_quantization = f8_quantization or dt in (
TensorProto.FLOAT8E4M3FN,
TensorProto.FLOAT8E4M3FNUZ,
TensorProto.FLOAT8E5M2,
TensorProto.FLOAT8E5M2FNUZ,
for init in model_check.graph.initializer:
if init.name.endswith("_quantized"):
name = init.name.replace("_quantized", "")
expected = expected_shapes[name]
shape = tuple(init.dims)
if not dynamic and expected != shape:
raise AssertionError(
f"Shape mismatch for initializer {init.name!r} from {init.name!r}, "
f"shape={shape} != {expected} (expected)."
)
if checked == 0:
raise AssertionError(
f"Unable to check expected shape, expected_shapes={expected_shapes}, "
f"names={[init.name for init in model_check.graph.initializer]}."
else:
checked += 1
if "zero_point" in init.name:
dt = init.data_type
f8_quantization = f8_quantization or dt in (
TensorProto.FLOAT8E4M3FN,
TensorProto.FLOAT8E4M3FNUZ,
TensorProto.FLOAT8E5M2,
TensorProto.FLOAT8E5M2FNUZ,
)
if checked == 0:
raise AssertionError(
f"Unable to check expected shape, expected_shapes={expected_shapes}, "
f"names={[init.name for init in model_check.graph.initializer]}."
)
if f8_quantization:
check_sign_f8_quantization(model_path_origin, model_path_to_check)
# Verifies the expected outputs.
if check_reference_evaluator and onnx_recent_enough:
reference_new_ops = [QGemm]
has_missing_reference_ops = any(
node.domain not in ["", "ai.onnx"]
and not any(
node.domain == new_node.op_domain and node.op_type == new_node.__name__
for new_node in reference_new_ops
)
for node in model_check.graph.node
)
if has_missing_reference_ops:
# We need to skip the test if the model contains ops that are not supported.
testcase.skipTest(
f"Model {model_path_to_check!r} contains ops that are not supported by the reference evaluator."
)
# Needs pv.Version(onnx.__version__) >= pv.Version("1.16.0")
ref = ReferenceEvaluator(model_path_to_check, new_ops=[QGemm])
ref = ReferenceEvaluator(model_check, new_ops=reference_new_ops)
target_results = ref.run(None, inputs)
testcase.assertEqual(len(origin_results), len(target_results), "result count are different")
for idx, ref_output in enumerate(origin_results):
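The refactored check above hands the already-loaded `model_check` proto to `ReferenceEvaluator` and skips models whose custom-domain ops have no reference implementation in `reference_new_ops`. A minimal sketch of that `new_ops` mechanism with a made-up custom op (the `AddOne` class and `com.example` domain are hypothetical, standing in for QGemm):

```python
import numpy as np
from onnx import TensorProto, helper
from onnx.reference import ReferenceEvaluator
from onnx.reference.op_run import OpRun

# Hypothetical reference implementation for a custom-domain op; the class name
# and op_domain are what the has_missing_reference_ops check above compares against.
class AddOne(OpRun):
    op_domain = "com.example"

    def _run(self, x):
        return (x + 1,)

node = helper.make_node("AddOne", ["x"], ["y"], domain="com.example")
graph = helper.make_graph(
    [node], "g",
    [helper.make_tensor_value_info("x", TensorProto.FLOAT, [2])],
    [helper.make_tensor_value_info("y", TensorProto.FLOAT, [2])],
)
model = helper.make_model(
    graph,
    opset_imports=[helper.make_opsetid("", 20), helper.make_opsetid("com.example", 1)],
)

# new_ops supplies the evaluator with implementations it does not ship by default.
ref = ReferenceEvaluator(model, new_ops=[AddOne])
print(ref.run(None, {"x": np.array([1.0, 2.0], dtype=np.float32)})[0])  # [2. 3.]
```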

View file

@@ -233,7 +233,64 @@
"^test_resize_upsample_sizes_nearest_cuda",
"^test_resize_upsample_sizes_nearest_floor_align_corners_cuda",
"^test_resize_upsample_sizes_nearest_not_larger_cuda",
"^test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cuda"
"^test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cuda",
// onnx 1.15 (opset 20) new and updated op tests
"^test_ai_onnx_ml_label_encoder_string_int",
"^test_ai_onnx_ml_label_encoder_string_int_no_default",
"^test_ai_onnx_ml_label_encoder_tensor_mapping",
"^test_ai_onnx_ml_label_encoder_tensor_value_only_mapping",
"^test_gridsample_aligncorners_true",
"^test_gridsample_bicubic_align_corners_0_additional_1",
"^test_gridsample_bicubic_align_corners_1_additional_1",
"^test_gridsample_bicubic",
"^test_gridsample_bilinear_align_corners_0_additional_1",
"^test_gridsample_bilinear_align_corners_1_additional_1",
"^test_gridsample_bilinear",
"^test_gridsample_border_padding",
"^test_gridsample",
"^test_gridsample_nearest_align_corners_0_additional_1",
"^test_gridsample_nearest_align_corners_1_additional_1",
"^test_gridsample_nearest",
"^test_gridsample_reflection_padding",
"^test_gridsample_volumetric_bilinear_align_corners_0",
"^test_gridsample_volumetric_bilinear_align_corners_1",
"^test_gridsample_volumetric_nearest_align_corners_0",
"^test_gridsample_volumetric_nearest_align_corners_1",
"^test_gridsample_zeros_padding",
"^test_image_decoder_decode_bmp_rgb",
"^test_image_decoder_decode_jpeg2k_rgb",
"^test_image_decoder_decode_jpeg_bgr",
"^test_image_decoder_decode_jpeg_grayscale",
"^test_image_decoder_decode_jpeg_rgb",
"^test_image_decoder_decode_png_rgb",
"^test_image_decoder_decode_pnm_rgb",
"^test_image_decoder_decode_tiff_rgb",
"^test_image_decoder_decode_webp_rgb",
"^test_regex_full_match_basic",
"^test_regex_full_match_email_domain",
"^test_regex_full_match_empty",
"^test_string_concat_broadcasting",
"^test_string_concat",
"^test_string_concat_empty_string",
"^test_string_concat_utf8",
"^test_string_concat_zero_dimensional",
"^test_string_split_basic",
"^test_string_split_consecutive_delimiters",
"^test_string_split_empty_string_delimiter",
"^test_string_split_empty_tensor",
"^test_string_split_maxsplit",
"^test_string_split_no_delimiter",
"^test_dft_axis",
"^test_dft",
"^test_dft_inverse",
"^test_isinf",
"^test_isinf_float16",
"^test_isinf_negative",
"^test_isinf_positive",
"^test_isnan",
"^test_isnan_float16",
"^test_reduce_max_bool_inputs",
"^test_reduce_min_bool_inputs"
],
"current_failing_tests_x86": [
"^test_vgg19",
@@ -316,7 +373,24 @@
"^test_layer_normalization_4d_axis_negative_1_expanded_ver18_cpu",
"^test_layer_normalization_4d_axis_negative_2_expanded_ver18_cpu",
"^test_layer_normalization_4d_axis_negative_3_expanded_ver18_cpu",
"^test_layer_normalization_default_axis_expanded_ver18_cpu"
"^test_layer_normalization_default_axis_expanded_ver18_cpu",
// onnx 1.15 (opset 20) new and updated op tests (test_affine_grid_???_expanded utilizes ConstantOfShape so it needs to be skipped as well)
// https://dev.azure.com/onnxruntime/onnxruntime/_build/results?buildId=1139541&view=logs&j=249e9d58-0012-5814-27cf-6a201adbd9cf&t=bb33e81f-0527-50e0-0fd2-e94f509f0a82
// only supported with cpu provider
"^test_affine_grid_2d",
"^test_affine_grid_2d_align_corners",
"^test_affine_grid_2d_align_corners_expanded",
"^test_affine_grid_2d_expanded",
"^test_affine_grid_3d",
"^test_affine_grid_3d_align_corners",
"^test_affine_grid_3d_align_corners_expanded",
"^test_affine_grid_3d_expanded",
"^test_constantofshape_float_ones",
"^test_constantofshape_int_shape_zero",
"^test_constantofshape_int_zeros",
// https://dev.azure.com/onnxruntime/onnxruntime/_build/results?buildId=1141563&view=logs&j=a018b46d-e41a-509d-6581-c95fdaa42fcd&t=d61c1d37-f101-5d28-982f-e5931b720302
"^test_gelu_tanh_2_cpu",
"^test_gelu_tanh_2_expanded_cpu"
],
"current_failing_tests_NNAPI": [
"^test_maxpool_2d_uint8",
@@ -569,7 +643,22 @@
"^test_sequence_map_identity_1_sequence_cpu",
"^test_sequence_map_identity_1_sequence_expanded_cpu",
"^test_sequence_map_identity_2_sequences_cpu",
"^test_sequence_map_identity_2_sequences_expanded_cpu"
"^test_sequence_map_identity_2_sequences_expanded_cpu",
// onnx 1.15 (opset 20) new and updated op tests (test_affine_grid_???_expanded utilizes ConstantOfShape so it needs to be skipped as well)
// https://dev.azure.com/onnxruntime/onnxruntime/_build/results?buildId=1139542&view=logs&j=3032dfba-5baf-5872-0871-2e69cb7f4b6a&t=f0d05deb-fc26-5aaf-e43e-7db2764c07da
// only supported with cpu provider
"^test_affine_grid_2d",
"^test_affine_grid_2d_align_corners",
"^test_affine_grid_2d_align_corners_expanded",
"^test_affine_grid_2d_expanded",
"^test_affine_grid_3d",
"^test_affine_grid_3d_align_corners",
"^test_affine_grid_3d_align_corners_expanded",
"^test_affine_grid_3d_expanded",
"^test_constantofshape_float_ones",
"^test_constantofshape_int_shape_zero",
"^test_constantofshape_int_zeros"
],
// ORT first supported opset 7, so models with nodes that require versions prior to opset 7 are not supported
"tests_with_pre_opset7_dependencies": [

View file

@@ -7,6 +7,9 @@
"test_dft": 1e-3,
"test_dft_axis": 1e-3,
"test_dft_inverse": 1e-3,
"test_dft_opset19": 1e-3,
"test_dft_axis_opset19": 1e-3,
"test_dft_inverse_opset19": 1e-3,
"test_stft": 1e-4,
"test_stft_with_window": 1e-4
},

View file

@@ -4171,7 +4171,7 @@ Return true if all elements are true and false otherwise.
"T", OpSchema::Variadic,
/*is_homogeneous*/ false,
/*min_arity*/ 1)
.TypeConstraint("T", OpSchema::all_tensor_types_with_bfloat(),
.TypeConstraint("T", OpSchema::all_tensor_types_ir4(),
"Allow inputs and outputs to be any kind of tensor.");
#endif // ENABLE_TRITON

View file

@@ -17,4 +17,4 @@ scikit-learn
scipy
sympy
wheel
setuptools>=41.4.0
setuptools>=61.0.0

View file

@@ -6,4 +6,4 @@ onnx
packaging
protobuf
sympy
setuptools>=41.4.0
setuptools>=61.0.0

View file

@@ -11,7 +11,7 @@ steps:
packageType: upack
feed: '/7424c8e4-5c62-490e-95c4-79446f31017c'
definition: '517c4f6f-5437-4392-a70d-4f15ec5be2f0'
version: 1.0.81
version: 1.0.90
downloadPath: $(Build.BinariesDirectory)/deps
# The private ADO project
@@ -22,7 +22,7 @@ steps:
packageType: upack
feed: '/4c7631f5-24c0-4307-8822-1aa8f180c325'
definition: 'fd9dd5ad-b73e-4678-890e-edcf680dbc1a'
version: 1.0.81
version: 1.0.90
downloadPath: $(Build.BinariesDirectory)/deps
# You can add more ADO accounts at here.

View file

@@ -31,7 +31,7 @@ steps:
architecture: ${{parameters.BuildArch}}
- script: |
python -m pip install -q setuptools wheel numpy flatbuffers
python -m pip install --upgrade "setuptools>=61.0.0" wheel numpy flatbuffers
workingDirectory: '$(Build.BinariesDirectory)'
displayName: 'Install python modules'

View file

@@ -4,7 +4,7 @@ mypy
pytest
setuptools>=41.4.0
wheel
git+http://github.com/onnx/onnx.git@e2525550194ce3d8a2c4a3af451c9d9b3ae6650e#egg=onnx
git+http://github.com/onnx/onnx.git@ac3e58759463ff3a3089e3cd64fddbfad0f6724d#egg=onnx
protobuf==3.20.2
sympy==1.12
flatbuffers