using Microsoft.ML.OnnxRuntime.Tensors;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text.RegularExpressions;
using Xunit;
namespace Microsoft.ML.OnnxRuntime.Tests
{
/// <summary>
/// This compensates for the absence of string.Contains(String, StringComparison)
/// in .NET Standard 2.0.
/// </summary>
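/// <example>
/// A usage sketch (the strings here are hypothetical, for illustration only):
/// <code>
/// bool found = "OnnxRuntime".Contains("runtime", StringComparison.OrdinalIgnoreCase); // true
/// </code>
/// </example>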
public static class StringExtensions
{
public static bool Contains(this String str, String substring,
StringComparison comp)
{
if (substring == null)
throw new ArgumentNullException(nameof(substring),
"substring cannot be null.");
else if (!Enum.IsDefined(typeof(StringComparison), comp))
throw new ArgumentException("comp is not a member of StringComparison",
nameof(comp));
return str.IndexOf(substring, comp) >= 0;
}
}
public partial class InferenceTest
{
private const string module = "onnxruntime.dll";
private const string propertiesFile = "Properties.txt";
[Fact(DisplayName = "CanCreateAndDisposeSessionWithModelPath")]
public void CanCreateAndDisposeSessionWithModelPath()
{
string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "squeezenet.onnx");
using (var session = new InferenceSession(modelPath))
{
Assert.NotNull(session);
Assert.NotNull(session.InputMetadata);
Assert.Single(session.InputMetadata); // 1 input node
Assert.True(session.InputMetadata.ContainsKey("data_0")); // input node name
Assert.Equal(typeof(float), session.InputMetadata["data_0"].ElementType);
Assert.True(session.InputMetadata["data_0"].IsTensor);
var expectedInputDimensions = new int[] { 1, 3, 224, 224 };
Assert.Equal(expectedInputDimensions.Length, session.InputMetadata["data_0"].Dimensions.Length);
for (int i = 0; i < expectedInputDimensions.Length; i++)
{
Assert.Equal(expectedInputDimensions[i], session.InputMetadata["data_0"].Dimensions[i]);
}
Assert.NotNull(session.OutputMetadata);
Assert.Single(session.OutputMetadata); // 1 output node
Assert.True(session.OutputMetadata.ContainsKey("softmaxout_1")); // output node name
Assert.Equal(typeof(float), session.OutputMetadata["softmaxout_1"].ElementType);
Assert.True(session.OutputMetadata["softmaxout_1"].IsTensor);
var expectedOutputDimensions = new int[] { 1, 1000, 1, 1 };
Assert.Equal(expectedOutputDimensions.Length, session.OutputMetadata["softmaxout_1"].Dimensions.Length);
for (int i = 0; i < expectedOutputDimensions.Length; i++)
{
Assert.Equal(expectedOutputDimensions[i], session.OutputMetadata["softmaxout_1"].Dimensions[i]);
}
}
}
#if USE_CUDA
[Fact(DisplayName = "TestCUDAProviderOptions")]
private void TestCUDAProviderOptions()
{
string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "squeezenet.onnx");
string defaultDeviceId = "0";
string deviceIdFromEnv = System.Environment.GetEnvironmentVariable("ONNXRUNTIME_TEST_GPU_DEVICE_ID");
if (!string.IsNullOrEmpty(deviceIdFromEnv) && int.TryParse(deviceIdFromEnv, out int deviceId) && deviceId >= 0)
{
defaultDeviceId = deviceIdFromEnv;
output.WriteLine($"Parsed ID: {deviceIdFromEnv}");
}
using (var cleanUp = new DisposableListTest<IDisposable>())
{
var cudaProviderOptions = new OrtCUDAProviderOptions();
cleanUp.Add(cudaProviderOptions);
var providerOptionsDict = new Dictionary<string, string>();
providerOptionsDict["device_id"] = defaultDeviceId;
// gpu_mem_limit is given in bytes: 268435456 bytes == 256 MB
providerOptionsDict["gpu_mem_limit"] = "268435456";
providerOptionsDict["arena_extend_strategy"] = "kSameAsRequested";
providerOptionsDict["cudnn_conv_algo_search"] = "DEFAULT";
providerOptionsDict["do_copy_in_default_stream"] = "1";
providerOptionsDict["cudnn_conv_use_max_workspace"] = "1";
providerOptionsDict["cudnn_conv1d_pad_to_nc1d"] = "1";
cudaProviderOptions.UpdateOptions(providerOptionsDict);
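// Read the options back from the native provider and parse them into a
// dictionary so each key/value pair can be verified below.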
var resultProviderOptionsDict = new Dictionary<string, string>();
ProviderOptionsValueHelper.StringToDict(cudaProviderOptions.GetOptions(), resultProviderOptionsDict);
// test provider options configuration
string value;
value = resultProviderOptionsDict["device_id"];
Assert.Equal("0", value);
value = resultProviderOptionsDict["gpu_mem_limit"];
Assert.Equal("268435456", value);
value = resultProviderOptionsDict["arena_extend_strategy"];
Assert.Equal("kSameAsRequested", value);
value = resultProviderOptionsDict["cudnn_conv_algo_search"];
Assert.Equal("DEFAULT", value);
value = resultProviderOptionsDict["do_copy_in_default_stream"];
Assert.Equal("1", value);
value = resultProviderOptionsDict["cudnn_conv_use_max_workspace"];
Assert.Equal("1", value);
value = resultProviderOptionsDict["cudnn_conv1d_pad_to_nc1d"];
Assert.Equal("1", value);
// test correctness of provider options
SessionOptions options = SessionOptions.MakeSessionOptionWithCudaProvider(cudaProviderOptions);
cleanUp.Add(options);
var session = new InferenceSession(modelPath, options);
cleanUp.Add(session);
var inputMeta = session.InputMetadata;
var container = new List<NamedOnnxValue>();
float[] inputData = TestDataLoader.LoadTensorFromFile(@"bench.in"); // this is the data for only one input tensor for this model
foreach (var name in inputMeta.Keys)
{
Assert.Equal(typeof(float), inputMeta[name].ElementType);
Assert.True(inputMeta[name].IsTensor);
var tensor = new DenseTensor<float>(inputData, inputMeta[name].Dimensions);
container.Add(NamedOnnxValue.CreateFromTensor(name, tensor));
}
// Run() returns a disposable result collection; dispose it to release native buffers.
using (var results = session.Run(container))
{
Assert.NotNull(results);
}
}
}
#endif
#if USE_TENSORRT
[Fact(DisplayName = "CanRunInferenceOnAModelWithTensorRT")]
private void CanRunInferenceOnAModelWithTensorRT()
{
string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "squeezenet.onnx");
int deviceId = 0;
string deviceIdStr = System.Environment.GetEnvironmentVariable("ONNXRUNTIME_TEST_GPU_DEVICE_ID");
if (!string.IsNullOrEmpty(deviceIdStr) && int.TryParse(deviceIdStr, out int parsedValue) && parsedValue >= 0)
{
deviceId = parsedValue;
output.WriteLine($"Parsed ID: {parsedValue}");
}
using (var cleanUp = new DisposableListTest<IDisposable>())
{
SessionOptions options = SessionOptions.MakeSessionOptionWithTensorrtProvider(deviceId);
cleanUp.Add(options);
var session = new InferenceSession(modelPath, options);
cleanUp.Add(session);
var inputMeta = session.InputMetadata;
var container = new List<NamedOnnxValue>();
float[] inputData = TestDataLoader.LoadTensorFromFile(@"bench.in"); // this is the data for only one input tensor for this model
foreach (var name in inputMeta.Keys)
{
Assert.Equal(typeof(float), inputMeta[name].ElementType);
Assert.True(inputMeta[name].IsTensor);
var tensor = new DenseTensor<float>(inputData, inputMeta[name].Dimensions);
container.Add(NamedOnnxValue.CreateFromTensor(name, tensor));
}
using (var results = session.Run(container))
{
ValidateRunResults(results);
}
}
}
[Fact(DisplayName = "TestTensorRTProviderOptions")]
private void TestTensorRTProviderOptions()
{
string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "squeezenet.onnx");
string calTablePath = "squeezenet_calibration.flatbuffers";
string enginePath = "./";
string engineDecryptLibPath = "engine_decryp";
string defaultDeviceId = "0";
string deviceIdFromEnv = System.Environment.GetEnvironmentVariable("ONNXRUNTIME_TEST_GPU_DEVICE_ID");
if (!string.IsNullOrEmpty(deviceIdFromEnv) && int.TryParse(deviceIdFromEnv, out int deviceId) && deviceId >= 0)
{
defaultDeviceId = deviceIdFromEnv;
output.WriteLine($"Parsed ID: {deviceIdFromEnv}");
}
using (var cleanUp = new DisposableListTest<IDisposable>())
{
var trtProviderOptions = new OrtTensorRTProviderOptions();
cleanUp.Add(trtProviderOptions);
var providerOptionsDict = new Dictionary<string, string>();
providerOptionsDict["device_id"] = defaultDeviceId;
providerOptionsDict["trt_fp16_enable"] = "1";
providerOptionsDict["trt_int8_enable"] = "1";
providerOptionsDict["trt_int8_calibration_table_name"] = calTablePath;
providerOptionsDict["trt_engine_cache_enable"] = "1";
providerOptionsDict["trt_engine_cache_path"] = enginePath;
providerOptionsDict["trt_engine_decryption_enable"] = "0";
providerOptionsDict["trt_engine_decryption_lib_path"] = engineDecrptLibPath;
trtProviderOptions.UpdateOptions(providerOptionsDict);
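// As in the CUDA test, round-trip the options through the native provider
// so each key/value pair can be verified below.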
var resultProviderOptionsDict = new Dictionary<string, string>();
ProviderOptionsValueHelper.StringToDict(trtProviderOptions.GetOptions(), resultProviderOptionsDict);
// test provider options configuration
string value;
value = resultProviderOptionsDict["device_id"];
Assert.Equal(defaultDeviceId, value);
value = resultProviderOptionsDict["trt_fp16_enable"];
Assert.Equal("1", value);
value = resultProviderOptionsDict["trt_int8_enable"];
Assert.Equal("1", value);
value = resultProviderOptionsDict["trt_int8_calibration_table_name"];
Assert.Equal(calTablePath, value);
value = resultProviderOptionsDict["trt_engine_cache_enable"];
Assert.Equal("1", value);
value = resultProviderOptionsDict["trt_engine_cache_path"];
Assert.Equal(enginePath, value);
value = resultProviderOptionsDict["trt_engine_decryption_enable"];
Assert.Equal("0", value);
value = resultProviderOptionsDict["trt_engine_decryption_lib_path"];
Assert.Equal(engineDecryptLibPath, value);
// test correctness of provider options
SessionOptions options = SessionOptions.MakeSessionOptionWithTensorrtProvider(trtProviderOptions);
cleanUp.Add(options);
var session = new InferenceSession(modelPath, options);
cleanUp.Add(session);
var inputMeta = session.InputMetadata;
var container = new List<NamedOnnxValue>();
float[] inputData = TestDataLoader.LoadTensorFromFile(@"bench.in"); // this is the data for only one input tensor for this model
foreach (var name in inputMeta.Keys)
{
Assert.Equal(typeof(float), inputMeta[name].ElementType);
Assert.True(inputMeta[name].IsTensor);
var tensor = new DenseTensor<float>(inputData, inputMeta[name].Dimensions);
container.Add(NamedOnnxValue.CreateFromTensor(name, tensor));
}
// Run() returns a disposable result collection; dispose it to release native buffers.
using (var results = session.Run(container))
{
Assert.NotNull(results);
}
}
}
#endif
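// Enumerates every opset* subdirectory (recursively) under the test-models root.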
private static Func<DirectoryInfo, IEnumerable<DirectoryInfo>> getOpsetDirectories = delegate (DirectoryInfo modelsDirInfo)
{
return modelsDirInfo.EnumerateDirectories("opset*", SearchOption.AllDirectories);
};
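/// <summary>
/// Maps model directory names to the reason the model is skipped. Entries are
/// added conditionally below based on environment variables and process bitness.
/// </summary>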
private static Dictionary<string, string> GetSkippedModels(DirectoryInfo modelsDirInfo)
{
var skipModels = new Dictionary<string, string>() {
{ "mxnet_arcface", "Model is an invalid ONNX model"},
{ "tf_inception_v2", "TODO: Debug failing model, skipping for now" },
{ "fp16_tiny_yolov2", "Tolerance level for float16 is not known. We now support fp16." },
{ "fp16_test_tiny_yolov2", "ImageScaler is not a registered function/op"},
{ "fp16_coreml_FNS-Candy", "ImageScaler is not a registered function/op" },
{ "fp16_coreml_LinearRegression_NYCTaxi", "Error in Node:featureVectorizer : No Op registered for FeatureVectorizer with domain_version of 1"},
{ "test_mnist", "Does not run in opset9, runs in other opsets. The model runs but I don't have a data set to debug output locally. Tensors of type ElementType not currently supported in the LoadTensorFromFile" },
{ "BERT_Squad", "Could not find an implementation for the nodeMeta bert / embeddings / one_hot:OneHot(9)" },
{ "mlperf_ssd_mobilenet_300", "Could not find file output_0.pb" },
{ "tf_resnet_v1_50", "result mismatch when Conv BN Fusion is applied" },
{ "tf_resnet_v1_101", "result mismatch when Conv BN Fusion is applied" },
{ "tf_resnet_v1_152", "result mismatch when Conv BN Fusion is applied" },
{ "cntk_simple_seg", "Bad onnx test output caused by wrong SAME_UPPER/SAME_LOWER for ConvTranspose" },
{ "coreml_Imputer-LogisticRegression_sklearn_load_breast_cancer", "Can't determine model file name" },
{ "mask_rcnn_keras", "Model should be edited to remove the extra outputs" },
{ "test_maxunpool_export_with_output_shape", "results mismatch"},
{ "test_min_int8", "Could not find an implementation for Min(13) node with name"},
{ "test_min_uint8", "Could not find an implementation for Min(13) node with name"},
{ "test_min_int16", "Could not find an implementation for Min(13) node with name"},
{ "test_min_uint16", "Could not find an implementation for Min(13) node with name"},
{ "test_max_int8", "Could not find an implementation for Max(13) node with name"},
{ "test_max_uint8", "Could not find an implementation for Max(13) node with name"},
{ "test_max_int16", "Could not find an implementation for Max(13) node with name"},
{ "test_max_uint16", "Could not find an implementation for Max(13) nodeMeta with name '"},
{ "test_mul_uint8", "Could not find an implementation for Mul(14) node with name" },
{ "test_bitshift_right_uint16", "Could not find an implementation for BitShift(11) nodeMeta with name ''"},
{ "test_bitshift_left_uint16", "Could not find an implementation for BitShift(11)"},
{ "test_pow_types_float32_uint64", "Could not find an implementation for Pow(15) node with name ''"},
{ "test_pow_types_float32_uint32", "Could not find an implementation for Pow(15) node with name ''"},
{ "test_resize_downsample_scales_cubic_align_corners", "Results mismatch"},
{ "test_resize_downsample_scales_linear_align_corners", "Results mismatch"},
{ "test_gru_batchwise", "batchwise operations not supported"},
{ "test_lstm_batchwise", "Batchwise recurrent operations(layout == 1) are not supported.If you need support create a github issue with justification."},
{ "test_simple_rnn_batchwise", "batchwise operations not supported"},
{ "test_batchnorm_example_training_mode", "opset14 version not implemented yet"},
{ "test_bernoulli", "random generator, results mismatch"},
{ "test_bernoulli_seed", "random generator, results mismatch"},
{ "test_bernoulli_double", "random generator, results mismatch"},
{ "test_bernoulli_expanded", "random generator, results mismatch"},
{ "test_bernoulli_seed_expanded", "random generator, results mismatch"},
{ "test_bernoulli_double_expanded", "random generator, results mismatch"},
// the expansion of Softplus uses Exp(1). ORT has a Softplus kernel, so testing the expansion is
// unnecessary and fails as ORT support for Exp started at opset 6 (as ORT didn't exist until opset 7).
{ "test_clip_default_int8_max_expanded", "Could not find an implementation for Less(13) nodeMeta with name ''" },
{ "test_softplus_expanded", "Could not find an implementation for Exp(1) node with name ''"},
{ "test_softplus_example_expanded", "Could not find an implementation for Exp(1) node with name ''"},
{ "test_div_uint8", "Could not find an implementation for Div(14) nodeMeta with name ''"},
{ "test_add_uint8", "Opset18 Could not find an implementation for Add(14) nodeMeta with name ''"},
{ "test_col2im_pads", "Results mismatch due to a typo in test data"},
{ "test_optional_has_element_empty_optional_input", "OptionalProto test metadata. Unable to load 'optional_input' optional element type of: Undefined type"},
{ "test_loop13_seq", "3rd input is an empty sequence. Ort API does not tolerate empty seq: Number of values should be at least 1" },
// Training tests
{ "BERT-Squad-int8", "training domain"},
{ "YOLOv3-12-int8", "training_domain"},
{ "test_training_dropout_default", "results mismatch"},
{ "test_training_dropout_default_mask", "Results mismatch"},
{ "test_training_dropout", "results mismatch"},
{ "test_training_dropout_mask", "results mismatch."},
{ "test_momentum", "ai.onnx.preview.training:Momentum(-1) is not a registered function/op"},
{ "test_momentum_multiple", "ai.onnx.preview.training:Momentum(-1) is not a registered function/op"},
{ "test_nesterov_momentum", "ai.onnx.preview.training:Momentum(-1) is not a registered function/op"},
{ "test_adam", "ai.onnx.preview.training:Adam(-1) is not a registered function/op"},
{ "test_adam_multiple", "ai.onnx.preview.training:Adam(-1) is not a registered function/op"},
{ "test_adagrad", "ai.onnx.preview.training:Adagrad(-1) is not a registered function/op"},
{ "test_adagrad_multiple", "ai.onnx.preview.training:Adagrad(-1) is not a registered function/op"},
{ "test_zfnet512", "skip it as ZFNET-512"},
};
// The following models fail on the no-contrib-ops Windows CI.
var disableContribOpsEnvVar = Environment.GetEnvironmentVariable("DisableContribOps");
var isContribOpsDisabled = disableContribOpsEnvVar == "ON";
if (isContribOpsDisabled)
{
skipModels["test_tiny_yolov2"] = "Fails when ContribOps is disabled";
skipModels["mask_rcnn_keras"] = "Pad is not a registered function/op";
}
// Skip traditional ML models
var disableMlOpsEnvVar = Environment.GetEnvironmentVariable("DisableMlOps");
var isMlOpsDisabled = disableMlOpsEnvVar == "ON";
if (isMlOpsDisabled)
{
foreach (var opsetDir in getOpsetDirectories(modelsDirInfo))
{
foreach (var modelDir in opsetDir.EnumerateDirectories())
{
var modelDirName = modelDir.Name;
if (modelDirName.StartsWith("scikit_") ||
modelDirName.StartsWith("libsvm_") ||
modelDirName.StartsWith("coreml_") ||
modelDirName.StartsWith("keras2coreml_") ||
modelDirName.StartsWith("XGBoost_"))
{
skipModels[modelDirName] = "Fails when ML ops are disabled";
}
} //model
} //opset
}
// These models fail on the x86 Windows CI.
if (!System.Environment.Is64BitProcess)
{
skipModels["test_vgg19"] = "Get preallocated buffer for initializer conv4_4_b_0 failed";
skipModels["GPT2_LM_HEAD"] = "System out of memory";
skipModels["GPT2"] = "System out of memory";
skipModels["test_GPT2"] = "System out of memory";
skipModels["tf_pnasnet_large"] = "Get preallocated buffer for initializer ConvBnFusion_BN_B_cell_5/comb_iter_1/left/bn_sep_7x7_1/beta:0_203 failed";
skipModels["tf_nasnet_large"] = "Get preallocated buffer for initializer ConvBnFusion_BN_B_cell_11/beginning_bn/beta:0_331 failed";
skipModels["ZFNet-512"] = "System out of memory";
skipModels["test_bvlc_reference_caffenet"] = "System out of memory";
skipModels["coreml_VGG16_ImageNet"] = "System out of memory";
skipModels["test_ssd"] = "System out of memory";
skipModels["roberta_sequence_classification"] = "System out of memory";
// models from model zoo
skipModels["VGG 19"] = "bad allocation";
skipModels["VGG 19-caffe2"] = "bad allocation";
skipModels["VGG 19-bn"] = "bad allocation";
skipModels["VGG 16"] = "bad allocation";
skipModels["VGG 16-bn"] = "bad allocation";
skipModels["VGG 16-fp32"] = "bad allocation";
}
return skipModels;
}
public static IEnumerable