Update model_tests.cc: avoid auto adding new tests from new opsets (#17084)

### Description
1. Update model_tests.cc: avoid auto adding new tests from new opsets. 
2. Simplify the "ConcatPathComponent" function. It does not need to be a
template.

### Motivation and Context
All our Windows/Linux CI build machines are preloaded with some test
data. In model_tests.cc, we auto add all of them to
onnxruntime_test_all.exe's unit tests. However, this causes problems when
we update the CI build machine images: newly added test data could suddenly
cause pipelines to fail.
Therefore, instead of auto discovering test data and adding all of them
to tests, this PR changes it to explicitly specify the opset names.

This change doesn't impact how Web CI pipeline runs its tests.

Going forward, the workflow would be like:
Step 1: update the onnx version in deps.txt
Step 2: Update js/scripts/prepare-onnx-node-tests.ts. Like #16943 .
Better to put step 1 and step 2 in the same PR.
Step 3: The onnxruntime-es team regenerates the VM images, tests them, and
deploys them.
Step 4: Enable the new opset test data for EPs. 


[AB#18340](https://aiinfra.visualstudio.com/6a833879-cd9b-44a4-a9de-adc2d818f13c/_workitems/edit/18340)
This commit is contained in:
Changming Sun 2023-08-10 11:11:26 -07:00 committed by GitHub
parent 12837ba5c7
commit 6dffd1a890
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
16 changed files with 154 additions and 121 deletions

View file

@ -140,8 +140,8 @@ static Status GetExternalDataInfo(const ONNX_NAMESPACE::TensorProto& tensor_prot
external_file_path = location;
} else {
if (tensor_proto_dir != nullptr) {
external_file_path = onnxruntime::ConcatPathComponent<ORTCHAR_T>(tensor_proto_dir,
external_data_info->GetRelPath());
external_file_path = onnxruntime::ConcatPathComponent(tensor_proto_dir,
external_data_info->GetRelPath());
} else {
external_file_path = external_data_info->GetRelPath();
}

View file

@ -178,9 +178,8 @@ inline wchar_t GetPathSep<wchar_t>() {
}
#endif
template <typename PATH_CHAR_TYPE>
std::basic_string<PATH_CHAR_TYPE> ConcatPathComponent(const std::basic_string<PATH_CHAR_TYPE>& left,
const std::basic_string<PATH_CHAR_TYPE>& right) {
inline std::basic_string<PATH_CHAR_TYPE> ConcatPathComponent(std::basic_string_view<PATH_CHAR_TYPE> left,
std::basic_string_view<PATH_CHAR_TYPE> right) {
std::basic_string<PATH_CHAR_TYPE> ret(left);
ret.append(1, GetPathSep<PATH_CHAR_TYPE>()).append(right);
return ret;

View file

@ -472,7 +472,7 @@ void OnnxTestCase::LoadTestData(size_t id, onnxruntime::test::HeapBuffer& b,
ORT_THROW("index out of bound");
}
PATH_STRING_TYPE test_data_pb = ConcatPathComponent<PATH_CHAR_TYPE>(
PATH_STRING_TYPE test_data_pb = ConcatPathComponent(
test_data_dirs_[id], (is_input ? ORT_TSTR("inputs.pb") : ORT_TSTR("outputs.pb")));
int test_data_pb_fd;
auto st = Env::Default().FileOpenRd(test_data_pb, test_data_pb_fd);
@ -512,7 +512,7 @@ void OnnxTestCase::LoadTestData(size_t id, onnxruntime::test::HeapBuffer& b,
const std::basic_string<PATH_CHAR_TYPE> file_prefix =
is_input ? ORT_TSTR("input_") : ORT_TSTR("output_");
if (!filename_str.compare(0, file_prefix.length(), file_prefix)) {
std::basic_string<PATH_CHAR_TYPE> p = ConcatPathComponent<PATH_CHAR_TYPE>(dir_path, filename_str);
std::basic_string<PATH_CHAR_TYPE> p = ConcatPathComponent(dir_path, filename_str);
test_data_pb_files.push_back(p);
}
return true;
@ -693,7 +693,7 @@ OnnxTestCase::OnnxTestCase(const std::string& test_case_name, _In_ std::unique_p
// parse config
std::basic_string<PATH_CHAR_TYPE> config_path =
ConcatPathComponent<PATH_CHAR_TYPE>(test_case_dir, ORT_TSTR("config.txt"));
ConcatPathComponent(test_case_dir, ORT_TSTR("config.txt"));
/* Note: protobuf-lite doesn't support reading protobuf files as text-format. Config.txt is exactly that.
That's the reason I've to parse the file in a different way to read the configs. Currently
this affects 2 tests - fp16_tiny_yolov2 and fp16_inception_v1. It's not clear why we've to use protobuf
@ -718,7 +718,7 @@ OnnxTestCase::OnnxTestCase(const std::string& test_case_name, _In_ std::unique_p
LoopDir(test_case_dir, [&test_case_dir, this](const PATH_CHAR_TYPE* filename, OrtFileType f_type) -> bool {
if (filename[0] == '.') return true;
if (f_type == OrtFileType::TYPE_DIR) {
std::basic_string<PATH_CHAR_TYPE> p = ConcatPathComponent<PATH_CHAR_TYPE>(test_case_dir, filename);
std::basic_string<PATH_CHAR_TYPE> p = ConcatPathComponent(test_case_dir, filename);
test_data_dirs_.push_back(p);
debuginfo_strings_.push_back(ToUTF8String(p));
}
@ -739,7 +739,7 @@ void LoadTests(const std::vector<std::basic_string<PATH_CHAR_TYPE>>& input_paths
LoopDir(node_data_root_path, [&](const PATH_CHAR_TYPE* filename, OrtFileType f_type) -> bool {
if (filename[0] == '.') return true;
if (f_type == OrtFileType::TYPE_DIR) {
std::basic_string<PATH_CHAR_TYPE> p = ConcatPathComponent<PATH_CHAR_TYPE>(node_data_root_path, filename);
std::basic_string<PATH_CHAR_TYPE> p = ConcatPathComponent(node_data_root_path, filename);
paths.push_back(p);
return true;
}
@ -766,7 +766,7 @@ void LoadTests(const std::vector<std::basic_string<PATH_CHAR_TYPE>>& input_paths
}
if (disabled_tests.find(test_case_name) != disabled_tests.end()) return true;
std::basic_string<PATH_CHAR_TYPE> p = ConcatPathComponent<PATH_CHAR_TYPE>(node_data_root_path, filename_str);
std::basic_string<PATH_CHAR_TYPE> p = ConcatPathComponent(node_data_root_path, filename_str);
std::unique_ptr<TestModelInfo> model_info;

View file

@ -79,7 +79,7 @@ static TestTolerances LoadTestTolerances(bool enable_cuda, bool enable_openvino,
if (useCustom) {
return TestTolerances(atol, rtol, absolute_overrides, relative_overrides);
}
std::ifstream overrides_ifstream(ConcatPathComponent<ORTCHAR_T>(
std::ifstream overrides_ifstream(ConcatPathComponent(
ORT_TSTR("testdata"), ORT_TSTR("onnx_backend_test_series_overrides.jsonc")));
if (!overrides_ifstream.good()) {
constexpr double absolute = 1e-3;

View file

@ -166,8 +166,8 @@ TEST(ComputeOptimizerTests, GatherND_E2E) {
// check result diff after the re-order
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(tmp_dir.Path(),
ORT_TSTR("computation_reduction_transformer_after.onnx"))};
PathString new_model_uri{ConcatPathComponent(tmp_dir.Path(),
ORT_TSTR("computation_reduction_transformer_after.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));
InputContainer input_container;
@ -283,8 +283,8 @@ TEST(ComputeOptimizerTests, GatherMatMul_ScalarSlicingOnBatchDim) {
// Check result diff after the re-order
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(tmp_dir.Path(),
ORT_TSTR("gather_matmul_scalar_batch_dim_optimized.onnx"))};
PathString new_model_uri{ConcatPathComponent(tmp_dir.Path(),
ORT_TSTR("gather_matmul_scalar_batch_dim_optimized.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));
int64_t batch_size = 8;
@ -389,8 +389,8 @@ TEST(ComputeOptimizerTests, GatherMatMul_SlicingOnBatchDim) {
// Check result diff after the re-order
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(tmp_dir.Path(),
ORT_TSTR("gather_matmul_batch_dim_optimized.onnx"))};
PathString new_model_uri{ConcatPathComponent(tmp_dir.Path(),
ORT_TSTR("gather_matmul_batch_dim_optimized.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));
int64_t batch_size = 8;
@ -487,8 +487,8 @@ TEST(ComputeOptimizerTests, GatherMatMul_ScalarSlicingOnLastDim) {
// Check result diff after the re-order
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(tmp_dir.Path(),
ORT_TSTR("gather_matmul_scalar_last_dim_optimized.onnx"))};
PathString new_model_uri{ConcatPathComponent(tmp_dir.Path(),
ORT_TSTR("gather_matmul_scalar_last_dim_optimized.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));
int64_t batch_size = 8;
@ -585,8 +585,8 @@ TEST(ComputeOptimizerTests, GatherMatMul_SlicingOnLastDim) {
// Check result diff after the re-order
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(tmp_dir.Path(),
ORT_TSTR("gather_matmul_last_dim_optimized.onnx"))};
PathString new_model_uri{ConcatPathComponent(tmp_dir.Path(),
ORT_TSTR("gather_matmul_last_dim_optimized.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));
int64_t batch_size = 8;
@ -683,7 +683,7 @@ TEST(ComputeOptimizerTests, GatherMatMul_ScalarSlicingOnSecondLastDim) {
// Check result diff after the re-order
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(
PathString new_model_uri{ConcatPathComponent(
tmp_dir.Path(),
ORT_TSTR("gather_matmul_scalar_second_last_dim_optimized.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));
@ -782,8 +782,8 @@ TEST(ComputeOptimizerTests, GatherMatMul_SlicingOnSecondLastDim) {
// Check result diff after the re-order
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(tmp_dir.Path(),
ORT_TSTR("gather_matmul_second_last_dim_optimized.onnx"))};
PathString new_model_uri{ConcatPathComponent(tmp_dir.Path(),
ORT_TSTR("gather_matmul_second_last_dim_optimized.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));
int64_t batch_size = 8;
@ -876,8 +876,8 @@ TEST(ComputeOptimizerTests, GatherReshape_ScalarSlicingOnBatchDim) {
// Check result diff after the re-order
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(tmp_dir.Path(),
ORT_TSTR("gather_reshape_scalar_batch_dim_optimized.onnx"))};
PathString new_model_uri{ConcatPathComponent(tmp_dir.Path(),
ORT_TSTR("gather_reshape_scalar_batch_dim_optimized.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));
int64_t batch_size = 8;
@ -970,8 +970,8 @@ TEST(ComputeOptimizerTests, GatherReshape_SlicingOnBatchDim) {
// Check result diff after the re-order
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(tmp_dir.Path(),
ORT_TSTR("gather_reshape_batch_dim_optimized.onnx"))};
PathString new_model_uri{ConcatPathComponent(tmp_dir.Path(),
ORT_TSTR("gather_reshape_batch_dim_optimized.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));
int64_t batch_size = 8;
@ -1063,8 +1063,8 @@ TEST(ComputeOptimizerTests, GatherReshape_ScalarSlicingOnSeqlenDim) {
// Check result diff after the re-order
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(tmp_dir.Path(),
ORT_TSTR("gather_reshape_scalar_seqlen_dim_optimized.onnx"))};
PathString new_model_uri{ConcatPathComponent(tmp_dir.Path(),
ORT_TSTR("gather_reshape_scalar_seqlen_dim_optimized.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));
int64_t batch_size = 8;
@ -1157,8 +1157,8 @@ TEST(ComputeOptimizerTests, GatherReshape_SlicingOnSeqlenDim) {
// Check result diff after the re-order
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(tmp_dir.Path(),
ORT_TSTR("gather_reshape_seqlen_dim_optimized.onnx"))};
PathString new_model_uri{ConcatPathComponent(tmp_dir.Path(),
ORT_TSTR("gather_reshape_seqlen_dim_optimized.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));
int64_t batch_size = 8;
@ -1251,8 +1251,8 @@ TEST(ComputeOptimizerTests, GatherReshape_SlicingOnSeqlenDim2) {
// Check result diff after the re-order
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(tmp_dir.Path(),
ORT_TSTR("gather_reshape_seqlen_dim2_optimized.onnx"))};
PathString new_model_uri{ConcatPathComponent(tmp_dir.Path(),
ORT_TSTR("gather_reshape_seqlen_dim2_optimized.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));
int64_t batch_size = 8;
@ -1395,8 +1395,8 @@ TEST(ComputeOptimizerTests, GatherRobertaE2E) {
// Check the result diff after the re-order
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(tmp_dir.Path(),
ORT_TSTR("gather_roberta_e2e_optimized.onnx"))};
PathString new_model_uri{ConcatPathComponent(tmp_dir.Path(),
ORT_TSTR("gather_roberta_e2e_optimized.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));
int64_t batch_size = 8;
@ -2717,8 +2717,8 @@ TEST(ComputeOptimizerTests, ReshapeMlmBertE2E) {
// Check result diff after the re-order
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(tmp_dir.Path(),
ORT_TSTR("reshape_bert_e2e_optimized.onnx"))};
PathString new_model_uri{ConcatPathComponent(tmp_dir.Path(),
ORT_TSTR("reshape_bert_e2e_optimized.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));
int64_t batch_size = 8;

View file

@ -108,24 +108,6 @@ TEST_P(ModelTest, Run) {
}
std::unique_ptr<OnnxModelInfo> model_info = std::make_unique<OnnxModelInfo>(model_path.c_str());
if ((model_info->GetONNXOpSetVersion() < 14 || model_info->GetONNXOpSetVersion() > 17) &&
provider_name == "tensorrt") {
// TensorRT can run most of the model tests, but only part of
// them is enabled here to save CI build time.
// Besides saving CI build time, TRT isnt able to support full ONNX ops spec and therefore some testcases will
// fail. That's one of reasons we skip those testcases and only test latest ONNX opsets.
SkipTest(" tensorrt: only enable opset 14 to 17 of onnx tests");
return;
}
if ((model_info->GetONNXOpSetVersion() == 10 || model_info->GetONNXOpSetVersion() >= 18) && provider_name == "dnnl") {
// DNNL can run most of the model tests, but only part of
// them is enabled here to save CI build time.
std::ostringstream oss;
oss << " dnnl doesn't support opset " << model_info->GetONNXOpSetVersion();
SkipTest(oss.str());
return;
}
if (model_info->HasDomain(ONNX_NAMESPACE::AI_ONNX_TRAINING_DOMAIN) ||
model_info->HasDomain(ONNX_NAMESPACE::AI_ONNX_PREVIEW_TRAINING_DOMAIN)) {
@ -875,44 +857,89 @@ TEST_P(ModelTest, Run) {
}
}
// TODO: all providers
::std::vector<::std::basic_string<ORTCHAR_T>> GetParameterStrings() {
std::vector<const ORTCHAR_T*> provider_names;
provider_names.push_back(ORT_TSTR("cpu"));
using ORT_STRING_VIEW = std::basic_string_view<ORTCHAR_T>;
static ORT_STRING_VIEW opset7 = ORT_TSTR("opset7");
static ORT_STRING_VIEW opset8 = ORT_TSTR("opset8");
static ORT_STRING_VIEW opset9 = ORT_TSTR("opset9");
static ORT_STRING_VIEW opset10 = ORT_TSTR("opset10");
static ORT_STRING_VIEW opset11 = ORT_TSTR("opset11");
static ORT_STRING_VIEW opset12 = ORT_TSTR("opset12");
static ORT_STRING_VIEW opset13 = ORT_TSTR("opset13");
static ORT_STRING_VIEW opset14 = ORT_TSTR("opset14");
static ORT_STRING_VIEW opset15 = ORT_TSTR("opset15");
static ORT_STRING_VIEW opset16 = ORT_TSTR("opset16");
static ORT_STRING_VIEW opset17 = ORT_TSTR("opset17");
static ORT_STRING_VIEW opset18 = ORT_TSTR("opset18");
// TODO: enable opset19 tests
// static ORT_STRING_VIEW opset19 = ORT_TSTR("opset19");
static ORT_STRING_VIEW provider_name_cpu = ORT_TSTR("cpu");
static ORT_STRING_VIEW provider_name_tensorrt = ORT_TSTR("tensorrt");
#ifdef USE_MIGRAPHX
static ORT_STRING_VIEW provider_name_migraphx = ORT_TSTR("migraphx");
#endif
static ORT_STRING_VIEW provider_name_openvino = ORT_TSTR("openvino");
static ORT_STRING_VIEW provider_name_cuda = ORT_TSTR("cuda");
#ifdef USE_ROCM
static ORT_STRING_VIEW provider_name_rocm = ORT_TSTR("rocm");
#endif
static ORT_STRING_VIEW provider_name_dnnl = ORT_TSTR("dnnl");
// For any non-Android system, NNAPI will only be used for ort model converter
#if defined(USE_NNAPI) && defined(__ANDROID__)
static ORT_STRING_VIEW provider_name_nnapi = ORT_TSTR("nnapi");
#endif
#ifdef USE_RKNPU
static ORT_STRING_VIEW provider_name_rknpu = ORT_TSTR("rknpu");
#endif
#ifdef USE_ACL
static ORT_STRING_VIEW provider_name_acl = ORT_TSTR("acl");
#endif
#ifdef USE_ARMNN
static ORT_STRING_VIEW provider_name_armnn = ORT_TSTR("armnn");
#endif
static ORT_STRING_VIEW provider_name_dml = ORT_TSTR("dml");
::std::vector<::std::basic_string<ORTCHAR_T>> GetParameterStrings() {
// Map key is provider name(CPU, CUDA, etc). Value is the ONNX node tests' opsets to run.
std::map<ORT_STRING_VIEW, std::vector<ORT_STRING_VIEW>> provider_names;
// The default CPU provider always supports all opsets, and must maintain backwards compatibility.
provider_names[provider_name_cpu] = {opset7, opset8, opset9, opset10, opset11, opset12, opset13, opset14, opset15, opset16, opset17, opset18};
// The other EPs can choose which opsets to test.
// If an EP doesn't have any CI build pipeline, then there is no need to specify any opset.
#ifdef USE_TENSORRT
provider_names.push_back(ORT_TSTR("tensorrt"));
// tensorrt: only enable opset 14 to 17 of onnx tests
provider_names[provider_name_tensorrt] = {opset14, opset15, opset16, opset17};
#endif
#ifdef USE_MIGRAPHX
provider_names.push_back(ORT_TSTR("migraphx"));
provider_names[provider_name_migraphx] = {opset7, opset8, opset9, opset10, opset11, opset12, opset13, opset14, opset15, opset16, opset17, opset18};
#endif
#ifdef USE_OPENVINO
provider_names.push_back(ORT_TSTR("openvino"));
provider_names[provider_name_openvino] = {};
#endif
#ifdef USE_CUDA
provider_names.push_back(ORT_TSTR("cuda"));
provider_names[provider_name_cuda] = {opset7, opset8, opset9, opset10, opset11, opset12, opset13, opset14, opset15, opset16, opset17, opset18};
#endif
#ifdef USE_ROCM
provider_names.push_back(ORT_TSTR("rocm"));
provider_names[provider_name_rocm] = {opset7, opset8, opset9, opset10, opset11, opset12, opset13, opset14, opset15, opset16, opset17, opset18};
#endif
#ifdef USE_DNNL
provider_names.push_back(ORT_TSTR("dnnl"));
provider_names[provider_name_dnnl] = {opset10};
#endif
// For any non-Android system, NNAPI will only be used for ort model converter
#if defined(USE_NNAPI) && defined(__ANDROID__)
provider_names.push_back(ORT_TSTR("nnapi"));
provider_names[provider_name_nnapi] = {opset7, opset8, opset9, opset10, opset11, opset12, opset13, opset14, opset15, opset16, opset17, opset18};
#endif
#ifdef USE_RKNPU
provider_names.push_back(ORT_TSTR("rknpu"));
provider_names[provider_name_rknpu] = {};
#endif
#ifdef USE_ACL
provider_names.push_back(ORT_TSTR("acl"));
provider_names[provider_name_acl] = {};
#endif
#ifdef USE_ARMNN
provider_names.push_back(ORT_TSTR("armnn"));
provider_names[provider_name_armnn] = {};
#endif
#ifdef USE_DML
provider_names.push_back(ORT_TSTR("dml"));
provider_names[provider_name_dml] = {opset7, opset8, opset9, opset10, opset11, opset12, opset13, opset14, opset15, opset16, opset17, opset18};
#endif
std::vector<std::basic_string<ORTCHAR_T>> v;
// Permanently exclude following tests because ORT support only opset starting from 7,
@ -1088,22 +1115,47 @@ TEST_P(ModelTest, Run) {
ORT_TSTR("conv_with_strides_padding"),
ORT_TSTR("size") // INVALID_ARGUMENT: Cannot find binding of given name: x
};
for (const ORTCHAR_T* provider_name : provider_names) {
std::vector<std::basic_string<ORTCHAR_T>> paths;
for (std::pair<ORT_STRING_VIEW, std::vector<ORT_STRING_VIEW>> kvp : provider_names) {
// Setup ONNX node tests. The test data is preloaded on our CI build machines.
#if !defined(_WIN32)
ORT_STRING_VIEW node_test_root_path = ORT_TSTR("/data/onnx");
#else
ORT_STRING_VIEW node_test_root_path = ORT_TSTR("c:\\local\\data\\onnx");
#endif
for (auto p : kvp.second) {
paths.push_back(ConcatPathComponent(node_test_root_path, p));
}
// Same as the above, except this one is for large models
#if defined(NDEBUG) || defined(RUN_MODELTEST_IN_DEBUG_MODE)
#ifdef _WIN32
ORT_STRING_VIEW model_test_root_path = ORT_TSTR("..\\models");
#else
ORT_STRING_VIEW model_test_root_path = ORT_TSTR("../models");
#endif
for (auto p : kvp.second) {
paths.push_back(ConcatPathComponent(model_test_root_path, p));
}
#endif
ORT_STRING_VIEW provider_name = kvp.first;
std::unordered_set<std::basic_string<ORTCHAR_T>> all_disabled_tests(std::begin(immutable_broken_tests),
std::end(immutable_broken_tests));
if (CompareCString(provider_name, ORT_TSTR("cuda")) == 0) {
if (provider_name == provider_name_cuda) {
all_disabled_tests.insert(std::begin(cuda_flaky_tests), std::end(cuda_flaky_tests));
} else if (CompareCString(provider_name, ORT_TSTR("dml")) == 0) {
} else if (provider_name == provider_name_dml) {
all_disabled_tests.insert(std::begin(dml_disabled_tests), std::end(dml_disabled_tests));
} else if (CompareCString(provider_name, ORT_TSTR("dnnl")) == 0) {
} else if (provider_name == provider_name_dnnl) {
// these models run but disabled tests to keep memory utilization low
// This will be removed after LRU implementation
all_disabled_tests.insert(std::begin(dnnl_disabled_tests), std::end(dnnl_disabled_tests));
} else if (CompareCString(provider_name, ORT_TSTR("tensorrt")) == 0) {
} else if (provider_name == provider_name_tensorrt) {
// these models run but disabled tests to keep memory utilization low
// This will be removed after LRU implementation
all_disabled_tests.insert(std::begin(tensorrt_disabled_tests), std::end(tensorrt_disabled_tests));
} else if (CompareCString(provider_name, ORT_TSTR("openvino")) == 0) {
} else if (provider_name == provider_name_openvino) {
// these models run but disabled tests to keep memory utilization low
// This will be removed after LRU implementation
all_disabled_tests.insert(std::begin(openvino_disabled_tests), std::end(openvino_disabled_tests));
@ -1135,23 +1187,6 @@ TEST_P(ModelTest, Run) {
all_disabled_tests.insert(ORT_TSTR("fp16_shufflenet"));
all_disabled_tests.insert(ORT_TSTR("fp16_inception_v1"));
all_disabled_tests.insert(ORT_TSTR("fp16_tiny_yolov2"));
std::vector<std::basic_string<ORTCHAR_T>> paths;
#if defined(NDEBUG) || defined(RUN_MODELTEST_IN_DEBUG_MODE)
#ifdef _WIN32
paths.push_back(ORT_TSTR("..\\models"));
#else
paths.push_back(ORT_TSTR("../models"));
#endif
#endif
// TENSORRT/OpenVino has too many test failures in the single node tests
#if !defined(USE_OPENVINO)
#if !defined(_WIN32)
paths.push_back(ORT_TSTR("/data/onnx"));
#else
paths.push_back(ORT_TSTR("c:\\local\\data\\onnx"));
#endif
#endif
while (!paths.empty()) {
std::basic_string<ORTCHAR_T> node_data_root_path = paths.back();
@ -1162,7 +1197,7 @@ TEST_P(ModelTest, Run) {
if (filename[0] == ORT_TSTR('.'))
return true;
if (f_type == OrtFileType::TYPE_DIR) {
std::basic_string<PATH_CHAR_TYPE> p = ConcatPathComponent<PATH_CHAR_TYPE>(node_data_root_path, filename);
std::basic_string<PATH_CHAR_TYPE> p = ConcatPathComponent(node_data_root_path, filename);
paths.push_back(p);
return true;
}
@ -1185,10 +1220,9 @@ TEST_P(ModelTest, Run) {
return true;
}
#endif
std::basic_string<PATH_CHAR_TYPE> p = ConcatPathComponent<PATH_CHAR_TYPE>(node_data_root_path, filename_str);
std::basic_string<PATH_CHAR_TYPE> r = provider_name;
r.append(ORT_TSTR("_")).append(p);
v.emplace_back(r);
std::basic_ostringstream<PATH_CHAR_TYPE> oss;
oss << provider_name << ORT_TSTR("_") << ConcatPathComponent(node_data_root_path, filename_str);
v.emplace_back(oss.str());
return true;
});
}

View file

@ -32,15 +32,15 @@ constexpr const PathChar* k_tensors_data_file_name = ORT_TSTR("tensors.bin");
constexpr const PathChar* k_properties_file_name = ORT_TSTR("properties.pbseq");
PathString GetCheckpointTensorsFilePath(const PathString& checkpoint_directory) {
return ConcatPathComponent<PathChar>(checkpoint_directory, k_tensors_file_name);
return ConcatPathComponent(checkpoint_directory, k_tensors_file_name);
}
PathString GetCheckpointTensorsDataFilePath(const PathString& checkpoint_directory) {
return ConcatPathComponent<PathChar>(checkpoint_directory, k_tensors_data_file_name);
return ConcatPathComponent(checkpoint_directory, k_tensors_data_file_name);
}
PathString GetCheckpointPropertiesFilePath(const PathString& checkpoint_directory) {
return ConcatPathComponent<PathChar>(checkpoint_directory, k_properties_file_name);
return ConcatPathComponent(checkpoint_directory, k_properties_file_name);
}
Status SaveRuntimeTensor(

View file

@ -24,7 +24,7 @@ static std::vector<PathString> GetAllDataFiles(const PathString& dir_path) {
!HasExtensionOf(filename_str, ORT_TSTR("pb"))) {
return true;
}
data_files.push_back(ConcatPathComponent<PathChar>(dir_path, filename_str));
data_files.push_back(ConcatPathComponent(dir_path, filename_str));
return true;
});

View file

@ -106,10 +106,10 @@ TEST(CheckpointingTest, SaveAndLoad) {
TemporaryDirectory tmp_dir{ORT_TSTR("checkpointing_test_dir")};
PathString checkpoint_path{
ConcatPathComponent<PathChar>(tmp_dir.Path(), ORT_TSTR("test_checkpoint"))};
ConcatPathComponent(tmp_dir.Path(), ORT_TSTR("test_checkpoint"))};
// this path doesn't need to exist, we just consider its parent directory
PathString model_path{
ConcatPathComponent<PathChar>(tmp_dir.Path(), ORT_TSTR("test_model.onnx"))};
ConcatPathComponent(tmp_dir.Path(), ORT_TSTR("test_model.onnx"))};
DataTransferManager data_transfer{};
ASSERT_STATUS_OK(data_transfer.RegisterDataTransfer(std::make_unique<CPUDataTransfer>()));

View file

@ -113,7 +113,7 @@ TEST(TrainingDataLoaderTest, DataLoader_OneSingleFile) {
constexpr size_t max_num_files_preload = 3;
const MapStringToString input_name_map = {{"a", "a"}, {"b", "b"}, {"c", "c"}};
TemporaryDirectory tmp_dir{ORT_TSTR("training_data_loader_test_dir")};
const PathString& train_data_dir = ConcatPathComponent<PathChar>(tmp_dir.Path(), ORT_TSTR("single_file"));
const PathString& train_data_dir = ConcatPathComponent(tmp_dir.Path(), ORT_TSTR("single_file"));
ASSERT_STATUS_OK(CreateInputDataFiles(train_data_dir, 1, {"a", "b", "c"}));
DataLoader data_loader(input_name_map,
train_data_dir,
@ -131,7 +131,7 @@ TEST(TrainingDataLoaderTest, DataLoader_OneSingleFileFailParsing) {
constexpr size_t max_num_files_preload = 3;
const MapStringToString input_name_map = {{"a_invalid", "a"}, {"b", "b"}, {"c", "c"}};
TemporaryDirectory tmp_dir{ORT_TSTR("training_data_loader_test_dir")};
const PathString& train_data_dir = ConcatPathComponent<PathChar>(tmp_dir.Path(), ORT_TSTR("single_file"));
const PathString& train_data_dir = ConcatPathComponent(tmp_dir.Path(), ORT_TSTR("single_file"));
ASSERT_STATUS_OK(CreateInputDataFiles(train_data_dir, 1, {"a", "b", "c"}));
DataLoader data_loader(input_name_map,
train_data_dir,
@ -147,7 +147,7 @@ void TestDataLoaderWithMultipleFiles(
const size_t* const start_data_set_index = nullptr) {
const MapStringToString input_name_map = {{"a", "a"}, {"b", "b"}, {"c", "c"}};
TemporaryDirectory tmp_dir{ORT_TSTR("training_data_loader_test_dir")};
const PathString& train_data_dir = ConcatPathComponent<PathChar>(tmp_dir.Path(), ORT_TSTR("multiple_files"));
const PathString& train_data_dir = ConcatPathComponent(tmp_dir.Path(), ORT_TSTR("multiple_files"));
ASSERT_STATUS_OK(CreateInputDataFiles(
train_data_dir, num_input_files, {"a", "b", "c"}));

View file

@ -20,9 +20,9 @@ namespace training {
namespace test {
const PathString k_original_model_path =
ConcatPathComponent<PathChar>(ORT_TSTR("testdata"), ORT_TSTR("test_training_model.onnx"));
ConcatPathComponent(ORT_TSTR("testdata"), ORT_TSTR("test_training_model.onnx"));
const PathString k_backward_model_path =
ConcatPathComponent<PathChar>(ORT_TSTR("testdata"), ORT_TSTR("temp_backward_model.onnx"));
ConcatPathComponent(ORT_TSTR("testdata"), ORT_TSTR("temp_backward_model.onnx"));
const PathString k_output_directory = ORT_TSTR("training_runner_test_output");

View file

@ -407,7 +407,7 @@ TEST(ComputeOptimizerTests, InsertGatherBeforeSceLoss_MlmBertE2E) {
}
onnxruntime::test::TemporaryDirectory tmp_dir{ORT_TSTR("compute_optimizer_test_tmp_dir")};
PathString new_model_uri{ConcatPathComponent<PathChar>(
PathString new_model_uri{ConcatPathComponent(
tmp_dir.Path(),
ORT_TSTR("insert_gather_before_sceloss_bert_e2e_optimized.onnx"))};
ASSERT_STATUS_OK(Model::Save(*model, new_model_uri));

View file

@ -94,7 +94,7 @@ TEST(CheckpointApiTest, SaveOnnxModelAsCheckpoint_ThenLoad_CPU) {
// Call Save APIs.
PathString checkpoint_path{
ConcatPathComponent<PathChar>(tmp_dir.Path(), ORT_TSTR("e2e_ckpt_save_cpu"))};
ConcatPathComponent(tmp_dir.Path(), ORT_TSTR("e2e_ckpt_save_cpu"))};
ASSERT_STATUS_OK(SaveCheckpoint(trainable_param_values, non_trainable_param_values, checkpoint_path));
/// Phase 3 - Run load checkpoint APIs.
@ -192,7 +192,7 @@ TEST(CheckpointApiTest, SaveOnnxModelAsCheckpointThenLoadFromBufferCPU) {
// Call Save APIs.
PathString checkpoint_path{
ConcatPathComponent<PathChar>(tmp_dir.Path(), ORT_TSTR("e2e_ckpt_save_cpu"))};
ConcatPathComponent(tmp_dir.Path(), ORT_TSTR("e2e_ckpt_save_cpu"))};
ASSERT_STATUS_OK(SaveCheckpoint(trainable_param_values, non_trainable_param_values, checkpoint_path));
/// Phase 3 - Run load checkpoint APIs.
@ -341,7 +341,7 @@ TEST(CheckpointApiTest, SaveOptimizerStateAsCheckpoint_ThenLoad_CUDA) {
// Call Save APIs.
PathString checkpoint_path{
ConcatPathComponent<PathChar>(tmp_dir.Path(), ORT_TSTR("e2e_ckpt_save_cpu"))};
ConcatPathComponent(tmp_dir.Path(), ORT_TSTR("e2e_ckpt_save_cpu"))};
ASSERT_STATUS_OK(SaveCheckpoint(state, checkpoint_path, true));
/// Phase 2 - Run load checkpoint APIs.
@ -419,7 +419,7 @@ TEST(CheckpointApiTest, SaveCustomPropertyAsCheckpoint_ThenLoad_CPU) {
// Call Save APIs.
PathString checkpoint_path{
ConcatPathComponent<PathChar>(tmp_dir.Path(), ORT_TSTR("e2e_ckpt_save_cpu"))};
ConcatPathComponent(tmp_dir.Path(), ORT_TSTR("e2e_ckpt_save_cpu"))};
ASSERT_STATUS_OK(SaveCheckpoint(checkpoint_state, checkpoint_path, false));
// Call Load APIs

View file

@ -86,7 +86,7 @@ void TestModuleExport(const std::vector<std::shared_ptr<IExecutionProvider>>& pr
}
onnxruntime::test::TemporaryDirectory tmp_dir{test_dir};
PathString inference_model_path{
ConcatPathComponent<PathChar>(tmp_dir.Path(), ORT_TSTR("inference_model.onnx"))};
ConcatPathComponent(tmp_dir.Path(), ORT_TSTR("inference_model.onnx"))};
std::vector<std::string> graph_output_names({"output-0"});
ASSERT_STATUS_OK(model->ExportModelForInferencing(ToUTF8String(inference_model_path), graph_output_names));

View file

@ -30,7 +30,7 @@ TEST(TrainingCApiTest, SaveCheckpoint) {
}
onnxruntime::test::TemporaryDirectory tmp_dir{test_dir};
PathString checkpoint_path{
ConcatPathComponent<PathChar>(tmp_dir.Path(), ORT_TSTR("new_checkpoint.ckpt"))};
ConcatPathComponent(tmp_dir.Path(), ORT_TSTR("new_checkpoint.ckpt"))};
Ort::CheckpointState::SaveCheckpoint(checkpoint_state, checkpoint_path);
@ -59,7 +59,7 @@ TEST(TrainingCApiTest, LoadCheckpointFromBuffer) {
}
onnxruntime::test::TemporaryDirectory tmp_dir{test_dir};
PathString new_checkpoint_path{
ConcatPathComponent<PathChar>(tmp_dir.Path(), ORT_TSTR("new_checkpoint.ckpt"))};
ConcatPathComponent(tmp_dir.Path(), ORT_TSTR("new_checkpoint.ckpt"))};
Ort::CheckpointState::SaveCheckpoint(checkpoint_state, new_checkpoint_path);

View file

@ -321,7 +321,7 @@ int RunTraining(const TestRunnerParameters& params) {
// Save trained weights
std::ostringstream oss;
oss << "ckpt_" << params.model_name << std::to_string(batch_idx);
PathString ckpt_file = ConcatPathComponent<PathChar>(params.output_dir, ToPathString(oss.str()));
PathString ckpt_file = ConcatPathComponent(params.output_dir, ToPathString(oss.str()));
checkpoint_state.AddProperty("epoch", epoch);
checkpoint_state.AddProperty("loss", *loss);
checkpoint_state.AddProperty("framework", "onnxruntime");
@ -336,7 +336,7 @@ int RunTraining(const TestRunnerParameters& params) {
// Save trained weights
std::ostringstream oss;
oss << "ckpt_" << params.model_name;
PathString ckpt_file = ConcatPathComponent<PathChar>(params.output_dir, ToPathString(oss.str()));
PathString ckpt_file = ConcatPathComponent(params.output_dir, ToPathString(oss.str()));
Ort::CheckpointState::SaveCheckpoint(checkpoint_state, ckpt_file);
auto end = std::chrono::high_resolution_clock::now();