onnxruntime/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests.Capi/InferenceTestCapi.cpp

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
//
#include "CppUnitTest.h"
#include <assert.h>
#include <onnxruntime_c_api.h>
// Converts a narrow string to a newly allocated wide string.
// The caller owns the returned buffer and must release it with delete[].
wchar_t* GetWideString(const char* c) {
  const size_t cSize = strlen(c) + 1;
  wchar_t* wc = new wchar_t[cSize];
  mbstowcs(wc, c, cSize);
  return wc;
}
#define ORT_ABORT_ON_ERROR(expr)                                             \
  {                                                                          \
    OrtStatus* onnx_status = (expr);                                         \
    if (onnx_status != NULL) {                                               \
      const char* msg = OrtGetErrorMessage(onnx_status);                     \
      fprintf(stderr, "%s\n", msg);                                          \
      /* Convert the message before releasing the status that owns it. */    \
      wchar_t* wmsg = GetWideString(msg);                                    \
      OrtReleaseStatus(onnx_status);                                         \
      /* Assert::Fail throws, so wmsg is intentionally leaked on failure. */ \
      Assert::Fail(wmsg);                                                    \
    }                                                                        \
  }
using namespace Microsoft::VisualStudio::CppUnitTestFramework;
namespace UnitTest1
{
  TEST_CLASS(UnitTest1)
  {
  public:
    // Runs a single inference over synthetic input data; returns 0 on success.
    int run_inference(OrtSession* session) {
      const size_t input_height = 224;
      const size_t input_width = 224;
      const size_t model_input_ele_count = input_height * input_width * 3;
      float* model_input = (float*)malloc(sizeof(float) * model_input_ele_count);
      assert(model_input != NULL);
      // Initialize the input to values between 0.0 and 1.0.
      for (unsigned int i = 0; i < model_input_ele_count; i++)
        model_input[i] = (float)i / (float)(model_input_ele_count + 1);
      OrtAllocatorInfo* allocator_info;
      ORT_ABORT_ON_ERROR(OrtCreateCpuAllocatorInfo(OrtArenaAllocator, OrtMemTypeDefault, &allocator_info));
      // SqueezeNet expects NCHW input: batch 1, 3 channels, 224x224.
      const size_t input_shape[] = { 1, 3, input_height, input_width };
      const size_t input_shape_len = sizeof(input_shape) / sizeof(input_shape[0]);
      const size_t model_input_len = model_input_ele_count * sizeof(float);
      OrtValue* input_tensor = NULL;
      ORT_ABORT_ON_ERROR(OrtCreateTensorWithDataAsOrtValue(allocator_info, model_input, model_input_len, input_shape, input_shape_len, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, &input_tensor));
      assert(input_tensor != NULL);
      assert(OrtIsTensor(input_tensor));
      OrtReleaseAllocatorInfo(allocator_info);
      const char* input_names[] = { "data_0" };
      const char* output_names[] = { "softmaxout_1" };
      OrtValue* output_tensor = NULL;
      ORT_ABORT_ON_ERROR(OrtRun(session, NULL, input_names, (const OrtValue* const*)&input_tensor, 1, output_names, 1, &output_tensor));
      assert(output_tensor != NULL);
      assert(OrtIsTensor(output_tensor));
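      // A minimal sketch (left commented out) of how the raw output could be
      // inspected, assuming the SqueezeNet "softmaxout_1" output holds 1000
      // class probabilities and that OrtGetTensorMutableData is available in
      // this build of the C API:
      //
      //   float* output_data = NULL;
      //   ORT_ABORT_ON_ERROR(OrtGetTensorMutableData(output_tensor, (void**)&output_data));
      //   float sum = 0.0f;
      //   for (int i = 0; i < 1000; i++) sum += output_data[i];
      //   assert(sum > 0.99f && sum < 1.01f);  // softmax output should sum to ~1.0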
      OrtReleaseValue(output_tensor);
      OrtReleaseValue(input_tensor);
      free(model_input);
      return 0;
    }
    // Creates an environment and a single-threaded session for squeezenet.onnx,
    // then runs one inference and returns its result code.
    int test()
    {
      const wchar_t* model_path = L"squeezenet.onnx";
      OrtEnv* env;
      ORT_ABORT_ON_ERROR(OrtCreateEnv(ORT_LOGGING_LEVEL_WARNING, "test", &env));
      OrtSessionOptions* session_option = OrtCreateSessionOptions();
      OrtSession* session;
      OrtSetSessionThreadPoolSize(session_option, 1);
      ORT_ABORT_ON_ERROR(OrtCreateSession(env, model_path, session_option, &session));
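      // A hedged sketch (left commented out) of how the session's input count
      // could be verified before running, assuming OrtSessionGetInputCount is
      // available in this build of the C API:
      //
      //   size_t input_count = 0;
      //   ORT_ABORT_ON_ERROR(OrtSessionGetInputCount(session, &input_count));
      //   assert(input_count == 1);  // squeezenet takes the single input "data_0"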
      OrtReleaseSessionOptions(session_option);
      int result = run_inference(session);
      OrtReleaseSession(session);
      OrtReleaseEnv(env);
      return result;
    }
    TEST_METHOD(TestMethod1)
    {
      int res = test();
      // Assert::AreEqual takes (expected, actual).
      Assert::AreEqual(0, res);
    }
  };
}