Updating C_API end-to-end test and user samples (#564)

* Updating user sample and C_API unit test

* remove debugging info

* remove precompiled headers

* header file location changed in master...updating
This commit is contained in:
jignparm 2019-03-07 00:28:15 -08:00 committed by Pranav Sharma
parent b68079fe5d
commit 4635bcc624
5 changed files with 270 additions and 157 deletions

View file

@ -0,0 +1,148 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
//
#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include <onnxruntime_c_api.h>

#include <vector>
//*****************************************************************************
// Helper macro: check an OrtStatus* returned by a C-API call; on failure print
// the error message, release the status object, and exit the process.
// Wrapped in do { } while (0) so the macro expands to a single statement and
// is safe inside unbraced if/else bodies (the bare-block form was not).
#define CHECK_STATUS(expr)                                 \
  do {                                                     \
    OrtStatus* onnx_status = (expr);                       \
    if (onnx_status != NULL) {                             \
      const char* msg = OrtGetErrorMessage(onnx_status);   \
      fprintf(stderr, "%s\n", msg);                        \
      OrtReleaseStatus(onnx_status);                       \
      exit(1);                                             \
    }                                                      \
  } while (0)
// End-to-end sample: load squeezenet, inspect its inputs, run one inference,
// and print the first five class scores.
int main(int argc, char* argv[]) {
  //*************************************************************************
  // Initialize environment... one environment per process.
  // The environment maintains thread pools and other state info.
  OrtEnv* env;
  CHECK_STATUS(OrtCreateEnv(ORT_LOGGING_LEVEL_WARNING, "test", &env));

  // Initialize session options if needed.
  OrtSessionOptions* session_option = OrtCreateSessionOptions();
  OrtSetSessionThreadPoolSize(session_option, 1);
  // NOTE(review): session_option is never released in this sample; confirm
  // whether the API version in use provides a matching release function.

  //*************************************************************************
  // Create session and load model into memory,
  // using squeezenet version 1.3.
  // URL = https://github.com/onnx/models/tree/master/squeezenet
  OrtSession* session;
  const wchar_t* model_path = L"squeezenet.onnx";
  CHECK_STATUS(OrtCreateSession(env, model_path, session_option, &session));

  //*************************************************************************
  // Print model input layer (node names, types, shape etc.)
  size_t num_input_nodes;
  OrtAllocator* allocator;
  OrtCreateDefaultAllocator(&allocator);

  // Print number of model input nodes.
  // (Previously the returned status was stored in a local and silently ignored.)
  CHECK_STATUS(OrtSessionGetInputCount(session, &num_input_nodes));
  std::vector<const char*> input_node_names(num_input_nodes);
  // ONNX dimensions are int64_t (and may be -1 for dynamic axes); the previous
  // std::vector<size_t> + pointer cast relied on matching integer widths.
  std::vector<int64_t> input_node_dims;  // simplify... this model has only 1 input node {1, 3, 224, 224}.
                                         // Otherwise need vector<vector<>>
  printf("Number of inputs = %zu\n", num_input_nodes);

  // Iterate over all input nodes (size_t index avoids signed/unsigned mixing).
  for (size_t i = 0; i < num_input_nodes; i++) {
    // Print input node names. NOTE(review): names are allocated via
    // `allocator` and not individually freed here — verify ownership rules.
    char* input_name;
    CHECK_STATUS(OrtSessionGetInputName(session, i, allocator, &input_name));
    printf("Input %d : name=%s\n", (int)i, input_name);
    input_node_names[i] = input_name;

    // Print input node types.
    OrtTypeInfo* typeinfo;
    CHECK_STATUS(OrtSessionGetInputTypeInfo(session, i, &typeinfo));
    const OrtTensorTypeAndShapeInfo* tensor_info = OrtCastTypeInfoToTensorInfo(typeinfo);
    ONNXTensorElementDataType type = OrtGetTensorElementType(tensor_info);
    printf("Input %d : type=%d\n", (int)i, type);

    // Print input shapes/dims.
    size_t num_dims = OrtGetNumOfDimensions(tensor_info);
    printf("Input %d : num_dims=%zu\n", (int)i, num_dims);
    input_node_dims.resize(num_dims);
    OrtGetDimensions(tensor_info, input_node_dims.data(), num_dims);
    for (size_t j = 0; j < num_dims; j++)
      printf("Input %d : dim %d=%jd\n", (int)i, (int)j, (intmax_t)input_node_dims[j]);  // %jd expects intmax_t
    OrtReleaseTypeInfo(typeinfo);
  }
  OrtReleaseAllocator(allocator);

  // Results should be...
  // Number of inputs = 1
  // Input 0 : name = data_0
  // Input 0 : type = 1
  // Input 0 : num_dims = 4
  // Input 0 : dim 0 = 1
  // Input 0 : dim 1 = 3
  // Input 0 : dim 2 = 224
  // Input 0 : dim 3 = 224

  //*************************************************************************
  // Similar operations to get output node information.
  // Use OrtSessionGetOutputCount(), OrtSessionGetOutputName()
  // OrtSessionGetOutputTypeInfo() as shown above.

  //*************************************************************************
  // Score the model using sample data, and inspect values.
  size_t input_tensor_size = 224 * 224 * 3;  // simplify ... using known dim values to calculate size
                                             // use OrtGetTensorShapeElementCount() to get official size!
  std::vector<float> input_tensor_values(input_tensor_size);
  std::vector<const char*> output_node_names = {"softmaxout_1"};

  // Initialize input data with values in [0.0, 1.0].
  for (size_t i = 0; i < input_tensor_size; i++)
    input_tensor_values[i] = (float)i / (input_tensor_size + 1);

  // Create input tensor object from data values.
  OrtAllocatorInfo* allocator_info;
  CHECK_STATUS(OrtCreateCpuAllocatorInfo(OrtArenaAllocator, OrtMemTypeDefault, &allocator_info));
  OrtValue* input_tensor = NULL;
  CHECK_STATUS(OrtCreateTensorWithDataAsOrtValue(allocator_info, input_tensor_values.data(), input_tensor_size * sizeof(float), input_node_dims.data(), 4, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, &input_tensor));
  assert(OrtIsTensor(input_tensor));
  OrtReleaseAllocatorInfo(allocator_info);

  // Score model with the input tensor, get back an output tensor.
  OrtValue* output_tensor = NULL;
  CHECK_STATUS(OrtRun(session, NULL, input_node_names.data(), (const OrtValue* const*)&input_tensor, 1, output_node_names.data(), 1, &output_tensor));
  assert(OrtIsTensor(output_tensor));

  // Get pointer to output tensor float values.
  float* floatarr;
  CHECK_STATUS(OrtGetTensorMutableData(output_tensor, (void**)&floatarr));
  // BUG FIX: the integer abs() from <stdlib.h> truncated the double difference
  // to 0, so the original check could never fail. Use fabs for floating point.
  assert(fabs(floatarr[0] - 0.000045) < 1e-6);

  // Print scores for the first 5 classes.
  for (int i = 0; i < 5; i++)
    printf("Score for class [%d] = %f\n", i, floatarr[i]);

  // Results should be as below...
  // Score for class[0] = 0.000045
  // Score for class[1] = 0.003846
  // Score for class[2] = 0.000125
  // Score for class[3] = 0.001180
  // Score for class[4] = 0.001317
  OrtReleaseValue(output_tensor);
  OrtReleaseValue(input_tensor);
  OrtReleaseSession(session);
  OrtReleaseEnv(env);
  printf("Done!\n");
  return 0;
}

View file

@ -0,0 +1,111 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<OnnxRuntimeCsharpRoot>$(MSBuildThisFileDirectory)..\..</OnnxRuntimeCsharpRoot>
</PropertyGroup>
<Import Project="..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.props" Condition="Exists('..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.props')" />
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<VCProjectVersion>15.0</VCProjectVersion>
<ProjectGuid>{B8CA7F10-0171-4EA5-8662-5A9942DDF415}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>MicrosoftMLOnnxRuntimeEndToEndTestsRunCapi</RootNamespace>
<WindowsTargetPlatformVersion>10.0.17763.0</WindowsTargetPlatformVersion>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<SDLCheck>true</SDLCheck>
<PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<SDLCheck>true</SDLCheck>
<PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="C_Api_Sample.cpp" />
</ItemGroup>
<ItemGroup>
<None Include="$(OnnxRuntimeCSharpRoot)\testdata\*">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
<Visible>false</Visible>
</None>
<None Include="packages.config" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
<Import Project="..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.targets" Condition="Exists('..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.targets')" />
</ImportGroup>
<Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
<PropertyGroup>
<ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
</PropertyGroup>
<Error Condition="!Exists('..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.props')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.props'))" />
<Error Condition="!Exists('..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.targets'))" />
</Target>
</Project>

View file

@ -36,7 +36,7 @@ for /f "delims=" %%i in ('type "%templateFile%" ^& break ^> "packages.config" ')
echo on
REM Restore NuGet Packages
nuget restore -PackagesDirectory ..\packages -Source %LocalNuGetRepo% Microsoft.ML.OnnxRuntime.EndToEndTests.Capi.vcxproj
nuget restore -PackagesDirectory ..\packages -Source %LocalNuGetRepo% Microsoft.ML.OnnxRuntime.EndToEndTests.RunCapi.vcxproj
if NOT %ERRORLEVEL% EQU 0 (
echo "Error:Nuget restore failed"
popd
@ -44,7 +44,7 @@ if NOT %ERRORLEVEL% EQU 0 (
)
REM Build Native project
msbuild Microsoft.ML.OnnxRuntime.EndToEndTests.Capi.vcxproj
msbuild Microsoft.ML.OnnxRuntime.EndToEndTests.RunCapi.vcxproj
if NOT %ERRORLEVEL% EQU 0 (
echo "Error:MSBuild failed to compile project"
popd
@ -54,7 +54,8 @@ if NOT %ERRORLEVEL% EQU 0 (
REM Run Unit Tests
pushd x64\Debug
vstest.console.exe /platform:x64 Microsoft.ML.OnnxRuntime.EndToEndTests.Capi.dll
REM vstest.console.exe /platform:x64 Microsoft.ML.OnnxRuntime.EndToEndTests.Capi.dll
.\Microsoft.ML.OnnxRuntime.EndToEndTests.RunCapi.exe
if NOT %ERRORLEVEL% EQU 0 (
echo "Unit test failure: %ERRORLEVEL%"
popd

View file

@ -4,6 +4,12 @@ The ONNX runtime provides a C# .Net binding for running inference on ONNX models
## NuGet Package
The Microsoft.ML.OnnxRuntime Nuget package includes the precompiled binaries for ONNX runtime, and includes libraries for Windows and Linux platforms with X64 CPUs. The APIs conform to .Net Standard 1.1.
## Sample Code
The unit tests contain several examples of loading models, inspecting input/output node shapes and types, as well as constructing tensors for scoring.
* [../csharp/test/Microsoft.ML.OnnxRuntime.Tests/InferenceTest.cs#L54](../csharp/test/Microsoft.ML.OnnxRuntime.Tests/InferenceTest.cs#L54)
## Getting Started
Here is a simple tutorial for getting started with running inference on an existing ONNX model for a given input data. The model is typically trained using any of the well-known training frameworks and exported into the ONNX format. To start scoring using the model, open a session using the `InferenceSession` class, passing in the file path to the model as a parameter.

View file

@ -25,158 +25,5 @@
The example below shows a sample run using the SqueezeNet model from ONNX model zoo, including dynamically reading model inputs, outputs, shape and type information, as well as running a sample vector and fetching the resulting class probabilities for inspection.
```c
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
//
#include <assert.h>
#include <core/session/onnxruntime_c_api.h>
#include <core/providers/cpu/cpu_provider_factory.h>
#include <stdlib.h>
#include <stdio.h>
//*****************************************************************************
// helper function to check for status
#define CHECK_STATUS(expr) \
do { \
OrtStatus* onnx_status = (expr); \
if (onnx_status != NULL) { \
const char* msg = OrtGetErrorMessage(onnx_status); \
fprintf(stderr, "%s\n", msg); \
OrtReleaseStatus(onnx_status); \
abort(); \
} \
} while (0);
int main(int argc, char *argv[])
{
//*************************************************************************
// initialize environment...one environment per process
// environment maintains thread pools and other state info
OrtEnv* env;
CHECK_STATUS(OrtCreateEnv(ORT_LOGGING_LEVEL_WARNING, "test", &env));
// initialize session options if needed
OrtSessionOptions* session_option = OrtCreateSessionOptions();
OrtSetSessionThreadPoolSize(session_option, 1);
//*************************************************************************
// create session and load model into memory
// using squeezenet version 1.3
// URL = https://github.com/onnx/models/tree/master/squeezenet
OrtSession* session;
const wchar_t * model_path = L"model.onnx";
CHECK_STATUS(OrtCreateSession(env, model_path, session_option, &session));
//*************************************************************************
// print model input layer (node names, types, shape etc.)
size_t num_inputs;
OrtStatus* status;
OrtAllocator* allocator;
OrtCreateDefaultAllocator(&allocator);
// print number of model input nodes
status = OrtSessionGetInputCount(session, &num_inputs);
char **input_names = (char**)malloc(num_inputs * sizeof(char*));
printf("Number of inputs = %zu\n", num_inputs);
// iterate over all input nodes
for (int i = 0; i < num_inputs; i++)
{
// print input node names
char* input_name;
status = OrtSessionGetInputName(session, i, allocator, &input_name);
printf("Input %d : name=%s\n", i, input_name);
input_names[i] = input_name;
// print input node types
OrtTypeInfo* typeinfo;
status = OrtSessionGetInputTypeInfo(session, i, &typeinfo);
const OrtTensorTypeAndShapeInfo* tensor_info = OrtCastTypeInfoToTensorInfo(typeinfo);
ONNXTensorElementDataType type = OrtGetTensorElementType(tensor_info);
printf("Input %d : type=%d\n", i, type);
// print input shapes
size_t num_dims = OrtGetNumOfDimensions(tensor_info);
int64_t* dims = (int64_t*)malloc(num_dims * sizeof(int64_t));
printf("Input %d : num_dims=%zu\n", i, num_dims);
OrtGetDimensions(tensor_info, dims, num_dims);
for (int j = 0; j < num_dims; j++)
printf("Input %d : dim %d=%jd\n", i, j, dims[j]);
OrtReleaseTypeInfo(typeinfo);
}
OrtReleaseAllocator(allocator);
// Results should be...
// Number of inputs = 1
// Input 0 : name = data_0
// Input 0 : type = 1
// Input 0 : num_dims = 4
// Input 0 : dim 0 = 1
// Input 0 : dim 1 = 3
// Input 0 : dim 2 = 224
// Input 0 : dim 3 = 224
//*************************************************************************
// Similar operations to get output node information.
// Use OrtSessionGetOutputCount(), OrtSessionGetOutputName()
// OrtSessionGetOutputTypeInfo() as shown above.
//*************************************************************************
// Score the model using sample data, and inspect values
size_t input_dims[] = { 1, 3, 224, 224 };
size_t input_count = 3 * 224 * 224; // input tensor count = product of dims
float* input_data = (float *) malloc(sizeof(float) * input_count);
const char* output_names[] = { "softmaxout_1"};
// initialize input data with values in [0.0, 1.0]
for (unsigned int i = 0; i < input_count; i++)
input_data[i] = (float)i / (float)(input_count + 1);
// create input tensor object from data values
OrtAllocatorInfo* allocator_info;
CHECK_STATUS(OrtCreateCpuAllocatorInfo(OrtArenaAllocator, OrtMemTypeDefault, &allocator_info));
OrtValue* input_tensor = NULL;
CHECK_STATUS(OrtCreateTensorWithDataAsOrtValue(allocator_info, input_data, input_count * sizeof(float), input_dims, 4, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, &input_tensor));
assert(OrtIsTensor(input_tensor));
OrtReleaseAllocatorInfo(allocator_info);
// score model & input tensor, get back output tensor
OrtValue* output_tensor = NULL;
CHECK_STATUS(OrtRun(session, NULL, input_names, (const OrtValue* const*)&input_tensor, 1, output_names, 1, &output_tensor));
assert(OrtIsTensor(output_tensor));
// copy output tensor values to float array
// model produces scores for 1000 classes
float* floatarr = (float *) malloc(1000 * sizeof(float));
OrtGetTensorMutableData(output_tensor, (void **) &floatarr);
// score the model, and print scores for first 5 classes
for (int i = 0; i < 5; i++)
printf("Score for class [%d] = %f\n", i, floatarr[i]);
// Results should be as below...
// Score for class[0] = 0.000045
// Score for class[1] = 0.003846
// Score for class[2] = 0.000125
// Score for class[3] = 0.001180
// Score for class[4] = 0.001317
free(input_data);
OrtReleaseValue(output_tensor);
OrtReleaseValue(input_tensor);
OrtReleaseEnv(env);
printf("Done!\n");
return 0;
}
* [../csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests.Capi/C_Api_Sample.cpp](../csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests.Capi/C_Api_Sample.cpp)