[OpenVINO-EP] Update hardware branding from VAD-R to VAD-M (#1552)

Replaces all occurrences of VAD-R/VAD_R with VAD-M/VAD_M to align with the official hardware branding.
S. Manohar Karlapalem 2019-08-05 15:28:46 -07:00 committed by jywu-msft
parent ceb8f1c1a2
commit 05bbb3065c
9 changed files with 23 additions and 23 deletions

View file

@@ -191,7 +191,7 @@ The OpenVINO Execution Provider can be built using the following commands:
| <code>GPU_FP32</code> | Intel<sup>®</sup> Integrated Graphics |
| <code>GPU_FP16</code> | Intel<sup>®</sup> Integrated Graphics with FP16 quantization of models |
| <code>MYRIAD_FP16</code> | Intel<sup>®</sup> Movidius<sup>TM</sup> USB sticks |
-| <code>VAD-R_FP16</code> | Intel<sup>®</sup> Vision Accelerator Design based on 8 Movidius<sup>TM</sup> MyriadX VPUs |
+| <code>VAD-M_FP16</code> | Intel<sup>®</sup> Vision Accelerator Design based on 8 Movidius<sup>TM</sup> MyriadX VPUs |
For more information on OpenVINO Execution Provider's ONNX Layer support, Topology support, and Intel hardware enabled, please refer to the document OpenVINO-ExecutionProvider.md in <code>$onnxruntime_root/docs/execution_providers</code>
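For illustration, a build invocation that selects the renamed VAD-M option might look like the following (a sketch assuming the standard ./build.sh wrapper at the repository root, which forwards its arguments to tools/ci_build/build.py; substitute any other option from the table above as needed):

```
./build.sh --config RelWithDebInfo --use_openvino VAD-M_FP16
```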

View file

@@ -542,8 +542,8 @@ if(onnxruntime_USE_OPENVINO)
add_definitions(-DOPENVINO_CONFIG_CPU_FP32=1)
endif()
-if(onnxruntime_USE_OPENVINO_VAD_R)
-add_definitions(-DOPENVINO_CONFIG_VAD_R=1)
+if(onnxruntime_USE_OPENVINO_VAD_M)
+add_definitions(-DOPENVINO_CONFIG_VAD_M=1)
endif()
endif()

View file

@@ -102,7 +102,7 @@
| <code>GPU_FP32</code> | Intel<sup>®</sup> Integrated Graphics |
| <code>GPU_FP16</code> | Intel<sup>®</sup> Integrated Graphics |
| <code>MYRIAD_FP16</code> | Intel<sup>®</sup> Movidius<sup>TM</sup> USB sticks |
-| <code>VAD-R_FP16</code> | Intel<sup>®</sup> Vision Accelerator Design based on Movidius<sup>TM</sup> MyriadX VPUs |
+| <code>VAD-M_FP16</code> | Intel<sup>®</sup> Vision Accelerator Design based on Movidius<sup>TM</sup> MyriadX VPUs |
## CPU Version
@@ -155,12 +155,12 @@
docker run -it --network host --privileged -v /dev:/dev onnxruntime-myriad:latest
```
-## VAD-R Accelerator Version
+## VAD-M Accelerator Version
1. Retrieve your docker image in one of the following ways.
- Build the docker image from the DockerFile in this repository.
```
-docker build -t onnxruntime-vadr --build-arg DEVICE=VAD-R_FP16 --network host .
+docker build -t onnxruntime-vadr --build-arg DEVICE=VAD-M_FP16 --network host .
```
- Pull the official image from DockerHub.
```

View file

@@ -83,9 +83,9 @@ Below topologies are supported from ONNX open model zoo using OpenVINO Execution
|TinyYOLOv2 | Yes | Yes | Yes
| ResNet101\_DUC\_HDC | Yes | No | No
-# Application code changes for VAD-R performance scaling
+# Application code changes for VAD-M performance scaling
-VAD-R has 8 VPUs and is suitable for applications that require multiple inferences to run in parallel. We use batching approach for performance scaling on VAD-R.
+VAD-M has 8 VPUs and is suitable for applications that require multiple inferences to run in parallel. We use batching approach for performance scaling on VAD-M.
Below python code snippets provide sample classification code to batch input images, load a model and process the output results.
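Since the referenced Python snippets are not part of this diff, here is a minimal hedged sketch of the batching idea, assuming an image-classification model saved as model.onnx with a dynamic batch dimension and NCHW float32 input (all names, paths, and shapes below are illustrative placeholders, not code from this commit):

```python
# Illustrative sketch: batch several inputs into one request so the HDDL
# plugin can spread the work across the 8 VPUs on VAD-M.
import numpy as np
import onnxruntime as rt

sess = rt.InferenceSession("model.onnx")       # placeholder model path
input_name = sess.get_inputs()[0].name

# Stand-in for 8 preprocessed images: (batch, channels, height, width).
batch = np.random.rand(8, 3, 224, 224).astype(np.float32)

outputs = sess.run(None, {input_name: batch})
predictions = np.argmax(outputs[0], axis=1)    # one class index per image
print(predictions)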

View file

@@ -453,7 +453,7 @@ std::vector<std::unique_ptr<ComputeCapability>> OpenVINOExecutionProvider::GetCa
device_id = "MYRIAD";
#endif
-#ifdef OPENVINO_CONFIG_VAD_R
+#ifdef OPENVINO_CONFIG_VAD_M
precision_fp32 = false;
device_id = "HDDL";
#endif

View file

@@ -50,7 +50,7 @@ OpenVINOGraph::OpenVINOGraph(const onnxruntime::Node* fused_node) {
precision_ = InferenceEngine::Precision::FP16;
precision_str = "FP16";
#endif
-#ifdef OPENVINO_CONFIG_VAD_R
+#ifdef OPENVINO_CONFIG_VAD_M
device_id_ = "HDDL";
precision_ = InferenceEngine::Precision::FP16;
precision_str = "FP16";
@@ -65,8 +65,8 @@ OpenVINOGraph::OpenVINOGraph(const onnxruntime::Node* fused_node) {
// operations associated with the Infer Requests may be scheduled in parallel.
// Infer Requests hold resources representing the entire network on their target hardware. So,
// having more Infer Requests than needed would waste system resources.
-// In VAD-R (HDDL) accelerator, there are 8 parallel execution units. So, creating 8 instances
-// of Infer Requests only if the VAD-R accelerator is being used.
+// In VAD-M (HDDL) accelerator, there are 8 parallel execution units. So, creating 8 instances
+// of Infer Requests only if the VAD-M accelerator is being used.
// sets number of maximum parallel inferences
num_inf_reqs_ = (device_id_ == "HDDL") ? 8 : 1;

View file

@@ -34,8 +34,8 @@ void TestUnaryElementwiseOp(const char* szOp, std::vector<float>& input_vals,
excluded_providers.insert(kTensorrtExecutionProvider);
}
-//Disabled because of accuracy issues for MYRIAD FP16 and VAD_R
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_R)
+//Disabled because of accuracy issues for MYRIAD FP16 and VAD_M
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_M)
int relu = strcmp(szOp, "Relu");
int leaky = strcmp(szOp, "LeakyRelu");
if(relu == 0 || leaky == 0){

View file

@@ -42,7 +42,7 @@ TEST(MathOpTest, Add_float) {
0.0f, 5.0f, -36.0f,
-10.8f, 18.6f, 0.0f});
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_VAD_R)
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_VAD_M)
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kOpenVINOExecutionProvider}); //OpenVINO: Disabled due to accuracy mismatch for FP16
#else
test.Run();
@@ -159,7 +159,7 @@ TEST(MathOpTest, Add_Broadcast_2x1x4_1x3x1) {
221.0f, 222.0f, 223.0f, 224.0f,
231.0f, 232.0f, 233.0f, 234.0f});
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_R)
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_M)
//OpenVINO: Disabled due to software limitation for VPU Plugin.
//This test runs fine on CPU and GPU Plugins
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider,kOpenVINOExecutionProvider});
@@ -185,7 +185,7 @@ TEST(MathOpTest, Add_Broadcast_2x1x1_3x4) {
211.0f, 212.0f, 213.0f, 214.0f,
221.0f, 222.0f, 223.0f, 224.0f,
231.0f, 232.0f, 233.0f, 234.0f});
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_R)
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_M)
//OpenVINO: Disabled due to software limitation for VPU Plugin.
//This test runs fine on CPU and GPU Plugins
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider,kOpenVINOExecutionProvider});
@@ -275,7 +275,7 @@ TEST(MathOpTest, Mul) {
0.0f, 5.25f, -6'400.0f,
29.16f, 86.49f, -100'000'000.0f});
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_VAD_R)
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_VAD_M)
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kOpenVINOExecutionProvider}); //OpenVINO: Disabled due to accuracy issues for MYRIAD FP16
#else
test.Run();
@@ -537,7 +537,7 @@ TEST(MathOpTest, Sum_6) {
-6.0f, 6.6f, 28.0f,
-1.0f, 0.06f, 0.25f});
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_VAD_R)
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_VAD_M)
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kOpenVINOExecutionProvider}); //OpenVINO: Disabled due to accuracy mismatch for FP16
#else
test.Run();
@@ -561,7 +561,7 @@ TEST(MathOpTest, Sum_8_Test1) {
311.0f, 312.0f, 313.0f,
321.0f, 322.0f, 323.0f,
331.0f, 332.0f, 333.0f});
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_R)
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_M)
//OpenVINO: Disabled due to software limitation for VPU Plugin.
//This test runs fine on CPU and GPU Plugins
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider,kOpenVINOExecutionProvider});
@@ -596,7 +596,7 @@ TEST(MathOpTest, Sum_8_Test2) {
3.3f, 4.4f, -94.7f,
59.6f, 64.01f, -8.0f});
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_R)
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_M)
//OpenVINO: Disabled due to software limitation for VPU Plugin.
//This test runs fine on CPU and GPU Plugins
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider,kOpenVINOExecutionProvider});

View file

@@ -129,7 +129,7 @@ Use the individual flags to only run the specified stages.
parser.add_argument("--use_mklml", action='store_true', help="Build with MKLML.")
parser.add_argument("--use_ngraph", action='store_true', help="Build with nGraph.")
parser.add_argument("--use_openvino", nargs="?", const="CPU_FP32",
-choices=["CPU_FP32","GPU_FP32","GPU_FP16","VAD-R_FP16","MYRIAD_FP16"], help="Build with OpenVINO for specific hardware.")
+choices=["CPU_FP32","GPU_FP32","GPU_FP16","VAD-M_FP16","MYRIAD_FP16"], help="Build with OpenVINO for specific hardware.")
parser.add_argument("--use_dnnlibrary", action='store_true', help="Build with DNNLibrary.")
parser.add_argument("--use_nsync", action='store_true', help="Build with NSYNC.")
parser.add_argument("--use_preinstalled_eigen", action='store_true', help="Use pre-installed eigen.")
@@ -340,7 +340,7 @@ def generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home
"-Donnxruntime_USE_OPENVINO_GPU_FP32=" + ("ON" if args.use_openvino == "GPU_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP16=" + ("ON" if args.use_openvino == "GPU_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_CPU_FP32=" + ("ON" if args.use_openvino == "CPU_FP32" else "OFF"),
-"-Donnxruntime_USE_OPENVINO_VAD_R=" + ("ON" if args.use_openvino == "VAD-R_FP16" else "OFF"),
+"-Donnxruntime_USE_OPENVINO_VAD_M=" + ("ON" if args.use_openvino == "VAD-M_FP16" else "OFF"),
"-Donnxruntime_USE_NNAPI=" + ("ON" if args.use_dnnlibrary else "OFF"),
"-Donnxruntime_USE_OPENMP=" + ("ON" if args.use_openmp and not args.use_dnnlibrary and not args.use_mklml and not args.use_ngraph else "OFF"),
"-Donnxruntime_USE_TVM=" + ("ON" if args.use_tvm else "OFF"),