make sure samples run (#2276)
Adjust instructions for imagenet. Fix compile warning. Fix input/output names. Update build/run instructions.
This commit is contained in:
parent
d1096b524f
commit
09eb8ff8b8
3 changed files with 17 additions and 10 deletions
@@ -261,16 +261,16 @@ int main(int argc, char* argv[]) {
   if (execution_provider)
   {
-    if (tcscmp(execution_provider, ORT_TSTR("cpu"))) {
+    if (tcscmp(execution_provider, ORT_TSTR("cpu")) == 0) {
       // Nothing; this is the default
-    } else if (tcscmp(execution_provider, ORT_TSTR("cuda"))) {
+    } else if (tcscmp(execution_provider, ORT_TSTR("cuda")) == 0) {
 #ifdef USE_CUDA
       enable_cuda(session_options);
 #else
       puts("CUDA is not enabled in this build.");
       return -1;
 #endif
-    } else if (tcscmp(execution_provider, ORT_TSTR("dml"))) {
+    } else if (tcscmp(execution_provider, ORT_TSTR("dml")) == 0) {
 #ifdef USE_DML
       enable_dml(session_options);
 #else
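The `== 0` additions matter because `tcscmp` resolves to a `strcmp`-family comparison, which returns 0 when the two strings are equal; the old conditions therefore took each provider branch on a mismatch. A minimal sketch of the corrected dispatch, assuming plain `strcmp` and a hypothetical `select_provider` helper rather than the sample's actual code:

```cpp
// Sketch only: illustrates why the "== 0" is needed, not the sample's code.
// strcmp-family functions return 0 when the strings MATCH, so a bare
// `if (tcscmp(a, b))` takes the branch on a mismatch.
#include <cstdio>
#include <cstring>

int select_provider(const char* execution_provider) {
  if (strcmp(execution_provider, "cpu") == 0) {
    // Nothing; CPU is the default provider.
  } else if (strcmp(execution_provider, "cuda") == 0) {
    puts("would enable the CUDA provider here");
  } else if (strcmp(execution_provider, "dml") == 0) {
    puts("would enable the DirectML provider here");
  } else {
    puts("unknown execution provider");
    return -1;
  }
  return 0;
}
```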
@@ -9,7 +9,7 @@ WARNING: If you want to train the model by yourself, you need at least 500GB dis
 # Install tensorflow
 Install Python 3.x from [python.org](https://www.python.org/), then execute
 ```
-pip install --upgrade tensorflow
+pip install --upgrade tensorflow==1.14
 ```
 For more information, see [Install Tensorflow](https://www.tensorflow.org/install)
 
@@ -36,10 +36,12 @@ tar -zxvf inception_v4_2016_09_09.tar.gz
 The [Inception V4] zip file only contains a single checkpoint file: inception_v4.ckpt. It can't be directly used for inferencing.
 You need to combine the network definition and the checkpoint. Please follow the steps below:
 
-1. Export the graph
-Create an new folder. At there, execute
+1. Export the graph.
 ```
-git clone https://github.com/tensorflow/models .
+git clone https://github.com/tensorflow/models
+# Copy inception_v4.ckpt into models
 cd models
+# Ignore deprecation warnings
 python research\slim\export_inference_graph.py --model_name=inception_v4 --output_file=grpah.pb
 ```
@@ -56,6 +58,12 @@ pip install --upgrade tf2onnx
 python -m tf2onnx.convert --input inception_v4.pb --inputs input:0 --outputs InceptionV4/Logits/Predictions:0 --opset 10 --output inception_v4.onnx
 ```
+
+You should see messages like these:
+
+INFO - Successfully converted TensorFlow model inception_v4.pb to ONNX
+
+INFO - ONNX model is saved at inception_v4.onnx
 
 # Run the inferencing
 In your build dir of onnxruntime_samples, search for "image_classifier.exe" and run
 ```
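Since the commit also fixes the input/output names the sample expects, one way to double-check the converted model is to print its names through the ONNX Runtime C++ API and compare them against the names passed to tf2onnx. A hedged sketch, assuming the model sits at `inception_v4.onnx` in the working directory and using the same `GetInputName`/`GetOutputName` calls the sample code in this commit uses:

```cpp
// Sketch: list the model's input/output names so they can be checked against
// the names given to tf2onnx (input:0, InceptionV4/Logits/Predictions:0).
// The model path is an assumption.
#include <cstdio>
#include <onnxruntime_cxx_api.h>

int main() {
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "inspect");
  Ort::SessionOptions opts;
  Ort::Session session(env, ORT_TSTR("inception_v4.onnx"), opts);
  Ort::AllocatorWithDefaultOptions alloc;
  for (size_t i = 0; i < session.GetInputCount(); ++i) {
    char* name = session.GetInputName(i, alloc);   // allocated by `alloc`
    printf("input %zu: %s\n", i, name);
    alloc.Free(name);                              // release the ORT-owned buffer
  }
  for (size_t i = 0; i < session.GetOutputCount(); ++i) {
    char* name = session.GetOutputName(i, alloc);
    printf("output %zu: %s\n", i, name);
    alloc.Free(name);
  }
  return 0;
}
```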
@@ -124,11 +124,10 @@ class Validator : public OutputCollector<TCharString> {
     VerifyInputOutputCount(session_);
     Ort::AllocatorWithDefaultOptions ort_alloc;
     {
-      char* t;
-      session_.GetInputName(0, ort_alloc);
+      char* t = session_.GetInputName(0, ort_alloc);
       input_name_ = my_strdup(t);
       ort_alloc.Free(t);
-      session_.GetOutputName(0, ort_alloc);
+      t = session_.GetOutputName(0, ort_alloc);
       output_name_ = my_strdup(t);
       ort_alloc.Free(t);
     }
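The removed lines discarded the return value of `GetInputName` (the compile warning noted in the commit message) and then copied an uninitialized pointer. Both `GetInputName` and `GetOutputName` return a string allocated by the allocator passed in; the caller copies the text and then releases the buffer with `Free`. A sketch of that ownership pattern, assuming a hypothetical `ort_name_to_string` helper and `std::string` in place of the sample's `my_strdup`:

```cpp
// Sketch of the ownership pattern behind the fix, not the sample's code.
#include <string>
#include <onnxruntime_cxx_api.h>

// Copy an ORT-allocated name into a std::string, then return the buffer
// to the allocator that produced it.
static std::string ort_name_to_string(char* ort_owned,
                                      Ort::AllocatorWithDefaultOptions& alloc) {
  std::string copy(ort_owned);  // copy while the ORT-allocated buffer is alive
  alloc.Free(ort_owned);        // then release the buffer
  return copy;
}

static void read_io_names(Ort::Session& session,
                          std::string& input_name, std::string& output_name) {
  Ort::AllocatorWithDefaultOptions ort_alloc;
  input_name  = ort_name_to_string(session.GetInputName(0, ort_alloc), ort_alloc);
  output_name = ort_name_to_string(session.GetOutputName(0, ort_alloc), ort_alloc);
}
```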