diff --git a/docs/TVM_EP.md b/docs/TVM_EP.md
index 8a34d75635..4deec4817f 100644
--- a/docs/TVM_EP.md
+++ b/docs/TVM_EP.md
@@ -146,7 +146,7 @@ To verify the CUDA installation use `nvcc --version` in cmd.
-Build ONNX Runtime with TVM Execution Provider from source:
+#### **Build ONNX Runtime with TVM Execution Provider from source (Python):**
- Use the command line to clone the sources from GitHub:
```cmd
git clone --recursive https://github.com/Microsoft/onnxruntime
@@ -183,8 +183,24 @@ print(onnxruntime.get_available_providers())
pip uninstall onnxruntime-tvm
```
+#### **Build ONNX Runtime with TVM Execution Provider from source (C#):**
+- Use the command line to clone the sources from GitHub:
+```cmd
+git clone --recursive https://github.com/Microsoft/onnxruntime
+cd onnxruntime
+```
+- CPU build:
+
Make sure you have downloaded [nuget.exe](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools#nugetexe-cli) and added its folder to the `PATH` environment variable.
+```
+build.bat --config Release --build_nuget --skip_tests --parallel --use_tvm --skip_onnx_tests --cmake_generator "Visual Studio 17 2022" --llvm_config llvm-config.exe
+```
+- Install the C# NuGet package for the TVM EP. The default path to the package is `\build\Windows\Release\Release`; one way to consume it from there is sketched below.
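+
+As a sketch of consuming the freshly built package with a standard .NET CLI setup (the source name, project file, and absolute path below are illustrative):
+```cmd
+dotnet nuget add source C:\onnxruntime\build\Windows\Release\Release --name ort-tvm-local
+dotnet add MyApp.csproj package Microsoft.ML.OnnxRuntime.Tvm --source ort-tvm-local
+```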
+
+
## Configuration options
The TVM Execution Provider can be configured with the following provider options:
+1. Python
```python
po = [dict(executor=tvm_executor_type,
so_folder=folder_with_pretuned_files,
@@ -201,6 +217,31 @@ po = [dict(executor=tvm_executor_type,
input_shapes = input_shapes_str)]
tvm_session = onnxruntime.InferenceSession(model_path, providers=["TvmExecutionProvider"], provider_options=po)
```
+
+2. C#
+
+Currently, only precompiled models are supported in C# (see the related section below).
+
+```CSharp
+SessionOptions session_options = new SessionOptions{};
+string tvm_ep_options =
+ $"executor: {tvm_executor_type}, " +
+ $"so_folder: {folder_with_pretuned_files}, " +
+ $"check_hash: {check_hash}, " +
+ $"hash_file_path: {hash_file_path}, " +
+ $"target: {client_target}, " +
+ $"target_host: {client_target_host}, " +
+ $"opt_level: {client_opt_level}, " +
+ $"freeze_weights: {freeze}, " +
+ $"to_nhwc: {layout_transform}, " +
+ $"tuning_type: {tvm_optimizer_type}, " +
+ $"tuning_file_path: {client_tuning_logfile}, " +
+ $"input_names: {input_names_str}, " +
+ $"input_shapes: {input_shapes_str}";
+
+session_options.AppendExecutionProvider_Tvm(tvm_ep_options);
+using var tvm_session = new InferenceSession(modelFilePath, session_options);
+```
- `executor` is the executor type used by TVM. There is a choice between two types, GraphExecutor and VirtualMachine, which correspond to the "graph" and "vm" tags respectively. VirtualMachine is used by default.
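+
+For instance, a minimal Python sketch that selects the GraphExecutor and leaves every other option at its default (the model path is illustrative):
+```python
+import onnxruntime
+
+# "graph" selects GraphExecutor; omit the option to get the default VirtualMachine ("vm").
+po = [dict(executor="graph")]
+tvm_session = onnxruntime.InferenceSession("model.onnx", providers=["TvmExecutionProvider"], provider_options=po)
+```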
diff --git a/onnxruntime/core/providers/tvm/tvm_ep_options.cc b/onnxruntime/core/providers/tvm/tvm_ep_options.cc
index c6343e6024..1d701dc11d 100644
--- a/onnxruntime/core/providers/tvm/tvm_ep_options.cc
+++ b/onnxruntime/core/providers/tvm/tvm_ep_options.cc
@@ -31,6 +31,9 @@ constexpr const char* kInputShapes = "input_shapes";
static const std::unordered_set<std::string> valid_keys {
std::string{kExecutor},
+ std::string{kSoFolder},
+ std::string{kCheckHash},
+ std::string{kHashFilePath},
std::string{kTarget},
std::string{kTargetHost},
std::string{kOptLevel},
diff --git a/tools/ci_build/build.py b/tools/ci_build/build.py
index 666a0af315..79314f3451 100644
--- a/tools/ci_build/build.py
+++ b/tools/ci_build/build.py
@@ -2116,6 +2116,7 @@ def build_nuget_package(
elif use_cuda:
package_name = '/p:OrtPackageId="Microsoft.ML.OnnxRuntime.Gpu"'
elif use_tvm:
+ execution_provider = '/p:ExecutionProvider="tvm"'
package_name = '/p:OrtPackageId="Microsoft.ML.OnnxRuntime.Tvm"'
else:
# use the solution file that includes Xamarin mobile targets
@@ -2157,7 +2158,7 @@ def build_nuget_package(
run_subprocess(cmd_args, cwd=csharp_build_dir)
if is_windows():
- if use_openvino:
+ if use_openvino or use_tvm:
# user needs to make sure nuget is installed and added to the path variable
nuget_exe = "nuget.exe"
else:
diff --git a/tools/nuget/generate_nuspec_for_native_nuget.py b/tools/nuget/generate_nuspec_for_native_nuget.py
index 67227de1c6..ca84e8ada1 100644
--- a/tools/nuget/generate_nuspec_for_native_nuget.py
+++ b/tools/nuget/generate_nuspec_for_native_nuget.py
@@ -119,7 +119,7 @@ def parse_arguments():
required=False,
default="None",
type=str,
- choices=["cuda", "dnnl", "openvino", "tensorrt", "snpe", "None"],
+ choices=["cuda", "dnnl", "openvino", "tensorrt", "snpe", "tvm", "None"],
help="The selected execution provider for this build.",
)
parser.add_argument("--dependency_id", required=False, default="None", type=str, help="ependency id.")
@@ -335,6 +335,7 @@ def generate_files(list, args):
"tensorrt_ep_shared_lib": "onnxruntime_providers_tensorrt.dll",
"openvino_ep_shared_lib": "onnxruntime_providers_openvino.dll",
"cuda_ep_shared_lib": "onnxruntime_providers_cuda.dll",
+ "tvm_ep_shared_lib": "onnxruntime_providers_tvm.lib",
"onnxruntime_perf_test": "onnxruntime_perf_test.exe",
"onnx_test_runner": "onnx_test_runner.exe",
}
@@ -387,6 +388,14 @@ def generate_files(list, args):
+ '" target="build\\native\\include" />'
)
+    if args.execution_provider == "tvm":
+        files_list.append(
+            "<file src="
+            + '"'
+            + os.path.join(args.sources_path, "include\\onnxruntime\\core\\providers\\tvm\\tvm_provider_factory.h")
+            + '" target="build\\native\\include" />'
+        )
+
    if args.execution_provider == "openvino":
        files_list.append(
            "<file src="
            + '"'
            + os.path.join(args.sources_path, "include\\onnxruntime\\core\\providers\\openvino\\openvino_provider_factory.h")
            + '" target="build\\native\\include" />'
        )
+    if args.execution_provider == "tvm":
+        files_list.append(
+            "<file src="
+            + '"'
+            + os.path.join(args.native_build_path, nuget_dependencies["providers_shared_lib"])
+            + runtimes_target
+            + args.target_architecture
+            + '\\native" />'
+        )
+        files_list.append(
+            "<file src="
+            + '"'
+            + os.path.join(args.native_build_path, nuget_dependencies["tvm_ep_shared_lib"])
+            + runtimes_target
+            + args.target_architecture
+            + '\\native" />'
+        )
+
+        tvm_build_path = os.path.join(args.ort_build_path, args.build_config, "_deps", "tvm-build")
+        if is_windows():
+            files_list.append(
+                "<file src="
+                + '"'
+                + os.path.join(tvm_build_path, args.build_config, "tvm.dll")
+                + runtimes_target
+                + args.target_architecture
+                + '\\native" />'
+            )
+        else:
+            # TODO(agladyshev): Add support for Linux.
+            raise RuntimeError("Now only Windows is supported for TVM EP.")
+
if args.execution_provider == "openvino":
openvino_path = get_env_var("INTEL_OPENVINO_DIR")
files_list.append(