# --------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------
# Dockerfile to run ONNX Runtime with TensorRT integration

# NVIDIA TensorRT Base Image
FROM nvcr.io/nvidia/tensorrt:19.09-py3
MAINTAINER Vinitra Swamy "viswamy@microsoft.com"

ARG ONNXRUNTIME_REPO=https://github.com/Microsoft/onnxruntime
ARG ONNXRUNTIME_SERVER_BRANCH=master
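# Note: both build arguments above can be overridden at build time with the
# standard --build-arg flag, e.g. (illustrative placeholders, not values from this file):
#   docker build --build-arg ONNXRUNTIME_REPO=<fork-url> --build-arg ONNXRUNTIME_SERVER_BRANCH=<branch> ...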
RUN apt-get update &&\
    apt-get install -y sudo git bash

WORKDIR /code
ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:/code/cmake-3.14.3-Linux-x86_64/bin:/opt/miniconda/bin:${PATH}
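# The cmake-3.14.3-Linux-x86_64 and /opt/miniconda directories referenced in PATH
# are assumed to be laid down by install_common_deps.sh in the build step below
# (inferred from the paths used here; the cmake directory is removed again after the build).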
# Prepare onnxruntime repository & build onnxruntime with TensorRT
RUN git clone --single-branch --branch ${ONNXRUNTIME_SERVER_BRANCH} --recursive ${ONNXRUNTIME_REPO} onnxruntime &&\
    /bin/sh onnxruntime/dockerfiles/scripts/install_common_deps.sh &&\
    cp onnxruntime/dockerfiles/LICENSE-IMAGE.txt /code/LICENSE-IMAGE.txt &&\
    cp onnxruntime/ThirdPartyNotices.txt /code/ThirdPartyNotices.txt &&\
    cd onnxruntime &&\
    /bin/sh ./build.sh --cuda_home /usr/local/cuda --cudnn_home /usr/lib/x86_64-linux-gnu/ --use_tensorrt --tensorrt_home /workspace/tensorrt --config Release --build_wheel --update --build --cmake_extra_defines ONNXRUNTIME_VERSION=$(cat ./VERSION_NUMBER) &&\
    pip install /code/onnxruntime/build/Linux/Release/dist/*.whl &&\
    cd .. &&\
    rm -rf onnxruntime cmake-3.14.3-Linux-x86_64
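# Example usage (a sketch; the image tag and file name are illustrative, and the
# --gpus flag assumes Docker 19.03+ with the NVIDIA Container Toolkit installed):
#   docker build -t onnxruntime-trt -f Dockerfile.tensorrt .
#   docker run --gpus all -it onnxruntime-trt python -c "import onnxruntime; print(onnxruntime.get_device())"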