diff --git a/dockerfiles/scripts/install_common_deps.sh b/dockerfiles/scripts/install_common_deps.sh
index 41bdc068d8..bbb672a99e 100644
--- a/dockerfiles/scripts/install_common_deps.sh
+++ b/dockerfiles/scripts/install_common_deps.sh
@@ -21,6 +21,6 @@ pip install "wheel>=0.35.1"
 rm -rf /opt/miniconda/pkgs
 
 # Dependencies: cmake
-wget --quiet https://github.com/Kitware/CMake/releases/download/v3.30.1/cmake-3.30.1-linux-x86_64.tar.gz
-tar zxf cmake-3.30.1-linux-x86_64.tar.gz
-rm -rf cmake-3.30.1-linux-x86_64.tar.gz
+wget --quiet https://github.com/Kitware/CMake/releases/download/v3.31.5/cmake-3.31.5-linux-x86_64.tar.gz
+tar zxf cmake-3.31.5-linux-x86_64.tar.gz
+rm -rf cmake-3.31.5-linux-x86_64.tar.gz
diff --git a/onnxruntime/test/onnx/TestCase.cc b/onnxruntime/test/onnx/TestCase.cc
index 639bad9e8b..c27eba03d4 100644
--- a/onnxruntime/test/onnx/TestCase.cc
+++ b/onnxruntime/test/onnx/TestCase.cc
@@ -34,115 +34,35 @@
 using namespace onnxruntime;
 using namespace onnxruntime::common;
-using google::protobuf::RepeatedPtrField;
 
 static constexpr int protobuf_block_size_in_bytes = 4 * 1024 * 1024;
 
 const std::string TestModelInfo::unknown_version = "unknown version";
 
 namespace {
+using PATH_STRING_TYPE = std::basic_string<PATH_CHAR_TYPE>;
 
-template <typename T>
-inline Ort::Value CreateTensorWithDataAsOrtValue(const Ort::MemoryInfo& info,
-                                                 OrtAllocator*,
-                                                 const std::vector<int64_t>& dims,
-                                                 std::vector<T>& input) {
-  return Ort::Value::CreateTensor(static_cast<const OrtMemoryInfo*>(info), input.data(), input.size() * sizeof(T),
-                                  dims.data(), dims.size());
-}
-
-template <>
-inline Ort::Value CreateTensorWithDataAsOrtValue<std::string>(const Ort::MemoryInfo&,
-                                                              OrtAllocator* allocator,
-                                                              const std::vector<int64_t>& dims,
-                                                              std::vector<std::string>& input) {
-  auto tensor_value = Ort::Value::CreateTensor(allocator, dims.data(), dims.size(),
-                                               ONNXTensorElementDataType::ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING);
-
-  std::vector<const char*> p_str;
-  for (const auto& s : input) {
-    p_str.push_back(s.c_str());
-  }
-
-  tensor_value.FillStringTensor(p_str.data(), p_str.size());
-  return tensor_value;
-}
-
-template <typename key_type, typename value_type>
-Ort::Value PbMapToOrtValue(const google::protobuf::Map<key_type, value_type>& map) {
-  auto info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeDefault);
-  Ort::AllocatorWithDefaultOptions allocator;
-  const size_t ele_count = map.size();
-  std::vector<int64_t> dims(1, static_cast<int64_t>(ele_count));
-  std::vector<key_type> keys(ele_count);
-  std::vector<value_type> values(ele_count);
-  size_t i = 0;
-  for (auto& kvp : map) {
-    keys[i] = kvp.first;
-    values[i] = kvp.second;
-    ++i;
-  }
-
-  //// See helper above
-  auto ort_keys = CreateTensorWithDataAsOrtValue(info, allocator, dims, keys);
-  auto ort_values = CreateTensorWithDataAsOrtValue(info, allocator, dims, values);
-  return Ort::Value::CreateMap(ort_keys, ort_values);
-}
-
-template <typename T>
-Ort::Value VectorProtoToOrtValue(const RepeatedPtrField<T>& input) {
-  auto info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeDefault);
-  Ort::AllocatorWithDefaultOptions allocator;
-  std::vector<Ort::Value> seq;
-  seq.reserve(input.size());
-  for (const T& v : input) {
-    // create key tensor
-    const auto& map = v.v();
-    size_t ele_count = map.size();
-    using key_type = typename std::remove_reference<decltype(map)>::type::key_type;
-    using value_type = typename std::remove_reference<decltype(map)>::type::mapped_type;
-    std::vector<int64_t> dims(1, static_cast<int64_t>(ele_count));
-    std::vector<key_type> keys(ele_count);
-    std::vector<value_type> values(ele_count);
-    size_t i = 0;
-    for (auto& kvp : map) {
-      keys[i] = kvp.first;
-      values[i] = kvp.second;
-      ++i;
-    }
-
-    auto ort_keys = CreateTensorWithDataAsOrtValue(info, allocator, dims, keys);
-    auto ort_values = CreateTensorWithDataAsOrtValue(info, allocator, dims, values);
-    auto ort_map = Ort::Value::CreateMap(ort_keys, ort_values);
-    seq.push_back(std::move(ort_map));
-  }
-  return Ort::Value::CreateSequence(seq);
-}
-
-template <typename CHAR_T>
-static int ExtractFileNo(const std::basic_string<CHAR_T>& name) {
+static int ExtractFileNo(const std::filesystem::path& pathstr) {
+  PATH_STRING_TYPE name = pathstr;
   size_t p1 = name.rfind('.');
   size_t p2 = name.rfind('_', p1);
   ++p2;
-  std::basic_string<CHAR_T> number_str = name.substr(p2, p1 - p2);
-  const CHAR_T* start = number_str.c_str();
-  const CHAR_T* end = number_str.c_str();
-  long ret = OrtStrtol(start, const_cast<CHAR_T**>(&end));
+  PATH_STRING_TYPE number_str = name.substr(p2, p1 - p2);
+  const PATH_CHAR_TYPE* start = number_str.c_str();
+  const PATH_CHAR_TYPE* end = start;
+  long ret = OrtStrtol(start, const_cast<PATH_CHAR_TYPE**>(&end));
   if (end == start) {
     ORT_THROW("parse file name failed");
   }
   return static_cast<int>(ret);
 }
 
-using PATH_STRING_TYPE = std::basic_string<PATH_CHAR_TYPE>;
-static void SortFileNames(std::vector<std::basic_string<PATH_CHAR_TYPE>>& input_pb_files) {
+static void SortFileNames(std::vector<std::filesystem::path>& input_pb_files) {
   if (input_pb_files.size() <= 1) return;
   std::sort(input_pb_files.begin(), input_pb_files.end(),
-            [](const std::basic_string<PATH_CHAR_TYPE>& left, const std::basic_string<PATH_CHAR_TYPE>& right) -> bool {
-              std::basic_string<PATH_CHAR_TYPE> leftname = GetLastComponent(left);
-              std::basic_string<PATH_CHAR_TYPE> rightname = GetLastComponent(right);
-              int left1 = ExtractFileNo(leftname);
-              int right1 = ExtractFileNo(rightname);
+            [](const std::filesystem::path& left, std::filesystem::path& right) -> bool {
+              int left1 = ExtractFileNo(left.filename());
+              int right1 = ExtractFileNo(right.filename());
               return left1 < right1;
             });
 
@@ -374,7 +294,7 @@ void OnnxTestCase::LoadTestData(size_t id, onnxruntime::test::HeapBuffer& b,
     ORT_THROW("index out of bound");
   }
 
-  std::vector<PATH_STRING_TYPE> test_data_pb_files;
+  std::vector<std::filesystem::path> test_data_pb_files;
 
   std::filesystem::path dir_fs_path = test_data_dirs_[id];
   if (!std::filesystem::exists(dir_fs_path)) return;
@@ -390,7 +310,7 @@ void OnnxTestCase::LoadTestData(size_t id, onnxruntime::test::HeapBuffer& b,
         is_input ? ORT_TSTR("input_") : ORT_TSTR("output_");
     auto filename_str = path.filename().native();
     if (filename_str.compare(0, file_prefix.length(), file_prefix) == 0) {
-      test_data_pb_files.push_back(path.native());
+      test_data_pb_files.push_back(path);
     }
   }
 
diff --git a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda11_tensorrt10 b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda11_tensorrt10
index 23a1c9a1a0..f454d21164 100644
--- a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda11_tensorrt10
+++ b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda11_tensorrt10
@@ -10,7 +10,7 @@ FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04 AS base
 # The local directory into which to build and install CMAKE
 ARG ONNXRUNTIME_LOCAL_CODE_DIR=/code
 
-ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.30.1-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.31.5-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
 ENV DEBIAN_FRONTEND=noninteractive
 
 RUN apt-get update &&\
diff --git a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda12_tensorrt10 b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda12_tensorrt10
index ff1fccae75..a9dbefc6fa 100644
--- a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda12_tensorrt10
+++ b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda12_tensorrt10
@@ -10,7 +10,7 @@ FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu22.04 AS base
 # The local directory into which to build and install CMAKE
 ARG ONNXRUNTIME_LOCAL_CODE_DIR=/code
 
-ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.30.1-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.31.5-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
 ENV DEBIAN_FRONTEND=noninteractive
 
 RUN apt-get update &&\
diff --git a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_openvino b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_openvino
index 26d895533d..7b1e3fa677 100644
--- a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_openvino
+++ b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_openvino
@@ -34,13 +34,13 @@ RUN wget "https://github.com/intel/compute-runtime/releases/download/21.48.21782
     sudo dpkg -i *.deb && rm -rf *.deb
 
 RUN mkdir -p /opt/cmake/bin && \
-    wget https://github.com/Kitware/CMake/releases/download/v3.30.1/cmake-3.30.1-linux-x86_64.tar.gz && \
-    tar -xf cmake-3.30.1-linux-x86_64.tar.gz --strip 1 -C /opt/cmake && rm -rf /cmake-3.30.1-linux-x86_64.tar.gz && \
+    wget https://github.com/Kitware/CMake/releases/download/v3.31.5/cmake-3.31.5-linux-x86_64.tar.gz && \
+    tar -xf cmake-3.31.5-linux-x86_64.tar.gz --strip 1 -C /opt/cmake && rm -rf /cmake-3.31.5-linux-x86_64.tar.gz && \
     ln -sf /opt/cmake/bin/* /usr/bin
 
 ARG BUILD_UID=1000
 ARG BUILD_USER=onnxruntimedev
-WORKDIR /home/$BUILD_USER
-RUN adduser --uid $BUILD_UID $BUILD_USER
+RUN adduser --gecos 'onnxruntime Build User' --disabled-password $BUILD_USER --uid $BUILD_UID
 RUN adduser $BUILD_USER video
 USER $BUILD_USER
+WORKDIR /home/$BUILD_USER
diff --git a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_tensorrt_bin b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_tensorrt_bin
index 359cd8e0bd..7dd302a6b0 100644
--- a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_tensorrt_bin
+++ b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_tensorrt_bin
@@ -10,7 +10,7 @@ FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu22.04 AS base
 # The local directory into which to build and install CMAKE
 ARG ONNXRUNTIME_LOCAL_CODE_DIR=/code
 
-ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.30.1-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.31.5-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
 ENV DEBIAN_FRONTEND=noninteractive
 
 RUN apt-get update &&\
diff --git a/tools/ci_build/github/linux/docker/rocm-ci-pipeline-env.Dockerfile b/tools/ci_build/github/linux/docker/rocm-ci-pipeline-env.Dockerfile
index 1ba0a7cfc0..55c578dd2d 100644
--- a/tools/ci_build/github/linux/docker/rocm-ci-pipeline-env.Dockerfile
+++ b/tools/ci_build/github/linux/docker/rocm-ci-pipeline-env.Dockerfile
@@ -44,10 +44,10 @@ ENV LANG C.UTF-8
 WORKDIR /stage
 
 # Cmake
-ENV CMAKE_VERSION=3.30.1
+ENV CMAKE_VERSION=3.31.5
 RUN cd /usr/local && \
     wget -q https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz && \
-    tar -zxf /usr/local/cmake-3.30.1-Linux-x86_64.tar.gz --strip=1 -C /usr
+    tar -zxf /usr/local/cmake-3.31.5-Linux-x86_64.tar.gz --strip=1 -C /usr
 
 # ccache
 RUN mkdir -p /tmp/ccache && \
diff --git a/tools/ci_build/github/linux/docker/scripts/install_os_deps.sh b/tools/ci_build/github/linux/docker/scripts/install_os_deps.sh
index 87b9b960b7..eba897c7c8 100755
--- a/tools/ci_build/github/linux/docker/scripts/install_os_deps.sh
+++ b/tools/ci_build/github/linux/docker/scripts/install_os_deps.sh
@@ -70,18 +70,18 @@ if [[ $SYS_LONG_BIT = "64" && "$GLIBC_VERSION" -gt "9" ]]; then
  tar --strip 1 -xf /tmp/azcopy/azcopy.tar.gz -C /tmp/azcopy
  cp /tmp/azcopy/azcopy /usr/bin
  echo "Installing cmake"
- GetFile https://github.com/Kitware/CMake/releases/download/v3.30.1/cmake-3.30.1-Linux-x86_64.tar.gz /tmp/src/cmake-3.30.1-Linux-x86_64.tar.gz
- tar -zxf /tmp/src/cmake-3.30.1-Linux-x86_64.tar.gz --strip=1 -C /usr
+ GetFile https://github.com/Kitware/CMake/releases/download/v3.31.5/cmake-3.31.5-Linux-x86_64.tar.gz /tmp/src/cmake-3.31.5-Linux-x86_64.tar.gz
+ tar -zxf /tmp/src/cmake-3.31.5-Linux-x86_64.tar.gz --strip=1 -C /usr
  echo "Installing Node.js"
  # The EOL for nodejs v18.17.1 LTS is April 2025
  GetFile https://nodejs.org/dist/v18.17.1/node-v18.17.1-linux-x64.tar.xz /tmp/src/node-v18.17.1-linux-x64.tar.xz
  tar -xf /tmp/src/node-v18.17.1-linux-x64.tar.xz --strip=1 -C /usr
 else
  echo "Installing cmake"
- GetFile https://github.com/Kitware/CMake/releases/download/v3.30.1/cmake-3.30.1.tar.gz /tmp/src/cmake-3.30.1.tar.gz
- tar -xf /tmp/src/cmake-3.30.1.tar.gz -C /tmp/src
+ GetFile https://github.com/Kitware/CMake/releases/download/v3.31.5/cmake-3.31.5.tar.gz /tmp/src/cmake-3.31.5.tar.gz
+ tar -xf /tmp/src/cmake-3.31.5.tar.gz -C /tmp/src
  pushd .
- cd /tmp/src/cmake-3.30.1
+ cd /tmp/src/cmake-3.31.5
  ./bootstrap --prefix=/usr --parallel=$(getconf _NPROCESSORS_ONLN) --system-bzip2 --system-curl --system-zlib --system-expat
  make -j$(getconf _NPROCESSORS_ONLN)
  make install