Remove unused code in tools/ci_build/github/azure-pipelines/nuget/templates/gpu.yml

This commit is contained in:
Changming Sun 2021-08-30 11:10:00 -07:00
parent 7cd46cb9c4
commit 6df4e293ff

View file

@@ -1,31 +1,7 @@
parameters:
DoEsrp: 'false'
PackageName: 'Microsoft.ML.OnnxRuntime.Gpu'
jobs:
- template: ../../templates/win-ci-2019.yml
parameters:
AgentPool : 'onnxruntime-gpu-winbuild'
ArtifactName: 'drop-nuget'
JobName: 'Windows_CI_GPU_CUDA_Dev'
BuildCommand: --build_dir $(Build.BinariesDirectory) --skip_submodule_sync --build_shared_lib --enable_onnx_tests --use_telemetry --cmake_generator "Visual Studio 16 2019" --use_cuda --cuda_version=11.4 --cuda_home="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.4" --cudnn_home="C:\local\cudnn-11.4-windows-x64-v8.2.2.26\cuda" --cmake_extra_defines "CMAKE_CUDA_ARCHITECTURES=37;50;52;60;61;70;75;80"
BuildArch: 'x64'
msbuildArchitecture: 'amd64'
EnvSetupScript: 'setup_env_cuda_11.bat'
sln_platform: 'x64'
DoDebugBuild: 'false'
DoNugetPack : 'true'
DoCompliance: 'false'
DoEsrp: ${{ parameters.DoEsrp }}
CudaVersion: '11.4'
OrtPackageId: 'Microsoft.ML.OnnxRuntime.Gpu'
NuPackScript: |
msbuild $(Build.SourcesDirectory)\csharp\OnnxRuntime.CSharp.proj /p:Configuration=RelWithDebInfo /t:CreatePackage /p:OrtPackageId=Microsoft.ML.OnnxRuntime.Gpu
copy $(Build.SourcesDirectory)\csharp\src\Microsoft.ML.OnnxRuntime\bin\RelWithDebInfo\*.nupkg $(Build.ArtifactStagingDirectory)
copy $(Build.BinariesDirectory)\RelWithDebInfo\RelWithDebInfo\*.nupkg $(Build.ArtifactStagingDirectory)
mkdir $(Build.ArtifactStagingDirectory)\testdata
copy $(Build.BinariesDirectory)\RelWithDebInfo\RelWithDebInfo\custom_op_library.* $(Build.ArtifactStagingDirectory)\testdata
- template: ../../templates/win-ci-2019.yml
parameters:
AgentPool : 'onnxruntime-gpu-winbuild'
@@ -115,63 +91,17 @@ jobs:
mkdir $(Build.ArtifactStagingDirectory)\testdata
copy $(Build.BinariesDirectory)\RelWithDebInfo\RelWithDebInfo\custom_op_library.* $(Build.ArtifactStagingDirectory)\testdata
- job: 'Linux_CI_GPU_Dev'
workspace:
clean: all
timeoutInMinutes: 120
pool: 'Onnxruntime-Linux-GPU'
steps:
- template: ../../templates/set-version-number-variables-step.yml
- template: ../../templates/get-docker-image-steps.yml
parameters:
Dockerfile: tools/ci_build/github/linux/docker/Dockerfile.manylinux2014_cuda11
Context: tools/ci_build/github/linux/docker
DockerBuildArgs: "--network=host --build-arg POLICY=manylinux2014 --build-arg PLATFORM=x86_64 --build-arg BASEIMAGE=nvidia/cuda:11.4.0-cudnn8-devel-centos7 --build-arg DEVTOOLSET_ROOTPATH=/opt/rh/devtoolset-10/root --build-arg PREPEND_PATH=/opt/rh/devtoolset-10/root/usr/bin: --build-arg LD_LIBRARY_PATH_ARG=/opt/rh/devtoolset-10/root/usr/lib64:/opt/rh/devtoolset-10/root/usr/lib:/opt/rh/devtoolset-10/root/usr/lib64/dyninst:/opt/rh/devtoolset-10/root/usr/lib/dyninst:/usr/local/lib64 --build-arg BUILD_UID=$( id -u )"
Repository: onnxruntimecuda11build
- task: CmdLine@2
inputs:
script: |
mkdir -p $HOME/.onnx
docker run --gpus all -e CC=/opt/rh/devtoolset-10/root/usr/bin/cc -e CXX=/opt/rh/devtoolset-10/root/usr/bin/c++ -e CFLAGS="-Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -fstack-clash-protection -fcf-protection -O3 -Wl,--strip-all" -e CXXFLAGS="-Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -fstack-clash-protection -fcf-protection -O3 -Wl,--strip-all" -e NVIDIA_VISIBLE_DEVICES=all --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimecuda11build \
/bin/bash -c "/opt/python/cp37-cp37m/bin/python3 /onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release --skip_submodule_sync --parallel --build_shared_lib --use_cuda --cuda_version=11.4 --cuda_home=/usr/local/cuda-11.4 --cudnn_home=/usr/local/cuda-11.4 --enable_onnx_tests --cmake_extra_defines CMAKE_CUDA_HOST_COMPILER=/opt/rh/devtoolset-10/root/usr/bin/cc 'CMAKE_CUDA_ARCHITECTURES=37;50;52;60;61;70;75;80' && cd /build/Release && make install DESTDIR=/build/linux-x64"
- script: |
set -e -x
mv $(Build.BinariesDirectory)/linux-x64/usr/local/lib64 $(Build.BinariesDirectory)/linux-x64/linux-x64
cd $(Build.BinariesDirectory)/linux-x64
zip -r linux-x64.zip linux-x64
cp $(Build.BinariesDirectory)/linux-x64/linux*.zip $(Build.ArtifactStagingDirectory)
mkdir $(Build.ArtifactStagingDirectory)/testdata
cp $(Build.BinariesDirectory)/Release/libcustom_op_library.so* $(Build.ArtifactStagingDirectory)/testdata
ls -al $(Build.ArtifactStagingDirectory)
displayName: 'Create Artifacts'
- task: PublishPipelineArtifact@0
displayName: 'Publish Pipeline Artifact'
inputs:
artifactName: 'drop-linux'
targetPath: '$(Build.ArtifactStagingDirectory)'
- template: ../../templates/component-governance-component-detection-steps.yml
parameters :
condition : 'succeeded'
- template: ../../templates/clean-agent-build-directory-step.yml
- job: NuGet_Packaging
workspace:
clean: all
pool: 'onnxruntime-gpu-winbuild'
dependsOn:
- Windows_CI_GPU_CUDA_Dev
- Windows_CI_GPU_DML_Dev
- Windows_CI_GPU_DML_Dev_x86
- Windows_CI_GPU_DML_Dev_arm64
- Windows_CI_GPU_DML_Dev_arm
- Linux_CI_GPU_Dev
condition: succeeded()
steps:
- task: DownloadPipelineArtifact@0
displayName: 'Download Pipeline Artifact - NuGet CUDA'
inputs:
artifactName: 'drop-nuget'
targetPath: '$(Build.BinariesDirectory)/nuget-artifact'
- task: DownloadPipelineArtifact@0
displayName: 'Download Pipeline Artifact - NuGet DirectML'
@@ -197,12 +127,6 @@ jobs:
artifactName: 'drop-win-dml-arm-zip'
targetPath: '$(Build.BinariesDirectory)/nuget-artifact-dml'
- task: DownloadPipelineArtifact@0
displayName: 'Download Pipeline Artifact - Linux'
inputs:
artifactName: 'drop-linux'
targetPath: '$(Build.BinariesDirectory)/nuget-artifact'
- script: |
pushd $(Build.BinariesDirectory)\nuget-artifact-dml
dir