Update orttraining release pipelines to use torch 1.11.0 (#11018)

* Update orttraining release pipelines to use torch 1.11.0

* Change requirements_torch...txt to requirements.txt

* Update cuda cmake architectures and clean up old files
This commit is contained in:
Baiju Meswani 2022-03-31 21:51:06 -07:00 committed by GitHub
parent 8e6dbad287
commit 249c4dec7f
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
29 changed files with 85 additions and 401 deletions

View file

@ -62,8 +62,9 @@ jobs:
export ONNX_VERSION=$(cat $(Build.SourcesDirectory)/cmake/external/onnx/VERSION_NUMBER)
sed -i "s/git+http:\/\/github\.com\/onnx\/onnx.*/onnx==$ONNX_VERSION/" $(Build.BinariesDirectory)/requirements.txt
python3 -m pip install -r $(Build.BinariesDirectory)/requirements.txt
cp $(Build.SourcesDirectory)/tools/ci_build/github/linux/docker/scripts/training/ortmodule/stage1/requirements_torch_cpu.txt $(Build.BinariesDirectory)/requirements_torch_cpu.txt
python3 -m pip install -r $(Build.BinariesDirectory)/requirements_torch_cpu.txt
mkdir $(Build.BinariesDirectory)/requirements_torch_cpu/
cp $(Build.SourcesDirectory)/tools/ci_build/github/linux/docker/scripts/training/ortmodule/stage1/requirements_torch_cpu/requirements.txt $(Build.BinariesDirectory)/requirements_torch_cpu/requirements.txt
python3 -m pip install -r $(Build.BinariesDirectory)/requirements_torch_cpu/requirements.txt
ln -sf /data/models $(Build.BinariesDirectory)
cd $(Build.SourcesDirectory)/java
/usr/local/gradle/bin/gradle "cmakeCheck" "-DcmakeBuildDir=$(Build.BinariesDirectory)/Release"

View file

@ -63,8 +63,9 @@ jobs:
#Do not explicitly specify numpy version as this is not a packaging pipeline, any version should be ok
sed -i "/^numpy/d" $(Build.BinariesDirectory)/requirements.txt
python3 -m pip install -r $(Build.BinariesDirectory)/requirements.txt
cp $(Build.SourcesDirectory)/tools/ci_build/github/linux/docker/scripts/training/ortmodule/stage1/requirements_torch_cpu.txt $(Build.BinariesDirectory)/requirements_torch_cpu.txt
python3 -m pip install -r $(Build.BinariesDirectory)/requirements_torch_cpu.txt
mkdir $(Build.BinariesDirectory)/requirements_torch_cpu
cp $(Build.SourcesDirectory)/tools/ci_build/github/linux/docker/scripts/training/ortmodule/stage1/requirements_torch_cpu/requirements.txt $(Build.BinariesDirectory)/requirements_torch_cpu/requirements.txt
python3 -m pip install -r $(Build.BinariesDirectory)/requirements_torch_cpu/requirements.txt
ln -sf /data/models $(Build.BinariesDirectory)
cd $(Build.SourcesDirectory)/java
/usr/local/gradle/bin/gradle "cmakeCheck" "-DcmakeBuildDir=$(Build.BinariesDirectory)/Release"

View file

@ -1,15 +1,14 @@
trigger: none
stages:
- template: templates/py-packaging-stage.yml
- template: templates/py-packaging-training-cuda-stage.yml
parameters:
build_py_parameters: --enable_training --update --build
enable_linux_cpu: false
enable_linux_gpu: false
enable_linux_gpu_training_cu102: true
enable_linux_gpu_training_cu111: false
enable_linux_rocm_training: false
enable_windows_cpu: false
enable_windows_gpu: false
enable_mac_cpu: false
enable_linux_arm: false
build_py_parameters: --enable_training --update --build
torch_version: '1.11.0'
opset_version: '14'
cuda_version: '10.2'
gcc_version: 8
cmake_cuda_architectures: 37;50;52;60;61;70;80
docker_file: Dockerfile.manylinux2014_training_cuda10_2
agent_pool: Onnxruntime-Linux-GPU-NV6
upload_wheel: 'yes'

View file

@ -1,15 +0,0 @@
trigger: none
stages:
- template: templates/py-packaging-stage.yml
parameters:
build_py_parameters: --enable_training --update --build
enable_linux_cpu: false
enable_linux_gpu: false
enable_linux_gpu_training_cu102: false
enable_linux_gpu_training_cu111: true
enable_linux_rocm_training: false
enable_windows_cpu: false
enable_windows_gpu: false
enable_mac_cpu: false
enable_linux_arm: false

View file

@ -1,14 +1,14 @@
trigger: none
stages:
- template: templates/py-packaging-training-cuda-stage.yml
parameters:
build_py_parameters: --enable_training --update --build
torch_version: '1.11.0'
opset_version: '14'
cuda_version: '11.3'
gcc_version: 10
cmake_cuda_architectures: 37;50;52;60;61;70;75;80;86
docker_file: Dockerfile.manylinux2014_training_cuda11_3
agent_pool: Onnxruntime-Linux-GPU
upload_wheel: 'yes'
trigger: none
stages:
- template: templates/py-packaging-training-cuda-stage.yml
parameters:
build_py_parameters: --enable_training --update --build
torch_version: '1.11.0'
opset_version: '14'
cuda_version: '11.3'
gcc_version: 10
cmake_cuda_architectures: 37;50;52;60;61;70;75;80;86
docker_file: Dockerfile.manylinux2014_training_cuda11_3
agent_pool: Onnxruntime-Linux-GPU
upload_wheel: 'yes'

View file

@ -1,14 +1,14 @@
trigger: none
stages:
- template: templates/py-packaging-training-cuda-stage.yml
parameters:
build_py_parameters: --enable_training --update --build
torch_version: '1.11.0'
opset_version: '14'
cuda_version: '10.2'
gcc_version: 8
cmake_cuda_architectures: 37;50;52;60;61;70;75;80;86
docker_file: Dockerfile.manylinux2014_training_cuda10_2
agent_pool: Onnxruntime-Linux-GPU
upload_wheel: 'yes'
trigger: none
stages:
- template: templates/py-packaging-training-cuda-stage.yml
parameters:
build_py_parameters: --enable_training --update --build
torch_version: '1.11.0'
opset_version: '14'
cuda_version: '11.5'
gcc_version: 10
cmake_cuda_architectures: 37;50;52;60;61;70;75;80;86;87
docker_file: Dockerfile.manylinux2014_training_cuda11_5
agent_pool: Onnxruntime-Linux-GPU
upload_wheel: 'yes'

View file

@ -1,14 +0,0 @@
trigger: none
stages:
- template: templates/py-packaging-training-cuda-stage.yml
parameters:
build_py_parameters: --enable_training --update --build
torch_version: '1.10.0'
opset_version: '14'
cuda_version: '10.2'
gcc_version: 8
cmake_cuda_architectures: 37;50;52;60;61;70;75;80;86
docker_file: Dockerfile.manylinux2014_training_cuda10_2
agent_pool: Onnxruntime-Linux-GPU
upload_wheel: 'yes'

View file

@ -1,14 +0,0 @@
trigger: none
stages:
- template: templates/py-packaging-training-cuda-stage.yml
parameters:
build_py_parameters: --enable_training --update --build
torch_version: '1.10.0'
opset_version: '14'
cuda_version: '11.3'
gcc_version: 10
cmake_cuda_architectures: 37;50;52;60;61;70;75;80;86
docker_file: Dockerfile.manylinux2014_training_cuda11_3
agent_pool: Onnxruntime-Linux-GPU
upload_wheel: 'yes'

View file

@ -1,14 +0,0 @@
trigger: none
stages:
- template: templates/py-packaging-training-cuda-stage.yml
parameters:
build_py_parameters: --enable_training --update --build
torch_version: '1.9.0'
opset_version: '12'
cuda_version: '10.2'
gcc_version: 8
cmake_cuda_architectures: 35;37;50;52;60;61;70
docker_file: Dockerfile.manylinux2014_training_cuda10_2
agent_pool: Onnxruntime-Linux-GPU-NV6
upload_wheel: 'yes'

View file

@ -1,14 +0,0 @@
trigger: none
stages:
- template: templates/py-packaging-training-cuda-stage.yml
parameters:
build_py_parameters: --enable_training --update --build
torch_version: '1.9.0'
opset_version: '12'
cuda_version: '11.1'
gcc_version: 9
cmake_cuda_architectures: 37;50;52;60;61;70;75;80
docker_file: Dockerfile.manylinux2014_training_cuda11_1
agent_pool: Onnxruntime-Linux-GPU
upload_wheel: 'yes'

View file

@ -1,14 +0,0 @@
trigger: none
stages:
- template: templates/py-packaging-training-cuda-stage.yml
parameters:
build_py_parameters: --enable_training --update --build
torch_version: '1.9.0'
opset_version: '12'
cuda_version: '11.4'
gcc_version: 10
cmake_cuda_architectures: 37;50;52;60;61;70;75;80;86
docker_file: Dockerfile.manylinux2014_training_cuda11_4
agent_pool: Onnxruntime-Linux-GPU
upload_wheel: 'yes'

View file

@ -33,7 +33,7 @@ steps:
--volume /bert_data:/bert_data \
--volume /hf_models_cache:/hf_models_cache \
${{ parameters.DockerImageTag }} \
bash -c "python3 -m pip uninstall -y -r /onnxruntime_src/tools/ci_build/github/linux/docker/scripts/training/requirements.txt && python3 -m pip install -r /onnxruntime_src/tools/ci_build/github/linux/docker/scripts/training/ortmodule/stage1/requirements_torch1.11.0_cu11.3.txt && python3 -m pip install -r /onnxruntime_src/tools/ci_build/github/linux/docker/scripts/training/ortmodule/stage2/requirements.txt && rm -rf /build/onnxruntime/ && python3 -m pip install /build/dist/onnxruntime*.whl && python3 -m onnxruntime.training.ortmodule.torch_cpp_extensions.install && /build/launch_test.py --cmd_line_with_args 'python orttraining_ortmodule_tests.py --mnist /mnist --bert_data /bert_data/hf_data/glue_data/CoLA/original/raw --transformers_cache /hf_models_cache/huggingface/transformers' --cwd /build" \
bash -c "python3 -m pip uninstall -y -r /onnxruntime_src/tools/ci_build/github/linux/docker/scripts/training/requirements.txt && python3 -m pip install -r /onnxruntime_src/tools/ci_build/github/linux/docker/scripts/training/ortmodule/stage1/requirements_torch1.11.0_cu11.3/requirements.txt && python3 -m pip install -r /onnxruntime_src/tools/ci_build/github/linux/docker/scripts/training/ortmodule/stage2/requirements.txt && rm -rf /build/onnxruntime/ && python3 -m pip install /build/dist/onnxruntime*.whl && python3 -m onnxruntime.training.ortmodule.torch_cpp_extensions.install && /build/launch_test.py --cmd_line_with_args 'python orttraining_ortmodule_tests.py --mnist /mnist --bert_data /bert_data/hf_data/glue_data/CoLA/original/raw --transformers_cache /hf_models_cache/huggingface/transformers' --cwd /build" \
displayName: 'Run orttraining_ortmodule_tests.py'
condition: succeededOrFailed()
timeoutInMinutes: 60

View file

@ -115,7 +115,7 @@ jobs:
- ${{ if eq(parameters.isTraining, true) }}:
- script: |
python -m pip install -r $(Build.SourcesDirectory)\tools\ci_build\github\linux\docker\scripts\training\ortmodule\stage1\requirements_torch1.9.0_cu11.1.txt
python -m pip install -r $(Build.SourcesDirectory)\tools\ci_build\github\linux\docker\scripts\training\ortmodule\stage1\requirements_torch1.11.0_cu11.3\requirements.txt
python -m pip install -r $(Build.SourcesDirectory)\tools\ci_build\github\linux\docker\scripts\training\requirements.txt
workingDirectory: '$(Build.BinariesDirectory)'
displayName: 'Install python modules'

View file

@ -120,11 +120,6 @@ RUN export OPENSSL_ROOT=openssl-1.1.1l && \
COPY build_scripts/build-cpython.sh /build_scripts/
FROM build_cpython AS build_cpython36
COPY build_scripts/cpython-pubkeys.txt /build_scripts/cpython-pubkeys.txt
RUN manylinux-entrypoint /build_scripts/build-cpython.sh 3.6.15
FROM build_cpython AS build_cpython37
COPY build_scripts/cpython-pubkeys.txt /build_scripts/cpython-pubkeys.txt
RUN manylinux-entrypoint /build_scripts/build-cpython.sh 3.7.12
@ -150,7 +145,6 @@ COPY build_scripts/install-pypy.sh /build_scripts/install-pypy.sh
COPY build_scripts/pypy.sha256 /build_scripts/pypy.sha256
RUN manylinux-entrypoint /build_scripts/install-pypy.sh 3.7 7.3.7
RUN manylinux-entrypoint /build_scripts/install-pypy.sh 3.8 7.3.7
COPY --from=build_cpython36 /opt/_internal /opt/_internal/
COPY --from=build_cpython37 /opt/_internal /opt/_internal/
COPY --from=build_cpython38 /opt/_internal /opt/_internal/
COPY --from=build_cpython39 /opt/_internal /opt/_internal/
@ -166,7 +160,6 @@ COPY --from=all_python /opt/_internal /opt/_internal/
COPY build_scripts/finalize.sh \
build_scripts/update-system-packages.sh \
build_scripts/python-tag-abi-tag.py \
build_scripts/requirements3.6.txt \
build_scripts/requirements3.7.txt \
build_scripts/requirements3.8.txt \
build_scripts/requirements3.9.txt \
@ -181,10 +174,9 @@ ENV SSL_CERT_FILE=/opt/_internal/certs.pem
CMD ["/bin/bash"]
#Build manylinux2014 docker image end
ARG PYTHON_VERSION=3.6
ARG PYTHON_VERSION=3.9
ARG TORCH_VERSION=1.11.0
ARG OPSET_VERSION=12
ARG OPSET_VERSION=14
ARG INSTALL_DEPS_EXTRA_ARGS
#Add our own dependencies

View file

@ -1,204 +0,0 @@
ARG BASEIMAGE=nvcr.io/nvidia/cuda:11.1-cudnn8-devel-centos7
ARG POLICY=manylinux2014
ARG PLATFORM=x86_64
ARG DEVTOOLSET_ROOTPATH=
ARG LD_LIBRARY_PATH_ARG=
ARG PREPEND_PATH=
#We need both CUDA and manylinux. But the CUDA Toolkit End User License Agreement says NVIDIA CUDA Driver Libraries(libcuda.so, libnvidia-ptxjitcompiler.so) are only distributable in applications that meet this criteria:
#1. The application was developed starting from a NVIDIA CUDA container obtained from Docker Hub or the NVIDIA GPU Cloud, and
#2. The resulting application is packaged as a Docker container and distributed to users on Docker Hub or the NVIDIA GPU Cloud only.
#So we use CUDA as the base image then add manylinux on top of it.
#Build manylinux2014 docker image begin
FROM $BASEIMAGE AS runtime_base
ARG POLICY
ARG PLATFORM
ARG DEVTOOLSET_ROOTPATH
ARG LD_LIBRARY_PATH_ARG
ARG PREPEND_PATH
LABEL maintainer="The ManyLinux project"
ENV AUDITWHEEL_POLICY=${POLICY} AUDITWHEEL_ARCH=${PLATFORM} AUDITWHEEL_PLAT=${POLICY}_${PLATFORM}
ENV LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8
ENV DEVTOOLSET_ROOTPATH=${DEVTOOLSET_ROOTPATH}
ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH_ARG}
ENV PATH=${PREPEND_PATH}${PATH}
ENV PKG_CONFIG_PATH=/usr/local/lib/pkgconfig
# first copy the fixup mirrors script, keep the script around
COPY build_scripts/fixup-mirrors.sh /usr/local/sbin/fixup-mirrors
# setup entrypoint, this will wrap commands with `linux32` with i686 images
COPY build_scripts/install-entrypoint.sh \
build_scripts/update-system-packages.sh \
build_scripts/build_utils.sh \
/build_scripts/
RUN /build_scripts/install-entrypoint.sh && rm -rf /build_scripts
COPY manylinux-entrypoint /usr/local/bin/manylinux-entrypoint
ENTRYPOINT ["manylinux-entrypoint"]
COPY build_scripts/install-runtime-packages.sh \
build_scripts/update-system-packages.sh \
build_scripts/build_utils.sh \
/build_scripts/
RUN manylinux-entrypoint /build_scripts/install-runtime-packages.sh && rm -rf /build_scripts/
COPY build_scripts/build_utils.sh /build_scripts/
COPY build_scripts/install-autoconf.sh /build_scripts/
RUN export AUTOCONF_ROOT=autoconf-2.71 && \
export AUTOCONF_HASH=431075ad0bf529ef13cb41e9042c542381103e80015686222b8a9d4abef42a1c && \
export AUTOCONF_DOWNLOAD_URL=http://ftp.gnu.org/gnu/autoconf && \
manylinux-entrypoint /build_scripts/install-autoconf.sh
COPY build_scripts/install-automake.sh /build_scripts/
RUN export AUTOMAKE_ROOT=automake-1.16.5 && \
export AUTOMAKE_HASH=07bd24ad08a64bc17250ce09ec56e921d6343903943e99ccf63bbf0705e34605 && \
export AUTOMAKE_DOWNLOAD_URL=http://ftp.gnu.org/gnu/automake && \
manylinux-entrypoint /build_scripts/install-automake.sh
COPY build_scripts/install-libtool.sh /build_scripts/
RUN export LIBTOOL_ROOT=libtool-2.4.6 && \
export LIBTOOL_HASH=e3bd4d5d3d025a36c21dd6af7ea818a2afcd4dfc1ea5a17b39d7854bcd0c06e3 && \
export LIBTOOL_DOWNLOAD_URL=http://ftp.gnu.org/gnu/libtool && \
manylinux-entrypoint /build_scripts/install-libtool.sh
COPY build_scripts/install-patchelf.sh /build_scripts/
RUN export PATCHELF_VERSION=0.13 && \
export PATCHELF_HASH=60c6aeadb673de9cc1838b630c81f61e31c501de324ef7f1e8094a2431197d09 && \
export PATCHELF_DOWNLOAD_URL=https://github.com/NixOS/patchelf/archive && \
manylinux-entrypoint /build_scripts/install-patchelf.sh
COPY build_scripts/install-libxcrypt.sh /build_scripts/
RUN export LIBXCRYPT_VERSION=4.4.26 && \
export LIBXCRYPT_HASH=e8a544dd19171c1e6191a6044c96cc31496d781ba08b5a00f53310d001d58114 && \
export LIBXCRYPT_DOWNLOAD_URL=https://github.com/besser82/libxcrypt/archive && \
export PERL_ROOT=perl-5.34.0 && \
export PERL_HASH=551efc818b968b05216024fb0b727ef2ad4c100f8cb6b43fab615fa78ae5be9a && \
export PERL_DOWNLOAD_URL=https://www.cpan.org/src/5.0 && \
manylinux-entrypoint /build_scripts/install-libxcrypt.sh
FROM runtime_base AS build_base
COPY build_scripts/install-build-packages.sh /build_scripts/
RUN manylinux-entrypoint /build_scripts/install-build-packages.sh
FROM build_base AS build_git
COPY build_scripts/build-git.sh /build_scripts/
RUN export GIT_ROOT=git-2.33.1 && \
export GIT_HASH=02047f8dc8934d57ff5e02aadd8a2fe8e0bcf94a7158da375e48086cc46fce1d && \
export GIT_DOWNLOAD_URL=https://www.kernel.org/pub/software/scm/git && \
manylinux-entrypoint /build_scripts/build-git.sh
FROM build_base AS build_swig
COPY build_scripts/build-swig.sh /build_scripts/
RUN export SWIG_ROOT=swig-4.0.2 && \
export SWIG_HASH=d53be9730d8d58a16bf0cbd1f8ac0c0c3e1090573168bfa151b01eb47fa906fc && \
export SWIG_DOWNLOAD_URL=https://sourceforge.net/projects/swig/files/swig/${SWIG_ROOT} && \
export PCRE_ROOT=pcre-8.45 && \
export PCRE_HASH=4e6ce03e0336e8b4a3d6c2b70b1c5e18590a5673a98186da90d4f33c23defc09 && \
export PCRE_DOWNLOAD_URL=https://sourceforge.net/projects/pcre/files/pcre/8.45 && \
manylinux-entrypoint /build_scripts/build-swig.sh
FROM build_base AS build_cpython
COPY build_scripts/build-sqlite3.sh /build_scripts/
RUN export SQLITE_AUTOCONF_ROOT=sqlite-autoconf-3360000 && \
export SQLITE_AUTOCONF_HASH=bd90c3eb96bee996206b83be7065c9ce19aef38c3f4fb53073ada0d0b69bbce3 && \
export SQLITE_AUTOCONF_DOWNLOAD_URL=https://www.sqlite.org/2021 && \
manylinux-entrypoint /build_scripts/build-sqlite3.sh
COPY build_scripts/build-openssl.sh /build_scripts/
RUN export OPENSSL_ROOT=openssl-1.1.1l && \
export OPENSSL_HASH=0b7a3e5e59c34827fe0c3a74b7ec8baef302b98fa80088d7f9153aa16fa76bd1 && \
export OPENSSL_DOWNLOAD_URL=https://www.openssl.org/source && \
manylinux-entrypoint /build_scripts/build-openssl.sh
COPY build_scripts/build-cpython.sh /build_scripts/
FROM build_cpython AS build_cpython36
COPY build_scripts/cpython-pubkeys.txt /build_scripts/cpython-pubkeys.txt
RUN manylinux-entrypoint /build_scripts/build-cpython.sh 3.6.15
FROM build_cpython AS build_cpython37
COPY build_scripts/cpython-pubkeys.txt /build_scripts/cpython-pubkeys.txt
RUN manylinux-entrypoint /build_scripts/build-cpython.sh 3.7.12
FROM build_cpython AS build_cpython38
COPY build_scripts/ambv-pubkey.txt /build_scripts/cpython-pubkeys.txt
RUN manylinux-entrypoint /build_scripts/build-cpython.sh 3.8.12
FROM build_cpython AS build_cpython39
COPY build_scripts/ambv-pubkey.txt /build_scripts/cpython-pubkeys.txt
RUN manylinux-entrypoint /build_scripts/build-cpython.sh 3.9.8
FROM build_cpython AS build_cpython310
COPY build_scripts/cpython-pubkey-310-311.txt /build_scripts/cpython-pubkeys.txt
RUN manylinux-entrypoint /build_scripts/build-cpython.sh 3.10.0
FROM build_cpython AS all_python
COPY build_scripts/install-pypy.sh /build_scripts/install-pypy.sh
COPY build_scripts/pypy.sha256 /build_scripts/pypy.sha256
RUN manylinux-entrypoint /build_scripts/install-pypy.sh 3.7 7.3.7
RUN manylinux-entrypoint /build_scripts/install-pypy.sh 3.8 7.3.7
COPY --from=build_cpython36 /opt/_internal /opt/_internal/
COPY --from=build_cpython37 /opt/_internal /opt/_internal/
COPY --from=build_cpython38 /opt/_internal /opt/_internal/
COPY --from=build_cpython39 /opt/_internal /opt/_internal/
COPY --from=build_cpython310 /opt/_internal /opt/_internal/
RUN hardlink -cv /opt/_internal
FROM runtime_base
COPY --from=build_git /manylinux-rootfs /
COPY --from=build_swig /manylinux-rootfs /
COPY --from=build_cpython /manylinux-rootfs /
COPY --from=all_python /opt/_internal /opt/_internal/
COPY build_scripts/finalize.sh \
build_scripts/update-system-packages.sh \
build_scripts/python-tag-abi-tag.py \
build_scripts/requirements3.6.txt \
build_scripts/requirements3.7.txt \
build_scripts/requirements3.8.txt \
build_scripts/requirements3.9.txt \
build_scripts/requirements3.10.txt \
build_scripts/requirements-base-tools.txt \
/build_scripts/
COPY build_scripts/requirements-tools/* /build_scripts/requirements-tools/
RUN manylinux-entrypoint /build_scripts/finalize.sh && rm -rf /build_scripts
ENV SSL_CERT_FILE=/opt/_internal/certs.pem
CMD ["/bin/bash"]
#Build manylinux2014 docker image end
ARG PYTHON_VERSION=3.6
ARG TORCH_VERSION=1.9.0
ARG OPSET_VERSION=12
ARG INSTALL_DEPS_EXTRA_ARGS
#Add our own dependencies
ADD scripts /tmp/scripts
RUN cd /tmp/scripts && \
/tmp/scripts/manylinux/install_centos.sh && \
/tmp/scripts/install_os_deps.sh -d gpu $INSTALL_DEPS_EXTRA_ARGS && \
/tmp/scripts/install_ninja.sh && \
/tmp/scripts/install_python_deps.sh -d gpu -v 11.1 -p $PYTHON_VERSION -h $TORCH_VERSION $INSTALL_DEPS_EXTRA_ARGS && \
rm -rf /tmp/scripts
ARG BUILD_UID=1001
ARG BUILD_USER=onnxruntimedev
RUN adduser --uid $BUILD_UID $BUILD_USER
WORKDIR /home/$BUILD_USER
USER $BUILD_USER
ENV PATH /usr/local/gradle/bin:/usr/local/dotnet:$PATH
ENV ORTMODULE_ONNX_OPSET_VERSION=$OPSET_VERSION

View file

@ -120,11 +120,6 @@ RUN export OPENSSL_ROOT=openssl-1.1.1l && \
COPY build_scripts/build-cpython.sh /build_scripts/
FROM build_cpython AS build_cpython36
COPY build_scripts/cpython-pubkeys.txt /build_scripts/cpython-pubkeys.txt
RUN manylinux-entrypoint /build_scripts/build-cpython.sh 3.6.15
FROM build_cpython AS build_cpython37
COPY build_scripts/cpython-pubkeys.txt /build_scripts/cpython-pubkeys.txt
RUN manylinux-entrypoint /build_scripts/build-cpython.sh 3.7.12
@ -150,7 +145,6 @@ COPY build_scripts/install-pypy.sh /build_scripts/install-pypy.sh
COPY build_scripts/pypy.sha256 /build_scripts/pypy.sha256
RUN manylinux-entrypoint /build_scripts/install-pypy.sh 3.7 7.3.7
RUN manylinux-entrypoint /build_scripts/install-pypy.sh 3.8 7.3.7
COPY --from=build_cpython36 /opt/_internal /opt/_internal/
COPY --from=build_cpython37 /opt/_internal /opt/_internal/
COPY --from=build_cpython38 /opt/_internal /opt/_internal/
COPY --from=build_cpython39 /opt/_internal /opt/_internal/
@ -166,7 +160,6 @@ COPY --from=all_python /opt/_internal /opt/_internal/
COPY build_scripts/finalize.sh \
build_scripts/update-system-packages.sh \
build_scripts/python-tag-abi-tag.py \
build_scripts/requirements3.6.txt \
build_scripts/requirements3.7.txt \
build_scripts/requirements3.8.txt \
build_scripts/requirements3.9.txt \
@ -181,7 +174,7 @@ ENV SSL_CERT_FILE=/opt/_internal/certs.pem
CMD ["/bin/bash"]
#Build manylinux2014 docker image end
ARG PYTHON_VERSION=3.6
ARG PYTHON_VERSION=3.9
ARG TORCH_VERSION=1.11.0
ARG OPSET_VERSION=14
ARG INSTALL_DEPS_EXTRA_ARGS

View file

@ -1,4 +1,4 @@
ARG BASEIMAGE=nvidia/cuda:11.4.0-cudnn8-devel-centos7
ARG BASEIMAGE=nvidia/cuda:11.5.0-cudnn8-devel-centos7
ARG POLICY=manylinux2014
ARG PLATFORM=x86_64
ARG DEVTOOLSET_ROOTPATH=
@ -120,11 +120,6 @@ RUN export OPENSSL_ROOT=openssl-1.1.1l && \
COPY build_scripts/build-cpython.sh /build_scripts/
FROM build_cpython AS build_cpython36
COPY build_scripts/cpython-pubkeys.txt /build_scripts/cpython-pubkeys.txt
RUN manylinux-entrypoint /build_scripts/build-cpython.sh 3.6.15
FROM build_cpython AS build_cpython37
COPY build_scripts/cpython-pubkeys.txt /build_scripts/cpython-pubkeys.txt
RUN manylinux-entrypoint /build_scripts/build-cpython.sh 3.7.12
@ -150,7 +145,6 @@ COPY build_scripts/install-pypy.sh /build_scripts/install-pypy.sh
COPY build_scripts/pypy.sha256 /build_scripts/pypy.sha256
RUN manylinux-entrypoint /build_scripts/install-pypy.sh 3.7 7.3.7
RUN manylinux-entrypoint /build_scripts/install-pypy.sh 3.8 7.3.7
COPY --from=build_cpython36 /opt/_internal /opt/_internal/
COPY --from=build_cpython37 /opt/_internal /opt/_internal/
COPY --from=build_cpython38 /opt/_internal /opt/_internal/
COPY --from=build_cpython39 /opt/_internal /opt/_internal/
@ -166,7 +160,6 @@ COPY --from=all_python /opt/_internal /opt/_internal/
COPY build_scripts/finalize.sh \
build_scripts/update-system-packages.sh \
build_scripts/python-tag-abi-tag.py \
build_scripts/requirements3.6.txt \
build_scripts/requirements3.7.txt \
build_scripts/requirements3.8.txt \
build_scripts/requirements3.9.txt \
@ -181,9 +174,9 @@ ENV SSL_CERT_FILE=/opt/_internal/certs.pem
CMD ["/bin/bash"]
#Build manylinux2014 docker image end
ARG PYTHON_VERSION=3.6
ARG TORCH_VERSION=1.9.0
ARG OPSET_VERSION=12
ARG PYTHON_VERSION=3.9
ARG TORCH_VERSION=1.11.0
ARG OPSET_VERSION=14
ARG INSTALL_DEPS_EXTRA_ARGS
#Add our own dependencies
@ -192,7 +185,7 @@ RUN cd /tmp/scripts && \
/tmp/scripts/manylinux/install_centos.sh && \
/tmp/scripts/install_os_deps.sh -d gpu $INSTALL_DEPS_EXTRA_ARGS && \
/tmp/scripts/install_ninja.sh && \
/tmp/scripts/install_python_deps.sh -d gpu -v 11.4 -p $PYTHON_VERSION -h $TORCH_VERSION $INSTALL_DEPS_EXTRA_ARGS && \
/tmp/scripts/install_python_deps.sh -d gpu -v 11.5 -p $PYTHON_VERSION -h $TORCH_VERSION $INSTALL_DEPS_EXTRA_ARGS && \
rm -rf /tmp/scripts
ARG BUILD_UID=1001

View file

@ -57,20 +57,11 @@ if [ $DEVICE_TYPE = "gpu" ]; then
${PYTHON_EXE} -m pip install -r ${0/%install_python_deps.sh/training\/requirements.txt}
else
if [[ $TARGET_ROCM = false ]]; then
${PYTHON_EXE} -m pip install -r ${0/%install_python_deps.sh/training\/ortmodule\/stage1\/requirements_torch${TORCH_VERSION}_cu${CU_VER}.txt}
# Due to a [bug on DeepSpeed](https://github.com/microsoft/DeepSpeed/issues/663), we install it separately through ortmodule/stage2/requirements.txt
${PYTHON_EXE} -m pip install -r ${0/%install_python_deps.sh/training\/ortmodule\/stage1\/requirements_torch${TORCH_VERSION}_cu${CU_VER}\/requirements.txt}
${PYTHON_EXE} -m pip install -r ${0/%install_python_deps.sh/training\/ortmodule\/stage2\/requirements.txt}
else
${PYTHON_EXE} -m pip install -r ${0/%install_python_deps.sh/training\/ortmodule\/stage1\/requirements-torch${TORCH_VERSION}_rocm.txt}
${PYTHON_EXE} -m pip install -r ${0/%install_python_deps.sh/training\/ortmodule\/stage1\/requirements_torch${TORCH_VERSION}_rocm\/requirements.txt}
${PYTHON_EXE} -m pip install fairscale
# remove DeepSpeed until it's required for testing purposes
# remove triton requirement from getting triggered in requirements-sparse_attn.txt
# git clone https://github.com/ROCmSoftwarePlatform/DeepSpeed
# cd DeepSpeed &&\
# rm requirements/requirements-sparse_attn.txt &&\
# ${PYTHON_EXE} setup.py bdist_wheel &&\
# ${PYTHON_EXE} -m pip install dist/deepspeed*.whl &&\
# cd .. && rm -fr DeepSpeed
fi
fi
fi

View file

@ -94,7 +94,7 @@ for PYTHON_EXE in "${PYTHON_EXES[@]}"
do
${PYTHON_EXE} -m pip install -r ${0/%install_deps\.sh/requirements\.txt}
if ! [[ ${PYTHON_EXE} = "/opt/python/cp310-cp310/bin/python3.10" ]]; then
${PYTHON_EXE} -m pip install -r ${0/%install_deps\.sh/..\/training\/ortmodule\/stage1\/requirements_torch_cpu.txt}
${PYTHON_EXE} -m pip install -r ${0/%install_deps\.sh/..\/training\/ortmodule\/stage1\/requirements_torch_cpu\/requirements.txt}
else
${PYTHON_EXE} -m pip install torch==1.11.0
fi

View file

@ -1,6 +1,5 @@
--pre
-f https://download.pytorch.org/whl/torch_stable.html
torch==1.11.0
torchvision==0.12.0
torchtext==0.12.0
setuptools>=41.4.0
--extra-index-url https://download.pytorch.org/whl/cu102
torch==1.11.0
torchvision==0.12.0
torchtext==0.12.0
setuptools>=41.4.0

View file

@ -1,6 +0,0 @@
--pre
-f https://download.pytorch.org/whl/cu113/torch_stable.html
torch==1.11.0+cu113
torchvision==0.12.0+cu113
torchtext==0.12.0
setuptools>=41.4.0

View file

@ -0,0 +1,5 @@
--extra-index-url https://download.pytorch.org/whl/cu113
torch==1.11.0
torchvision==0.12.0
torchtext==0.12.0
setuptools>=41.4.0

View file

@ -0,0 +1,5 @@
--extra-index-url https://download.pytorch.org/whl/cu115
torch==1.11.0
torchvision==0.12.0
torchtext==0.12.0
setuptools>=41.4.0

View file

@ -1,13 +1,13 @@
# transformers requires sklearn
--pre
-f https://download.pytorch.org/whl/rocm4.2/torch_stable.html
torch==1.11.0
pandas
sklearn
numpy==1.21.0
transformers==v4.3.2
tensorboard>=2.2.0,<2.5.0
h5py
wget
setuptools>=41.4.0
# transformers requires sklearn
--pre
-f https://download.pytorch.org/whl/rocm4.2/torch_stable.html
torch==1.11.0
pandas
sklearn
numpy==1.21.0
transformers==v4.3.2
tensorboard>=2.2.0,<2.5.0
h5py
wget
setuptools>=41.4.0