diff --git a/aten/src/ATen/autocast_mode.cpp b/aten/src/ATen/autocast_mode.cpp index 95f9029c8df..693b70f99a4 100644 --- a/aten/src/ATen/autocast_mode.cpp +++ b/aten/src/ATen/autocast_mode.cpp @@ -530,7 +530,6 @@ TORCH_LIBRARY_IMPL(aten, AutocastCPU, m) { KERNEL_CPU(ADD_NS(cholesky_solve), "cholesky_solve", Tensor(const Tensor &, const Tensor &, bool), fp32) KERNEL_CPU(ADD_NS(inverse), "inverse", Tensor(const Tensor &), fp32) KERNEL_CPU(ADD_NS(lu_solve), "lu_solve", Tensor(const Tensor &, const Tensor &, const Tensor &), fp32) - KERNEL_CPU(ADD_NS(matrix_rank), "matrix_rank", Tensor(const Tensor &, bool), fp32) KERNEL_CPU(ADD_NS(orgqr), "orgqr", Tensor(const Tensor &, const Tensor &), fp32) KERNEL_CPU(ADD_NS(ormqr), "ormqr", Tensor(const Tensor &, const Tensor &, const Tensor &, bool, bool), fp32) KERNEL_CPU(ADD_NS(pinverse), "pinverse", Tensor(const Tensor &, double), fp32) diff --git a/aten/src/ATen/native/LinearAlgebra.cpp b/aten/src/ATen/native/LinearAlgebra.cpp index bbc0f20a706..c658d4427c9 100644 --- a/aten/src/ATen/native/LinearAlgebra.cpp +++ b/aten/src/ATen/native/LinearAlgebra.cpp @@ -713,24 +713,6 @@ Tensor linalg_matrix_rank(const Tensor& input, double tol, bool hermitian) { return matrix_rank_impl(input, atol_tensor, rtol_tensor, hermitian, result); } -Tensor matrix_rank(const Tensor& self, double tol, bool symmetric) { - TORCH_WARN_ONCE( - "torch.matrix_rank is deprecated in favor of torch.linalg.matrix_rank", - "and will be removed in a future PyTorch release. The parameter 'symmetric' was ", - "renamed in torch.linalg.matrix_rank to 'hermitian'." - ); - return at::linalg_matrix_rank(self, tol, symmetric); -} - -Tensor matrix_rank(const Tensor& self, bool symmetric) { - TORCH_WARN_ONCE( - "torch.matrix_rank is deprecated in favor of torch.linalg.matrix_rank", - "and will be removed in a future PyTorch release. The parameter 'symmetric' was ", - "renamed in torch.linalg.matrix_rank to 'hermitian'." 
- ); - return at::linalg_matrix_rank(self, 0.0, c10::nullopt, symmetric); -} - // multi_dot helper functions namespace { diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml index ab6e9ca57a9..5903927e053 100644 --- a/aten/src/ATen/native/native_functions.yaml +++ b/aten/src/ATen/native/native_functions.yaml @@ -3237,10 +3237,6 @@ CompositeImplicitAutograd: matmul_out NestedTensorCPU, NestedTensorCUDA: matmul_out_nested -- func: matrix_rank.tol(Tensor self, float tol, bool symmetric=False) -> Tensor - -- func: matrix_rank(Tensor self, bool symmetric=False) -> Tensor - # Alias to linalg.matrix_power - func: matrix_power(Tensor self, int n) -> Tensor variants: function, method diff --git a/docs/source/amp.rst b/docs/source/amp.rst index 3c0c77d4bc4..81587521919 100644 --- a/docs/source/amp.rst +++ b/docs/source/amp.rst @@ -294,7 +294,6 @@ CPU Ops that can autocast to ``float32`` ``cholesky_solve``, ``inverse``, ``lu_solve``, -``matrix_rank``, ``orgqr``, ``inverse``, ``ormqr``, diff --git a/docs/source/torch.rst b/docs/source/torch.rst index 147795e7dea..997de20682f 100644 --- a/docs/source/torch.rst +++ b/docs/source/torch.rst @@ -572,7 +572,6 @@ BLAS and LAPACK Operations lu_unpack matmul matrix_power - matrix_rank matrix_exp mm mv diff --git a/test/forward_backward_compatibility/check_forward_backward_compatibility.py b/test/forward_backward_compatibility/check_forward_backward_compatibility.py index 03111d373e8..4ba19a18d0a 100644 --- a/test/forward_backward_compatibility/check_forward_backward_compatibility.py +++ b/test/forward_backward_compatibility/check_forward_backward_compatibility.py @@ -65,6 +65,8 @@ ALLOW_LIST = [ ("aten::eig.e", datetime.date(9999, 1, 1)), ("aten::adaptive_avg_pool3d_backward", datetime.date(9999, 1, 1)), ("aten::_embedding_bag_dense_backward", datetime.date(9999, 1, 1)), + ("aten::matrix_rank", datetime.date(9999, 1, 1)), + ("aten::matrix_rank.tol", datetime.date(9999, 1, 1)), 
("aten::randperm", datetime.date(9999, 1, 1)), ("aten::linalg_solve", datetime.date(2022, 8, 31)), ("aten::linalg_solve.out", datetime.date(2022, 8, 31)), diff --git a/test/test_linalg.py b/test/test_linalg.py index 54a16be7cbe..b154ab5bb8e 100644 --- a/test/test_linalg.py +++ b/test/test_linalg.py @@ -141,6 +141,11 @@ class TestLinalg(TestCase): run_test_case(zero_strided, b) run_test_case(a, zero_strided) + def test_matrix_rank_removed_error(self, device): + a = make_tensor(5, 5, device=device, dtype=torch.float32) + with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"): + torch.matrix_rank(a) + def test_solve_removed_error(self, device): a = make_tensor(5, 5, device=device, dtype=torch.float32) b = make_tensor(5, 1, device=device, dtype=torch.float32) @@ -3420,37 +3425,6 @@ class TestLinalg(TestCase): self.assertEqual(matrix_rank(a).item(), 9) self.assertEqual(matrix_rank(a, hermitian=True).item(), 9) - @skipCUDAIfNoMagma - @skipCPUIfNoLapack - @dtypes(*floating_and_complex_types()) - def test_old_matrix_rank(self, device, dtype): - a = torch.eye(10, dtype=dtype, device=device) - self.assertEqual(torch.matrix_rank(a).item(), 10) - self.assertEqual(torch.matrix_rank(a, True).item(), 10) - - a[5, 5] = 0 - self.assertEqual(torch.matrix_rank(a).item(), 9) - self.assertEqual(torch.matrix_rank(a, True).item(), 9) - - a = torch.randn(24, 42, dtype=dtype, device=device) - self.assertEqual(torch.matrix_rank(a), torch.matrix_rank(a.t())) - aaT = torch.mm(a, a.conj().t()) - self.assertEqual(torch.matrix_rank(aaT), torch.matrix_rank(aaT, True)) - aTa = torch.mm(a.conj().t(), a) - self.assertEqual(torch.matrix_rank(aTa), torch.matrix_rank(aTa, True)) - - a = torch.randn(35, 75, dtype=dtype, device=device) - self.assertEqual(torch.matrix_rank(a), np.linalg.matrix_rank(a.cpu().numpy())) - self.assertEqual(torch.matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01)) - - aaT = torch.mm(a, a.conj().t()) - 
self.assertEqual(torch.matrix_rank(aaT), np.linalg.matrix_rank(aaT.cpu().numpy())) - self.assertEqual(torch.matrix_rank(aaT, 0.01), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01)) - - if np.lib.NumpyVersion(np.__version__) >= '1.14.0': - self.assertEqual(torch.matrix_rank(aaT, True), np.linalg.matrix_rank(aaT.cpu().numpy(), True)) - self.assertEqual(torch.matrix_rank(aaT, 0.01, True), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01, True)) - - @onlyNativeDeviceTypes - @dtypes(torch.double) - # This tests only the cases where torch.chain_matmul differs from torch.linalg.multi_dot which this is an "alias" for. diff --git a/torch/__init__.py b/torch/__init__.py index c5c3e69ddcb..18a6b19b8ec 100644 --- a/torch/__init__.py +++ b/torch/__init__.py @@ -955,7 +955,11 @@ from torch.utils.dlpack import from_dlpack, to_dlpack from . import _masked # Import removed ops with error message about removal -from ._linalg_utils import eig, solve +from ._linalg_utils import ( # type: ignore[misc] + matrix_rank, + eig, + solve, +) def _register_device_module(device_type, module): diff --git a/torch/_linalg_utils.py b/torch/_linalg_utils.py index b9261cb25ae..6e8959d7fc6 100644 --- a/torch/_linalg_utils.py +++ b/torch/_linalg_utils.py @@ -98,6 +98,13 @@ def symeig(A: Tensor, largest: Optional[bool] = False) -> Tuple[Tensor, Tensor]: # These functions were deprecated and removed # This nice error message can be removed in version 1.13+ +def matrix_rank(input, tol=None, symmetric=False, *, out=None) -> Tensor: + raise RuntimeError( + "This function was deprecated since version 1.9 and is now removed. " + "Please use the `torch.linalg.matrix_rank` function instead." + ) + + def solve(input: Tensor, A: Tensor, *, out=None) -> Tuple[Tensor, Tensor]: raise RuntimeError( "This function was deprecated since version 1.9 and is now removed. 
Please use the `torch.linalg.solve` function instead.", diff --git a/torch/_torch_docs.py b/torch/_torch_docs.py index 0732fc17ea9..dfe86ddcf25 100644 --- a/torch/_torch_docs.py +++ b/torch/_torch_docs.py @@ -6621,51 +6621,6 @@ Example:: ), ) -add_docstr( - torch.matrix_rank, - r""" -matrix_rank(input, tol=None, symmetric=False, *, out=None) -> Tensor - -Returns the numerical rank of a 2-D tensor. The method to compute the -matrix rank is done using SVD by default. If :attr:`symmetric` is ``True``, -then :attr:`input` is assumed to be symmetric, and the computation of the -rank is done by obtaining the eigenvalues. - -:attr:`tol` is the threshold below which the singular values (or the eigenvalues -when :attr:`symmetric` is ``True``) are considered to be 0. If :attr:`tol` is not -specified, :attr:`tol` is set to ``S.max() * max(S.size()) * eps`` where `S` is the -singular values (or the eigenvalues when :attr:`symmetric` is ``True``), and ``eps`` -is the epsilon value for the datatype of :attr:`input`. - -.. warning:: - - :func:`torch.matrix_rank` is deprecated in favor of :func:`torch.linalg.matrix_rank` - and will be removed in a future PyTorch release. The parameter :attr:`symmetric` was - renamed in :func:`torch.linalg.matrix_rank` to :attr:`hermitian`. - -Args: - input (Tensor): the input 2-D tensor - tol (float, optional): the tolerance value. Default: ``None`` - symmetric(bool, optional): indicates whether :attr:`input` is symmetric. 
- Default: ``False`` - -Keyword args: - {out} - -Example:: - - >>> a = torch.eye(10) - >>> torch.matrix_rank(a) - tensor(10) - >>> b = torch.eye(10) - >>> b[0, 0] = 0 - >>> torch.matrix_rank(b) - tensor(9) -""".format( - **common_args - ), -) - add_docstr( torch.matrix_power, r""" diff --git a/torch/overrides.py b/torch/overrides.py index e06e3403f0e..a4dc06b71e0 100644 --- a/torch/overrides.py +++ b/torch/overrides.py @@ -671,7 +671,6 @@ def get_testing_overrides() -> Dict[Callable, Callable]: torch.linalg.matmul: lambda input, other, out=None: -1, # alias for torch.matmul torch.matrix_power: lambda input, n: -1, torch.linalg.matrix_power: lambda input, n, out=None: -1, - torch.matrix_rank: lambda input, tol=None, symmetric=False: -1, torch.linalg.matrix_rank: lambda input, tol=None, hermitian=False: -1, torch.linalg.multi_dot: lambda tensors, out=None: -1, torch.matrix_exp: lambda input: -1,