Remove deprecated torch.matrix_rank (#70981)

The time has come to remove the deprecated linear algebra functions. This PR removes `torch.matrix_rank`; its replacement is `torch.linalg.matrix_rank`.
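For anyone migrating, a minimal before/after sketch (written against the current `torch.linalg.matrix_rank` signature, in which the old `symmetric` flag is named `hermitian` and tolerances are passed as `atol`/`rtol` keywords):

```python
import torch

a = torch.eye(10)
a[5, 5] = 0

# Removed: torch.matrix_rank(a)        -> tensor(9)
# Removed: torch.matrix_rank(a, True)  -> symmetric (eigenvalue-based) path

rank = torch.linalg.matrix_rank(a)                       # tensor(9)
rank_sym = torch.linalg.matrix_rank(a, hermitian=True)   # tensor(9)
```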

cc @jianyuh @nikitaved @pearu @mruberry @walterddr @IvanYashchuk @xwang233 @Lezcano
Pull Request resolved: https://github.com/pytorch/pytorch/pull/70981
Approved by: https://github.com/lezcano, https://github.com/kit1980
Author: Ivan Yashchuk
Date: 2022-09-22 17:40:46 +00:00
Committed by: PyTorch MergeBot
Parent: e342976907
Commit: bcf93181a0

11 changed files with 19 additions and 103 deletions

View file

@@ -530,7 +530,6 @@ TORCH_LIBRARY_IMPL(aten, AutocastCPU, m) {
   KERNEL_CPU(ADD_NS(cholesky_solve), "cholesky_solve", Tensor(const Tensor &, const Tensor &, bool), fp32)
   KERNEL_CPU(ADD_NS(inverse), "inverse", Tensor(const Tensor &), fp32)
   KERNEL_CPU(ADD_NS(lu_solve), "lu_solve", Tensor(const Tensor &, const Tensor &, const Tensor &), fp32)
-  KERNEL_CPU(ADD_NS(matrix_rank), "matrix_rank", Tensor(const Tensor &, bool), fp32)
   KERNEL_CPU(ADD_NS(orgqr), "orgqr", Tensor(const Tensor &, const Tensor &), fp32)
   KERNEL_CPU(ADD_NS(ormqr), "ormqr", Tensor(const Tensor &, const Tensor &, const Tensor &, bool, bool), fp32)
   KERNEL_CPU(ADD_NS(pinverse), "pinverse", Tensor(const Tensor &, double), fp32)

View file

@@ -713,24 +713,6 @@ Tensor linalg_matrix_rank(const Tensor& input, double tol, bool hermitian) {
   return matrix_rank_impl(input, atol_tensor, rtol_tensor, hermitian, result);
 }

-Tensor matrix_rank(const Tensor& self, double tol, bool symmetric) {
-  TORCH_WARN_ONCE(
-    "torch.matrix_rank is deprecated in favor of torch.linalg.matrix_rank",
-    "and will be removed in a future PyTorch release. The parameter 'symmetric' was ",
-    "renamed in torch.linalg.matrix_rank to 'hermitian'."
-  );
-  return at::linalg_matrix_rank(self, tol, symmetric);
-}
-
-Tensor matrix_rank(const Tensor& self, bool symmetric) {
-  TORCH_WARN_ONCE(
-    "torch.matrix_rank is deprecated in favor of torch.linalg.matrix_rank",
-    "and will be removed in a future PyTorch release. The parameter 'symmetric' was ",
-    "renamed in torch.linalg.matrix_rank to 'hermitian'."
-  );
-  return at::linalg_matrix_rank(self, 0.0, c10::nullopt, symmetric);
-}
-
 // multi_dot helper functions
 namespace {
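The deleted overloads above were thin deprecation shims that forwarded to the new operator. In terms of today's Python API, the forwarding was roughly as follows (a sketch mirroring the two C++ `at::linalg_matrix_rank` calls above; the concrete values are illustrative):

```python
import torch

A = torch.eye(5)
A[2, 2] = 0.0

# torch.matrix_rank(A, tol, symmetric) forwarded to the tol overload,
# which treats tol as an absolute tolerance (atol) with rtol fixed at 0:
r1 = torch.linalg.matrix_rank(A, atol=1e-6, rtol=0.0, hermitian=True)

# torch.matrix_rank(A, symmetric) forwarded atol=0.0 with rtol unset,
# which falls back to the default relative tolerance:
r2 = torch.linalg.matrix_rank(A, atol=0.0, hermitian=True)

print(r1, r2)  # tensor(4) tensor(4)
```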

View file

@@ -3237,10 +3237,6 @@
     CompositeImplicitAutograd: matmul_out
     NestedTensorCPU, NestedTensorCUDA: matmul_out_nested

-- func: matrix_rank.tol(Tensor self, float tol, bool symmetric=False) -> Tensor
-
-- func: matrix_rank(Tensor self, bool symmetric=False) -> Tensor
-
 # Alias to linalg.matrix_power
 - func: matrix_power(Tensor self, int n) -> Tensor
   variants: function, method

View file

@@ -294,7 +294,6 @@ CPU Ops that can autocast to ``float32``
 ``cholesky_solve``,
 ``inverse``,
 ``lu_solve``,
-``matrix_rank``,
 ``orgqr``,
 ``inverse``,
 ``ormqr``,

View file

@@ -572,7 +572,6 @@ BLAS and LAPACK Operations
     lu_unpack
     matmul
     matrix_power
-    matrix_rank
     matrix_exp
     mm
     mv

View file

@@ -65,6 +65,8 @@ ALLOW_LIST = [
     ("aten::eig.e", datetime.date(9999, 1, 1)),
     ("aten::adaptive_avg_pool3d_backward", datetime.date(9999, 1, 1)),
     ("aten::_embedding_bag_dense_backward", datetime.date(9999, 1, 1)),
+    ("aten::matrix_rank", datetime.date(9999, 1, 1)),
+    ("aten::matrix_rank.tol", datetime.date(9999, 1, 1)),
     ("aten::randperm", datetime.date(9999, 1, 1)),
     ("aten::linalg_solve", datetime.date(2022, 8, 31)),
     ("aten::linalg_solve.out", datetime.date(2022, 8, 31)),

View file

@@ -141,6 +141,11 @@ class TestLinalg(TestCase):
         run_test_case(zero_strided, b)
         run_test_case(a, zero_strided)

+    def test_matrix_rank_removed_error(self, device):
+        a = make_tensor(5, 5, device=device, dtype=torch.float32)
+        with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
+            torch.matrix_rank(a)
+
     def test_solve_removed_error(self, device):
         a = make_tensor(5, 5, device=device, dtype=torch.float32)
         b = make_tensor(5, 1, device=device, dtype=torch.float32)
@@ -3420,37 +3425,6 @@
         self.assertEqual(matrix_rank(a).item(), 9)
         self.assertEqual(matrix_rank(a, hermitian=True).item(), 9)

-    @skipCUDAIfNoMagma
-    @skipCPUIfNoLapack
-    @dtypes(*floating_and_complex_types())
-    def test_old_matrix_rank(self, device, dtype):
-        a = torch.eye(10, dtype=dtype, device=device)
-        self.assertEqual(torch.matrix_rank(a).item(), 10)
-        self.assertEqual(torch.matrix_rank(a, True).item(), 10)
-
-        a[5, 5] = 0
-        self.assertEqual(torch.matrix_rank(a).item(), 9)
-        self.assertEqual(torch.matrix_rank(a, True).item(), 9)
-
-        a = torch.randn(24, 42, dtype=dtype, device=device)
-        self.assertEqual(torch.matrix_rank(a), torch.matrix_rank(a.t()))
-        aaT = torch.mm(a, a.conj().t())
-        self.assertEqual(torch.matrix_rank(aaT), torch.matrix_rank(aaT, True))
-        aTa = torch.mm(a.conj().t(), a)
-        self.assertEqual(torch.matrix_rank(aTa), torch.matrix_rank(aTa, True))
-
-        a = torch.randn(35, 75, dtype=dtype, device=device)
-        self.assertEqual(torch.matrix_rank(a), np.linalg.matrix_rank(a.cpu().numpy()))
-        self.assertEqual(torch.matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01))
-
-        aaT = torch.mm(a, a.conj().t())
-        self.assertEqual(torch.matrix_rank(aaT), np.linalg.matrix_rank(aaT.cpu().numpy()))
-        self.assertEqual(torch.matrix_rank(aaT, 0.01), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01))
-
-        if np.lib.NumpyVersion(np.__version__) >= '1.14.0':
-            self.assertEqual(torch.matrix_rank(aaT, True), np.linalg.matrix_rank(aaT.cpu().numpy(), True))
-            self.assertEqual(torch.matrix_rank(aaT, 0.01, True), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01, True))
-
     @onlyNativeDeviceTypes
     @dtypes(torch.double)
     # This tests only the cases where torch.chain_matmul differs from torch.linalg.multi_dot which this is an "alias" for.

View file

@@ -955,7 +955,11 @@ from torch.utils.dlpack import from_dlpack, to_dlpack
 from . import _masked

 # Import removed ops with error message about removal
-from ._linalg_utils import eig, solve
+from ._linalg_utils import (  # type: ignore[misc]
+    matrix_rank,
+    eig,
+    solve,
+)

 def _register_device_module(device_type, module):

View file

@@ -98,6 +98,13 @@ def symeig(A: Tensor, largest: Optional[bool] = False) -> Tuple[Tensor, Tensor]:

 # These functions were deprecated and removed
 # This nice error message can be removed in version 1.13+
+def matrix_rank(input, tol=None, symmetric=False, *, out=None) -> Tensor:
+    raise RuntimeError(
+        "This function was deprecated since version 1.9 and is now removed. "
+        "Please use the `torch.linalg.matrix_rank` function instead.",
+    )
+
+
 def solve(input: Tensor, A: Tensor, *, out=None) -> Tuple[Tensor, Tensor]:
     raise RuntimeError(
         "This function was deprecated since version 1.9 and is now removed. Please use the `torch.linalg.solve` function instead.",

View file

@@ -6621,51 +6621,6 @@ Example::
     ),
 )

-add_docstr(
-    torch.matrix_rank,
-    r"""
-matrix_rank(input, tol=None, symmetric=False, *, out=None) -> Tensor
-
-Returns the numerical rank of a 2-D tensor. The method to compute the
-matrix rank is done using SVD by default. If :attr:`symmetric` is ``True``,
-then :attr:`input` is assumed to be symmetric, and the computation of the
-rank is done by obtaining the eigenvalues.
-
-:attr:`tol` is the threshold below which the singular values (or the eigenvalues
-when :attr:`symmetric` is ``True``) are considered to be 0. If :attr:`tol` is not
-specified, :attr:`tol` is set to ``S.max() * max(S.size()) * eps`` where `S` is the
-singular values (or the eigenvalues when :attr:`symmetric` is ``True``), and ``eps``
-is the epsilon value for the datatype of :attr:`input`.
-
-.. warning::
-
-    :func:`torch.matrix_rank` is deprecated in favor of :func:`torch.linalg.matrix_rank`
-    and will be removed in a future PyTorch release. The parameter :attr:`symmetric` was
-    renamed in :func:`torch.linalg.matrix_rank` to :attr:`hermitian`.
-
-Args:
-    input (Tensor): the input 2-D tensor
-    tol (float, optional): the tolerance value. Default: ``None``
-    symmetric(bool, optional): indicates whether :attr:`input` is symmetric.
-        Default: ``False``
-
-Keyword args:
-    {out}
-
-Example::
-
-    >>> a = torch.eye(10)
-    >>> torch.matrix_rank(a)
-    tensor(10)
-    >>> b = torch.eye(10)
-    >>> b[0, 0] = 0
-    >>> torch.matrix_rank(b)
-    tensor(9)
-""".format(
-        **common_args
-    ),
-)
-
 add_docstr(
     torch.matrix_power,
     r"""

View file

@@ -671,7 +671,6 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
         torch.linalg.matmul: lambda input, other, out=None: -1,  # alias for torch.matmul
         torch.matrix_power: lambda input, n: -1,
         torch.linalg.matrix_power: lambda input, n, out=None: -1,
-        torch.matrix_rank: lambda input, tol=None, symmetric=False: -1,
         torch.linalg.matrix_rank: lambda input, tol=None, hermitian=False: -1,
         torch.linalg.multi_dot: lambda tensors, out=None: -1,
         torch.matrix_exp: lambda input: -1,
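Finally, the entry removed here keeps `torch.overrides.get_testing_overrides()` in sync with the public API: every overridable function maps to a dummy lambda with a matching signature, so a deleted function must also leave the table. A quick sanity check (assuming a build containing this PR):

```python
import torch
from torch.overrides import get_testing_overrides

overrides = get_testing_overrides()
assert torch.linalg.matrix_rank in overrides   # replacement is still covered
assert torch.matrix_rank not in overrides      # removed op is gone from the table
```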