commit 3a3e2cf90a
parent c7ca1df37e
Author: soulitzer
Date:   2025-01-24 13:36:26 -05:00 (committed by PyTorch MergeBot)

 5 files changed, 0 insertions(+), 132 deletions(-)


@@ -190,7 +190,6 @@ dtensor_fails = {
     xfail("linalg.cholesky_ex"),
     xfail("linalg.cross"),
     xfail("linalg.det"),
-    xfail("linalg.det", "singular"),
     xfail("linalg.eig"),
     xfail("linalg.eigvals"),
     xfail("linalg.householder_product"),


@@ -46,8 +46,6 @@ from torch.testing._internal.common_device_type import (
 from torch.testing._internal.common_methods_invocations import op_db
 from torch.testing._internal.common_utils import (
     is_iterable_of_tensors,
-    IS_MACOS,
-    IS_X86,
     noncontiguous_like,
     parametrize,
     run_tests,
@@ -587,11 +585,6 @@ class TestOperators(TestCase):
                 xfail("as_strided"),
                 xfail("as_strided", "partial_views"),
                 xfail("as_strided_scatter"),
-                decorate(
-                    "linalg.det",
-                    "singular",
-                    decorator=expectedFailureIf(IS_MACOS and IS_X86),
-                ),
             }
         ),
     )
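The deleted `decorate(...)` entry gated an expected failure on Intel macOS only, which is why the `IS_MACOS` / `IS_X86` imports above became unused (a later hunk removes the same pattern again). A minimal sketch of the conditional-xfail idea; `expectedFailureIf` mirrors the helper of that name in `torch.testing._internal.common_utils`, with the platform flags recomputed locally here rather than imported:

    # Sketch using only the standard library: mark a test as an expected
    # failure when `condition` holds, otherwise leave it untouched.
    import platform
    import sys
    import unittest

    IS_MACOS = sys.platform == "darwin"
    IS_X86 = platform.machine() in ("x86_64", "i386", "AMD64")

    def expectedFailureIf(condition):
        def wrapper(fn):
            return unittest.expectedFailure(fn) if condition else fn
        return wrapper

    # Usage mirroring the deleted entry:
    #   decorate("linalg.det", "singular",
    #            decorator=expectedFailureIf(IS_MACOS and IS_X86))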
@@ -877,9 +870,6 @@ class TestOperators(TestCase):
             tol1("masked.cumprod", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
             tol1("cumprod", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
             tol1("linalg.vander", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
-            tol2(
-                "linalg.det", "singular", {torch.float32: tol(atol=2e-05, rtol=2e-05)}
-            ),
         ),
     )
     def test_vjpvjp(self, device, dtype, op):
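The `tol1` / `tol2` / `tol` helpers above loosen per-dtype tolerances for a single op: `tol1` targets the default variant and `tol2` a named variant, which is why the `tol2("linalg.det", "singular", ...)` override goes away with the variant. A rough sketch of the (op, variant, dtype) → tolerance lookup this implies; the table shape and default tolerances below are assumptions, not the functorch implementation:

    # Assumed-shape sketch of a per-(op, variant, dtype) tolerance table.
    import torch

    class Tol:
        def __init__(self, atol, rtol):
            self.atol, self.rtol = atol, rtol

    overrides = {
        ("cumprod", "", torch.float32): Tol(atol=5e-4, rtol=5e-4),
        # ("linalg.det", "singular", torch.float32): Tol(2e-5, 2e-5)  # removed
    }

    def tolerances_for(op, variant, dtype, default=Tol(1e-5, 1.3e-6)):
        return overrides.get((op, variant, dtype), default)

    t = tolerances_for("cumprod", "", torch.float32)
    print(t.atol, t.rtol)  # 0.0005 0.0005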
@@ -1357,11 +1347,6 @@ class TestOperators(TestCase):
         vmapjvpall_fail.union(
             {
                 xfail("as_strided_copy"),
-                decorate(
-                    "linalg.det",
-                    "singular",
-                    decorator=expectedFailureIf(IS_MACOS and IS_X86),
-                ),
             }
         ),
     )


@@ -282,7 +282,6 @@ inductor_expected_failures_single_sample["xpu"] = {
     "inner": {f64},
     "linalg.cholesky_ex": {f64},
     "linalg.cholesky": {f64},
-    ("linalg.det", "singular"): {f64},
     "linalg.ldl_factor_ex": {f64},
     "linalg.ldl_factor": {f64},
     "linalg.ldl_solve": {f64},


@@ -701,7 +701,6 @@ def mps_ops_modifier(ops):
         'linalg.cholesky': None,
         'linalg.cholesky_ex': None,
         'linalg.cond': None,
-        'linalg.detsingular': None,
         'linalg.det': None,
         'linalg.eigh': None,
         'linalg.eigvalsh': None,
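The `'linalg.detsingular'` key is the op name with the variant name appended directly, so it too disappears with the variant. A sketch of that key construction, inferred from the entry's shape rather than taken from `test_mps.py` itself:

    # Inferred convention: MPS skip tables key on name + variant_test_name.
    def mps_key(op_name: str, variant_test_name: str = "") -> str:
        return op_name + variant_test_name

    assert mps_key("linalg.det") == "linalg.det"
    assert mps_key("linalg.det", "singular") == "linalg.detsingular"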


@@ -34,11 +34,9 @@ from torch.testing._internal.common_dtype import (
     all_types_and_complex_and,
     floating_and_complex_types,
     floating_and_complex_types_and,
-    get_all_complex_dtypes,
 )
 from torch.testing._internal.common_utils import (
     GRADCHECK_NONDET_TOL,
-    IS_MACOS,
     make_fullrank_matrices_with_distinct_singular_values,
     skipIfSlowGradcheckEnv,
     slowTest,
@@ -188,37 +186,6 @@ def sample_inputs_householder_product(op_info, device, dtype, requires_grad, **k
     yield SampleInput(make_arg((S, S - 1)), make_arg((S - 2,), low=None, high=None))
 
 
-def sample_inputs_linalg_det_singular(op_info, device, dtype, requires_grad, **kwargs):
-    make_arg = partial(make_tensor, device=device, dtype=dtype)
-
-    def make_singular_matrix_batch_base(size, rank):
-        assert size[-1] == size[-2]
-        assert rank > 0 and rank < size[-1]
-
-        n = size[-1]
-        a = make_arg(size[:-2] + (n, rank)) / 10
-        b = make_arg(size[:-2] + (rank, n)) / 10
-        x = a @ b
-        lu, pivs, _ = torch.linalg.lu_factor_ex(x)
-        p, l, u = torch.lu_unpack(lu, pivs)
-        u_diag_abs = u.diagonal(0, -2, -1).abs()
-        u_diag_abs_largest = u_diag_abs.max(dim=-1, keepdim=True).values
-        u_diag_abs_smallest_idxs = torch.topk(
-            u_diag_abs, k=(n - rank), largest=False
-        ).indices
-        u.diagonal(0, -2, -1).div_(u_diag_abs_largest)
-        u.diagonal(0, -2, -1)[..., u_diag_abs_smallest_idxs] = torch.finfo(dtype).eps
-
-        matrix = p @ l @ u
-        matrix.requires_grad_(requires_grad)
-        return matrix
-
-    for batch, size in product(((), (2,), (2, 2)), range(6)):
-        shape = batch + (size, size)
-        for rank in range(1, size):
-            yield SampleInput(make_singular_matrix_batch_base(shape, rank))
-
-
 def sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad, **kwargs):
     make_fullrank = make_fullrank_matrices_with_distinct_singular_values
     make_arg = partial(
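The core of the deleted sampler: an `(n, rank) @ (rank, n)` product has rank at most `rank < n`, so its determinant is exactly zero in exact arithmetic; the LU-based rescaling only controlled how far the smallest pivots of the sample sit from zero. A standalone sketch of the construction with the LU conditioning omitted:

    # Minimal reproduction of the rank-deficient construction used by the
    # removed sample_inputs_linalg_det_singular (LU pivot rescaling omitted).
    import torch

    def make_singular(n: int, rank: int, dtype=torch.float64):
        assert 0 < rank < n
        a = torch.randn(n, rank, dtype=dtype) / 10
        b = torch.randn(rank, n, dtype=dtype) / 10
        return a @ b  # rank(a @ b) <= rank < n, hence singular

    torch.manual_seed(0)
    m = make_singular(5, 2)
    print(torch.linalg.matrix_rank(m).item())  # 2
    print(torch.linalg.det(m).item())          # ~0, up to rounding error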
@@ -1201,87 +1168,6 @@ op_db: list[OpInfo] = [
         decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver],
         check_batched_gradgrad=False,
     ),
-    OpInfo(
-        "linalg.det",
-        aten_name="linalg_det",
-        op=torch.linalg.det,
-        variant_test_name="singular",
-        aliases=("det",),
-        dtypes=floating_and_complex_types(),
-        supports_forward_ad=True,
-        supports_fwgrad_bwgrad=True,
-        check_batched_gradgrad=False,
-        sample_inputs_func=sample_inputs_linalg_det_singular,
-        decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver],
-        skips=(
-            DecorateInfo(
-                unittest.skip("The backward may give different results"),
-                "TestCommon",
-                "test_noncontiguous_samples",
-            ),
-            DecorateInfo(
-                unittest.skip("Gradients are incorrect on macos"),
-                "TestBwdGradients",
-                "test_fn_grad",
-                device_type="cpu",
-                dtypes=(torch.float64,),
-                active_if=IS_MACOS,
-            ),
-            DecorateInfo(
-                unittest.skip("Gradients are incorrect on macos"),
-                "TestFwdGradients",
-                "test_forward_mode_AD",
-                device_type="cpu",
-                dtypes=(torch.float64,),
-                active_if=IS_MACOS,
-            ),
-            # Both Hessians are incorrect on complex inputs??
-            DecorateInfo(
-                unittest.expectedFailure,
-                "TestBwdGradients",
-                "test_fn_gradgrad",
-                dtypes=(torch.complex128,),
-            ),
-            DecorateInfo(
-                unittest.expectedFailure,
-                "TestFwdGradients",
-                "test_fn_fwgrad_bwgrad",
-                dtypes=(torch.complex128,),
-            ),
-            DecorateInfo(
-                unittest.skip("Skipped, see https://github.com//issues/84192"),
-                "TestBwdGradients",
-                "test_fn_gradgrad",
-                device_type="cuda",
-            ),
-            DecorateInfo(
-                unittest.skip("Skipped, see https://github.com//issues/84192"),
-                "TestFwdGradients",
-                "test_fn_fwgrad_bwgrad",
-                device_type="cuda",
-            ),
-            DecorateInfo(
-                unittest.skip(
-                    "Flaky on ROCm https://github.com/pytorch/pytorch/issues/93044"
-                ),
-                "TestBwdGradients",
-                "test_fn_grad",
-                device_type="cuda",
-                dtypes=get_all_complex_dtypes(),
-                active_if=TEST_WITH_ROCM,
-            ),
-            DecorateInfo(
-                unittest.skip(
-                    "Flaky on ROCm https://github.com/pytorch/pytorch/issues/93045"
-                ),
-                "TestFwdGradients",
-                "test_forward_mode_AD",
-                device_type="cuda",
-                dtypes=get_all_complex_dtypes(),
-                active_if=TEST_WITH_ROCM,
-            ),
-        ),
-    ),
     OpInfo(
         "linalg.diagonal",
         aten_name="linalg_diagonal",