[functorch] classify some more test failures (#83520)
Classifies test failures for test_vmapvjp and test_vmapjvpall

Test Plan:
- tests

Pull Request resolved: https://github.com/pytorch/pytorch/pull/83520
Approved by: https://github.com/samdow
parent 52b8a58197
commit 7e7afcabe7

1 changed file with 35 additions and 36 deletions
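The whole change is a reclassification pass over functorch's operator tests: every entry in the lists below is either a skip (the test is not run at all, typically because of randomness or flakiness) or an xfail (the test runs and is asserted to fail, so a fix surfaces as an unexpected pass). The skip()/xfail() helpers in the diff are functorch's own OpInfo-level markers, not pytest marks, but the semantics line up; as a rough, runnable analogy (the test names here are made up for illustration):

    import pytest

    @pytest.mark.skip(reason="randomness")     # skip: never executed
    def test_bernoulli_vmapvjp():
        ...

    @pytest.mark.xfail(reason=".item() call")  # xfail: executed and expected to fail;
    def test_prod_vmapvjp():                   # an unexpected pass gets flagged
        raise RuntimeError("vmap: .item() not supported")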
@@ -342,26 +342,16 @@ class TestOperators(TestCase):
     @ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
     @skipOps('TestOperators', 'test_jvp', set({
         skip('nn.functional.max_pool1d'),  # fails on cpu, runs okay on cuda
         skip('pca_lowrank', ''),  # fails on cuda, runs okay on cpu
         skip('svd_lowrank', ''),  # fails on cuda, runs okay on cpu
 
-        # =============================================
-        # NB: The above failures also fail using PyTorch core's
-        # forward-mode AD and vmap.
-        # The failures below are functorch-specific issues
-        # =============================================
-
-        # Composite ops that do bad things. Need to be fixed in PyTorch core.
-        # RuntimeError: Cannot access data pointer of Tensor that doesn't have storage
-        xfail('tensor_split'),
-
-        # BUG: runs and produces numerical differences
+        # BUG: silent incorrectness: runs and produces numerical differences
         skip('nn.functional.max_unpool1d'),  # fails everywhere except on mac
         skip('nn.functional.max_unpool2d'),  # fails everywhere except on windows
         skip('nn.functional.max_unpool3d'),  # fails everywhere except on mac
 
-        xfail('nn.functional.rrelu')  # in-place test fails
+        xfail('nn.functional.rrelu')  # in-place test errors out with no formula implemented
     }))
     @opsToleranceOverride('TestOperators', 'test_jvp', (
         tol1('nn.functional.conv_transpose3d',
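The hunk above is cut off inside an @opsToleranceOverride stanza; the tol1(...) entry for nn.functional.conv_transpose3d continues past what the viewer shows. For orientation, a hedged sketch of how such an override is typically spelled in this test file; the import path, tolerance values, and device_type are illustrative assumptions, not the truncated line's actual contents:

    import torch
    # opsToleranceOverride/tol1/tol come from the functorch tests' common_utils
    # module (import path assumed for this sketch; values are placeholders).
    from common_utils import opsToleranceOverride, tol1, tol

    @opsToleranceOverride('TestOperators', 'test_jvp', (
        tol1('nn.functional.conv_transpose3d',
             {torch.float32: tol(atol=1e-04, rtol=1e-06)}, device_type='cuda'),
    ))
    def test_jvp(self, device, dtype, op):
        ...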
@@ -570,6 +560,7 @@ class TestOperators(TestCase):
             self.assertEqual(loop_out, batched_out)
 
     vmapvjp_fail = vjp_fail.union({
+        # -------------------- ALLOWED FAILURES --------------------------------
         # The following are not bugs and are expected behavior
         xfail('masked_select'),  # Not possible due to dynamic shapes
         skip('bernoulli'),  # randomness
@@ -580,6 +571,7 @@ class TestOperators(TestCase):
         skip('nn.functional.feature_alpha_dropout', 'without_train'),  # randomness
         skip('nn.functional.dropout'),  # randomness
         skip('nn.functional.dropout2d'),  # randomness
         skip('nn.functional.dropout3d', ''),  # randomness
+        xfail('as_strided'),  # as_strided is too wild for us to support, wontfix
         xfail('index_put', ''),  # not possible due to dynamic shapes; we support a subset
         xfail('masked_scatter'),  # dynamic
@@ -588,7 +580,9 @@ class TestOperators(TestCase):
         xfail('take'),  # dynamic
         xfail('pca_lowrank', ''),  # randomness
         xfail('svd_lowrank', ''),  # randomness
+        # ----------------------------------------------------------------------
 
+        # ---------------------------- BUGS ------------------------------------
         # All of the following are bugs and need to be fixed
         skip('linalg.svdvals'),  # really annoying thing where it passes correctness check but not has_batch_rule
         xfail('__getitem__', ''),  # dynamic error
@@ -600,14 +594,15 @@ class TestOperators(TestCase):
         xfail('nn.functional.gaussian_nll_loss'),  # checks var for if any value < 0
         xfail('prod'),  # calls nonzero
         xfail('quantile', device_type='cpu'),  # checks q via a .item() call
-        xfail('stft'),
-        xfail('view_as_complex'),
+        xfail('stft'),  # calls as_strided
+        xfail('view_as_complex'),  # Tensor must have a last dimension with stride 1
 
         # required rank 4 tensor to use channels_last format
         xfail('bfloat16'),
         xfail('double'),
         xfail('float'),
         xfail('half'),
         xfail('chalf', ''),
 
+        xfail('scatter_reduce', 'prod'),  # item call
 
@@ -615,11 +610,10 @@ class TestOperators(TestCase):
         xfail('nn.functional.max_unpool2d'),
         xfail('nn.functional.max_unpool2d', 'grad'),
 
-        xfail('chalf', ''),
         xfail('sparse.sampled_addmm', ''),
-        xfail('as_strided_scatter', ''),
-        xfail('index_reduce', ''),
-        xfail('nn.functional.dropout3d', ''),
+        xfail('as_strided_scatter', ''),  # calls as_strided
+        xfail('index_reduce', ''),  # .item() call
+        # ---------------------------------------------------------------------
     })
 
     @ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
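The vmapvjp hunks funnel into the check self.assertEqual(loop_out, batched_out) seen above: for every OpInfo sample the test computes the vjp once per example in a plain Python loop and once under vmap, and the two results must agree. A minimal self-contained sketch of that pattern (simplified from the OpInfo-driven harness; f is a stand-in op):

    import torch
    from functorch import vmap, vjp  # functorch API at the time of this PR

    def f(x):
        return x.sin().sum()

    def per_example_vjp(x):
        out, vjp_fn = vjp(f, x)
        return vjp_fn(torch.ones_like(out))[0]

    xs = torch.randn(3, 5)
    loop_out = torch.stack([per_example_vjp(x) for x in xs])  # reference loop
    batched_out = vmap(per_example_vjp)(xs)                   # what test_vmapvjp exercises
    assert torch.allclose(loop_out, batched_out)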
@@ -653,6 +647,7 @@ class TestOperators(TestCase):
             self.assertEqual(loop_out, batched_out)
 
     vmapjvpall_fail = {
+        # -------------------- ALLOWED FAILURES --------------------------------
         # The following are expected (not a bug)
         skip('bernoulli', ''),  # randomness
         skip('nn.functional.dropout'),  # randomness
@@ -663,34 +658,37 @@ class TestOperators(TestCase):
         skip('nn.functional.feature_alpha_dropout', 'with_train'),
         xfail('nn.functional.fractional_max_pool2d'),  # Cannot access data pointer of Tensor that doesn't have storage
         xfail('nn.functional.fractional_max_pool3d'),  # Cannot access data pointer of Tensor that doesn't have storage
 
-        # The following are bugs that we should fix
-        skip('nn.functional.max_pool1d'),  # fails on cpu, runs on cuda
-        xfail('_masked.mean'),
-        xfail('_masked.prod'),
-
         # Not actually a problem: embedding with max_norm mutates the weight
         # and causes different runs to produce different results.
         # skip because this is flaky depending on what the max_norm is!
         skip('nn.functional.embedding', ''),
-        xfail('nn.functional.soft_margin_loss', ''),
-        xfail('linalg.householder_product'),
-        xfail('tensor_split'),
-        xfail('quantile'),
-        xfail('as_strided'),
-        xfail('nn.functional.gaussian_nll_loss'),
-        xfail('scatter'),
-        xfail('nanquantile'),
-        xfail('view_as_complex'),
-        xfail('prod'),
-
-        skip('pca_lowrank', ''),
-        skip('svd_lowrank', ''),
+        # ----------------------------------------------------------------------
+
+        # ---------------------------- BUGS ------------------------------------
+        # The following are bugs that we should fix
+        skip('nn.functional.max_pool1d'),  # fails on cpu, runs on cuda
+        xfail('_masked.mean'),  # silent incorrectness (nan difference)
+        xfail('_masked.prod'),  # .item or data-dependent control flow
+
+        xfail('nn.functional.soft_margin_loss', ''),  # soft_margin_loss_backward does not support forward-ad
+        xfail('linalg.householder_product'),  # output with shape [5, 5] doesn't match the broadcast shape [2, 5, 5]
+        xfail('tensor_split'),  # data_ptr composite compliance
+        xfail('quantile'),  # at::equal batching rule (cpu), also, in-place vmap (cuda)
+        xfail('as_strided'),  # as_strided too weird
+        xfail('nn.functional.gaussian_nll_loss'),  # .item or data-dependent control flow
+        xfail('scatter'),  # forward-mode AD does not support at::scatter
+        xfail('nanquantile'),  # at::equal batching rule (cpu), also, in-place vmap (cuda)
+        xfail('view_as_complex'),  # Tensor must have a last dimension with stride 1
+        xfail('prod'),  # .item or data-dependent control flow
+
+        skip('pca_lowrank', ''),  # randomness
+        skip('svd_lowrank', ''),  # randomness
 
         xfail('stft'),  # transpose_ fallback
 
         xfail('double'),  # required rank 4 tensor to use channels_last format
 
+        # potential silent incorrectness
         skip('nn.functional.max_unpool1d'),  # Flaky, seems to sometimes hit max_unpool2d
         skip('nn.functional.max_unpool2d'),  # fails everywhere except on mac
         skip('nn.functional.max_unpool3d'),  # fails everywhere except on mac
@@ -700,6 +698,7 @@ class TestOperators(TestCase):
         # erroring because running_mean and running_var aren't differentiable
         xfail('nn.functional.batch_norm'),
         xfail('nn.functional.batch_norm', 'without_cudnn'),
+        # ----------------------------------------------------------------------
     }
 
     @ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
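test_vmapjvpall applies the same loop-versus-batched comparison to forward-mode AD, batching over both primals and tangents. A minimal hedged sketch (the real test additionally sweeps in_dims combinations across all OpInfo samples):

    import torch
    from functorch import vmap, jvp  # functorch's forward-mode AD entry point

    def f(x):
        return x.sin()

    def per_example_jvp(x, t):
        _, tangent_out = jvp(f, (x,), (t,))
        return tangent_out

    xs, ts = torch.randn(3, 5), torch.randn(3, 5)
    loop_out = torch.stack([per_example_jvp(x, t) for x, t in zip(xs, ts)])
    batched_out = vmap(per_example_jvp)(xs, ts)
    assert torch.allclose(loop_out, batched_out)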