[dynamo] wrap GraphModule exceptions in dynamo-wrapped tests (#126341)
A better approach than https://github.com/pytorch/pytorch/pull/126197 to catch issues like https://github.com/pytorch/pytorch/issues/125568.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/126341
Approved by: https://github.com/anijain2305, https://github.com/jansel
Parent: cdf2133186
Commit: 5359af0c7e

18 changed files with 101 additions and 9 deletions
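In short: the PR registers a new "eager_noexcept" test backend that re-raises any exception thrown while running a dynamo-generated GraphModule as a TorchDynamoException, and switches the dynamo-wrapped test harness to it. A minimal sketch of how the backend is exercised (assumes a build containing this patch; the function f is just an illustration):

import torch

# Compile against the "eager_noexcept" backend registered by this PR.
@torch.compile(backend="eager_noexcept")
def f(x):
    return x.sin() + 1

f(torch.randn(4))
# If the generated GraphModule itself raised here, the error would surface
# as torch._dynamo.exc.TorchDynamoException -- likely a Dynamo bug.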
@@ -106,6 +106,9 @@ class TestOptimizations(torch._dynamo.test_case.TestCase):
     def test_eager(self):
         self._check_backend_works("eager")

+    def test_eager_noexcept(self):
+        self._check_backend_works("eager_noexcept")
+
     @_force_skip_lazy_graph_module()
     def test_torchscript(self):
         self._check_backend_works("ts")
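_check_backend_works is the suite's existing helper; its body is not part of this diff. A plausible shape, offered only as a hypothetical sketch (the real helper lives in the test file and may differ in detail): compile a small function with the named backend and compare against eager execution.

import torch

def _check_backend_works(self, backend):
    # Hypothetical reconstruction, not the actual PyTorch test source.
    def fn(x):
        return x.sin() + x.cos()

    x = torch.randn(4)
    opt_fn = torch.compile(fn, backend=backend)
    self.assertTrue(torch.allclose(opt_fn(x), fn(x)))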
@@ -71,6 +71,7 @@ from torch.testing._internal.common_utils import (
     skipIfRocm,
     skipIfTorchDynamo,
     TestCase,
+    xfailIfTorchDynamo,
 )
 from torch.testing._internal.hop_db import hop_db
 from torch.testing._internal.optests import (
@@ -576,6 +577,9 @@ def forward(self, primals_1, primals_2):

     # This is a (hopefully) extremely rare case that is difficult to handle,
     # so we ban it.
+    # https://github.com/pytorch/pytorch/issues/126236
+    # https://github.com/pytorch/pytorch/pull/126113
+    @xfailIfTorchDynamo
     def test_set__and_data_mutation_bad(self):
         def f(a):
             a_view = a.view(-1)
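Throughout the PR, tests that fail only under the dynamo-wrapped suite are tagged @xfailIfTorchDynamo together with a tracking issue. A minimal sketch of what such a decorator can look like (an illustration assuming TEST_WITH_TORCHDYNAMO is the suite's existing flag, not the verbatim common_utils source):

import unittest

TEST_WITH_TORCHDYNAMO = False  # in PyTorch this is derived from PYTORCH_TEST_WITH_DYNAMO

def xfailIfTorchDynamo(func):
    # Mark the test as an expected failure only when the suite runs with
    # every test wrapped in torch._dynamo.optimize(...).
    return unittest.expectedFailure(func) if TEST_WITH_TORCHDYNAMO else func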
@@ -26,6 +26,7 @@ from torch.testing._internal.common_utils import (
     skipIfTorchDynamo,
     TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xfailIfTorchDynamo,
 )

@@ -1021,6 +1022,8 @@ def forward(self, arg0_1, arg1_1):
         graph_module = make_fx(torch.func.functionalize(f))(*example_inputs)
         self.assertEqual(graph_module(*example_inputs), f(*example_inputs))

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_cond_functionalized_input_mutation_on_true_branch(self):
         def true_fn(x):
             view_x = x.view(x.shape)
@@ -1046,6 +1049,8 @@ def forward(self, arg0_1, arg1_1):
         ):
             make_fx(torch.func.functionalize(f))(*example_inputs)

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_cond_functionalized_input_mutation_on_false_branch(self):
         def true_fn(x):
             return x.sin().sum()
@@ -1071,6 +1076,8 @@ def forward(self, arg0_1, arg1_1):
         ):
             make_fx(torch.func.functionalize(f))(*example_inputs)

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_cond_functionalized_output_alias_input(self):
         def true_fn(x):
             return x
@@ -1098,6 +1105,8 @@ def forward(self, arg0_1, arg1_1):
         ):
             make_fx(torch.func.functionalize(f))(*example_inputs)

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_cond_functionalized_nested_input_mutation(self):
         def true_true_fn(x):
             x.add_(4)
@@ -1129,6 +1138,8 @@ def forward(self, arg0_1, arg1_1):
         ):
             make_fx(torch.func.functionalize(f))(*example_inputs)

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_cond_functionalized_nested_input_mutation_with_aot_func(self):
         def true_true_fn(x):
             x.add_(4)
@@ -1180,6 +1191,8 @@ def forward(self, arg0_1, arg1_1):
         ):
             make_fx(f_wrapper(f))(example_input_func)

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_cond_functionalized_input_aliasing_with_aot_func(self):
         def true_fn(x):
             return x
@@ -1810,6 +1823,8 @@ def forward(self, arg0_1):

         self.assertEqual(gm(*example_inputs), f(*example_inputs))

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_map_functionalized_arg_mutation(self):
         def map_fn(x, y):
             y.add_(4)
@@ -1825,6 +1840,8 @@ def forward(self, arg0_1):
         ):
             functional_f(*example_inputs)

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_map_functionalized_elem_mutation(self):
         def map_fn(x, y):
             x.add_(4)
@@ -1860,6 +1877,8 @@ def forward(self, arg0_1):
         # Ensure no error is thrown when not running backward
         f(*example_inputs)

+    # https://github.com/pytorch/pytorch/issues/126988
+    @xfailIfTorchDynamo
     def test_map_functionalized_elem_alias(self):
         def map_fn(x):
             x.view(x.shape)
@@ -77,6 +77,7 @@ from torch.testing._internal.common_utils import (
     subtest,
     TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xfailIfTorchDynamo,
 )

 from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten
@@ -2340,6 +2341,8 @@ class TestJac(VmapTearDownMixin, TestCase):
         )(x)
         self.assertEqual(actual, expected)

+    # https://github.com/pytorch/pytorch/issues/127036
+    @xfailIfTorchDynamo
     @parametrize("_preallocate_and_copy", (True, False))
     def test_chunk_jacrev_chunksize_one(self, device, _preallocate_and_copy):
         # With chunk_size=1, we shouldn't `vmap` and hence not be limited
@@ -75,6 +75,7 @@ from torch.testing._internal.common_utils import (
     skipIfTorchDynamo,
     slowTest,
     TestCase,
+    xfailIfTorchDynamo,
 )
 from torch.utils._mode_utils import no_dispatch
 from torch.utils._python_dispatch import TorchDispatchMode
@@ -6980,6 +6981,8 @@ for shape in [(1,), ()]:
         self.assertEqual(b_grad, c_grad)
         self.assertEqual(b_grad, d_grad)

+    # PYTORCH_TEST_WITH_DYNAMO=1 test fails on CI but can't repro locally
+    @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/127115")
     def test_checkpointing_without_reentrant_dataparallel(self):
         """
         Verifies gradient correctness when checkpoint without reentrant autograd
@@ -7037,6 +7040,8 @@ for shape in [(1,), ()]:
         # should only call hook once
         self.assertEqual(count, 1)

+    # https://github.com/pytorch/pytorch/issues/127115
+    @xfailIfTorchDynamo
     def test_checkpointing_without_reentrant_arbitrary_input_output(self):
         """
         Ensures checkpointing without reentrant autograd works with functions
@@ -65,6 +65,7 @@ from torch.testing._internal.common_utils import (
     TEST_SCIPY,
     TestCase,
     torch_to_numpy_dtype_dict,
+    xfailIfTorchDynamo,
 )

 if TEST_SCIPY:
@@ -1236,6 +1237,8 @@ class TestBinaryUfuncs(TestCase):
             expected_failure=expected_failure,
         )

+    # https://github.com/pytorch/pytorch/issues/126474
+    @xfailIfTorchDynamo
     @dtypes(torch.double)
     def test_binary_op_mem_overlap(self, device, dtype):
         ops = [
@@ -3691,6 +3694,8 @@ class TestBinaryUfuncs(TestCase):
             actual = op(x, y, alpha=alpha)
             self.assertTrue(not (actual.isnan() or actual.isinf()))

+    # https://github.com/pytorch/pytorch/issues/127003
+    @xfailIfTorchDynamo
     def test_sub_typing(self, device):
         m1 = torch.tensor(
             [True, False, False, True, False, False], dtype=torch.bool, device=device
@@ -199,6 +199,7 @@ class TestCustomOpTesting(CustomOpTestCaseBase):
         ):
             torch.library.opcheck(op, (x,), {})

+    @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug")
     def test_incorrect_abstract_impl(self, device):
         lib = self.lib()
         lib.define("foo(Tensor x) -> Tensor")
@@ -29,6 +29,7 @@ from torch.testing._internal.common_utils import (
     skipIfTorchDynamo,
     TEST_CUDA,
     TestCase,
+    xfailIfTorchDynamo,
 )

@@ -1785,6 +1786,8 @@ class NumpyTests(TestCase):
         a[b] = 1.0
         self.assertEqual(a, tensor([[1.0, 1.0, 1.0]], device=device))

+    # https://github.com/pytorch/pytorch/issues/127003
+    @xfailIfTorchDynamo
     def test_boolean_assignment_value_mismatch(self, device):
         # A boolean assignment should fail when the shape of the values
         # cannot be broadcast to the subscription. (see also gh-3458)
@@ -18,7 +18,8 @@ from torch.testing._internal.common_utils import \
     TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
     make_fullrank_matrices_with_distinct_singular_values,
     freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
-    setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
+    setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest,
+    xfailIfTorchDynamo)
 from torch.testing._internal.common_device_type import \
     (instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
     onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
@@ -802,6 +803,8 @@ class TestLinalg(TestCase):
         # when beta is not zero
         self._test_addr_vs_numpy(device, dtype, beta=2, alpha=2)

+    # https://github.com/pytorch/pytorch/issues/127043
+    @xfailIfTorchDynamo
     @precisionOverride({torch.bfloat16: 1e-1})
     @dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
     def test_addr_float_and_complex(self, device, dtype):
@@ -21,7 +21,7 @@ import torch.backends.mkldnn
 from torch.utils import mkldnn as mkldnn_utils
 from torch.testing._internal.common_utils import TestCase, \
     run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
-    skipIfTorchDynamo
+    skipIfTorchDynamo, xfailIfTorchDynamo
 from torch.testing._internal.common_device_type import (
     instantiate_device_type_tests,
     dtypes,
@@ -807,6 +807,8 @@ class TestMkldnn(TestCase):

         self.assertEqual(y1, y2.to_dense())

+    # https://github.com/pytorch/pytorch/issues/127111
+    @xfailIfTorchDynamo
     def test_max_pool_unsupported(self):
         # OneDNN does not support dilated max_pooling; it will be available in v2.0.
         N = torch.randint(3, 10, (1,)).item()
@@ -1159,6 +1161,8 @@ class TestMkldnn(TestCase):
         out_mkldnn = mkldnn_utils.to_mkldnn(m)(x)
         self.assertEqual(out_eager, out_mkldnn)

+    # https://github.com/pytorch/pytorch/issues/127111
+    @xfailIfTorchDynamo
     def test_view(self):
         x = torch.randn(3, 4, 5, dtype=torch.float32).to_mkldnn()
         self.assertRaisesRegex(RuntimeError,
@@ -3,11 +3,13 @@
 from copy import copy

 import torch
-from torch.testing._internal.common_utils import run_tests, TestCase
+from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
 from torch.utils.module_tracker import ModuleTracker


 class TestModuleTracker(TestCase):
+    # https://github.com/pytorch/pytorch/issues/127112
+    @xfailIfTorchDynamo
     def test_module_hierarchy(self):
         seen_fw = []
         seen_bw = []
@@ -6,7 +6,7 @@ import torch
 from torch.utils._pytree import tree_map
 import unittest

-from torch.testing._internal.common_utils import run_tests
+from torch.testing._internal.common_utils import run_tests, TEST_WITH_TORCHDYNAMO
 from torch.fx.operator_schemas import normalize_function
 from torch._subclasses.schema_check_mode import SchemaCheckMode
 from torch.utils._python_dispatch import TorchDispatchMode
@@ -94,6 +94,11 @@ class IncorrectAliasTensor(torch.Tensor):

 # Tests various schema checking functionalities.
 class TestSchemaCheck(JitTestCase):
+    def setUp(self):
+        if TEST_WITH_TORCHDYNAMO:
+            self.skipTest("SchemaCheckMode is ignored by dynamo")
+        super().setUp()
+
     # Tests that SchemaCheckMode records operator order with grad
     def test_schema_check_mode_operator_order(self):
         with SchemaCheckMode() as schema_check:
@@ -17,7 +17,8 @@ from torch.testing._internal.common_utils import (
     TestCase, run_tests, do_test_empty_full, TEST_WITH_ROCM, suppress_warnings,
     torch_to_numpy_dtype_dict, numpy_to_torch_dtype_dict, slowTest,
     set_default_dtype, set_default_tensor_type,
-    TEST_SCIPY, IS_MACOS, IS_PPC, IS_JETSON, IS_WINDOWS, parametrize, skipIfTorchDynamo)
+    TEST_SCIPY, IS_MACOS, IS_PPC, IS_JETSON, IS_WINDOWS, parametrize, skipIfTorchDynamo,
+    xfailIfTorchDynamo)
 from torch.testing._internal.common_device_type import (
     expectedFailureMeta, instantiate_device_type_tests, deviceCountAtLeast, onlyNativeDeviceTypes,
     onlyCPU, largeTensorTest, precisionOverride, dtypes,
@@ -1582,6 +1583,8 @@ class TestTensorCreation(TestCase):
         self.assertEqual(t.max(), True)
         self.assertTrue(0.4 < (t.eq(True)).to(torch.int).sum().item() / size < 0.6)

+    # https://github.com/pytorch/pytorch/issues/126834
+    @xfailIfTorchDynamo
     def test_random_from_to_bool(self, device):
         size = 2000

@@ -1661,7 +1664,9 @@ class TestTensorCreation(TestCase):

     # NB: uint64 is broken because its max value is not representable in
     # int64_t, but this is what random expects
-    @dtypes(*all_types_and(torch.bfloat16, torch.half, torch.uint16, torch.uint32))
+    # https://github.com/pytorch/pytorch/issues/126834
+    @xfailIfTorchDynamo
+    @dtypes(*all_types_and(torch.bfloat16, torch.half, torch.uint16, torch.uint32))
     def test_random_from_to(self, device, dtype):
         size = 2000
         alpha = 0.1
@@ -1750,6 +1755,8 @@ class TestTensorCreation(TestCase):
             lambda: t.random_(from_, to_)
         )

+    # https://github.com/pytorch/pytorch/issues/126834
+    @xfailIfTorchDynamo
     @dtypes(*all_types_and(torch.bfloat16, torch.half, torch.uint16, torch.uint32))
     def test_random_to(self, device, dtype):
         size = 2000
@@ -3350,6 +3357,8 @@ class TestRandomTensorCreation(TestCase):
         with self.assertRaisesRegex(RuntimeError, r'normal expects all elements of std >= 0.0'):
             torch.normal(input, std)

+    # https://github.com/pytorch/pytorch/issues/126834
+    @xfailIfTorchDynamo
     @dtypes(torch.float, torch.double, torch.half)
     @dtypesIfCUDA(torch.float, torch.double, torch.half, torch.bfloat16)
     def test_uniform_from_to(self, device, dtype):
@@ -41,7 +41,7 @@ from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
     skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
     wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
     skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
-    AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO)
+    AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
 from multiprocessing.reduction import ForkingPickler
 from torch.testing._internal.common_device_type import (
     expectedFailureMeta,
@@ -4376,6 +4376,9 @@ else:
         getattr(x, op)(*args)

     # FIXME: move to an elementwise ternary test suite and make this an OpInfo test
+    # https://github.com/pytorch/pytorch/issues/126474
+    @xfailIfTorchDynamo
+    @skipIfTorchInductor("https://github.com/pytorch/pytorch/issues/126474")
     @dtypes(torch.double)
     def test_ternary_op_mem_overlap(self, device, dtype):
         if device == "cpu" and TEST_WITH_TORCHINDUCTOR:
@@ -8,7 +8,8 @@ import torch

 from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
                                                   TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
-                                                  numpy_to_torch_dtype_dict, skipIfTorchDynamo)
+                                                  numpy_to_torch_dtype_dict, skipIfTorchDynamo,
+                                                  xfailIfTorchDynamo)
 from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
                                                         dtypes, onlyCPU, expectedFailureMeta, skipMeta)
 from torch.testing._internal.common_dtype import (
@@ -43,6 +44,8 @@ class TestTypePromotion(TestCase):
     # `int+float -> float` but `int.add_(float)` is rejected as an error.
     # Promoting inplace would require re-allocating and copying the memory of the
     # tensor data, since element size could change.
+    # https://github.com/pytorch/pytorch/issues/127049
+    @xfailIfTorchDynamo
     @float_double_default_dtype
     def test_inplace(self, device):
         int_tensor = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
@@ -21,6 +21,7 @@ from torch.testing._internal.common_utils import (
     IS_WINDOWS,
     gradcheck,
     is_iterable_of_tensors,
+    xfailIfTorchDynamo,
 )
 from torch.testing._internal.common_methods_invocations import (
     unary_ufuncs,
@@ -788,6 +789,8 @@ class TestUnaryUfuncs(TestCase):
             _test(op, data[0:sz], data[1 : sz + 1])

     # TODO: run on non-native device types
+    # https://github.com/pytorch/pytorch/issues/126474
+    @xfailIfTorchDynamo
     @dtypes(torch.double)
     def test_unary_out_op_mem_overlap(self, device, dtype):
         sz = 3
@@ -24,6 +24,21 @@ def eager(gm, fake_tensor_inputs):
     return gm.forward


+@register_backend
+def eager_noexcept(gm, fake_tensor_inputs):
+    # This backend is intended to check that dynamo-generated GraphModules
+    # do not cause errors.
+    def inner(*args):
+        try:
+            return gm(*args)
+        except Exception as e:
+            raise torch._dynamo.exc.TorchDynamoException(
+                "Unexpected exception when running generated GraphModule"
+            ) from e
+
+    return inner
+
+
 @register_backend
 def pre_dispatch_eager(gm, fake_tensor_inputs):
     from torch.fx.experimental.proxy_tensor import make_fx
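The `raise ... from e` keeps the original error as `__cause__`, so the underlying GraphModule failure stays visible in the traceback. A hedged illustration of the resulting behavior (the function g is contrived and succeeds; the except branch shows the handling path):

import torch

@torch.compile(backend="eager_noexcept")
def g(x):
    return x + 1

try:
    g(torch.randn(2))
except torch._dynamo.exc.TorchDynamoException as e:
    # Only reached if the generated GraphModule itself errors;
    # e.__cause__ then holds the original exception.
    print("GraphModule failed:", e.__cause__)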
@@ -2816,7 +2816,9 @@ This message can be suppressed by setting PYTORCH_PRINT_REPRO_ON_FAILURE=0"""
             super_run = torch._dynamo.optimize("aot_eager_decomp_partition")(super_run)
         elif TEST_WITH_TORCHDYNAMO:  # noqa: F821
             # TorchDynamo optimize annotation
-            super_run = torch._dynamo.optimize("eager", nopython=nopython)(super_run)
+            # Assume eager-generated GraphModules will not error out.
+            # If we do, this is probably a Dynamo bug!
+            super_run = torch._dynamo.optimize("eager_noexcept", nopython=nopython)(super_run)
             key = f"{self.__class__.__name__}.{self._testMethodName}"
             from .dynamo_test_failures import dynamo_expected_failures, dynamo_skips

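With this change, every dynamo-wrapped test (PYTORCH_TEST_WITH_DYNAMO=1) runs under "eager_noexcept" rather than plain "eager". A simplified sketch of what the harness does per test method (run_test_body stands in for the real unittest run call and is illustrative only):

import torch

def run_test_body():
    # Stand-in for the real super().run(...) invocation in common_utils.
    assert torch.equal(torch.ones(2) + 1, torch.full((2,), 2.0))

wrapped = torch._dynamo.optimize("eager_noexcept", nopython=False)(run_test_body)
wrapped()  # a GraphModule error here would surface as TorchDynamoException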