Revert "[ONNX] Add upsample trilinear to skip decomp (#128259)"

This reverts commit b72989a2b5.

Reverted https://github.com/pytorch/pytorch/pull/128259 on behalf of https://github.com/huydhn due to Sorry for reverting your change but its ONNX job is failing in trunk b72989a2b5 ([comment](https://github.com/pytorch/pytorch/pull/128259#issuecomment-2167058937))
This commit is contained in:
PyTorch MergeBot 2024-06-14 01:44:24 +00:00
parent f48ca2561d
commit 0186b386cd
5 changed files with 14 additions and 86 deletions

View file

@@ -33,9 +33,7 @@ pip_install coloredlogs packaging
pip_install onnxruntime==1.18
pip_install onnx==1.16.0
# pip_install "onnxscript@git+https://github.com/microsoft/onnxscript@3e869ef8ccf19b5ebd21c10d3e9c267c9a9fa729" --no-deps
pip_install onnxscript==0.1.0.dev20240613 --no-deps
# required by onnxscript
pip_install ml_dtypes
pip_install onnxscript==0.1.0.dev20240523 --no-deps
# Cache the transformers model to be used later by ONNX tests. We need to run the transformers
# package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/

View file

@@ -527,7 +527,8 @@ EXPECTED_SKIPS_OR_FAILS_WITH_DTYPES: Tuple[onnx_test_common.DecorateMeta, ...] =
),
xfail(
"gather",
reason="GatherElements op: Rank of input 'data' needs to be equal to rank of input 'indices'"
reason="HandleNegativeAxis(int64_t, int64_t) IsAxisInRange(axis, tensor_rank) was \
false. axis 0 is not in valid range [-0,-1]"
),
xfail(
"geometric",
@@ -1516,6 +1517,7 @@ SKIP_XFAIL_SUBTESTS_WITH_MATCHER_AND_MODEL_TYPE: tuple[
"nn.functional.batch_norm",
matcher=lambda sample: sample.kwargs.get("training") is True
and any(arg is not None for arg in sample.args[2:4]),
model_type=pytorch_test_common.TorchModelType.TORCH_EXPORT_EXPORTEDPROGRAM,
reason="Flaky failure: https://github.com/pytorch/pytorch/issues/115106",
),
xfail(

View file

@@ -171,9 +171,13 @@ class TestFxToOnnx(pytorch_test_common.ExportTestCase):
torch.argmax(input, dim=1, keepdim=True),
)
_ = dynamo_export(
ArgminArgmaxModel(), model_input, export_options=self.export_options
)
# NOTE: KeyError: dim raised in optimizer
with self.assertWarnsOnceRegex(
UserWarning, "ONNXScript optimizer failed. Skipping optimization."
):
_ = dynamo_export(
ArgminArgmaxModel(), model_input, export_options=self.export_options
)
def test_multiple_outputs_op_with_evaluator(self):
class TopKModel(torch.nn.Module):

View file

@@ -18,12 +18,6 @@ def assert_op_in_onnx_model(model: onnx.ModelProto, op_type: str):
class TestDynamoExportDecompSkip(pytorch_test_common.ExportTestCase):
def _test_exported_program_forces_decomposition(self, model, input, op_type):
ep = torch.export.export(model, input)
onnx_program = torch.onnx.dynamo_export(ep, *input)
with self.assertRaises(AssertionError):
assert_op_in_onnx_model(onnx_program.model_proto, op_type)
def test_upsample_bilinear2d(self):
class TestModel(torch.nn.Module):
def __init__(self):
@@ -36,9 +30,6 @@ class TestDynamoExportDecompSkip(pytorch_test_common.ExportTestCase):
onnx_program = torch.onnx.dynamo_export(TestModel(), torch.randn(1, 1, 2, 2))
# If decomposition is skipped, the model will contain a Resize op instead of fine grained subgraph.
assert_op_in_onnx_model(onnx_program.model_proto, "Resize")
self._test_exported_program_forces_decomposition(
TestModel(), (torch.randn(1, 1, 2, 2),), "Resize"
)
def test_upsample_bilinear2d_output_size(self):
def func(x: torch.Tensor):
@@ -48,42 +39,14 @@ class TestDynamoExportDecompSkip(pytorch_test_common.ExportTestCase):
# If decomposition is skipped, the model will contain a Resize op instead of fine grained subgraph.
assert_op_in_onnx_model(onnx_program.model_proto, "Resize")
def test_upsample_trilinear3d(self):
class TestModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.upsample = torch.nn.Upsample(scale_factor=2, mode="trilinear")
def forward(self, x):
return self.upsample(x)
onnx_program = torch.onnx.dynamo_export(TestModel(), torch.randn(1, 1, 2, 2, 3))
# If decomposition is skipped, the model will contain a Resize op instead of fine grained subgraph.
assert_op_in_onnx_model(onnx_program.model_proto, "Resize")
self._test_exported_program_forces_decomposition(
TestModel(), (torch.randn(1, 1, 2, 2, 3),), "Resize"
)
def test_upsample_trilinear3d_output_size(self):
def func(x: torch.Tensor):
return torch.nn.functional.interpolate(x, size=(4, 4, 4), mode="trilinear")
onnx_program = torch.onnx.dynamo_export(func, torch.randn(1, 1, 2, 2, 3))
# If decomposition is skipped, the model will contain a Resize op instead of fine grained subgraph.
assert_op_in_onnx_model(onnx_program.model_proto, "Resize")
def test_instance_norm(self):
class TestModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.instance_norm(x)
def func(x: torch.Tensor):
return torch.nn.functional.instance_norm(x)
onnx_program = torch.onnx.dynamo_export(TestModel(), torch.randn(1, 1, 2, 2))
onnx_program = torch.onnx.dynamo_export(func, torch.randn(1, 1, 2, 2))
# If decomposition is skipped, the model will contain an InstanceNormalization op
# instead of BatchNormalization op w/ training=True.
assert_op_in_onnx_model(onnx_program.model_proto, "InstanceNormalization")
self._test_exported_program_forces_decomposition(
TestModel(), (torch.randn(1, 1, 2, 2),), "InstanceNormalization"
)
if __name__ == "__main__":

View file

@@ -121,44 +121,6 @@ class UpsampleBilinear2DDecompSkip(DecompSkip):
)
class UpsampleTrilinear3DDecompSkip(DecompSkip):
op_callable = torch._C._nn.upsample_trilinear3d # type: ignore[attr-defined]
onnxscript_function = torchlib_nn.aten_upsample_trilinear3d_vec # type: ignore[attr-defined]
new_op_name = "upsample_trilinear3d"
new_op_schema = "(Tensor self, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> (Tensor)"
@classmethod
def register(cls, export_options: torch.onnx.ExportOptions):
if not hasattr(torch.ops, _NEW_OP_NAMESPACE) or not hasattr(
torch.ops.onnx_export, cls.new_op_name
):
cls.register_custom_op()
torch._C._nn.upsample_trilinear3d = torch.ops.onnx_export.upsample_trilinear3d # type: ignore[attr-defined]
if export_options.onnx_registry is None:
export_options.onnx_registry = torch.onnx.OnnxRegistry()
registry = export_options.onnx_registry
registry.register_op(
function=cls.onnxscript_function,
namespace=_NEW_OP_NAMESPACE,
op_name=cls.new_op_name,
)
@classmethod
def unregister(cls):
torch._C._nn.upsample_trilinear3d = cls.op_callable # type: ignore[attr-defined]
@classmethod
def abstract(cls, input, output_size, align_corners, scale_factors):
osize = decompositions.upsample_compute_output_size(
input.size(), output_size, scale_factors
)
return torch.empty(
(input.size(0), input.size(1), input.size(2), *osize),
dtype=input.dtype,
device=input.device,
)
class InstanceNormDecompSkip(DecompSkip):
op_callable = torch.instance_norm # type: ignore[attr-defined]
onnxscript_function = torchlib_core.aten_instance_norm # type: ignore[attr-defined]
@@ -214,7 +176,6 @@ class InstanceNormDecompSkip(DecompSkip):
_DEFAULT_SKIP_LIST = [
UpsampleBilinear2DDecompSkip,
InstanceNormDecompSkip,
UpsampleTrilinear3DDecompSkip,
]