Revert D23398534: [pytorch][PR] [ONNX] Improve error handling for adaptive_pool

Test Plan: revert-hammer

Differential Revision:
D23398534 (45ddeb5ce6)

Original commit changeset: f2d60d40340f

fbshipit-source-id: acc9d6c3d031662c37447fcee027b0c97b8492a7
This commit is contained in:
Dmytro Dzhulgakov 2020-10-05 15:07:16 -07:00 committed by Facebook GitHub Bot
parent f18cc9c57d
commit 5177f8de2b
3 changed files with 5 additions and 17 deletions

View file

@@ -1667,14 +1667,7 @@ class TestONNXRuntime(unittest.TestCase):
y = torch.randn(16, 16, requires_grad=True)
self.run_test(MyModel(), (x, y))
def test_interpolate_adaptive_pooling_error(self):
x = torch.randn(1, 2, 6, requires_grad=True)
with self.assertRaises(RuntimeError) as cm:
self._interpolate(x, "area", True, True)
with self.assertRaises(RuntimeError) as cm:
self._interpolate(x, "area", False, True)
@disableScriptTest()
def test_groupnorm(self):
model = torch.nn.GroupNorm(3, 6, 0.002)
x = torch.randn(4, 6, 180, 180, 180)

View file

@@ -826,6 +826,7 @@ avg_pool3d = _avg_pool('avg_pool3d', _triple)
def _adaptive_pool(name, type, tuple_fn, fn=None):
@parse_args('v', 'is')
def symbolic_fn(g, input, output_size):
# _adaptive_pool is supported for cases where output_size is 1 for all dimensions,
# by executing a GlobalPool.
@@ -836,10 +837,6 @@ def _adaptive_pool(name, type, tuple_fn, fn=None):
# so we try using max_poolxd_with_indices, and if it is not possible
# (input is not a complete tensor or output size not factor of input size)
# then we call GlobalAveragePool and return None for the indices
try:
output_size = _parse_arg(output_size, 'is')
except Exception:
return sym_help._onnx_unsupported('adaptive pooling, since output_size is not constant.')
if output_size == [1] * len(output_size) and type == "AveragePool":
return g.op("GlobalAveragePool", input)
if not input.isCompleteTensor():
@@ -852,10 +849,7 @@ def _adaptive_pool(name, type, tuple_fn, fn=None):
if mod != [0] * len(mod):
if output_size == [1] * len(output_size):
return g.op("GlobalMaxPool", input), None
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return _unimplemented(name, 'output size that are not factor of input size')
else:
return sym_help._onnx_unsupported(name, ', since output size is not factor of input size')
return _unimplemented(name, 'output size that are not factor of input size')
k = [int(dim[i] / output_size[i]) for i in range(0, len(dim))]
# call max_poolxd_with_indices to get indices in the output
if type == "MaxPool":

View file

@@ -1003,7 +1003,8 @@ def _run_symbolic_function(g, n, inputs, env, operator_export_type=OperatorExpor
else:
raise RuntimeError("ONNX export failed on an operator with unrecognized namespace {}::{}. "
"If you are trying to export a custom operator, make sure you registered "
"it with the right domain and version.".format(ns, op_name))
"it with the right domain and version. "
"Otherwise, please report a bug.".format(ns, op_name))
except RuntimeError:
if operator_export_type == OperatorExportTypes.ONNX_FALLTHROUGH:
return None