[ONNX] subscribe onnx to our custom test infra (#79546)

Remove as many references to unittest as could easily be done, in favor of our custom test infra.

Left a TODO in the one place where unittest.main could not easily be replaced with run_tests().
Pull Request resolved: https://github.com/pytorch/pytorch/pull/79546
Approved by: https://github.com/seemethere
This commit is contained in:
Jane Xu 2022-06-15 15:00:01 +00:00 committed by PyTorch MergeBot
parent 6a96bda445
commit d2fbfe7fce
10 changed files with 44 additions and 46 deletions

View file

@ -1,7 +1,5 @@
# Owner(s): ["module: onnx"]
import unittest
import numpy as np
import onnx
from test_pytorch_onnx_caffe2 import do_export
@ -11,9 +9,10 @@ import caffe2.python.onnx.backend as c2
import torch
import torch.utils.cpp_extension
from torch.onnx.symbolic_helper import _unimplemented
from test_pytorch_common import TestCase, run_tests
class TestCustomOps(unittest.TestCase):
class TestCustomOps(TestCase):
def test_custom_add(self):
op_source = """
#include <torch/script.h>
@ -57,7 +56,7 @@ class TestCustomOps(unittest.TestCase):
np.testing.assert_array_equal(caffe2_out[0], model(x, y).cpu().numpy())
class TestCustomAutogradFunction(unittest.TestCase):
class TestCustomAutogradFunction(TestCase):
opset_version = 9
keep_initializers_as_inputs = False
onnx_shape_inference = True
@ -129,7 +128,7 @@ class TestCustomAutogradFunction(unittest.TestCase):
run_model_test(self, model, input_args=(x,))
class TestExportAsContribOps(unittest.TestCase):
class TestExportAsContribOps(TestCase):
opset_version = 14
keep_initializers_as_inputs = False
onnx_shape_inference = True
@ -164,4 +163,4 @@ class TestExportAsContribOps(unittest.TestCase):
if __name__ == "__main__":
unittest.main()
run_tests()

View file

@ -1,9 +1,8 @@
# Owner(s): ["module: onnx"]
import unittest
import onnxruntime # noqa: F401
from test_models import TestModels
from test_pytorch_common import TestCase, run_tests
from test_pytorch_onnx_onnxruntime import run_model_test
import torch
@ -24,7 +23,7 @@ def exportTest(self, model, inputs, rtol=1e-2, atol=1e-7, opset_versions=None):
TestModels = type(
"TestModels",
(unittest.TestCase,),
(TestCase,),
dict(TestModels.__dict__, is_script_test_enabled=False, exportTest=exportTest),
)
@ -32,7 +31,7 @@ TestModels = type(
# model tests for scripting with new JIT APIs and shape inference
TestModels_new_jit_API = type(
"TestModels_new_jit_API",
(unittest.TestCase,),
(TestCase,),
dict(
TestModels.__dict__,
exportTest=exportTest,
@ -43,4 +42,4 @@ TestModels_new_jit_API = type(
if __name__ == "__main__":
unittest.main()
run_tests()

View file

@ -16,6 +16,7 @@ from test_pytorch_common import (
TestCase,
flatten,
run_tests,
skipIfCaffe2,
skipIfNoLapack,
)
@ -35,7 +36,6 @@ from torch.onnx.symbolic_helper import (
_get_tensor_sizes,
parse_args,
)
from torch.testing._internal.common_utils import skipIfCaffe2
"""Usage: python test/onnx/test_operators.py [--no-onnx] [--produce-onnx-test-data]
--no-onnx: no onnx python dependence

View file

@ -5,7 +5,7 @@ import unittest
import numpy as np
from pytorch_helper import PyTorchModule
from test_pytorch_common import skipIfNoLapack
from test_pytorch_common import skipIfNoLapack, run_tests, TestCase
import torch.nn.init as init
import torch.onnx
@ -14,7 +14,7 @@ from caffe2.python.model_helper import ModelHelper
from torch import nn
class TestCaffe2Backend(unittest.TestCase):
class TestCaffe2Backend(TestCase):
@skipIfNoLapack
@unittest.skip("test broken because Lapack was always missing.")
def test_helper(self):
@ -67,4 +67,4 @@ class TestCaffe2Backend(unittest.TestCase):
if __name__ == "__main__":
unittest.main()
run_tests()

View file

@ -1,11 +1,10 @@
# Owner(s): ["module: onnx"]
import unittest
import onnxruntime
import torch
from torch._C import parse_ir
from torch.onnx import verification
from test_pytorch_common import TestCase, run_tests
def _jit_graph_to_onnx_model(graph, operator_export_type, opset_version):
@ -85,7 +84,7 @@ def MakeTestCase(opset_version: int) -> type:
name = f"TestJITIRToONNX_opset{opset_version}"
return type(
str(name),
(unittest.TestCase,),
(TestCase,),
dict(_TestJITIRToONNX.__dict__, opset_version=opset_version),
)
@ -93,4 +92,4 @@ def MakeTestCase(opset_version: int) -> type:
TestJITIRToONNX_opset14 = MakeTestCase(14)
if __name__ == "__main__":
unittest.main()
run_tests()

View file

@ -29,6 +29,8 @@ from test_pytorch_common import (
skipIfTravis,
skipIfUnsupportedMinOpsetVersion,
skipIfUnsupportedOpsetVersion,
TestCase,
run_tests,
)
# Import various models for testing
@ -128,7 +130,7 @@ model_urls = {
}
class TestCaffe2Backend_opset9(unittest.TestCase):
class TestCaffe2Backend_opset9(TestCase):
opset_version = 9
embed_params = False
@ -3195,44 +3197,44 @@ setup_rnn_tests()
# to embed_params=True
TestCaffe2BackendEmbed_opset9 = type(
"TestCaffe2BackendEmbed_opset9",
(unittest.TestCase,),
(TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, embed_params=True),
)
# opset 7 tests
TestCaffe2Backend_opset7 = type(
"TestCaffe2Backend_opset7",
(unittest.TestCase,),
(TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, opset_version=7),
)
TestCaffe2BackendEmbed_opset7 = type(
"TestCaffe2BackendEmbed_opset7",
(unittest.TestCase,),
(TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, embed_params=True, opset_version=7),
)
# opset 8 tests
TestCaffe2Backend_opset8 = type(
"TestCaffe2Backend_opset8",
(unittest.TestCase,),
(TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, opset_version=8),
)
TestCaffe2BackendEmbed_opset8 = type(
"TestCaffe2BackendEmbed_opset8",
(unittest.TestCase,),
(TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, embed_params=True, opset_version=8),
)
# opset 10 tests
TestCaffe2Backend_opset10 = type(
"TestCaffe2Backend_opset10",
(unittest.TestCase,),
(TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, opset_version=10),
)
TestCaffe2BackendEmbed_opset10 = type(
"TestCaffe2BackendEmbed_opset10",
(unittest.TestCase,),
(TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, embed_params=True, opset_version=10),
)
@ -3240,9 +3242,9 @@ TestCaffe2BackendEmbed_opset10 = type(
# to embed_params=True
TestCaffe2BackendEmbed_opset9_new_jit_API = type(
"TestCaffe2BackendEmbed_opset9_new_jit_API",
(unittest.TestCase,),
(TestCase,),
dict(TestCaffe2Backend_opset9.__dict__, embed_params=True),
)
if __name__ == "__main__":
unittest.main()
run_tests()

View file

@ -1,8 +1,6 @@
# Owner(s): ["module: unknown"]
import io
import unittest
import numpy as np
import onnx
@ -10,9 +8,10 @@ import caffe2.python.onnx.backend as c2
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.onnx
from test_pytorch_common import TestCase, run_tests
class TestQuantizedOps(unittest.TestCase):
class TestQuantizedOps(TestCase):
def generic_test(
self, model, sample_inputs, input_names=None, decimal=3, relaxed_check=False
):
@ -378,4 +377,4 @@ class TestQuantizedOps(unittest.TestCase):
if __name__ == "__main__":
unittest.main()
run_tests()

View file

@ -30,11 +30,13 @@ from test_pytorch_common import (
RNN_HIDDEN_SIZE,
RNN_INPUT_SIZE,
RNN_SEQUENCE_LENGTH,
run_tests,
skipIfNoLapack,
skipIfUnsupportedMaxOpsetVersion,
skipIfUnsupportedMinOpsetVersion,
skipIfUnsupportedOpsetVersion,
skipScriptTest,
TestCase,
)
from torchvision import ops
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead
@ -62,9 +64,7 @@ from torch.onnx.symbolic_helper import _unimplemented
_ORT_PROVIDERS = ["CPUExecutionProvider"]
def run_model_test(
test_suite: Union[_TestONNXRuntime, unittest.TestCase], *args, **kwargs
):
def run_model_test(test_suite: Union[_TestONNXRuntime, TestCase], *args, **kwargs):
kwargs["ort_providers"] = _ORT_PROVIDERS
kwargs["opset_version"] = test_suite.opset_version
kwargs["keep_initializers_as_inputs"] = test_suite.keep_initializers_as_inputs
@ -72,7 +72,7 @@ def run_model_test(
def run_model_test_with_external_data(
test_suite: Union[_TestONNXRuntime, unittest.TestCase], *args, **kwargs
test_suite: Union[_TestONNXRuntime, TestCase], *args, **kwargs
):
kwargs["use_external_data"] = True
return run_model_test(test_suite, *args, **kwargs)
@ -12723,7 +12723,7 @@ def MakeTestCase(opset_version: int, keep_initializers_as_inputs: bool = True) -
name += "_IRv4"
return type(
str(name),
(unittest.TestCase,),
(TestCase,),
dict(
_TestONNXRuntime.__dict__,
opset_version=opset_version,
@ -12762,4 +12762,4 @@ TestONNXRuntime_opset16 = MakeTestCase(16, keep_initializers_as_inputs=False)
if __name__ == "__main__":
unittest.main()
run_tests()

View file

@ -8,6 +8,7 @@ from test_pytorch_common import (
skipIfNoCuda,
skipIfUnsupportedMinOpsetVersion,
skipScriptTest,
TestCase,
)
# TODO(justinchuby): Remove reference to other unit tests.
@ -18,7 +19,7 @@ from torch.cuda.amp import autocast
from torch.onnx._globals import GLOBALS
class TestONNXRuntime_cuda(unittest.TestCase):
class TestONNXRuntime_cuda(TestCase):
opset_version = GLOBALS.export_onnx_opset_version
keep_initializers_as_inputs = True
@ -150,4 +151,5 @@ TestONNXRuntime_cuda.setUp = TestONNXRuntime.setUp
TestONNXRuntime_cuda.run_test = TestONNXRuntime.run_test
if __name__ == "__main__":
# TODO: convert this to use common_utils.run_tests()
unittest.main(TestONNXRuntime_cuda())

View file

@ -1,9 +1,7 @@
# Owner(s): ["module: onnx"]
import unittest
import numpy as np
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion
from test_pytorch_common import run_tests, skipIfUnsupportedMinOpsetVersion, TestCase
import torch
from torch.onnx import _constants
@ -21,9 +19,9 @@ def expect_tensor(scalar_type, shape=None):
return verify
class TestONNXShapeInference(unittest.TestCase):
class TestONNXShapeInference(TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
TestCase.__init__(self, *args, **kwargs)
self.opset_version = _constants.onnx_main_opset
_set_onnx_shape_inference(True)
_set_opset_version(self.opset_version)
@ -273,4 +271,4 @@ class TestONNXShapeInference(unittest.TestCase):
if __name__ == "__main__":
unittest.main()
run_tests()