diff --git a/docs/source/conf.py b/docs/source/conf.py index 8fec5f16f98..44d95078703 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -238,7 +238,6 @@ coverage_ignore_classes = [ "StringType", "SymIntType", "SymBoolType", - "ThroughputBenchmark", "TracingState", "TupleType", "Type", diff --git a/docs/source/index.rst b/docs/source/index.rst index df662065833..a1dcc14bfeb 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -131,6 +131,7 @@ Features described in this documentation are classified by release status: sparse storage torch.testing + torch.utils torch.utils.benchmark torch.utils.bottleneck torch.utils.checkpoint diff --git a/docs/source/torch.rst b/docs/source/torch.rst index 04f0383fa92..ac85593f12d 100644 --- a/docs/source/torch.rst +++ b/docs/source/torch.rst @@ -725,9 +725,6 @@ Operator Tags .. py:module:: torch.contrib .. py:module:: torch.utils.backcompat -.. This submodule is split manually without a top level page. -.. py:module:: torch.utils - .. This module is only used internally for ROCm builds. .. py:module:: torch.utils.hipify @@ -735,14 +732,3 @@ Operator Tags .. for tracking purposes .. py:module:: torch.utils.model_dump .. py:module:: torch.utils.viz - -.. automodule:: torch.autograd -.. currentmodule:: torch.autograd - -Engine Configuration ----------------------------------- -.. autosummary:: - :toctree: generated - :nosignatures: - - set_multithreading_enabled diff --git a/docs/source/utils.rst b/docs/source/utils.rst new file mode 100644 index 00000000000..7f26dcadb82 --- /dev/null +++ b/docs/source/utils.rst @@ -0,0 +1,13 @@ +torch.utils +=================================== +.. automodule:: torch.utils +.. currentmodule:: torch.utils + +.. 
autosummary:: + :toctree: generated + :nosignatures: + + rename_privateuse1_backend + generate_methods_for_privateuse1_backend + get_cpp_backtrace + set_module diff --git a/test/allowlist_for_publicAPI.json b/test/allowlist_for_publicAPI.json index 6b424d34d70..7f61c3c1d1c 100644 --- a/test/allowlist_for_publicAPI.json +++ b/test/allowlist_for_publicAPI.json @@ -998,11 +998,6 @@ "Tuple", "Union" ], - "torch.utils": [ - "disable_minidumps", - "enable_minidumps", - "enable_minidumps_on_exceptions" - ], "torch.utils.benchmark.utils.compare": [ "Colorize", "Table", diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in index 1599a66f8a7..d30bfd50161 100644 --- a/torch/_C/__init__.pyi.in +++ b/torch/_C/__init__.pyi.in @@ -1905,9 +1905,6 @@ def _c10d_init() -> _bool: ... # Defined in torch/csrc/distributed/rpc/testing/init.cpp def _faulty_agent_init() -> _bool: ... -def _enable_minidumps(directory: str) -> None: ... -def _disable_minidumps() -> None: ... -def _enable_minidumps_on_exceptions() -> None: ... def _register_py_class_for_device(device: str, cls: Any) -> None: ... def _activate_cuda_trace() -> None: ... 
diff --git a/torch/utils/__init__.py b/torch/utils/__init__.py index c8b62e64077..03ded2da1b7 100644 --- a/torch/utils/__init__.py +++ b/torch/utils/__init__.py @@ -2,12 +2,13 @@ import os.path as _osp import torch from .throughput_benchmark import ThroughputBenchmark -from ._crash_handler import enable_minidumps, disable_minidumps, enable_minidumps_on_exceptions from .cpp_backtrace import get_cpp_backtrace from .backend_registration import rename_privateuse1_backend, generate_methods_for_privateuse1_backend -# Set the module for a given object for nicer printing def set_module(obj, mod): + """ + Set the module attribute on a python object for nicer printing + """ if not isinstance(mod, str): raise TypeError("The mod argument should be a string") obj.__module__ = mod diff --git a/torch/utils/_crash_handler.py b/torch/utils/_crash_handler.py deleted file mode 100644 index 84b345229bd..00000000000 --- a/torch/utils/_crash_handler.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -import sys -import pathlib - -import torch - -DEFAULT_MINIDUMP_DIR = "/tmp/pytorch_crashes" -if sys.platform == "win32": - DEFAULT_MINIDUMP_DIR = str(pathlib.Path.home() / "AppData" / "pytorch_crashes") - -def enable_minidumps(directory=DEFAULT_MINIDUMP_DIR): - if directory == DEFAULT_MINIDUMP_DIR: - pathlib.Path(directory).mkdir(parents=True, exist_ok=True) - elif not os.path.exists(directory): - raise RuntimeError(f"Directory does not exist: {directory}") - - torch._C._enable_minidumps(directory) - - -def enable_minidumps_on_exceptions(): - torch._C._enable_minidumps_on_exceptions() - - -def disable_minidumps(): - torch._C._disable_minidumps() diff --git a/torch/utils/backend_registration.py b/torch/utils/backend_registration.py index 83edab47957..0536b7344b9 100644 --- a/torch/utils/backend_registration.py +++ b/torch/utils/backend_registration.py @@ -14,11 +14,11 @@ def rename_privateuse1_backend(backend_name: str) -> None: r""" rename_privateuse1_backend(backend_name) -> 
None - Note: support the custom device with privateuse1 - This is a registration API for external backends that would like to register their - own device and C++ kernels out of tree. + This API should be used to rename the privateuse1 backend device to make + it more convenient to use as a device name within PyTorch APIs. The steps are: + (1) (In C++) implement kernels for various torch operations, and register them to the PrivateUse1 dispatch key. (2) (In python) call torch.register_privateuse1_backend("foo") @@ -29,52 +29,54 @@ def rename_privateuse1_backend(backend_name: str) -> None: the external backend after it's already been set will result in an error. Note(AMP): If you want to support AMP on your device, you can register a custom backend module. - The backend must register a custom backend module with `torch._register_device_module("foo", BackendModule)`. + The backend must register a custom backend module with ``torch._register_device_module("foo", BackendModule)``. BackendModule needs to have the following API's: - (1) get_amp_supported_dtype() -> List[torch.dtype] - get the supported dtypes on your `foo` device in AMP, maybe the `foo` device supports one more dtype. + (1) ``get_amp_supported_dtype() -> List[torch.dtype]`` + get the supported dtypes on your "foo" device in AMP, maybe the "foo" device supports one more dtype. - (2) is_autocast_enabled() -> bool - check the AMP is enabled or not on your `foo` device. + (2) ``is_autocast_enabled() -> bool`` + check the AMP is enabled or not on your "foo" device. - (3) get_autocast_dtype() -> torch.dtype - get the supported dtype on your `foo` device in AMP, which is set by `set_autocast_dtype` or the - default dtype, and the default dtype is `torch.float16`. + (3) ``get_autocast_dtype() -> torch.dtype`` + get the supported dtype on your "foo" device in AMP, which is set by ``set_autocast_dtype`` or the + default dtype, and the default dtype is ``torch.float16``. 
- (4) set_autocast_enabled(bool) -> None - enable the AMP or not on your `foo` device. + (4) ``set_autocast_enabled(bool) -> None`` + enable the AMP or not on your "foo" device. - (5) set_autocast_dtype(dtype) -> None - set the supported dtype on your `foo` device in AMP, and the dtype be contained in the dtypes got - from `get_amp_supported_dtype`. + (5) ``set_autocast_dtype(dtype) -> None`` + set the supported dtype on your "foo" device in AMP, and the dtype be contained in the dtypes got + from ``get_amp_supported_dtype``. Note(random): If you want to support to set seed for your device, BackendModule needs to have the following API's: - (1) _is_in_bad_fork() -> bool - Return `True` if now it is in bad_fork, else return `False`. + (1) ``_is_in_bad_fork() -> bool`` + Return ``True`` if now it is in bad_fork, else return ``False``. - (2) manual_seed_all(seed: int) -> None + (2) ``manual_seed_all(seed: int) -> None`` Sets the seed for generating random numbers for your devices. - (3) device_count() -> int: - Returns the number of `foo`s available. + (3) ``device_count() -> int`` + Returns the number of "foo"s available. - (4) get_rng_state(device: Union[int, str, torch.device] = 'foo') -> Tensor: + (4) ``get_rng_state(device: Union[int, str, torch.device] = 'foo') -> Tensor`` Returns a list of ByteTensor representing the random number states of all devices. - (5) set_rng_state(new_state: Tensor, device: Union[int, str, torch.device] = 'foo') -> None: + (5) ``set_rng_state(new_state: Tensor, device: Union[int, str, torch.device] = 'foo') -> None`` Sets the random number generator state of the specified "foo" device. And there are some common funcs: - (1) is_available() -> bool: - Returns a bool indicating if `foo` is currently available. + + (1) ``is_available() -> bool`` + Returns a bool indicating if "foo" is currently available. 
+ + (2) ``current_device() -> int`` + Returns the index of a currently selected device. + For more details, see https://pytorch.org/tutorials/advanced/extend_dispatcher.html#get-a-dispatch-key-for-your-backend For an existing example, see https://github.com/bdhirsh/pytorch_open_registration_example - (2) current_device() -> int: - Returns the index of a currently selected device. - Example:: >>> # xdoctest: +SKIP("failing") @@ -82,6 +84,7 @@ def rename_privateuse1_backend(backend_name: str) -> None: # This will work, assuming that you've implemented the right C++ kernels # to implement torch.ones. >>> a = torch.ones(2, device="foo") + """ _rename_privateuse1_backend(backend_name) global _privateuse1_backend_name @@ -259,13 +262,6 @@ def generate_methods_for_privateuse1_backend(for_tensor: bool = True, for_module r""" generate_methods_for_privateuse1_backend(for_tensor, for_module, for_storage, unsupported_dtype) -> None - Args: - for_tensor (bool): whether register related methods for torch.Tensor class. - for_module (bool): whether register related methods for torch.nn.Module class. - for_storage (bool): whether register related methods for torch.Storage class. - unsupported_dtype(List[torch.dtype]): takes effect only when the storage method needs to be generated, - indicating that the storage does not support the torch.dtype type. - Automatically generate attributes and methods for the custom backend after rename privateuse1 backend. In the default scenario, storage-related methods will not be generated automatically. @@ -279,6 +275,13 @@ def generate_methods_for_privateuse1_backend(for_tensor: bool = True, for_module and so will not be properly typed. For Storage methods generate, if you need to support sparse data storage, you need to extend the implementation yourself. + Args: + for_tensor (bool): whether register related methods for torch.Tensor class. + for_module (bool): whether register related methods for torch.nn.Module class. 
+ for_storage (bool): whether register related methods for torch.Storage class. + unsupported_dtype (List[torch.dtype]): takes effect only when the storage method needs to be generated, + indicating that the storage does not support the torch.dtype type. + Example:: >>> # xdoctest: +SKIP("failing")