mirror of
https://github.com/saymrwulf/pytorch.git
synced 2026-05-14 20:57:59 +00:00
[docs] Update sphinx to 3.5.4 (#61601)
Summary: Sphinx 4.x is out, but it seems that requires many more changes to adopt. So instead use the latest version of 3.x, which includes several nice features. * Add some noindex directives to deal with warnings that would otherwise be triggered by this change due to conflicts between the docstrings declaring a function and the autodoc extension declaring the same function. * Update distributions.utils.lazy_property to make it look like a regular property when sphinx autodoc inspects classes. Pull Request resolved: https://github.com/pytorch/pytorch/pull/61601 Reviewed By: ejguan Differential Revision: D29801876 Pulled By: albanD fbshipit-source-id: 544d2434a15ceb77bff236e934dbd8e4dbd9d160
This commit is contained in:
parent
e352585f67
commit
9fdf7ec6a2
7 changed files with 78 additions and 36 deletions
|
|
@ -1,4 +1,4 @@
|
|||
sphinx==2.4.4
|
||||
sphinx==3.5.4
|
||||
docutils==0.16
|
||||
-e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
|
||||
sphinxcontrib.katex
|
||||
|
|
|
|||
|
|
@ -262,9 +262,9 @@ def setup(app):
|
|||
|
||||
# From PyTorch 1.5, we now use autogenerated files to document classes and
|
||||
# functions. This breaks older references since
|
||||
# https://docs.pytorch.org/torch.html#torch.flip
|
||||
# https://pytorch.org/docs/stable/torch.html#torch.flip
|
||||
# moved to
|
||||
# https://docs.pytorch.org/torch/generated/torchflip.html
|
||||
# https://pytorch.org/docs/stable/generated/torch.flip.html
|
||||
# which breaks older links from blog posts, stack overflow answers and more.
|
||||
# To mitigate that, we add an id="torch.flip" in an appropriate place
|
||||
# in torch.html by overriding the visit_reference method of html writers.
|
||||
|
|
|
|||
|
|
@ -4204,37 +4204,39 @@ class TestLazyLogitsInitialization(TestCase):
|
|||
def test_lazy_logits_initialization(self):
    """Verify that a logits-parameterized distribution never eagerly
    materializes ``probs``.

    For each example distribution that accepts ``probs``, rebuild it with
    the equivalent ``logits`` parameterization, then exercise
    ``log_prob``, ``enumerate_support`` and the shape properties, checking
    after each step that ``probs`` has not been cached in the instance
    ``__dict__`` (i.e. the lazy property stayed lazy).
    """
    for Dist, params in self.examples:
        param = params[0].copy()
        # Guard clause: only distributions parameterized by probs are
        # relevant to this check; skip the rest early.
        if 'probs' not in param:
            continue
        probs = param.pop('probs')
        param['logits'] = probs_to_logits(probs)
        dist = Dist(**param)
        # Create new instance to generate a valid sample
        dist.log_prob(Dist(**param).sample())
        message = 'Failed for {} example 0/{}'.format(Dist.__name__, len(params))
        self.assertNotIn('probs', dist.__dict__, msg=message)
        try:
            dist.enumerate_support()
        except NotImplementedError:
            pass
        self.assertNotIn('probs', dist.__dict__, msg=message)
        batch_shape, event_shape = dist.batch_shape, dist.event_shape
        self.assertNotIn('probs', dist.__dict__, msg=message)
|
||||
|
||||
def test_lazy_probs_initialization(self):
    """Verify that a probs-parameterized distribution never eagerly
    materializes ``logits``.

    Mirror of ``test_lazy_logits_initialization``: construct each example
    distribution from ``probs``, exercise ``sample``,
    ``enumerate_support`` and the shape properties, and check after each
    step that ``logits`` has not been cached in the instance
    ``__dict__``.
    """
    for Dist, params in self.examples:
        param = params[0].copy()
        # Guard clause: skip distributions that are not parameterized
        # by probs.
        if 'probs' not in param:
            continue
        dist = Dist(**param)
        dist.sample()
        message = 'Failed for {} example 0/{}'.format(Dist.__name__, len(params))
        self.assertNotIn('logits', dist.__dict__, msg=message)
        try:
            dist.enumerate_support()
        except NotImplementedError:
            pass
        self.assertNotIn('logits', dist.__dict__, msg=message)
        batch_shape, event_shape = dist.batch_shape, dist.event_shape
        self.assertNotIn('logits', dist.__dict__, msg=message)
|
||||
|
||||
|
||||
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
|
||||
|
|
|
|||
|
|
@ -3812,14 +3812,16 @@ inferred from the arguments of ``self.to(*args, **kwargs)``.
|
|||
|
||||
Here are the ways to call ``to``:
|
||||
|
||||
.. function:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
|
||||
.. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
|
||||
:noindex:
|
||||
|
||||
Returns a Tensor with the specified :attr:`dtype`
|
||||
|
||||
Args:
|
||||
{memory_format}
|
||||
|
||||
.. function:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
|
||||
.. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
|
||||
:noindex:
|
||||
|
||||
Returns a Tensor with the specified :attr:`device` and (optional)
|
||||
:attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
|
||||
|
|
@ -3832,7 +3834,8 @@ Here are the ways to call ``to``:
|
|||
Args:
|
||||
{memory_format}
|
||||
|
||||
.. function:: to(other, non_blocking=False, copy=False) -> Tensor
|
||||
.. method:: to(other, non_blocking=False, copy=False) -> Tensor
|
||||
:noindex:
|
||||
|
||||
Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
|
||||
the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert
|
||||
|
|
@ -4381,7 +4384,8 @@ Example::
|
|||
False
|
||||
|
||||
|
||||
.. function:: view(dtype) -> Tensor
|
||||
.. method:: view(dtype) -> Tensor
|
||||
:noindex:
|
||||
|
||||
Returns a new tensor with the same data as the :attr:`self` tensor but of a
|
||||
different :attr:`dtype`. :attr:`dtype` must have the same number of bytes per
|
||||
|
|
|
|||
|
|
@ -241,6 +241,7 @@ Example::
|
|||
tensor([ 20.0202, 21.0985, 21.3506, 19.3944])
|
||||
|
||||
.. function:: add(input, other, *, alpha=1, out=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
Each element of the tensor :attr:`other` is multiplied by the scalar
|
||||
:attr:`alpha` and added to each element of the tensor :attr:`input`.
|
||||
|
|
@ -635,6 +636,7 @@ Example::
|
|||
tensor(False)
|
||||
|
||||
.. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
For each row of :attr:`input` in the given dimension :attr:`dim`,
|
||||
returns `True` if all elements in the row evaluate to `True` and `False` otherwise.
|
||||
|
|
@ -690,6 +692,7 @@ Example::
|
|||
tensor(True)
|
||||
|
||||
.. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
For each row of :attr:`input` in the given dimension :attr:`dim`,
|
||||
returns `True` if any element in the row evaluate to `True` and `False` otherwise.
|
||||
|
|
@ -2824,6 +2827,7 @@ Args:
|
|||
tensor (Tensor): A quantized Tensor
|
||||
|
||||
.. function:: dequantize(tensors) -> sequence of Tensors
|
||||
:noindex:
|
||||
|
||||
Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors
|
||||
|
||||
|
|
@ -5774,6 +5778,7 @@ Example::
|
|||
tensor(0.7445)
|
||||
|
||||
.. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
|
||||
:noindex:
|
||||
|
||||
Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
|
||||
value of each row of the :attr:`input` tensor in the given dimension
|
||||
|
|
@ -5808,6 +5813,7 @@ Example::
|
|||
torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
|
||||
|
||||
.. function:: max(input, other, *, out=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
See :func:`torch.maximum`.
|
||||
|
||||
|
|
@ -5929,6 +5935,7 @@ Example::
|
|||
tensor(0)
|
||||
|
||||
.. function:: argmax(input, dim, keepdim=False) -> LongTensor
|
||||
:noindex:
|
||||
|
||||
Returns the indices of the maximum values of a tensor across a dimension.
|
||||
|
||||
|
|
@ -5970,6 +5977,7 @@ Example::
|
|||
tensor(0.3367)
|
||||
|
||||
.. function:: mean(input, dim, keepdim=False, *, out=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
Returns the mean value of each row of the :attr:`input` tensor in the given
|
||||
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
|
||||
|
|
@ -6028,6 +6036,7 @@ Example::
|
|||
tensor(0.2202)
|
||||
|
||||
.. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
|
||||
:noindex:
|
||||
|
||||
Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
|
||||
in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.
|
||||
|
|
@ -6097,6 +6106,7 @@ Example::
|
|||
tensor(2.)
|
||||
|
||||
.. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
|
||||
:noindex:
|
||||
|
||||
Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
|
||||
in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
|
||||
|
|
@ -6233,6 +6243,7 @@ Example::
|
|||
tensor(0.6750)
|
||||
|
||||
.. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
|
||||
:noindex:
|
||||
|
||||
Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
|
||||
value of each row of the :attr:`input` tensor in the given dimension
|
||||
|
|
@ -6267,6 +6278,7 @@ Example::
|
|||
torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
|
||||
|
||||
.. function:: min(input, other, *, out=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
See :func:`torch.minimum`.
|
||||
""".format(**single_dim_common))
|
||||
|
|
@ -6584,6 +6596,7 @@ Example::
|
|||
tensor([ 20.1494, -42.5491, 260.8663])
|
||||
|
||||
.. function:: mul(input, other, *, out=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
Each element of the tensor :attr:`input` is multiplied by the corresponding
|
||||
element of the Tensor :attr:`other`. The resulting tensor is returned.
|
||||
|
|
@ -7117,6 +7130,7 @@ Example::
|
|||
8.0505, 8.1408, 9.0563, 10.0566])
|
||||
|
||||
.. function:: normal(mean=0.0, std, *, out=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
Similar to the function above, but the means are shared among all drawn
|
||||
elements.
|
||||
|
|
@ -7134,6 +7148,7 @@ Example::
|
|||
tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303])
|
||||
|
||||
.. function:: normal(mean, std=1.0, *, out=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
Similar to the function above, but the standard deviations are shared among
|
||||
all drawn elements.
|
||||
|
|
@ -7151,6 +7166,7 @@ Example::
|
|||
tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361])
|
||||
|
||||
.. function:: normal(mean, std, size, *, out=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
Similar to the function above, but the means and standard deviations are shared
|
||||
among all drawn elements. The resulting tensor has size given by :attr:`size`.
|
||||
|
|
@ -7413,6 +7429,7 @@ Example::
|
|||
tensor([ 1., 4., 27., 256.])
|
||||
|
||||
.. function:: pow(self, exponent, *, out=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
:attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
|
||||
The returned tensor :attr:`out` is of the same shape as :attr:`exponent`
|
||||
|
|
@ -7498,6 +7515,7 @@ Example::
|
|||
tensor(0.6902)
|
||||
|
||||
.. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
Returns the product of each row of the :attr:`input` tensor in the given
|
||||
dimension :attr:`dim`.
|
||||
|
|
@ -8931,6 +8949,7 @@ Example::
|
|||
tensor(-0.5475)
|
||||
|
||||
.. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
Returns the sum of each row of the :attr:`input` tensor in the given
|
||||
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
|
||||
|
|
@ -8980,6 +8999,7 @@ Example::
|
|||
tensor(7.)
|
||||
|
||||
.. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
Returns the sum of each row of the :attr:`input` tensor in the given
|
||||
dimension :attr:`dim`, treating Not a Numbers (NaNs) as zero.
|
||||
|
|
@ -10395,6 +10415,7 @@ Example::
|
|||
[0.0000, 0.0000]], dtype=torch.float64)
|
||||
|
||||
.. function:: where(condition) -> tuple of LongTensor
|
||||
:noindex:
|
||||
|
||||
``torch.where(condition)`` is identical to
|
||||
``torch.nonzero(condition, as_tuple=True)``.
|
||||
|
|
@ -10944,6 +10965,7 @@ Example::
|
|||
[3, 4]])
|
||||
|
||||
.. function:: repeat_interleave(repeats, *, output_size=None) -> Tensor
|
||||
:noindex:
|
||||
|
||||
If the `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
|
||||
`tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
|
||||
|
|
|
|||
|
|
@ -92,7 +92,7 @@ def probs_to_logits(probs, is_binary=False):
|
|||
return torch.log(ps_clamped)
|
||||
|
||||
|
||||
class lazy_property(object):
|
||||
class lazy_property:
|
||||
r"""
|
||||
Used as a decorator for lazy loading of class attributes. This uses a
|
||||
non-data descriptor that calls the wrapped method to compute the property on
|
||||
|
|
@ -105,13 +105,23 @@ class lazy_property(object):
|
|||
|
||||
def __get__(self, instance, obj_type=None):
    """Descriptor protocol: compute, cache, and return the lazy value.

    When accessed on the class itself (``instance is None``) — as Sphinx
    autodoc does while inspecting — return a property-like wrapper so the
    attribute documents like a regular ``property``.

    Otherwise evaluate the wrapped method under ``torch.enable_grad()``
    and store the result on the instance; because ``lazy_property`` is a
    non-data descriptor, later lookups hit the instance attribute
    directly and skip this method.
    """
    if instance is None:
        return _lazy_property_and_property(self.wrapped)
    with torch.enable_grad():
        value = self.wrapped(instance)
    # Cache on the instance under the same name, shadowing the descriptor.
    setattr(instance, self.wrapped.__name__, value)
    return value
|
||||
|
||||
|
||||
class _lazy_property_and_property(lazy_property, property):
    """We want lazy properties to look like multiple things.

    * property when Sphinx autodoc looks
    * lazy_property when Distribution validate_args looks
    """
    def __init__(self, wrapped):
        # Initialize only the property side; intentionally skip
        # lazy_property.__init__ since this wrapper is used purely for
        # introspection. (No `return` — __init__ must return None.)
        property.__init__(self, wrapped)
|
||||
|
||||
|
||||
def tril_matrix_to_vec(mat, diag=0):
|
||||
r"""
|
||||
Convert a `D x D` matrix or a batch of matrices into a (batched) vector
|
||||
|
|
|
|||
|
|
@ -764,12 +764,16 @@ class Module:
|
|||
This can be called as
|
||||
|
||||
.. function:: to(device=None, dtype=None, non_blocking=False)
|
||||
:noindex:
|
||||
|
||||
.. function:: to(dtype, non_blocking=False)
|
||||
:noindex:
|
||||
|
||||
.. function:: to(tensor, non_blocking=False)
|
||||
:noindex:
|
||||
|
||||
.. function:: to(memory_format=torch.channels_last)
|
||||
:noindex:
|
||||
|
||||
Its signature is similar to :meth:`torch.Tensor.to`, but only accepts
|
||||
floating point or complex :attr:`dtype`\ s. In addition, this method will
|
||||
|
|
|
|||
Loading…
Reference in a new issue