Revert "Add linalg.vander"
This reverts commit 1ea49c68d0.
Reverted https://github.com/pytorch/pytorch/pull/76303 on behalf of https://github.com/malfet
parent 150c307140
commit bc5307347f
7 changed files with 2 additions and 135 deletions
@@ -4617,33 +4617,4 @@ Tensor linalg_solve_triangular(
   return out;
 }
 
-Tensor linalg_vander(
-    const Tensor& x,
-    c10::optional<int64_t> N) {
-  const auto x_ = x.dim() == 0 ? x.unsqueeze(-1) : x;
-
-  auto shape = x_.sizes().vec();
-  const auto n = N.value_or(shape.back());
-  TORCH_CHECK(n >= 0, "N must be non-negative.");
-
-  // Get the dtype of cumprod
-  auto dtype = x_.scalar_type();
-  auto options = x_.options()
-                     .dtype(at::isIntegralType(dtype, /*includeBool=*/true) ? kLong : dtype);
-
-  // n = 0 or n = 1 case (empty or one-column case)
-  shape.push_back(std::min<int64_t>(n, 1LL));
-  auto ones = x_.new_ones(shape, options);
-
-  if (n <= 1) {
-    // new_ones does not propagate requires_grad. UGH
-    ones.requires_grad_(x_.requires_grad());
-    return ones;
-  } else {
-    // Append the cumprod of the other 0...n-1 powers
-    shape.back() = n - 1;
-    auto result = at::cumprod(x_.unsqueeze(-1).expand(shape), -1);
-    return at::cat({ones, result}, /*dim=*/-1);
-  }
-}
 }} // namespace at::native
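The kernel removed above builds the Vandermonde matrix as a column of ones concatenated with a running product along a new trailing dimension. As a reference, here is a minimal Python sketch of the same construction using only public torch ops; the helper name vander_via_cumprod is illustrative and not part of PyTorch:

    import torch

    def vander_via_cumprod(x: torch.Tensor, N=None) -> torch.Tensor:
        # Mirror the reverted C++ kernel: treat a 0-dim input as a length-1 vector.
        x_ = x.unsqueeze(-1) if x.dim() == 0 else x
        n = x_.size(-1) if N is None else N
        assert n >= 0, "N must be non-negative."
        # Integral (and bool) inputs are promoted to int64, matching cumprod's
        # default integer promotion.
        dtype = x_.dtype if (x_.dtype.is_floating_point or x_.dtype.is_complex) else torch.long
        ones = torch.ones(*x_.shape, min(n, 1), dtype=dtype, device=x_.device)
        if n <= 1:
            return ones
        # Columns 1..n-1 of V are the cumulative products x, x^2, ..., x^(n-1).
        powers = torch.cumprod(x_.unsqueeze(-1).expand(*x_.shape, n - 1).to(dtype), dim=-1)
        return torch.cat([ones, powers], dim=-1)

For x = torch.tensor([1, 2, 3]) this returns rows [1, 1, 1], [1, 2, 4], [1, 3, 9], the ascending-power layout documented in the docstring hunk below.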
@@ -7179,9 +7179,6 @@
   dispatch:
     CPU, CUDA: linalg_solve_triangular
 
-- func: linalg_vander(Tensor x, *, int? N=None) -> Tensor
-  python_module: linalg
-
 - func: symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
   dispatch:
     CompositeExplicitAutograd: symeig_out
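Two details of the schema being deleted: python_module: linalg places the generated binding under torch.linalg rather than the top-level torch namespace, and the `*` marks N as keyword-only. A hedged sketch of what the generated binding accepted before this revert (runnable in either state thanks to the hasattr guard):

    import torch

    x = torch.tensor([1, 2, 3])
    if hasattr(torch.linalg, "vander"):      # only true before this revert
        torch.linalg.vander(x)               # N defaults to x.size(-1)
        torch.linalg.vander(x, N=2)          # OK: N passed by keyword
        try:
            torch.linalg.vander(x, 2)        # rejected: the `*` makes N keyword-only
        except TypeError:
            pass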
@@ -95,15 +95,6 @@ Tensor Operations
     tensorinv
     tensorsolve
 
-Misc
-----
-
-.. autosummary::
-    :toctree: generated
-    :nosignatures:
-
-    vander
-
 Experimental Functions
 ----------------------
 .. autosummary::
@@ -2104,7 +2104,6 @@
         "svdvals",
         "tensorinv",
         "tensorsolve",
-        "vander",
         "vector_norm"
     ],
     "torch.multiprocessing": [
@@ -2527,53 +2527,3 @@ Examples::
     >>> torch.dist(Q.mT @ Q, torch.eye(4))
     tensor(6.2158e-07)
 """)
-
-vander = _add_docstr(_linalg.linalg_vander, r"""
-vander(x, N=None) -> Tensor
-
-Generates a Vandermonde matrix.
-
-Returns the Vandermonde matrix :math:`V`
-
-.. math::
-
-    V = \begin{pmatrix}
-            1 & x_1 & x_1^2 & \dots & x_1^{N-1}\\
-            1 & x_2 & x_2^2 & \dots & x_2^{N-1}\\
-            1 & x_3 & x_3^2 & \dots & x_3^{N-1}\\
-            \vdots & \vdots & \vdots & \ddots & \vdots \\
-            1 & x_n & x_n^2 & \dots & x_n^{N-1}
-        \end{pmatrix}.
-
-If :attr:`N`\ `= None`, then `N = x.size(-1)` so that the output is a square matrix.
-
-Supports inputs of all dtypes.
-Also supports batches of vectors, and if :attr:`x` is a batch of vectors then
-the output has the same batch dimensions.
-
-Differences with `numpy.vander`:
-
-- Unlike `numpy.vander`, this function returns the powers of :attr:`x` in ascending order.
-  To get them in the reverse order call ``linalg.vander(x, N).flip(-1)``.
-
-Args:
-    x (Tensor): tensor of shape `(*, n)` where `*` is zero or more batch dimensions
-        consisting of vectors.
-
-Keyword args:
-    N (int, optional): Number of columns in the output. Default: `x.size(-1)`
-
-Example::
-
-    >>> x = torch.tensor([1, 2, 3, 5])
-    >>> linalg.vander(x)
-    tensor([[  1,   1,   1,   1],
-            [  1,   2,   4,   8],
-            [  1,   3,   9,  27],
-            [  1,   5,  25, 125]])
-    >>> linalg.vander(x, N=3)
-    tensor([[ 1,  1,  1],
-            [ 1,  2,  4],
-            [ 1,  3,  9],
-            [ 1,  5, 25]])
-""")
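The removed docstring points out that linalg.vander returned powers in ascending order, the reverse of numpy.vander's default. That relationship is easy to confirm with NumPy alone, independent of the reverted operator:

    import numpy as np

    x = np.array([1, 2, 3, 5])
    asc = np.vander(x, increasing=True)   # powers 0..N-1, the layout linalg.vander used
    desc = np.vander(x)                   # numpy's default: descending powers
    # Flipping the last axis converts one ordering into the other,
    # mirroring the docstring's `linalg.vander(x, N).flip(-1)` recipe.
    assert (asc[:, ::-1] == desc).all()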
@@ -1037,7 +1037,6 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
     torch.unsafe_split: lambda tensor, split_size_or_sections, dim=0: -1,
     torch.unsafe_split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
     torch.unsqueeze: lambda input, dim, out=None: -1,
-    torch.linalg.vander: lambda x, N=None: -1,
     torch.var: lambda input, dim=None: -1,
     torch.var_mean: lambda input, dim=None: -1,
     torch.vsplit: lambda input, indices_or_sections: -1,
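The table edited here maps each overridable function to a lambda with a matching signature; the override tests use it to verify that every op, torch.linalg.vander included while it existed, dispatches through the __torch_function__ protocol. A minimal sketch of that protocol in action; the Logged subclass is illustrative, not part of the test suite:

    import torch

    class Logged(torch.Tensor):
        # Hypothetical subclass: records which torch functions it intercepts.
        seen = []

        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            cls.seen.append(func)
            return super().__torch_function__(func, types, args, kwargs or {})

    t = torch.tensor([1.0, 2.0]).as_subclass(Logged)
    torch.unsqueeze(t, 0)   # routed through Logged.__torch_function__
    print(Logged.seen)      # e.g. includes torch.unsqueeze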
@@ -369,7 +369,7 @@ NumericsFilter = collections.namedtuple('NumericsFilter', ['condition', 'safe_va
 # "name".
 #
 # All the "sample_inputs" functions are invoked within a `torch.no_grad()`
-# environment for efficiency and correctness. As such remember to set the
+# environment for efficiency and correctness. As such remember to set the the
 # "requires_grad" flag on the inputs **after** performing any transformations
 # on them.
 #
@@ -402,7 +402,7 @@ NumericsFilter = collections.namedtuple('NumericsFilter', ['condition', 'safe_va
 # Sample inputs are designed to be used with many tests, some
 # that are very time consuming, so they should be a small
 # set with small tensors. An elaborated set of sample inputs
-# can be specified using the "reference_inputs_func" attribute.
+# can be specified using the the "reference_inputs_func" attribute.
 # The "reference inputs" for an operation are an extended
 # set of sample inputs that can more exhaustively test an
 # operator. They are used by only a few tests that are careful
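The convention these comments describe is worth spelling out: inside torch.no_grad() no graph is recorded, so a transformation applied to an input that already requires grad would silently detach it from autograd. A small sketch of the pattern the comment prescribes; the names are illustrative:

    import torch

    with torch.no_grad():
        # Build the sample value first; nothing is tracked here.
        base = torch.rand(3, 3) * 2 - 1
    # Flag requires_grad only after all transformations, so `base`
    # is a proper autograd leaf when a test differentiates through it.
    arg = base.requires_grad_()
    assert arg.is_leaf and arg.requires_grad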
@@ -3550,37 +3550,6 @@ def sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwa
     for shape in shapes:
         yield SampleInput(make_arg(shape))
 
-def sample_inputs_linalg_vander(op_info, device, dtype, requires_grad=False, **kwargs):
-    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
-
-    shapes = ((),
-              (1,),
-              (S,),
-              (2, S),)
-
-    for shape in shapes:
-        yield SampleInput(make_arg(shape))
-        n = shape[-1] if len(shape) > 0 else 1
-        for i in range(3):
-            # n-1, n, n+1
-            N = n + i - 1
-            if N < 0:
-                continue
-            yield SampleInput(make_arg(shape), kwargs=dict(N=N))
-
-def np_vander_batched(x, N=None):
-    # Wrapper around np.vander that supports batches of 1 dimension (enough for the tests)
-    if x.ndim == 0:
-        x = x[np.newaxis]
-    if x.ndim == 1:
-        y = np.vander(x, N=N, increasing=True)
-        return y
-    else:
-        if N is None:
-            N = x.shape[-1]
-        y = np.vander(x.ravel(), N=N, increasing=True).reshape((*x.shape, N))
-        return y
-
 def np_sinc_with_fp16_as_fp32(x):
     # Wraps numpy's sinc function so that fp16 values are promoted to fp32
     # before sinc is invoked. Context: numpy's sinc returns NaN when evaluated
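The np_vander_batched helper above flattens the batch, builds every Vandermonde row with a single np.vander call, and reshapes back. A quick check that this ravel-and-reshape trick equals the direct broadcasting definition x[..., None] ** arange(N):

    import numpy as np

    x = np.array([[1, 2, 3],
                  [4, 5, 6]])
    N = 4
    # The helper's trick: one flat np.vander call, then restore the batch shape.
    flat = np.vander(x.ravel(), N=N, increasing=True).reshape((*x.shape, N))
    # The same matrix via broadcasting powers 0..N-1 over the last axis.
    explicit = x[..., None] ** np.arange(N)
    assert (flat == explicit).all()
    print(flat.shape)  # (2, 3, 4)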
@@ -11292,15 +11261,6 @@ op_db: List[OpInfo] = [
            dtypes=floating_and_complex_types(),
            sample_inputs_func=sample_inputs_linalg_slogdet,
            decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],),
-    OpInfo('linalg.vander',
-           aten_name='linalg_vander',
-           ref=np_vander_batched,
-           op=torch.linalg.vander,
-           dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
-           supports_forward_ad=True,
-           supports_fwgrad_bwgrad=True,
-           supports_out=False,
-           sample_inputs_func=sample_inputs_linalg_vander),
     OpInfo('linalg.vector_norm',
            op=torch.linalg.vector_norm,
            dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
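The OpInfo entry being deleted wires the operator to its NumPy reference (ref=np_vander_batched) so the suite can compare outputs over the sample inputs. A simplified sketch of that comparison loop: make_samples is a stand-in for the real sample_inputs machinery, the loop reuses np_vander_batched from the hunk above, and it assumes the pre-revert torch.linalg.vander is available:

    import numpy as np
    import torch

    def make_samples():
        # Simplified stand-in for sample_inputs_linalg_vander.
        for shape in ((), (1,), (5,), (2, 5)):
            yield torch.rand(shape, dtype=torch.float64), {}
            yield torch.rand(shape, dtype=torch.float64), {"N": 3}

    def check_against_reference(op, ref):
        for x, kwargs in make_samples():
            actual = op(x, **kwargs)
            expected = ref(x.numpy(), **kwargs)
            np.testing.assert_allclose(actual.numpy(), expected)

    # Before the revert:
    # check_against_reference(torch.linalg.vander, np_vander_batched)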