Revert D13252990: [pytorch][PR] [sparse] sparse.mm(S, D)

Differential Revision: D13252990

Original commit changeset: 8fdb14144405

fbshipit-source-id: 49b8b0759a6e647854689962ffa72a205b4a2088
Alyssa Wang 2018-11-30 18:50:56 -08:00 committed by Facebook Github Bot
parent c71edcc747
commit 1c21dc6e16
6 changed files with 7 additions and 80 deletions


@@ -1193,8 +1193,6 @@
- func: mm_out(Tensor result, Tensor self, Tensor mat2) -> Tensor

- func: _sparse_mm(Tensor sparse, Tensor dense) -> Tensor

- func: mode(Tensor self, int64_t dim=-1, bool keepdim=false) -> (Tensor, Tensor)
  variants: function, method


@@ -593,16 +593,10 @@ Tensor _sparse_addmm(
    Scalar beta,
    Scalar alpha
) {
  AT_CHECK(sparse.is_coalesced(), "_sparse_addmm doesn't support uncoalesced SparseTensor");
  return at::s_native_addmm(t, sparse, dense, beta, alpha);
}

Tensor _sparse_mm(
    const SparseTensor& sparse,
    const Tensor& dense
) {
  Tensor t = at::empty({sparse.size(0), dense.size(1)}, dense.options());
  return at::_sparse_addmm(t, sparse, dense, 0, 1);
}
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)

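The deleted `_sparse_mm` above is a thin reduction to `_sparse_addmm` with `beta = 0` and `alpha = 1`: since `beta = 0`, the freshly allocated buffer `t` never contributes to the result, which is why `at::empty` is safe there. A minimal Python sketch of the same reduction, assuming current `torch.sparse.addmm` semantics; the helper name `sparse_mm_via_addmm` is hypothetical, and `torch.zeros` stands in for the uninitialized buffer:

import torch

def sparse_mm_via_addmm(sparse, dense):
    # Hypothetical helper: mm(S, D) expressed as addmm(t, S, D, beta=0, alpha=1).
    # With beta = 0 the contents of t are ignored; only its shape matters.
    t = torch.zeros(sparse.size(0), dense.size(1), dtype=dense.dtype)
    return torch.sparse.addmm(t, sparse, dense, beta=0, alpha=1)

S = torch.eye(3).to_sparse()
D = torch.randn(3, 2)
assert torch.allclose(sparse_mm_via_addmm(S, D), torch.mm(S.to_dense(), D))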

@@ -141,5 +141,4 @@ Functions
----------------------------------
.. autofunction:: torch.sparse.addmm
.. autofunction:: torch.sparse.mm
.. autofunction:: torch.sparse.sum


@@ -818,28 +818,10 @@ class TestSparse(TestCase):
            y1.backward()
            y2.backward()
            mask = (S_dense == 0)
            self.assertTrue(S.grad.is_coalesced())
            self.assertEqual(S.grad.to_dense(), S_dense.grad.masked_fill_(mask, 0))
        test_shape(7, 8, 9, 20)

    @skipIfRocm
    def test_sparse_mm(self):
        def test_shape(d1, d2, d3, nnz):
            D = torch.randn(d2, d3, device=self.device).requires_grad_(True)
            S = self._gen_sparse(2, nnz, [d1, d2])[0]
            S_dense = S.to_dense().requires_grad_(True)
            S.requires_grad_(True)
            self.assertEqual(torch.sparse.mm(S, D), torch.mm(S_dense, D))
            y1 = torch.sparse.mm(S, D).sum()
            y2 = torch.mm(S_dense, D).sum()
            y1.backward()
            y2.backward()
            mask = (S_dense == 0)
            self.assertTrue(S.grad.is_coalesced())
            self.assertEqual(S.grad.to_dense(), S_dense.grad.masked_fill_(mask, 0))
        test_shape(7, 8, 9, 20)
        if not self.is_uncoalesced:
            test_shape(7, 8, 9, 20)

    @skipIfRocm
    def test_dsmm(self):

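For context on the deleted test's masking step: the dense reference gradient has entries everywhere, while the sparse gradient only carries values at `S`'s specified positions, so the test zeroes the dense gradient wherever `S_dense == 0` before comparing. A standalone sketch of that check, assuming a build in which `torch.sparse.mm` exists (it is what this commit removes), with an explicit tensor replacing `_gen_sparse`:

import torch

# Illustrative version of the deleted check: the sparse backward should agree
# with the dense backward once entries outside the sparsity pattern are zeroed.
S_dense = torch.tensor([[1., 0.], [0., 2.]], requires_grad=True)
S = S_dense.detach().to_sparse().requires_grad_(True)
D = torch.randn(2, 3)

torch.sparse.mm(S, D).sum().backward()
torch.mm(S_dense, D).sum().backward()

mask = (S_dense == 0)
assert torch.allclose(S.grad.to_dense(), S_dense.grad.masked_fill(mask, 0))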

@@ -511,9 +511,8 @@ Tensor mm_mat2_backward(const Tensor & grad, const Tensor & mat1, IntList sizes,
  }
}

Tensor _sparse_addmm_sparse_backward(const Tensor& grad, const Tensor& sparse_, const Tensor& dense, const Scalar& alpha) {
  AT_ASSERT(sparse_.is_sparse());
  auto sparse = sparse_.coalesce();
Tensor _sparse_addmm_sparse_backward(const Tensor& grad, const Tensor& sparse, const Tensor& dense, const Scalar& alpha) {
  AT_ASSERT(sparse.is_sparse());
  Tensor grad_sparse = maybe_multiply(grad.mm(dense.t()), alpha);
  return grad_sparse.sparse_mask(at::SparseTensorRef(sparse));
}

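The restored backward above is the chain rule for `out = beta * t + alpha * (sparse @ dense)`: the gradient with respect to the sparse factor is `alpha * grad @ dense.t()`, and `sparse_mask` restricts that dense product to the input's sparsity pattern, which is also why the pre-revert variant coalesced `sparse_` first (duplicate indices would make the mask ambiguous). A hedged Python sketch of the same formula, assuming a coalesced sparse input:

import torch

# Sketch of the gradient rule: for y = alpha * (S @ D),
# d(loss)/dS = alpha * grad_y @ D.t(), kept only at S's specified entries.
S = torch.tensor([[1., 0., 2.], [0., 3., 0.]]).to_sparse().coalesce()
D = torch.randn(3, 4)
grad_y = torch.ones(2, 4)  # upstream gradient of a .sum() loss
alpha = 1.0

grad_dense = alpha * grad_y.mm(D.t())
grad_S = grad_dense.sparse_mask(S)  # same indices as S, masked values
print(grad_S)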

@@ -3,7 +3,6 @@ import torch

__all__ = [
    'addmm',
    'mm',
    'sum',
]
@@ -11,13 +10,11 @@ __all__ = [

def addmm(mat, mat1, mat2, beta=1, alpha=1):
    r"""
    This function does the exact same thing as :func:`torch.addmm` in the forward,
    except that it supports backward for sparse matrix :attr:`mat1`. :attr:`mat1`
    needs to have `sparse_dim = 2`. Note that the gradient of :attr:`mat1` is a
    coalesced sparse tensor.
    except that it supports backward for coalesced sparse matrix `mat1`.

    Args:
        mat (Tensor): a dense matrix to be added
        mat1 (SparseTensor): a sparse matrix to be multiplied
        mat1 (Tensor): a sparse matrix to be multiplied
        mat2 (Tensor): a dense matrix to be multiplied
        beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
@@ -25,48 +22,6 @@ def addmm(mat, mat1, mat2, beta=1, alpha=1):
    return torch._sparse_addmm(mat, mat1, mat2, beta=beta, alpha=alpha)

def mm(mat1, mat2):
    r"""
    Performs a matrix multiplication of the sparse matrix :attr:`mat1`
    and the dense matrix :attr:`mat2`. Similar to :func:`torch.mm`, if :attr:`mat1` is a
    :math:`(n \times m)` tensor and :attr:`mat2` is a :math:`(m \times p)` tensor, out will be a
    :math:`(n \times p)` dense tensor. :attr:`mat1` needs to have `sparse_dim = 2`.
    This function also supports backward for both matrices. Note that the gradient of
    :attr:`mat1` is a coalesced sparse tensor.

    Args:
        mat1 (SparseTensor): the first sparse matrix to be multiplied
        mat2 (Tensor): the second dense matrix to be multiplied

    Example::

        >>> a = torch.randn(2, 3).to_sparse().requires_grad_(True)
        >>> a
        tensor(indices=tensor([[0, 0, 0, 1, 1, 1],
                               [0, 1, 2, 0, 1, 2]]),
               values=tensor([ 1.5901,  0.0183, -0.6146,  1.8061, -0.0112,  0.6302]),
               size=(2, 3), nnz=6, layout=torch.sparse_coo, requires_grad=True)

        >>> b = torch.randn(3, 2, requires_grad=True)
        >>> b
        tensor([[-0.6479,  0.7874],
                [-1.2056,  0.5641],
                [-1.1716, -0.9923]], requires_grad=True)

        >>> y = torch.sparse.mm(a, b)
        >>> y
        tensor([[-0.3323,  1.8723],
                [-1.8951,  0.7904]], grad_fn=<SparseAddmmBackward>)

        >>> y.sum().backward()
        >>> a.grad
        tensor(indices=tensor([[0, 0, 0, 1, 1, 1],
                               [0, 1, 2, 0, 1, 2]]),
               values=tensor([ 0.1394, -0.6415, -2.1639,  0.1394, -0.6415, -2.1639]),
               size=(2, 3), nnz=6, layout=torch.sparse_coo)
    """
    return torch._sparse_mm(mat1, mat2)

def sum(input, dim=None, dtype=None):
    r"""
    Returns the sum of each row of SparseTensor :attr:`input` in the given