diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index f93847314dd..e428f51135f 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -1193,8 +1193,6 @@
 - func: mm_out(Tensor result, Tensor self, Tensor mat2) -> Tensor
 
-- func: _sparse_mm(Tensor sparse, Tensor dense) -> Tensor
-
 - func: mode(Tensor self, int64_t dim=-1, bool keepdim=false) -> (Tensor, Tensor)
   variants: function, method
diff --git a/aten/src/ATen/native/sparse/SparseTensorMath.cpp b/aten/src/ATen/native/sparse/SparseTensorMath.cpp
index 1737333ccf8..414e4d488c8 100644
--- a/aten/src/ATen/native/sparse/SparseTensorMath.cpp
+++ b/aten/src/ATen/native/sparse/SparseTensorMath.cpp
@@ -593,16 +593,10 @@ Tensor _sparse_addmm(
   Scalar beta,
   Scalar alpha
 ) {
+  AT_CHECK(sparse.is_coalesced(), "_sparse_addmm doesn't support uncoalesced SparseTensor");
   return at::s_native_addmm(t, sparse, dense, beta, alpha);
 }
 
-Tensor _sparse_mm(
-  const SparseTensor& sparse,
-  const Tensor& dense
-) {
-  Tensor t = at::empty({sparse.size(0), dense.size(1)}, dense.options());
-  return at::_sparse_addmm(t, sparse, dense, 0, 1);
-}
-
 // --------------------------------------------------------------------
 // hspmm(SparseTensor mat1, Tensor mat2)
diff --git a/docs/source/sparse.rst b/docs/source/sparse.rst
index b746af7f7fd..1e6afde2620 100644
--- a/docs/source/sparse.rst
+++ b/docs/source/sparse.rst
@@ -141,5 +141,4 @@ Functions
 ----------------------------------
 
 .. autofunction:: torch.sparse.addmm
-.. autofunction:: torch.sparse.mm
 .. autofunction:: torch.sparse.sum
diff --git a/test/test_sparse.py b/test/test_sparse.py
index 5011f5cc705..60fc784896d 100644
--- a/test/test_sparse.py
+++ b/test/test_sparse.py
@@ -818,28 +818,10 @@ class TestSparse(TestCase):
             y1.backward()
             y2.backward()
             mask = (S_dense == 0)
-            self.assertTrue(S.grad.is_coalesced())
             self.assertEqual(S.grad.to_dense(), S_dense.grad.masked_fill_(mask, 0))
 
-        test_shape(7, 8, 9, 20)
-
-    @skipIfRocm
-    def test_sparse_mm(self):
-        def test_shape(d1, d2, d3, nnz):
-            D = torch.randn(d2, d3, device=self.device).requires_grad_(True)
-            S = self._gen_sparse(2, nnz, [d1, d2])[0]
-            S_dense = S.to_dense().requires_grad_(True)
-            S.requires_grad_(True)
-            self.assertEqual(torch.sparse.mm(S, D), torch.mm(S_dense, D))
-            y1 = torch.sparse.mm(S, D).sum()
-            y2 = torch.mm(S_dense, D).sum()
-            y1.backward()
-            y2.backward()
-            mask = (S_dense == 0)
-            self.assertTrue(S.grad.is_coalesced())
-            self.assertEqual(S.grad.to_dense(), S_dense.grad.masked_fill_(mask, 0))
-
-        test_shape(7, 8, 9, 20)
+        if not self.is_uncoalesced:
+            test_shape(7, 8, 9, 20)
 
     @skipIfRocm
     def test_dsmm(self):
diff --git a/tools/autograd/templates/Functions.cpp b/tools/autograd/templates/Functions.cpp
index 577bc0c0ed0..137f31d1a68 100644
--- a/tools/autograd/templates/Functions.cpp
+++ b/tools/autograd/templates/Functions.cpp
@@ -511,9 +511,8 @@ Tensor mm_mat2_backward(const Tensor & grad, const Tensor & mat1, IntList sizes,
   }
 }
 
-Tensor _sparse_addmm_sparse_backward(const Tensor& grad, const Tensor& sparse_, const Tensor& dense, const Scalar& alpha) {
-  AT_ASSERT(sparse_.is_sparse());
-  auto sparse = sparse_.coalesce();
+Tensor _sparse_addmm_sparse_backward(const Tensor& grad, const Tensor& sparse, const Tensor& dense, const Scalar& alpha) {
+  AT_ASSERT(sparse.is_sparse());
   Tensor grad_sparse = maybe_multiply(grad.mm(dense.t()), alpha);
   return grad_sparse.sparse_mask(at::SparseTensorRef(sparse));
 }
diff --git a/torch/sparse/__init__.py b/torch/sparse/__init__.py
index 9024a4daeb8..07553e9d13e 100644
--- a/torch/sparse/__init__.py
+++ b/torch/sparse/__init__.py
@@ -3,7 +3,6 @@ import torch
 
 __all__ = [
     'addmm',
-    'mm',
     'sum',
 ]
 
@@ -11,13 +10,11 @@ __all__ = [
 def addmm(mat, mat1, mat2, beta=1, alpha=1):
     r"""
     This function does exact same thing as :func:`torch.addmm` in the forward,
-    except that it supports backward for sparse matrix :attr:`mat1`. :attr:`mat1`
-    need to have `sparse_dim = 2`. Note that the gradients of :attr:`mat1` is a
-    coalesced sparse tensor.
+    except that it supports backward for coalesced sparse matrix `mat1`.
 
     Args:
         mat (Tensor): a dense matrix to be added
-        mat1 (SparseTensor): a sparse matrix to be multiplied
+        mat1 (Tensor): a sparse matrix to be multiplied
         mat2 (Tensor): a dense matrix be multiplied
         beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`)
         alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
@@ -25,48 +22,6 @@ def addmm(mat, mat1, mat2, beta=1, alpha=1):
     return torch._sparse_addmm(mat, mat1, mat2, beta=beta, alpha=alpha)
 
 
-def mm(mat1, mat2):
-    r"""
-    Performs a matrix multiplication of the sparse matrix :attr:`mat1`
-    and dense matrix :attr:`mat2`. Similar to :func:`torch.mm`, If :attr:`mat1` is a
-    :math:`(n \times m)` tensor, :attr:`mat2` is a :math:`(m \times p)` tensor, out will be a
-    :math:`(n \times p)` dense tensor. :attr:`mat1` need to have `sparse_dim = 2`.
-    This function also supports backward for both matrices. Note that the gradients of
-    :attr:`mat1` is a coalesced sparse tensor.
-
-    Args:
-        mat1 (SparseTensor): the first sparse matrix to be multiplied
-        mat2 (Tensor): the second dense matrix to be multiplied
-
-    Example::
-
-        >>> a = torch.randn(2, 3).to_sparse().requires_grad_(True)
-        >>> a
-        tensor(indices=tensor([[0, 0, 0, 1, 1, 1],
-                               [0, 1, 2, 0, 1, 2]]),
-               values=tensor([ 1.5901,  0.0183, -0.6146,  1.8061, -0.0112,  0.6302]),
-               size=(2, 3), nnz=6, layout=torch.sparse_coo, requires_grad=True)
-
-        >>> b = torch.randn(3, 2, requires_grad=True)
-        >>> b
-        tensor([[-0.6479,  0.7874],
-                [-1.2056,  0.5641],
-                [-1.1716, -0.9923]], requires_grad=True)
-
-        >>> y = torch.sparse.mm(a, b)
-        >>> y
-        tensor([[-0.3323,  1.8723],
-                [-1.8951,  0.7904]], grad_fn=<SparseAddmmBackward>)
-        >>> y.sum().backward()
-        >>> a.grad
-        tensor(indices=tensor([[0, 0, 0, 1, 1, 1],
-                               [0, 1, 2, 0, 1, 2]]),
-               values=tensor([ 0.1394, -0.6415, -2.1639,  0.1394, -0.6415, -2.1639]),
-               size=(2, 3), nnz=6, layout=torch.sparse_coo)
-    """
-    return torch._sparse_mm(mat1, mat2)
-
-
 def sum(input, dim=None, dtype=None):
     r"""
     Returns the sum of each row of SparseTensor :attr:`input` in the given
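
A minimal usage sketch of `torch.sparse.addmm` after this patch (not part of the diff itself): since `_sparse_addmm` now AT_CHECKs `sparse.is_coalesced()` instead of coalescing in the backward, callers must coalesce the sparse operand themselves before the call. The shapes and values below are illustrative only.

    import torch

    # Build a 2x3 sparse COO matrix and coalesce it explicitly; after this
    # patch, _sparse_addmm rejects uncoalesced sparse inputs via AT_CHECK.
    i = torch.tensor([[0, 0, 1], [0, 2, 1]])    # 2 x nnz indices
    v = torch.tensor([3.0, 4.0, 5.0])           # nnz values
    S = torch.sparse_coo_tensor(i, v, (2, 3)).coalesce().requires_grad_(True)

    D = torch.randn(3, 4, requires_grad=True)   # dense matrix to be multiplied
    M = torch.zeros(2, 4)                       # dense matrix to be added

    out = torch.sparse.addmm(M, S, D)           # dense result: M + S @ D
    out.sum().backward()                        # S.grad is sparse, masked to S's pattern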