[numpy] torch.log1p: promote integer inputs to float (#48002)

Summary:
Reference: https://github.com/pytorch/pytorch/issues/42515
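The user-visible effect: torch.log1p now promotes integer (and bool) inputs to the default floating point dtype, matching NumPy; integral inputs previously weren't supported. A minimal sketch of the new behavior (the output dtype follows torch.get_default_dtype()):

    import torch

    t = torch.tensor([0, 1, 2])   # int64 input
    out = torch.log1p(t)          # now computes in floating point
    print(out.dtype)              # torch.float32 (the default dtype)
    print(out)                    # tensor([0.0000, 0.6931, 1.0986])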

Pull Request resolved: https://github.com/pytorch/pytorch/pull/48002

Reviewed By: ngimel

Differential Revision: D25148911

Pulled By: mruberry

fbshipit-source-id: 902d0ddf699debd6edd1b3d55f5c73932ca45e83
Authored by kshitij12345 on 2020-11-24 21:58:56 -08:00; committed by Facebook GitHub Bot
parent 2e0a8b75d8
commit e9efd8df1b
7 changed files with 90 additions and 52 deletions

aten/src/ATen/native/UnaryOps.cpp

@@ -292,8 +292,8 @@ Tensor& log10_out(Tensor& result, const Tensor& self) { return unary_op_impl_flo
 Tensor log10(const Tensor& self) { return unary_op_impl_float(self, log10_stub); }
 Tensor& log10_(Tensor& self) { return unary_op_impl_(self, at::log10_out); }
-Tensor& log1p_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, log1p_stub); }
-Tensor log1p(const Tensor& self) { return unary_op_impl(self, at::log1p_out); }
+Tensor& log1p_out(Tensor& result, const Tensor& self) { return unary_op_impl_float_out(result, self, log1p_stub); }
+Tensor log1p(const Tensor& self) { return unary_op_impl_float(self, log1p_stub); }
 Tensor& log1p_(Tensor& self) { return unary_op_impl_(self, at::log1p_out); }
 Tensor& log2_out(Tensor& result, const Tensor& self) { return unary_op_impl_float_out(result, self, log2_stub); }
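Swapping unary_op_impl/unary_op_impl_out for their _float counterparts is what opts log1p into integer-to-float promotion; log10 and log2 in the surrounding context lines already use the same plumbing. A small sketch of the out= behavior this enables, assuming the usual unary_op_impl_float_out semantics (compute in the promoted dtype, then write into the result tensor):

    import torch

    src = torch.arange(4)         # int64 input
    res = torch.empty(4)          # floating result buffer
    torch.log1p(src, out=res)     # computed in float, stored in res
    print(res)                    # tensor([0.0000, 0.6931, 1.0986, 1.3863])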

aten/src/ATen/native/cuda/UnaryLogKernels.cu

@@ -27,7 +27,7 @@ void log10_kernel_cuda(TensorIterator& iter) {
 }
 
 void log1p_kernel_cuda(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "log1p_cuda", [&]() {
+  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log1p_cuda", [&]() {
     gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
       return ::log1p(a);
     });
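Dispatching on iter.common_dtype() rather than iter.dtype() matters once integral inputs are allowed: the iterator's input and computation dtypes can now differ, and the kernel must be instantiated for the promoted floating computation type. A quick check of the CUDA path, guarded since a GPU may not be available:

    import torch

    if torch.cuda.is_available():
        t = torch.arange(3, device="cuda")   # int64 on GPU
        print(torch.log1p(t))                # float result via the promoted common dtype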

aten/src/ATen/native/native_functions.yaml

@@ -2346,7 +2346,8 @@
   use_c10_dispatcher: full
   variants: function, method
   dispatch:
-    DefaultBackend: log1p
+    CPU, CUDA: log1p
+    SparseCPU, SparseCUDA: log1p_sparse
 
 - func: log1p_(Tensor(a!) self) -> Tensor(a!)
   use_c10_dispatcher: full

aten/src/ATen/native/sparse/SparseTensorMath.cpp

@@ -42,6 +42,12 @@ namespace {
   return csr;
 }
 
+inline SparseTensor get_result_tensor_for_unary_op(const SparseTensor& input) {
+  if (c10::isIntegralType(input.scalar_type(), /*includeBool=*/true)) {
+    return at::empty_like(input, input.options().dtype(c10::get_default_dtype()));
+  }
+  return at::empty_like(input);
+}
 }
 
 // --------------------------------------------------------------------
@@ -102,6 +108,10 @@ SparseTensor& mul_out_sparse_scalar(SparseTensor& r, const SparseTensor& t, Scal
 SparseTensor& log1p_out_sparse(SparseTensor& r, const SparseTensor& t) {
   TORCH_CHECK(r.is_sparse(), "Tensor should be sparse");
   TORCH_CHECK(t.is_sparse(), "Tensor should be sparse");
+  TORCH_CHECK(
+      !c10::isIntegralType(r.scalar_type(), /*includeBool=*/true),
+      "log1p: result type cannot be Integral, got:",
+      r.scalar_type());
 
   if (is_same_tensor(r, t)) {
     // don't have in-place log1p for uncoalesced input because coalesce() is not in-place
@@ -114,6 +124,11 @@ SparseTensor& log1p_out_sparse(SparseTensor& r, const SparseTensor& t) {
   return r;
 }
 
+SparseTensor log1p_sparse(const SparseTensor& t) {
+  auto result = get_result_tensor_for_unary_op(t);
+  return log1p_out_sparse(result, t);
+}
+
 SparseTensor& log1p_sparse_(SparseTensor& t) {
   return log1p_out_sparse(t, t);
 }
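For sparse tensors the promotion happens in get_result_tensor_for_unary_op: the functional variant allocates a default-dtype result for integral inputs, while the out/in-place variants reject integral result types outright, since they cannot change the dtype of an existing tensor. A minimal sketch of both paths:

    import torch

    i = torch.tensor([[0, 1, 2]])
    v = torch.tensor([3, 4, 5])                   # integral values
    s = torch.sparse_coo_tensor(i, v, size=(3,))

    print(torch.log1p(s).dtype)                   # torch.float32: promoted result

    try:
        s.coalesce().log1p_()                     # in-place cannot promote its own dtype
    except RuntimeError as e:
        print(e)                                  # log1p: result type cannot be Integral, got: ...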

test/test_sparse.py

@@ -1813,63 +1813,83 @@ class TestSparse(TestCase):
         self.assertRaises(RuntimeError, lambda: with_dense.narrow_copy(10, 0, 3))  # dim > sparseDim + denseDim
 
     def _test_log1p_tensor(self, sparse_tensor):
+        def is_integral(dtype):
+            return dtype in torch.testing.get_all_int_dtypes()
+
         dense_tensor = sparse_tensor.to_dense()
         expected_output = dense_tensor.log1p()
+        is_integral_dtype = is_integral(sparse_tensor.dtype)
         self.assertEqual(expected_output, sparse_tensor.log1p().to_dense())
-        self.assertEqual(expected_output, sparse_tensor.coalesce().log1p_().to_dense())
+        if is_integral_dtype:
+            with self.assertRaisesRegex(RuntimeError, "log1p: result type cannot be Integral, got:"):
+                sparse_tensor.coalesce().log1p_()
+        else:
+            self.assertEqual(expected_output, sparse_tensor.coalesce().log1p_().to_dense())
 
-        if self.is_uncoalesced:
+        if self.is_uncoalesced and not is_integral_dtype:
             # test in-place op on uncoalesced input
             with self.assertRaisesRegex(RuntimeError, "in-place on uncoalesced tensors is not supported"):
                 sparse_tensor.log1p_()
+        elif self.is_uncoalesced and is_integral_dtype:
+            with self.assertRaisesRegex(RuntimeError, "log1p: result type cannot be Integral, got"):
+                sparse_tensor.log1p_()
 
-        sparse_tensor.requires_grad_()
-        self.assertTrue(sparse_tensor.requires_grad)
+        if not is_integral_dtype:
+            sparse_tensor.requires_grad_()
+            self.assertTrue(sparse_tensor.requires_grad)
 
-        # test autograd
-        x = sparse_tensor.clone()
-        y = sparse_tensor.log1p()
-        with self.assertRaisesRegex(RuntimeError, "log1p of a sparse tensor is made to be non-differentiable"):
-            y.backward(x)
+            # test autograd
+            x = sparse_tensor.clone()
+            y = sparse_tensor.log1p()
+            with self.assertRaisesRegex(RuntimeError, "log1p of a sparse tensor is made to be non-differentiable"):
+                y.backward(x)
+        else:
+            with self.assertRaisesRegex(RuntimeError, "only Tensors of floating point dtype can require gradients"):
+                sparse_tensor.requires_grad_()
 
     def test_log1p(self):
-        if not self.is_uncoalesced:
-            input_coalesced = torch.sparse_coo_tensor(
-                indices=torch.tensor([[0], [1], [2]]).transpose(1, 0),
-                values=torch.tensor([3.0, 4.0, 5.0]),
-                size=[3, ],
-                device=self.device
-            ).coalesce()
-            self._test_log1p_tensor(input_coalesced)
+        for dtype in torch.testing.get_all_dtypes(include_bool=False, include_half=False,
+                                                  include_bfloat16=False, include_complex=False):
+            if not self.is_uncoalesced:
+                input_coalesced = torch.sparse_coo_tensor(
+                    indices=torch.tensor([[0], [1], [2]]).transpose(1, 0),
+                    values=torch.tensor([3.0, 4.0, 5.0]),
+                    size=[3, ],
+                    device=self.device,
+                    dtype=dtype
+                ).coalesce()
+                self._test_log1p_tensor(input_coalesced)
 
-            # hybrid sparse input
-            input_coalesced = torch.sparse_coo_tensor(
-                indices=torch.tensor([[1, 3], [2, 4]]),
-                values=torch.tensor([[1.0, 3.0], [5.0, 7.0]]),
-                size=[4, 5, 2],
-                device=self.device
-            ).coalesce()
-            self._test_log1p_tensor(input_coalesced)
+                # hybrid sparse input
+                input_coalesced = torch.sparse_coo_tensor(
+                    indices=torch.tensor([[1, 3], [2, 4]]),
+                    values=torch.tensor([[1.0, 3.0], [5.0, 7.0]]),
+                    size=[4, 5, 2],
+                    device=self.device,
+                    dtype=dtype
+                ).coalesce()
+                self._test_log1p_tensor(input_coalesced)
 
-        if self.is_uncoalesced:
-            # test uncoalesced input
-            input_uncoalesced = torch.sparse_coo_tensor(
-                indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
-                values=torch.tensor([2.0, 3.0, 4.0, 1.0, 1.0, 1.0]),
-                size=[3, ],
-                device=self.device
-            )
-            self._test_log1p_tensor(input_uncoalesced)
+            if self.is_uncoalesced:
+                # test uncoalesced input
+                input_uncoalesced = torch.sparse_coo_tensor(
+                    indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
+                    values=torch.tensor([2.0, 3.0, 4.0, 1.0, 1.0, 1.0]),
+                    size=[3, ],
+                    device=self.device,
+                    dtype=dtype
+                )
+                self._test_log1p_tensor(input_uncoalesced)
 
-            # test on empty sparse tensor
-            input_uncoalesced = torch.sparse_coo_tensor(
-                indices=torch.zeros([2, 0]),
-                values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
-                size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
-                device=self.device
-            )
-            self._test_log1p_tensor(input_uncoalesced)
+                # test on empty sparse tensor
+                input_uncoalesced = torch.sparse_coo_tensor(
+                    indices=torch.zeros([2, 0]),
+                    values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
+                    size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
+                    device=self.device,
+                    dtype=dtype
+                )
+                self._test_log1p_tensor(input_uncoalesced)
 
     def _test_neg_negative(self, sparse_tensor):
         dense_tensor = sparse_tensor.to_dense()

torch/csrc/jit/tensorexpr/kernel.cpp

@@ -962,8 +962,9 @@ Tensor* TensorExprKernel::computeValue(const torch::jit::Value* v) {
    } break;
 
    case aten::log1p: {
-      return computeOneOperand(
-          "aten_log1p", v, [](const ExprHandle& a) { return log1p(a); });
+      return computeOneOperand("aten_log1p", v, [](const ExprHandle& a) {
+        return log1p(promoteIntegerToFloat(a));
+      });
    } break;
 
    case aten::log2: {
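The tensor-expression (NNC) lowering mirrors eager-mode promotion by wrapping the operand in promoteIntegerToFloat before taking log1p, so a fused graph produces the same floating result as the eager kernel. A hedged sketch; whether NNC actually fuses here depends on the build and profiling configuration:

    import torch

    @torch.jit.script
    def f(x):
        return torch.log1p(x)

    t = torch.arange(3)
    print(f(t))     # matches eager torch.log1p(t), fused or not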

torch/testing/_internal/common_methods_invocations.py

@@ -346,9 +346,10 @@ op_db = [
     UnaryUfuncInfo('log1p',
                    ref=np.log1p,
                    domain=(-1, float('inf')),
-                   dtypesIfCPU=floating_types_and(torch.bfloat16),
-                   dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
-                   decorators=(precisionOverride({torch.bfloat16: 1e-1}),)),
+                   dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
+                   dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
+                   decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
+                   promotes_integers_to_float=True),
     UnaryUfuncInfo('log2',
                    ref=np.log2,
                    domain=(0, float('inf')),
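The OpInfo entry widens the tested dtypes to all integer types plus bool and sets promotes_integers_to_float=True, which tells the generic unary-ufunc tests to expect a floating result when comparing against the NumPy reference (NumPy promotes the same way). A rough self-contained version of that comparison:

    import numpy as np
    import torch

    t = torch.arange(5, dtype=torch.int32)
    torch_out = torch.log1p(t)        # float32 under the default dtype
    np_out = np.log1p(t.numpy())      # NumPy promotes int32 to float64
    np.testing.assert_allclose(torch_out.numpy(), np_out, rtol=1e-6)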