[Torch] Extract arange_out resizing logic into a helper function that can be used by other devices (#145747)
Summary: We want to use the resizing implementation for arange_out in other devices (in this case MTIA), to make sure that the computations match and to avoid off-by-one errors.

Test Plan: Existing CI tests pass.

Differential Revision: D68694489

Pull Request resolved: https://github.com/pytorch/pytorch/pull/145747
Approved by: https://github.com/mortzur
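For context, the extracted helper lets any backend derive the output length (and run the same argument checks) exactly as the CPU path does before resizing. Below is a minimal sketch of how a device backend might call it; the function name arange_mtia_out and the device kernel launch are assumptions for illustration, not code from this PR.

#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/native/RangeUtils.h>
#include <c10/core/Scalar.h>

// Hypothetical sketch: a device backend reusing the shared helper so that
// its output length always matches the CPU/CUDA computation.
at::Tensor& arange_mtia_out(const at::Scalar& start, const at::Scalar& end,
                            const at::Scalar& step, at::Tensor& result) {
  AT_DISPATCH_ALL_TYPES_AND2(at::kHalf, at::kBFloat16, result.scalar_type(), "arange_mtia", [&]() {
    // Shared validation and size computation from RangeUtils.h.
    int64_t size = at::native::compute_arange_size<scalar_t>(start, end, step);
    if (result.numel() != size) {
      result.resize_({size});
    }
    // fill_arange_on_device(result, start, end, step);  // device-specific kernel (assumed)
  });
  return result;
}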
This commit is contained in:
parent 99a0940991
commit 2f60f12f8b
2 changed files with 47 additions and 33 deletions
aten/src/ATen/native/RangeFactories.cpp
@@ -1,12 +1,12 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/RangeFactories.h>
#include <ATen/native/RangeUtils.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/Parallel.h>
#include <ATen/TensorIterator.h>
#include <c10/util/irange.h>
#include <cmath>
#include <limits>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
@@ -195,38 +195,7 @@ Tensor& range_out_no_step(const Scalar& start, const Scalar& end, Tensor& result
 
 Tensor& arange_out(const Scalar& start, const Scalar& end, const Scalar& step, Tensor& result) {
   AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, result.scalar_type(), "arange_cpu", [&]() {
-    using accscalar_t = at::acc_type<scalar_t, false>;
-    auto xstart = start.to<accscalar_t>();
-    auto xend = end.to<accscalar_t>();
-    auto xstep = step.to<accscalar_t>();
-
-    TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
-    TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
-                std::isfinite(static_cast<double>(xend)),
-                "unsupported range: ", xstart, " -> ", xend);
-    TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
-                "upper bound and larger bound inconsistent with step sign");
-
-    // we use double precision for (start - end) / step
-    // to compute size_d for consistency across devices.
-    // The problem with using accscalar_t is that accscalar_t might be float32 on gpu for a float32 scalar_t,
-    // but double on cpu for the same,
-    // and the effective output size starts differing on CPU vs GPU because of precision issues, which
-    // we dont want.
-    // the corner-case we do want to take into account is int64_t, which has higher precision than double
-    double size_d;
-    if constexpr (std::is_same_v<scalar_t, int64_t>) {
-      int64_t sgn = (xstep > 0) - (xstep < 0);
-      size_d = std::ceil((xend - xstart + xstep - sgn) / xstep);
-    } else {
-      size_d = std::ceil(static_cast<double>(end.to<double>() - start.to<double>())
-                          / step.to<double>());
-    }
-
-    TORCH_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
-                "invalid size, possible overflow?");
-
-    int64_t size = static_cast<int64_t>(size_d);
+    int64_t size = compute_arange_size<scalar_t>(start, end, step);
    int64_t numel = result.numel();
 
    if (numel != size) {
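As a quick check of the integer branch in the size formula (the block removed above, kept verbatim in the new RangeUtils.h below), with illustrative values not taken from the PR: for start = 0, end = 10, step = 3 the expression (xend - xstart + xstep - sgn) / xstep evaluates to (10 - 0 + 3 - 1) / 3 = 12 / 3 = 4, matching the four elements [0, 3, 6, 9]; for start = 10, end = 0, step = -3 it evaluates to (0 - 10 - 3 + 1) / (-3) = -12 / -3 = 4, matching [10, 7, 4, 1]. Subtracting sgn is what turns truncating integer division into a ceiling division for either sign of the step.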
aten/src/ATen/native/RangeUtils.h (new file, 45 additions)
@@ -0,0 +1,45 @@
#include <ATen/AccumulateType.h>
#include <c10/core/Scalar.h>
#include <limits>

namespace at {

namespace native {

template <typename scalar_t>
int64_t compute_arange_size(const Scalar& start, const Scalar& end, const Scalar& step) {
  using accscalar_t = at::acc_type<scalar_t, false>;
  auto xstart = start.to<accscalar_t>();
  auto xend = end.to<accscalar_t>();
  auto xstep = step.to<accscalar_t>();

  TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
  TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
              std::isfinite(static_cast<double>(xend)),
              "unsupported range: ", xstart, " -> ", xend);
  TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
              "upper bound and larger bound inconsistent with step sign");

  // we use double precision for (start - end) / step
  // to compute size_d for consistency across devices.
  // The problem with using accscalar_t is that accscalar_t might be float32 on gpu for a float32 scalar_t,
  // but double on cpu for the same,
  // and the effective output size starts differing on CPU vs GPU because of precision issues, which
  // we dont want.
  // the corner-case we do want to take into account is int64_t, which has higher precision than double
  double size_d;
  if constexpr (std::is_same_v<scalar_t, int64_t>) {
    int64_t sgn = (xstep > 0) - (xstep < 0);
    size_d = std::ceil((xend - xstart + xstep - sgn) / xstep);
  } else {
    size_d = std::ceil(static_cast<double>(end.to<double>() - start.to<double>())
                        / step.to<double>());
  }

  TORCH_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
              "invalid size, possible overflow?");

  return static_cast<int64_t>(size_d);
}

}} // namespace at::native
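To see why the helper keeps a separate int64_t branch instead of always routing through double, here is a small standalone check with illustrative values (it does not call the helper itself): once values exceed 2^53, converting the endpoint to double silently drops the trailing +1, while the integer path stays exact.

// Standalone illustration (not PyTorch code): the double path loses the
// trailing +1 above 2^53, while the integer path computes the exact size.
#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
  int64_t start = 0;
  int64_t end = (int64_t{1} << 53) + 1;  // 2^53 + 1: not representable as a double
  int64_t step = 1;

  // Double path: end rounds to 2^53, so the size comes out one element short.
  double size_double = std::ceil((static_cast<double>(end) - static_cast<double>(start)) /
                                 static_cast<double>(step));

  // Integer path (the sgn trick used for int64_t): exact ceiling division.
  int64_t sgn = (step > 0) - (step < 0);
  int64_t size_int = (end - start + step - sgn) / step;

  std::cout << static_cast<int64_t>(size_double) << "\n";  // 9007199254740992 (2^53)
  std::cout << size_int << "\n";                           // 9007199254740993 (2^53 + 1)
  return 0;
}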