From c219fa5eb94d271b9fdf21720fc49ea07de9f92d Mon Sep 17 00:00:00 2001
From: cyy
Date: Fri, 7 Jun 2024 16:13:16 +0000
Subject: [PATCH] [3/N] Remove unused functions (#128179)

Following https://github.com/pytorch/pytorch/pull/128005, this PR continues to
remove unused functions.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/128179
Approved by: https://github.com/ezyang
---
 .../ATen/functorch/PyTorchOperatorHacks.cpp  | 40 --------------------
 aten/src/ATen/native/MetaTensor.cpp          | 12 ------
 aten/src/ATen/native/TypeProperties.cpp      | 16 --------
 functorch/csrc/dim/dim.cpp                   | 10 -----
 functorch/csrc/dim/minpybind.h               |  4 --
 torch/csrc/cuda/Stream.cpp                   |  6 ---
 torch/csrc/jit/ir/ir.cpp                     |  6 ---
 7 files changed, 94 deletions(-)

diff --git a/aten/src/ATen/functorch/PyTorchOperatorHacks.cpp b/aten/src/ATen/functorch/PyTorchOperatorHacks.cpp
index ce3f20ef97e..e9e7b2a9955 100644
--- a/aten/src/ATen/functorch/PyTorchOperatorHacks.cpp
+++ b/aten/src/ATen/functorch/PyTorchOperatorHacks.cpp
@@ -31,46 +31,6 @@ Tensor index_select_backward_hack(const Tensor& grad, IntArrayRef self_sizes, in
   return at::zeros(self_sizes, grad.options()).index_add(dim, index, grad);
 }
 
-static optional<std::tuple<Tensor, int64_t>> unwrap(const Tensor& tensor) {
-  auto* wrapped = maybeGetTensorWrapper(tensor);
-  if (wrapped) {
-    if (wrapped->level().has_value()) {
-      return std::make_tuple(wrapped->value(), *wrapped->level());
-    }
-    return unwrap(wrapped->value());
-  }
-  auto* batched = maybeGetBatchedImpl(tensor);
-  if (batched) {
-    return std::make_tuple(batched->value(), batched->level());
-  }
-  return nullopt;
-}
-
-static bool can_perform_inplace(const Tensor& a, const Tensor& b) {
-  // TODO: generalize this to more transforms
-  auto a_ = unwrap(a);
-  auto b_ = unwrap(b);
-  if (!a_.has_value() && b_.has_value()) {
-    return false;
-  }
-  if (!a_.has_value() && !b_.has_value()) {
-    return true;
-  }
-  if (a_.has_value() && !b_.has_value()) {
-    return true;
-  }
-  TORCH_INTERNAL_ASSERT(a_.has_value() && b_.has_value());
-
-  // If b has any wrapper that a does not, then we cannot do a.inplace_(b)
-  if (std::get<1>(*a_) < std::get<1>(*b_)) {
-    return false;
-  }
-  if (std::get<1>(*a_) > std::get<1>(*b_)) {
-    return can_perform_inplace(std::get<0>(*a_), b);
-  }
-  return can_perform_inplace(std::get<0>(*a_), std::get<0>(*b_));
-}
-
 // TODO: linear is pretty important for performance, but I'm not sure how to work
 // around the in-place.
 Tensor linear_hack(const Tensor& input, const Tensor& weight, const std::optional<Tensor>& bias_opt) {
diff --git a/aten/src/ATen/native/MetaTensor.cpp b/aten/src/ATen/native/MetaTensor.cpp
index 518466df84c..302a3f45bdf 100644
--- a/aten/src/ATen/native/MetaTensor.cpp
+++ b/aten/src/ATen/native/MetaTensor.cpp
@@ -28,18 +28,6 @@
       size, dtype_opt, layout_opt, device_opt, pin_memory_opt, memory_format_opt);
 }
 
-// Kept only for BC with XLA
-static Tensor empty_strided_meta(
-    IntArrayRef size,
-    IntArrayRef stride,
-    std::optional<ScalarType> dtype_opt,
-    std::optional<Layout> layout_opt,
-    std::optional<Device> device_opt,
-    std::optional<bool> pin_memory_opt
-) {
-  return empty_strided_meta_symint(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype_opt, layout_opt, device_opt, pin_memory_opt);
-}
-
 Tensor empty_strided_meta_symint(
     SymIntArrayRef size,
     SymIntArrayRef stride,
diff --git a/aten/src/ATen/native/TypeProperties.cpp b/aten/src/ATen/native/TypeProperties.cpp
index 4afc7619c2e..6e694109a21 100644
--- a/aten/src/ATen/native/TypeProperties.cpp
+++ b/aten/src/ATen/native/TypeProperties.cpp
@@ -24,10 +24,6 @@
 
 namespace at::native {
 
-static bool is_cuda(const Tensor& self) {
-  return self.is_cuda();
-}
-
 bool is_distributed(const Tensor& self) {
   return false;
 }
@@ -60,18 +56,6 @@ bool is_neg(const Tensor& self) {
   return self.is_neg();
 }
 
-static bool is_sparse(const Tensor& self) {
-  return self.is_sparse();
-}
-
-static bool is_sparse_csr(const Tensor& self) {
-  return self.is_sparse_csr();
-}
-
-static bool is_quantized(const Tensor& self) {
-  return self.is_quantized();
-}
-
 // True if `self` and `from` have compatible tensor type so that `from`'s
 // TensorImpl can be copied to `self`.
 bool _has_compatible_shallow_copy_type(const Tensor& self, const Tensor& from) {
diff --git a/functorch/csrc/dim/dim.cpp b/functorch/csrc/dim/dim.cpp
index 066f9517ace..7f5564c1366 100644
--- a/functorch/csrc/dim/dim.cpp
+++ b/functorch/csrc/dim/dim.cpp
@@ -1640,16 +1640,6 @@ static PyObject* _dims(PyObject *self,
     PY_END(nullptr)
 }
 
-static int64_t dim_index(const std::vector<mpy::obj<Dim>>& dims, mpy::hdl<Dim> dim) {
-    for (int64_t i = 0, N = dims.size(); i < N; ++i) {
-        if (dims[i].ptr() == dim.ptr()) {
-            return i;
-        }
-    }
-    return -1;
-}
-
-
 struct DotPart {
     Slice<DimEntry> dims;
     size_t total_size = 1;
diff --git a/functorch/csrc/dim/minpybind.h b/functorch/csrc/dim/minpybind.h
index de82b5af95a..f1eb8726537 100644
--- a/functorch/csrc/dim/minpybind.h
+++ b/functorch/csrc/dim/minpybind.h
@@ -385,10 +385,6 @@ bool is_int(handle h) {
     return PyLong_Check(h.ptr());
 }
 
-bool is_float(handle h) {
-    return PyFloat_Check(h.ptr());
-}
-
 bool is_none(handle h) {
     return h.ptr() == Py_None;
 }
diff --git a/torch/csrc/cuda/Stream.cpp b/torch/csrc/cuda/Stream.cpp
index 65ea8a600b5..cbfa64af252 100644
--- a/torch/csrc/cuda/Stream.cpp
+++ b/torch/csrc/cuda/Stream.cpp
@@ -84,12 +84,6 @@ static void THCPStream_dealloc(THCPStream* self) {
   Py_TYPE(self)->tp_free((PyObject*)self);
 }
 
-static PyObject* THCPStream_get_device(THCPStream* self, void* unused) {
-  HANDLE_TH_ERRORS
-  return THPDevice_New(self->cuda_stream.device());
-  END_HANDLE_TH_ERRORS
-}
-
 static PyObject* THCPStream_get_cuda_stream(THCPStream* self, void* unused) {
   HANDLE_TH_ERRORS
   return PyLong_FromVoidPtr(self->cuda_stream.stream());
diff --git a/torch/csrc/jit/ir/ir.cpp b/torch/csrc/jit/ir/ir.cpp
index c39ceb7e91f..a6b0116d7fb 100644
--- a/torch/csrc/jit/ir/ir.cpp
+++ b/torch/csrc/jit/ir/ir.cpp
@@ -128,12 +128,6 @@ static std::ostream& operator<<(
   return printValueRefs(out, nodes);
 }
 
-static std::ostream& operator<<(
-    std::ostream& out,
-    const at::ArrayRef<const Value*> nodes) {
-  return printValueRefs(out, nodes);
-}
-
 struct const_value_list_with_types {
   const ArrayRef<const Value*> values;
   std::string delim;
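
Note: every function deleted above is a file-local (static or anonymous-namespace)
helper that lost its last caller, which is exactly the kind of dead code GCC and
Clang report once -Wall is on (-Wunused-function). A minimal sketch of the
mechanism, with file and function names invented for illustration and not taken
from this patch:

    // dead_code.cpp -- hypothetical example; compile with: g++ -Wall -c dead_code.cpp
    // A static function with internal linkage that is never referenced in its
    // translation unit triggers a diagnostic along the lines of:
    //   warning: 'bool is_quantized_example(int)' defined but not used
    //   [-Wunused-function]
    static bool is_quantized_example(int dtype) {
      return dtype == 13;  // never called anywhere in this file
    }

    // A function with external linkage produces no such warning, since other
    // translation units may still reference it.
    bool is_sparse_example(int layout) {
      return layout == 1;
    }

Deleting the flagged definition, as this patch does, silences the warning without
changing behavior, because nothing could have been calling the function.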