diff --git a/aten/src/ATen/MatrixRef.h b/aten/src/ATen/MatrixRef.h index 538f73fd998..3df028fec3b 100644 --- a/aten/src/ATen/MatrixRef.h +++ b/aten/src/ATen/MatrixRef.h @@ -92,6 +92,7 @@ class MatrixRef { /// The declaration here is extra complicated so that "arrayRef = {}" /// continues to select the move assignment operator. template <typename U> + // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward) std::enable_if_t<std::is_same_v<U, T>, MatrixRef>& operator=( // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward) U&& Temporary) = delete; diff --git a/aten/src/ATen/native/mkldnn/xpu/Conv.cpp b/aten/src/ATen/native/mkldnn/xpu/Conv.cpp index 9a9eb41b009..cf2d6e24d4d 100644 --- a/aten/src/ATen/native/mkldnn/xpu/Conv.cpp +++ b/aten/src/ATen/native/mkldnn/xpu/Conv.cpp @@ -16,19 +16,18 @@ using namespace dnnl; using namespace at::native; using namespace at::native::onednn; -namespace at::native { -namespace xpu { +namespace at::native::xpu { namespace impl { struct ConvParams { std::vector<int64_t> stride; std::vector<int64_t> padding; std::vector<int64_t> dilation; - bool transposed; + bool transposed{}; std::vector<int64_t> output_padding; - int groups; - bool benchmark; - bool deterministic; + int64_t groups{}; + bool benchmark{}; + bool deterministic{}; bool is_strided() const; bool is_dilated() const; @@ -58,7 +57,7 @@ std::ostream& operator<<(std::ostream& out, const ConvParams& params) { bool ConvParams::is_strided() const { bool is_strided = false; - for (int s : stride) { + for (auto s : stride) { is_strided |= (s != 1); } return is_strided; @@ -66,7 +65,7 @@ bool ConvParams::is_strided() const { bool ConvParams::is_dilated() const { bool is_dilated = false; - for (int d : dilation) { + for (auto d : dilation) { is_dilated |= (d != 1); } return is_dilated; @@ -74,7 +73,7 @@ bool ConvParams::is_dilated() const { bool ConvParams::is_padded() const { bool is_padded = false; - for (int p : padding) { + for (auto p : padding) { is_padded |= (p != 0); } return is_padded; @@ -82,7 +81,7 @@ 
bool ConvParams::is_output_padding_neg() const { bool is_non_neg = false; - for (int p : output_padding) { + for (auto p : output_padding) { is_non_neg |= (p < 0); } return is_non_neg; @@ -99,7 +98,7 @@ bool ConvParams::is_output_padding_big() const { bool ConvParams::is_padding_neg() const { bool is_non_neg = false; - for (int p : padding) { + for (auto p : padding) { is_non_neg |= (p < 0); } return is_non_neg; @@ -107,7 +106,7 @@ bool ConvParams::is_padding_neg() const { bool ConvParams::is_stride_nonpos() const { bool is_nonpos = false; - for (int s : stride) { + for (auto s : stride) { is_nonpos |= (s <= 0); } return is_nonpos; @@ -246,7 +245,7 @@ static void check_shape_forward( std::ostringstream output_ss; std::string separator = ""; - for (int i = 0, len = input_shape.size(); i < len; ++i) { + for (size_t i = 0, len = input_shape.size(); i < len; ++i) { input_ss << separator << input_shape[i]; kernel_ss << separator << kernel_shape[i]; separator = " x "; @@ -496,8 +495,8 @@ Tensor _convolution_out( // (padding_left, padding_right, // padding_top, padding_bottom, // padding_front, padding_back) - if (pad_nd.vec().size() > 0) { - for (int i = 0; i < dim; ++i) { + if (!pad_nd.vec().empty()) { + for (int64_t i = 0; i < dim; ++i) { padding_front_top_left[i] += pad_nd[2 * dim - 2 * i - 2]; // 4, 2, 0 padding_back_bottom_right[i] += pad_nd[2 * dim - 2 * i - 1]; // 5, 3, 1 } @@ -628,8 +627,8 @@ std::tuple<Tensor, Tensor, Tensor> convolution_backward_overrideable( Tensor grad_output_, input_, weight_; IntArrayRef stride_, padding_, dilation_, output_padding_; - bool transposed_; - int64_t groups_; + bool transposed_ = false; + int64_t groups_ = 0; ConvParams params; if (3 == ndim) { grad_output_ = view4d(grad_output); @@ -744,5 +743,4 @@ TORCH_LIBRARY_IMPL(aten, XPU, m) { TORCH_FN(convolution_backward_overrideable)); } -} // namespace xpu -} // namespace at::native +} // namespace at::native::xpu diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/Utils.cpp 
b/aten/src/ATen/native/mkldnn/xpu/detail/Utils.cpp index e0487c80d20..e0662a43f77 100644 --- a/aten/src/ATen/native/mkldnn/xpu/detail/Utils.cpp +++ b/aten/src/ATen/native/mkldnn/xpu/detail/Utils.cpp @@ -114,7 +114,7 @@ dnnl::memory::dims get_onednn_strides(const at::Tensor& tensor) { } dnnl::memory::desc get_onednn_md(const at::Tensor& tensor) { - Tensor t = tensor.sizes().size() == 0 ? tensor.unsqueeze(0) : tensor; + Tensor t = tensor.sizes().empty() ? tensor.unsqueeze(0) : tensor; return {get_onednn_dims(t), get_onednn_dtype(t), get_onednn_strides(t)}; } @@ -130,8 +130,8 @@ bool onednn_strides_check(const Tensor& src) { dnnl_memory_desc_t md; dnnl_memory_desc_create_with_strides(&md, ndims, dims, data_type, strides); dnnl_format_kind_t md_fmt_kind; - int md_ndims; - int md_inner_nblks; + int md_ndims = 0; + int md_inner_nblks = 0; dnnl_dims_t* md_padded_dims = nullptr; dnnl_memory_desc_query(md, dnnl_query_inner_nblks_s32, &md_inner_nblks); diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/oneDNNContext.h b/aten/src/ATen/native/mkldnn/xpu/detail/oneDNNContext.h index a096b4b9d8b..7531e39535a 100644 --- a/aten/src/ATen/native/mkldnn/xpu/detail/oneDNNContext.h +++ b/aten/src/ATen/native/mkldnn/xpu/detail/oneDNNContext.h @@ -33,6 +33,8 @@ struct TORCH_XPU_API GpuEngineManager { GpuEngineManager(GpuEngineManager const&) = delete; GpuEngineManager& operator=(GpuEngineManager const&) = delete; + GpuEngineManager(GpuEngineManager&&) = default; + GpuEngineManager& operator=(GpuEngineManager&&) = default; protected: GpuEngineManager() { @@ -44,7 +46,7 @@ struct TORCH_XPU_API GpuEngineManager { c10::xpu::get_raw_device(i), c10::xpu::get_device_context()))); } } - ~GpuEngineManager() {} + ~GpuEngineManager() = default; private: std::vector<std::shared_ptr<dnnl::engine>> engine_pool; @@ -71,6 +73,8 @@ struct TORCH_XPU_API GpuStreamManager { GpuStreamManager(GpuStreamManager const&) = delete; GpuStreamManager& operator=(GpuStreamManager const&) = delete; + GpuStreamManager(GpuStreamManager&&) = default; 
+ GpuStreamManager& operator=(GpuStreamManager&&) = default; protected: GpuStreamManager() { @@ -78,7 +82,7 @@ struct TORCH_XPU_API GpuStreamManager { TORCH_INTERNAL_ASSERT(device_count > 0); stream_pool.resize(device_count); } - ~GpuStreamManager() {} + ~GpuStreamManager() = default; private: using stream_hash_map = diff --git a/torch/csrc/Generator.cpp b/torch/csrc/Generator.cpp index a00985c8aa1..6eb9a7644d7 100644 --- a/torch/csrc/Generator.cpp +++ b/torch/csrc/Generator.cpp @@ -20,7 +20,7 @@ using namespace torch; PyObject* THPGeneratorClass = nullptr; -PyObject* THPGenerator_initDefaultGenerator(at::Generator cdata) { +PyObject* THPGenerator_initDefaultGenerator(const at::Generator& cdata) { auto type = (PyTypeObject*)THPGeneratorClass; auto self = THPObjectPtr{type->tp_alloc(type, 0)}; if (!self) diff --git a/torch/csrc/Generator.h b/torch/csrc/Generator.h index ba50d4a2d96..b5f72cb47b7 100644 --- a/torch/csrc/Generator.h +++ b/torch/csrc/Generator.h @@ -14,7 +14,7 @@ struct THPGenerator { // is borrowed. The caller should ensure that the at::Generator object lifetime // last at least as long as the Python wrapper. TORCH_PYTHON_API PyObject* THPGenerator_initDefaultGenerator( - at::Generator cdata); + const at::Generator& cdata); #define THPGenerator_Check(obj) PyObject_IsInstance(obj, THPGeneratorClass)