Enable clang-tidy on torch/csrc/api (#138437)

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/138437
Approved by: https://github.com/r-barnes
This commit is contained in:
cyy 2024-10-21 14:22:36 +00:00 committed by PyTorch MergeBot
parent 8aacbee8e0
commit 7ec21a6f0f
11 changed files with 51 additions and 43 deletions

View file

@@ -241,7 +241,9 @@ exclude_patterns = [
'c10/util/*inl.h',
'c10/test/**/*.h',
'third_party/**/*',
'torch/csrc/api/**',
'torch/csrc/api/include/torch/nn/modules/common.h',
'torch/csrc/api/include/torch/linalg.h',
'torch/csrc/api/include/torch/nn/pimpl-inl.h',
'torch/csrc/autograd/generated/**',
'torch/csrc/distributed/**/*',
'torch/csrc/dynamo/eval_frame.h',

View file

@@ -8,6 +8,6 @@
// Some "exports".
namespace torch::data {
using datasets::BatchDataset;
using datasets::Dataset;
using datasets::BatchDataset; // NOLINT
using datasets::Dataset; // NOLINT
} // namespace torch::data

View file

@@ -1055,8 +1055,8 @@ inline Tensor lp_pool2d(
ExpandingArray<2> kernel_size,
ExpandingArray<2> stride,
bool ceil_mode) {
int kw = (*kernel_size)[0];
int kh = (*kernel_size)[1];
auto kw = (*kernel_size)[0];
auto kh = (*kernel_size)[1];
Tensor out = detail::avg_pool2d(
input.pow(norm_type),
kernel_size,
@@ -1104,9 +1104,9 @@ inline Tensor lp_pool3d(
ExpandingArray<3> kernel_size,
ExpandingArray<3> stride,
bool ceil_mode) {
int kd = (*kernel_size)[0];
int kw = (*kernel_size)[1];
int kh = (*kernel_size)[2];
auto kd = (*kernel_size)[0];
auto kw = (*kernel_size)[1];
auto kh = (*kernel_size)[2];
Tensor out = detail::avg_pool3d(
input.pow(norm_type),
kernel_size,

View file

@@ -70,28 +70,30 @@
/// seq->forward(1); // This correctly populates the default arguments for
/// `MImpl::forward`
/// ```
#define FORWARD_HAS_DEFAULT_ARGS(...) \
template <typename ModuleType, typename... ArgumentTypes> \
friend struct torch::nn::AnyModuleHolder; \
bool _forward_has_default_args() override { \
return true; \
} \
unsigned int _forward_num_required_args() override { \
std::pair<unsigned int, torch::nn::AnyValue> args_info[] = {__VA_ARGS__}; \
return args_info[0].first; \
} \
std::vector<torch::nn::AnyValue> _forward_populate_default_args( \
std::vector<torch::nn::AnyValue>&& arguments) override { \
std::pair<unsigned int, torch::nn::AnyValue> args_info[] = {__VA_ARGS__}; \
unsigned int num_all_args = std::rbegin(args_info)->first + 1; \
TORCH_INTERNAL_ASSERT( \
arguments.size() >= _forward_num_required_args() && \
arguments.size() <= num_all_args); \
std::vector<torch::nn::AnyValue> ret = std::move(arguments); \
ret.reserve(num_all_args); \
for (auto& arg_info : args_info) { \
if (arg_info.first > ret.size() - 1) \
ret.emplace_back(std::move(arg_info.second)); \
} \
return ret; \
#define FORWARD_HAS_DEFAULT_ARGS(...) \
template <typename ModuleType, typename... ArgumentTypes> \
friend struct torch::nn::AnyModuleHolder; \
bool _forward_has_default_args() override { \
return true; \
} \
unsigned int _forward_num_required_args() override { \
std::vector<std::pair<unsigned int, torch::nn::AnyValue>> args_info{ \
__VA_ARGS__}; \
return std::begin(args_info)->first; \
} \
std::vector<torch::nn::AnyValue> _forward_populate_default_args( \
std::vector<torch::nn::AnyValue>&& arguments) override { \
std::vector<std::pair<unsigned int, torch::nn::AnyValue>> args_info{ \
__VA_ARGS__}; \
unsigned int num_all_args = std::rbegin(args_info)->first + 1; \
TORCH_INTERNAL_ASSERT( \
arguments.size() >= _forward_num_required_args() && \
arguments.size() <= num_all_args); \
std::vector<torch::nn::AnyValue> ret = std::move(arguments); \
ret.reserve(num_all_args); \
for (auto& arg_info : args_info) { \
if (arg_info.first > ret.size() - 1) \
ret.emplace_back(std::move(arg_info.second)); \
} \
return ret; \
}

View file

@@ -1,5 +1,6 @@
#pragma once
#include <torch/nn/functional/instancenorm.h>
#include <torch/nn/modules/batchnorm.h>
#include <torch/nn/options/instancenorm.h>
@@ -7,6 +8,7 @@ namespace torch::nn {
/// Base class for all (dimension-specialized) instance norm modules
template <size_t D, typename Derived>
// NOLINTNEXTLINE(bugprone-crtp-constructor-accessibility)
class InstanceNormImpl
: public torch::nn::NormImplBase<D, Derived, InstanceNormOptions> {
private:

View file

@@ -10,7 +10,6 @@
#include <torch/types.h>
#include <ostream>
#include <utility>
namespace torch::nn {

View file

@@ -42,7 +42,7 @@ class ModuleHolder : torch::detail::ModuleHolderIndicator {
/// actually used.
ModuleHolder() : impl_(default_construct()) {
static_assert(
std::is_default_constructible<Contained>::value,
std::is_default_constructible_v<Contained>,
"You are trying to default construct a module which has "
"no default constructor. Use = nullptr to give it the empty state "
"(e.g. `Linear linear = nullptr;` instead of `Linear linear;`).");

View file

@@ -38,8 +38,8 @@ namespace torch {
// the `func()` function defined in `at::` namespace is always hidden.
using namespace at; // NOLINT
using std::nullopt;
using std::optional;
using std::nullopt; // NOLINT
using std::optional; // NOLINT
using Dtype = at::ScalarType;

View file

@@ -6,6 +6,7 @@
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/autograd/profiler.h>
// NOLINTBEGIN(misc-unused-using-decls)
namespace torch {
/// A RAII, thread-local guard that disabled gradient calculation.
@@ -113,3 +114,4 @@ using at::RecordFunctionGuard;
using at::removeCallback;
} // namespace torch
// NOLINTEND(misc-unused-using-decls)

View file

@@ -54,11 +54,11 @@ double calculate_kaiming_std(
double calculate_gain(NonlinearityType nonlinearity, double param) {
if (std::holds_alternative<enumtype::kTanh>(nonlinearity)) {
return 5.0 / 3.0; // NOLINT
return 5.0 / 3.0;
} else if (std::holds_alternative<enumtype::kReLU>(nonlinearity)) {
return std::sqrt(2.0); // NOLINT
return std::sqrt(2.0);
} else if (std::holds_alternative<enumtype::kLeakyReLU>(nonlinearity)) {
return std::sqrt(2.0 / (1 + pow(param, 2))); // NOLINT
return std::sqrt(2.0 / (1 + pow(param, 2)));
}
return 1.0;
@@ -207,16 +207,16 @@ Tensor xavier_normal_(Tensor tensor, double gain) {
NoGradGuard guard;
Fan fan(tensor);
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
const auto std = gain * std::sqrt(2.0 / (fan.in + fan.out));
const auto std =
gain * std::sqrt(2.0 / static_cast<double>(fan.in + fan.out));
return tensor.normal_(0, std);
}
Tensor xavier_uniform_(Tensor tensor, double gain) {
NoGradGuard guard;
Fan fan(tensor);
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
const auto std = gain * std::sqrt(2.0 / (fan.in + fan.out));
const auto std =
gain * std::sqrt(2.0 / static_cast<double>(fan.in + fan.out));
// Calculate uniform bounds from standard deviation with
const auto a = std::sqrt(3.0) * std;
return tensor.uniform_(-a, a);

View file

@@ -34,6 +34,7 @@ Module::Module()
: parameters_("Parameter"), buffers_("Buffer"), children_("Submodule") {}
Module::Module(std::string name) : Module() {
// NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
name_ = std::move(name);
}