Remove NOLINTNEXTLINE (#146238)

Removes obsolete `NOLINTNEXTLINE` clang-tidy/clang-analyzer suppressions that are no longer triggered, and drops two dead-store locals in the Caffe2 conversion tests in favor of discarding the `BlobSetTensor` return value directly. (No issue number: the PR-template placeholder `#ISSUE_NUMBER` was left unfilled.)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/146238
Approved by: https://github.com/albanD
This commit is contained in:
cyy 2025-02-04 02:45:31 +00:00 committed by PyTorch MergeBot
parent 5451c9b7c9
commit f397c72697
9 changed files with 8 additions and 23 deletions

View file

@ -1615,9 +1615,7 @@ Tensor& narrow_copy_dense_cpu_out(
const int64_t num_blocks = c10::size_to_dim_(dim, self_sizes);
const auto itemsize = self_contig->dtype().itemsize();
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
size_t src_nbytes = itemsize * self_contig->numel();
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
size_t dst_nbytes = itemsize * output.numel();
size_t src_block_size = unit * self_sizes[dim];

View file

@ -512,8 +512,8 @@ void LayerNormBackwardKernelImplInternal(
const T2* gamma_data =
gamma.defined() ? gamma.template const_data_ptr<T2>() : nullptr;
T* dX_data = dX->defined() ? dX->template data_ptr<T>() : nullptr;
T2* dgamma_data = dgamma->defined() ? dgamma->template data_ptr<T2>() : nullptr;
T2* dbeta_data = dbeta->defined() ? dbeta->template data_ptr<T2>() : nullptr;
T2* const dgamma_data = dgamma->defined() ? dgamma->template data_ptr<T2>() : nullptr;
T2* const dbeta_data = dbeta->defined() ? dbeta->template data_ptr<T2>() : nullptr;
const opmath_t scale = opmath_t(1) / static_cast<opmath_t>(N);
const bool gamma_null = gamma_data == nullptr;
const bool dX_null = dX_data == nullptr;
@ -566,11 +566,9 @@ void LayerNormBackwardKernelImplInternal(
dbeta_v += buffer_data[num_threads * N + i * N + j];
}
if (!dgamma_null) {
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
dgamma_data[j] = dgamma_v;
}
if (!dbeta_null) {
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
dbeta_data[j] = dbeta_v;
}
}

View file

@ -161,7 +161,6 @@ Tensor _nested_select_backward_symint(
const Tensor& grad,
const Tensor& nested_self,
int64_t dim,
// NOLINTNEXTLINE(performance-unnecessary-value-param)
c10::SymInt index) {
auto nt_self = get_nested_tensor_impl(nested_self);
const Tensor& self_buffer = nt_self->get_buffer();

View file

@ -102,7 +102,6 @@ Tensor quantized_cat_impl(
const Tensor y = at::cat(xs, dim);
Tensor qy;
AT_DISPATCH_QINT_TYPES(x_dtype, "qcat", [&]() {
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
qy = at::quantize_per_tensor(y, scale, zero_point, SCALAR_TYPE);
if (ReLUFused) {
auto iter = TensorIterator::unary_op(qy, qy);

View file

@ -364,9 +364,8 @@ struct TORCH_API PackedEmbeddingBagWeight : public EmbeddingPackedParamsBase {
bit_rate_(bit_rate),
q_scheme(q_scheme),
version_(version) {
// NOLINTNEXTLINE(clang-analyzer-cplusplus.Move)
if (!packed_w.is_contiguous()) {
packed_w = packed_w.contiguous();
if (!this->packed_w.is_contiguous()) {
this->packed_w = this->packed_w.contiguous();
}
}

View file

@ -115,7 +115,6 @@ Tensor sigmoid_quantized_cpu(const Tensor& qx) {
// optimizations
double output_scale = 0.00390625; // 1.0 / 2^8
int64_t output_zero_point = 0;
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
if (SCALAR_TYPE == at::kQInt32) {
output_scale = 2.3283064365386963e-10; // 1.0 / 2^32
} else if (SCALAR_TYPE == at::kQInt8) {

View file

@ -147,10 +147,8 @@ TEST(PytorchToCaffe2, Op) {
auto at_tensor_b = at::ones({5, 5}, at::dtype(at::kFloat));
auto at_tensor_c = at::ones({5, 5}, at::dtype(at::kFloat));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
[[maybe_unused]] auto* c2_tensor_a = BlobSetTensor(workspace.CreateBlob("a"), caffe2::Tensor(at_tensor_a));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
[[maybe_unused]] auto* c2_tensor_b = BlobSetTensor(workspace.CreateBlob("b"), caffe2::Tensor(at_tensor_b));
BlobSetTensor(workspace.CreateBlob("a"), caffe2::Tensor(at_tensor_a));
BlobSetTensor(workspace.CreateBlob("b"), caffe2::Tensor(at_tensor_b));
// Test Alias
{
@ -186,10 +184,8 @@ TEST(PytorchToCaffe2, SharedStorageRead) {
auto at_tensor_a = at::ones({5, 5}, at::dtype(at::kFloat));
auto at_tensor_b = at_tensor_a.view({5, 5});
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
[[maybe_unused]] auto* c2_tensor_a = BlobSetTensor(workspace.CreateBlob("a"), caffe2::Tensor(at_tensor_a));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
[[maybe_unused]] auto* c2_tensor_b = BlobSetTensor(workspace.CreateBlob("b"), caffe2::Tensor(at_tensor_b));
BlobSetTensor(workspace.CreateBlob("a"), caffe2::Tensor(at_tensor_a));
BlobSetTensor(workspace.CreateBlob("b"), caffe2::Tensor(at_tensor_b));
{
auto op = net.add_op();

View file

@ -419,7 +419,6 @@ static std::future<bool> launchAsyncGilCheck() {
try {
auto& gil_checker = get_gil_checker();
// NOLINTNEXTLINE(clang-analyzer-core*)
promise.set_value((*gil_checker)());
} catch (...) {
promise.set_exception(std::current_exception());

View file

@ -92,7 +92,6 @@ SymbolizedTracebacks symbolize(
if (e->python_) {
if (cur_python != e->python_ && !cur_py_frames.empty()) {
if (cur_python) {
// NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
cur_python->appendSymbolized(cur_py_frames, r);
}
cur_py_frames.clear();
@ -108,7 +107,6 @@ SymbolizedTracebacks symbolize(
}
if (!cur_py_frames.empty()) {
if (cur_python) {
// NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
cur_python->appendSymbolized(cur_py_frames, r);
}
cur_py_frames.clear();